core/sync/atomic.rs
//! Atomic types
//!
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent
//! types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
//! [`AtomicI8`], [`AtomicU16`], etc.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! Atomic variables are safe to share between threads (they implement [`Sync`])
//! but they do not themselves provide the mechanism for sharing and follow the
//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
//! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
//! atomically-reference-counted shared pointer).
//!
//! [arc]: ../../../std/sync/struct.Arc.html
//!
//! Atomic types may be stored in static variables, initialized using
//! the constant initializers like [`AtomicBool::new`]. Atomic statics
//! are often used for lazy global initialization.
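//!
//! For example, a one-shot initialization flag (a minimal sketch; `init_logger`
//! is a hypothetical stand-in for whatever work must run exactly once):
//!
//! ```
//! use std::sync::atomic::{AtomicBool, Ordering};
//!
//! static STARTED: AtomicBool = AtomicBool::new(false);
//!
//! fn init_logger() { /* one-time setup elided */ }
//!
//! fn ensure_started() {
//!     // `swap` is a single atomic read-modify-write, so exactly one caller
//!     // observes `false` and runs the initializer. Relaxed suffices here
//!     // because only the flag itself is shared; publishing other data to
//!     // concurrent readers would need Release/Acquire ordering.
//!     if !STARTED.swap(true, Ordering::Relaxed) {
//!         init_logger();
//!     }
//! }
//!
//! ensure_started();
//! ensure_started(); // the initializer has already run; this is a no-op
//! ```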
//!
//! ## Memory model for atomic accesses
//!
//! Rust atomics currently follow the same rules as [C++20 atomics][cpp], specifically the rules
//! from the [`intro.races`][cpp-intro.races] section, without the "consume" memory ordering. Since
//! C++ uses an object-based memory model whereas Rust is access-based, a bit of translation work
//! has to be done to apply the C++ rules to Rust: whenever C++ talks about "the value of an
//! object", we understand that to mean the resulting bytes obtained when doing a read. When the C++
//! standard talks about "the value of an atomic object", this refers to the result of doing an
//! atomic load (via the operations provided in this module). A "modification of an atomic object"
//! refers to an atomic store.
//!
//! The end result is *almost* equivalent to saying that creating a *shared reference* to one of the
//! Rust atomic types corresponds to creating an `atomic_ref` in C++, with the `atomic_ref` being
//! destroyed when the lifetime of the shared reference ends. The main difference is that Rust
//! permits concurrent atomic and non-atomic reads to the same memory; those cause no issue in the
//! C++ memory model and are forbidden in C++ only because memory is partitioned into "atomic
//! objects" and "non-atomic objects" (with `atomic_ref` temporarily converting a non-atomic object
//! into an atomic object).
//!
//! The most important aspect of this model is that *data races* are undefined behavior. A data race
//! is defined as conflicting non-synchronized accesses where at least one of the accesses is
//! non-atomic. Here, accesses are *conflicting* if they affect overlapping regions of memory and at
//! least one of them is a write. (A `compare_exchange` or `compare_exchange_weak` that does not
//! succeed is not considered a write.) They are *non-synchronized* if neither of them
//! *happens-before* the other, according to the happens-before order of the memory model.
//!
//! The other possible cause of undefined behavior in the memory model is mixed-size accesses: Rust
//! inherits the C++ limitation that non-synchronized conflicting atomic accesses may not partially
//! overlap. In other words, every pair of non-synchronized atomic accesses must be either disjoint,
//! access the exact same memory (including using the same access size), or both be reads.
//!
//! Each atomic access takes an [`Ordering`] which defines how the operation interacts with the
//! happens-before order. These orderings behave the same as the corresponding [C++20 atomic
//! orderings][cpp_memory_order]. For more information, see the [nomicon].
//!
//! [cpp]: https://en.cppreference.com/w/cpp/atomic
//! [cpp-intro.races]: https://timsong-cpp.github.io/cppwp/n4868/intro.multithread#intro.races
//! [cpp_memory_order]: https://en.cppreference.com/w/cpp/atomic/memory_order
//! [nomicon]: ../../../nomicon/atomics.html
//!
//! ```rust,no_run undefined_behavior
//! use std::sync::atomic::{AtomicU16, AtomicU8, Ordering};
//! use std::mem::transmute;
//! use std::thread;
//!
//! let atomic = AtomicU16::new(0);
//!
//! thread::scope(|s| {
//!     // This is UB: conflicting non-synchronized accesses, at least one of which is non-atomic.
//!     s.spawn(|| atomic.store(1, Ordering::Relaxed)); // atomic store
//!     s.spawn(|| unsafe { atomic.as_ptr().write(2) }); // non-atomic write
//! });
//!
//! thread::scope(|s| {
//!     // This is fine: the accesses do not conflict (as none of them performs any modification).
//!     // In C++ this would be disallowed since creating an `atomic_ref` precludes
//!     // further non-atomic accesses, but Rust does not have that limitation.
//!     s.spawn(|| atomic.load(Ordering::Relaxed)); // atomic load
//!     s.spawn(|| unsafe { atomic.as_ptr().read() }); // non-atomic read
//! });
//!
//! thread::scope(|s| {
//!     // This is fine: `join` synchronizes the code in a way such that the atomic
//!     // store happens-before the non-atomic write.
//!     let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed)); // atomic store
//!     handle.join().expect("thread won't panic"); // synchronize
//!     s.spawn(|| unsafe { atomic.as_ptr().write(2) }); // non-atomic write
//! });
//!
//! thread::scope(|s| {
//!     // This is UB: non-synchronized conflicting differently-sized atomic accesses.
//!     s.spawn(|| atomic.store(1, Ordering::Relaxed));
//!     s.spawn(|| unsafe {
//!         let differently_sized = transmute::<&AtomicU16, &AtomicU8>(&atomic);
//!         differently_sized.store(2, Ordering::Relaxed);
//!     });
//! });
//!
//! thread::scope(|s| {
//!     // This is fine: `join` synchronizes the code in a way such that
//!     // the 1-byte store happens-before the 2-byte store.
//!     let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed));
//!     handle.join().expect("thread won't panic");
//!     s.spawn(|| unsafe {
//!         let differently_sized = transmute::<&AtomicU16, &AtomicU8>(&atomic);
//!         differently_sized.store(2, Ordering::Relaxed);
//!     });
//! });
//! ```
//!
//! # Portability
//!
//! All atomic types in this module are guaranteed to be [lock-free] if they're
//! available. This means they don't internally acquire a global mutex. Atomic
//! types and operations are not guaranteed to be wait-free. This means that
//! operations like `fetch_or` may be implemented with a compare-and-swap loop.
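//!
//! As a rough illustration, on such a platform a call like `x.fetch_or(mask, order)` behaves
//! as if it expanded to a loop along these lines (a sketch, not the actual implementation):
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! fn emulated_fetch_or(x: &AtomicUsize, mask: usize, order: Ordering) -> usize {
//!     let mut old = x.load(Ordering::Relaxed);
//!     loop {
//!         // Retry until no other thread modified `x` between the load and the CAS.
//!         match x.compare_exchange_weak(old, old | mask, order, Ordering::Relaxed) {
//!             Ok(prev) => return prev,
//!             Err(actual) => old = actual,
//!         }
//!     }
//! }
//!
//! let x = AtomicUsize::new(0b01);
//! assert_eq!(emulated_fetch_or(&x, 0b10, Ordering::Relaxed), 0b01);
//! assert_eq!(x.load(Ordering::Relaxed), 0b11);
//! ```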
//!
//! Atomic operations may be implemented at the instruction layer with
//! larger-size atomics. For example, some platforms use 4-byte atomic
//! instructions to implement `AtomicI8`. Note that this emulation does not
//! affect the correctness of code; it's just something to be aware of.
//!
//! The atomic types in this module might not be available on all platforms. The
//! atomic types here are all widely available, however, and can generally be
//! relied upon to exist. Some notable exceptions are:
//!
//! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
//!   `AtomicI64` types.
//! * ARM platforms like `armv5te` that aren't for Linux only provide `load`
//!   and `store` operations, and do not support Compare and Swap (CAS)
//!   operations, such as `swap`, `fetch_add`, etc. Additionally on Linux,
//!   these CAS operations are implemented via [operating system support], which
//!   may come with a performance penalty.
//! * ARM targets with `thumbv6m` only provide `load` and `store` operations,
//!   and do not support Compare and Swap (CAS) operations, such as `swap`,
//!   `fetch_add`, etc.
//!
//! [operating system support]: https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
//!
//! Note that future platforms may be added that also do not have support for
//! some atomic operations. Maximally portable code will want to be careful
//! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
//! generally the most portable, but even then they're not available everywhere.
//! For reference, the `std` library requires `AtomicBool`s and pointer-sized atomics, although
//! `core` does not.
//!
//! The `#[cfg(target_has_atomic)]` attribute can be used to conditionally
//! compile based on the target's supported bit widths. It is a key-value
//! option set for each supported size, with values "8", "16", "32", "64",
//! "128", and "ptr" for pointer-sized atomics.
//!
//! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
//!
//! # Atomic accesses to read-only memory
//!
//! In general, *all* atomic accesses on read-only memory are undefined behavior. For instance, attempting
//! to do a `compare_exchange` that will definitely fail (making it conceptually a read-only
//! operation) can still cause a segmentation fault if the underlying memory page is mapped read-only. Since
//! atomic `load`s might be implemented using compare-exchange operations, even a `load` can fault
//! on read-only memory.
//!
//! For the purpose of this section, "read-only memory" is defined as memory that is read-only in
//! the underlying target, i.e., the pages are mapped with a read-only flag and any attempt to write
//! will cause a page fault. In particular, an `&u128` reference that points to memory that is
//! read-write mapped is *not* considered to point to "read-only memory". In Rust, almost all memory
//! is read-write; the only exceptions are memory created by `const` items or `static` items without
//! interior mutability, and memory that was specifically marked as read-only by the operating
//! system via platform-specific APIs.
//!
//! As an exception from the general rule stated above, "sufficiently small" atomic loads with
//! `Ordering::Relaxed` are implemented in a way that works on read-only memory, and are hence not
//! undefined behavior. The exact size limit for what makes a load "sufficiently small" varies
//! depending on the target:
//!
//! | `target_arch` | Size limit |
//! |---------------|------------|
//! | `x86`, `arm`, `loongarch32`, `mips`, `mips32r6`, `powerpc`, `riscv32`, `sparc`, `hexagon` | 4 bytes |
//! | `x86_64`, `aarch64`, `loongarch64`, `mips64`, `mips64r6`, `powerpc64`, `riscv64`, `sparc64`, `s390x` | 8 bytes |
//!
//! Atomic loads that are larger than this limit, atomic loads with an ordering other
//! than `Relaxed`, and *all* atomic loads on targets not listed in the table might still
//! work on read-only memory under certain conditions, but that is not a stable guarantee
//! and should not be relied upon.
//!
//! If you need to do an acquire load on read-only memory, you can do a relaxed load followed by an
//! acquire fence instead.
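//!
//! For example (a sketch of that pattern):
//!
//! ```
//! use std::sync::atomic::{fence, AtomicU32, Ordering};
//!
//! fn acquire_load(x: &AtomicU32) -> u32 {
//!     // The relaxed load never writes, so it is permitted on (sufficiently
//!     // small) read-only memory; the fence then upgrades it to acquire.
//!     let v = x.load(Ordering::Relaxed);
//!     fence(Ordering::Acquire);
//!     v
//! }
//! # assert_eq!(acquire_load(&AtomicU32::new(7)), 7);
//! ```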
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```ignore-wasm
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::{hint, thread};
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUsize::new(1));
//!
//!     let spinlock_clone = Arc::clone(&spinlock);
//!
//!     let thread = thread::spawn(move || {
//!         spinlock_clone.store(0, Ordering::Release);
//!     });
//!
//!     // Wait for the other thread to release the lock
//!     while spinlock.load(Ordering::Acquire) != 0 {
//!         hint::spin_loop();
//!     }
//!
//!     if let Err(panic) = thread.join() {
//!         println!("Thread had an error: {panic:?}");
//!     }
//! }
//! ```
//!
//! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
//!
//! // Note that Relaxed ordering doesn't synchronize anything
//! // except the global thread counter itself.
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::Relaxed);
//! // Note that this number may not be accurate at the moment of printing
//! // because some other thread may have changed the static value already.
//! println!("live threads: {}", old_thread_count + 1);
//! ```

#![stable(feature = "rust1", since = "1.0.0")]
#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
#![rustc_diagnostic_item = "atomic_mod"]
// Clippy complains about the pattern of "safe function calling unsafe function taking pointers".
// This happens with AtomicPtr intrinsics but is fine, as the pointers clippy is concerned about
// are just normal values that get loaded/stored, but not dereferenced.
#![allow(clippy::not_unsafe_ptr_arg_deref)]

use self::Ordering::*;
use crate::cell::UnsafeCell;
#[cfg(not(feature = "ferrocene_certified"))]
use crate::hint::spin_loop;
#[cfg(feature = "ferrocene_certified")]
use crate::intrinsics;
use crate::intrinsics::AtomicOrdering as AO;
#[cfg(not(feature = "ferrocene_certified"))]
use crate::{fmt, intrinsics};

trait Sealed {}

/// A marker trait for primitive types which can be modified atomically.
///
/// This is an implementation detail for <code>[Atomic]\<T></code> which may disappear or be replaced at any time.
///
/// # Safety
///
/// Types implementing this trait must be primitives that can be modified atomically.
///
/// The associated `Self::AtomicInner` type must have the same size and bit validity as `Self`,
/// but may have a higher alignment requirement, so the following `transmute`s are sound:
///
/// - `&mut Self::AtomicInner` as `&mut Self`
/// - `Self` as `Self::AtomicInner` or the reverse
#[unstable(
    feature = "atomic_internals",
    reason = "implementation detail which may disappear or be replaced at any time",
    issue = "none"
)]
#[expect(private_bounds)]
pub unsafe trait AtomicPrimitive: Sized + Copy + Sealed {
    /// Temporary implementation detail.
    type AtomicInner: Sized;
}

macro impl_atomic_primitive(
    $Atom:ident $(<$T:ident>)? ($Primitive:ty),
    size($size:literal),
    align($align:literal) $(,)?
) {
    impl $(<$T>)? Sealed for $Primitive {}

    #[unstable(
        feature = "atomic_internals",
        reason = "implementation detail which may disappear or be replaced at any time",
        issue = "none"
    )]
    #[cfg(target_has_atomic_load_store = $size)]
    unsafe impl $(<$T>)? AtomicPrimitive for $Primitive {
        type AtomicInner = $Atom $(<$T>)?;
    }
}
298
299#[cfg(not(feature = "ferrocene_certified"))]
300impl_atomic_primitive!(AtomicBool(bool), size("8"), align(1));
301#[cfg(not(feature = "ferrocene_certified"))]
302impl_atomic_primitive!(AtomicI8(i8), size("8"), align(1));
303#[cfg(not(feature = "ferrocene_certified"))]
304impl_atomic_primitive!(AtomicU8(u8), size("8"), align(1));
305#[cfg(not(feature = "ferrocene_certified"))]
306impl_atomic_primitive!(AtomicI16(i16), size("16"), align(2));
307#[cfg(not(feature = "ferrocene_certified"))]
308impl_atomic_primitive!(AtomicU16(u16), size("16"), align(2));
309#[cfg(not(feature = "ferrocene_certified"))]
310impl_atomic_primitive!(AtomicI32(i32), size("32"), align(4));
311impl_atomic_primitive!(AtomicU32(u32), size("32"), align(4));
312#[cfg(not(feature = "ferrocene_certified"))]
313impl_atomic_primitive!(AtomicI64(i64), size("64"), align(8));
314#[cfg(not(feature = "ferrocene_certified"))]
315impl_atomic_primitive!(AtomicU64(u64), size("64"), align(8));
316#[cfg(not(feature = "ferrocene_certified"))]
317impl_atomic_primitive!(AtomicI128(i128), size("128"), align(16));
318#[cfg(not(feature = "ferrocene_certified"))]
319impl_atomic_primitive!(AtomicU128(u128), size("128"), align(16));
320
321#[cfg(target_pointer_width = "16")]
322#[cfg(not(feature = "ferrocene_certified"))]
323impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(2));
324#[cfg(target_pointer_width = "32")]
325#[cfg(not(feature = "ferrocene_certified"))]
326impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(4));
327#[cfg(target_pointer_width = "64")]
328#[cfg(not(feature = "ferrocene_certified"))]
329impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(8));
330
331#[cfg(target_pointer_width = "16")]
332#[cfg(not(feature = "ferrocene_certified"))]
333impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(2));
334#[cfg(target_pointer_width = "32")]
335#[cfg(not(feature = "ferrocene_certified"))]
336impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(4));
337#[cfg(target_pointer_width = "64")]
338#[cfg(not(feature = "ferrocene_certified"))]
339impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(8));
340
341#[cfg(target_pointer_width = "16")]
342#[cfg(not(feature = "ferrocene_certified"))]
343impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(2));
344#[cfg(target_pointer_width = "32")]
345#[cfg(not(feature = "ferrocene_certified"))]
346impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(4));
347#[cfg(target_pointer_width = "64")]
348#[cfg(not(feature = "ferrocene_certified"))]
349impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(8));
350
/// A memory location which can be safely modified from multiple threads.
///
/// This has the same size and bit validity as the underlying type `T`. However,
/// the alignment of this type is always equal to its size, even on targets where
/// `T` has alignment less than its size.
///
/// For more about the differences between atomic types and non-atomic types as
/// well as information about the portability of this type, please see the
/// [module-level documentation].
///
/// **Note:** This type is only available on platforms that support atomic loads
/// and stores of `T`.
///
/// [module-level documentation]: crate::sync::atomic
#[unstable(feature = "generic_atomic", issue = "130539")]
#[cfg(not(feature = "ferrocene_certified"))]
pub type Atomic<T> = <T as AtomicPrimitive>::AtomicInner;

// Some architectures don't have byte-sized atomics, which results in LLVM
// emulating them using a LL/SC loop. However for AtomicBool we can take
// advantage of the fact that it only ever contains 0 or 1 and use atomic OR/AND
// instead, which LLVM can emulate using a larger atomic OR/AND operation.
//
// This list should only contain architectures which have word-sized atomic-or/
// atomic-and instructions but don't natively support byte-sized atomics.
#[cfg(target_has_atomic = "8")]
#[cfg(not(feature = "ferrocene_certified"))]
const EMULATE_ATOMIC_BOOL: bool = cfg!(any(
    target_arch = "riscv32",
    target_arch = "riscv64",
    target_arch = "loongarch32",
    target_arch = "loongarch64"
));

/// A boolean type which can be safely shared between threads.
///
/// This type has the same size, alignment, and bit validity as a [`bool`].
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of `u8`.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "AtomicBool"]
#[repr(C, align(1))]
#[cfg(not(feature = "ferrocene_certified"))]
pub struct AtomicBool {
    v: UnsafeCell<u8>,
}

#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(feature = "ferrocene_certified"))]
impl Default for AtomicBool {
    /// Creates an `AtomicBool` initialized to `false`.
    #[inline]
    fn default() -> Self {
        Self::new(false)
    }
}

// Send is implicitly implemented for AtomicBool.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(feature = "ferrocene_certified"))]
unsafe impl Sync for AtomicBool {}

/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same size and bit validity as a `*mut T`.
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of pointers. Its size depends on the target pointer's size.
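///
/// # Examples
///
/// A minimal sketch of atomically swapping which value a shared pointer refers to:
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
/// let some_ptr = AtomicPtr::new(ptr);
///
/// let other_ptr = &mut 10;
///
/// // `swap` stores the new pointer and returns the previous one.
/// let old = some_ptr.swap(other_ptr, Ordering::Relaxed);
/// assert_eq!(unsafe { *old }, 5);
/// ```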
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "AtomicPtr"]
#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
#[cfg(not(feature = "ferrocene_certified"))]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(feature = "ferrocene_certified"))]
impl<T> Default for AtomicPtr<T> {
    /// Creates a null `AtomicPtr<T>`.
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(crate::ptr::null_mut())
    }
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(feature = "ferrocene_certified"))]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(feature = "ferrocene_certified"))]
unsafe impl<T> Sync for AtomicPtr<T> {}

/// Atomic memory orderings
///
/// Memory orderings specify the way atomic operations synchronize memory.
/// In its weakest [`Ordering::Relaxed`], only the memory directly touched by the
/// operation is synchronized. On the other hand, a store-load pair of [`Ordering::SeqCst`]
/// operations synchronize other memory while additionally preserving a total order of such
/// operations across all threads.
///
/// Rust's memory orderings are [the same as those of
/// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
///
/// For more information see the [nomicon].
///
/// [nomicon]: ../../../nomicon/atomics.html
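///
/// # Examples
///
/// A sketch of release/acquire message passing: the `Release` store makes the
/// preceding write to `DATA` visible to any thread whose `Acquire` load observes
/// the flag as `true`.
///
/// ```ignore-wasm
/// use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
/// use std::thread;
///
/// static DATA: AtomicU32 = AtomicU32::new(0);
/// static READY: AtomicBool = AtomicBool::new(false);
///
/// let t = thread::spawn(|| {
///     DATA.store(42, Ordering::Relaxed);
///     READY.store(true, Ordering::Release); // everything above happens-before ...
/// });
/// while !READY.load(Ordering::Acquire) {} // ... an acquire load that reads `true`.
/// assert_eq!(DATA.load(Ordering::Relaxed), 42);
/// t.join().unwrap();
/// ```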
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(feature = "ferrocene_certified"), derive(Copy, Clone, Debug, Eq, PartialEq, Hash))]
#[cfg_attr(feature = "ferrocene_certified", derive(Copy, Clone))]
#[non_exhaustive]
#[rustc_diagnostic_item = "Ordering"]
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    ///
    /// Corresponds to [`memory_order_relaxed`] in C++20.
    ///
    /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous operations become ordered
    /// before any load of this value with [`Acquire`] (or stronger) ordering.
    /// In particular, all previous writes become visible to all threads
    /// that perform an [`Acquire`] (or stronger) load of this value.
    ///
    /// Notice that using this ordering for an operation that combines loads
    /// and stores leads to a [`Relaxed`] load operation!
    ///
    /// This ordering is only applicable for operations that can perform a store.
    ///
    /// Corresponds to [`memory_order_release`] in C++20.
    ///
    /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, if the loaded value was written by a store operation with
    /// [`Release`] (or stronger) ordering, then all subsequent operations
    /// become ordered after that store. In particular, all subsequent loads will see data
    /// written before the store.
    ///
    /// Notice that using this ordering for an operation that combines loads
    /// and stores leads to a [`Relaxed`] store operation!
    ///
    /// This ordering is only applicable for operations that can perform a load.
    ///
    /// Corresponds to [`memory_order_acquire`] in C++20.
    ///
    /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// Has the effects of both [`Acquire`] and [`Release`] together:
    /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
    ///
    /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
    /// not performing any store and hence it has just [`Acquire`] ordering. However,
    /// `AcqRel` will never perform [`Relaxed`] accesses.
    ///
    /// This ordering is only applicable for operations that combine both loads and stores.
    ///
    /// Corresponds to [`memory_order_acq_rel`] in C++20.
    ///
    /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
    /// operations, respectively) with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    ///
    /// Corresponds to [`memory_order_seq_cst`] in C++20.
    ///
    /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
}

/// An [`AtomicBool`] initialized to `false`.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(
    since = "1.34.0",
    note = "the `new` function is now preferred",
    suggestion = "AtomicBool::new(false)"
)]
#[cfg(not(feature = "ferrocene_certified"))]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);

#[cfg(target_has_atomic_load_store = "8")]
#[cfg(not(feature = "ferrocene_certified"))]
impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
    #[must_use]
    pub const fn new(v: bool) -> AtomicBool {
        AtomicBool { v: UnsafeCell::new(v as u8) }
    }

    /// Creates a new `AtomicBool` from a pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{self, AtomicBool};
    ///
    /// // Get a pointer to an allocated value
    /// let ptr: *mut bool = Box::into_raw(Box::new(false));
    ///
    /// assert!(ptr.cast::<AtomicBool>().is_aligned());
    ///
    /// {
    ///     // Create an atomic view of the allocated value
    ///     let atomic = unsafe { AtomicBool::from_ptr(ptr) };
    ///
    ///     // Use `atomic` for atomic operations, possibly share it with other threads
    ///     atomic.store(true, atomic::Ordering::Relaxed);
    /// }
    ///
    /// // It's ok to non-atomically access the value behind `ptr`,
    /// // since the reference to the atomic ended its lifetime in the block above
    /// assert_eq!(unsafe { *ptr }, true);
    ///
    /// // Deallocate the value
    /// unsafe { drop(Box::from_raw(ptr)) }
    /// ```
    ///
    /// # Safety
    ///
    /// * `ptr` must be aligned to `align_of::<AtomicBool>()` (note that this is always true, since
    ///   `align_of::<AtomicBool>() == 1`).
    /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`.
    /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not
    ///   allowed to mix conflicting atomic and non-atomic accesses, or atomic accesses of different
    ///   sizes, without synchronization.
    ///
    /// [valid]: crate::ptr#safety
    /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses
    #[inline]
    #[stable(feature = "atomic_from_ptr", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_atomic_from_ptr", since = "1.84.0")]
    pub const unsafe fn from_ptr<'a>(ptr: *mut bool) -> &'a AtomicBool {
        // SAFETY: guaranteed by the caller
        unsafe { &*ptr.cast() }
    }

    /// Returns a mutable reference to the underlying [`bool`].
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = AtomicBool::new(true);
    /// assert_eq!(*some_bool.get_mut(), true);
    /// *some_bool.get_mut() = false;
    /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut bool {
        // SAFETY: the mutable reference guarantees unique ownership.
        unsafe { &mut *(self.v.get() as *mut bool) }
    }

    /// Gets atomic access to a `&mut bool`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_from_mut)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = true;
    /// let a = AtomicBool::from_mut(&mut some_bool);
    /// a.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool, false);
    /// ```
    #[inline]
    #[cfg(target_has_atomic_equal_alignment = "8")]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn from_mut(v: &mut bool) -> &mut Self {
        // SAFETY: the mutable reference guarantees unique ownership, and
        // alignment of both `bool` and `Self` is 1.
        unsafe { &mut *(v as *mut bool as *mut Self) }
    }

    /// Gets non-atomic access to a `&mut [AtomicBool]` slice.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```ignore-wasm
    /// #![feature(atomic_from_mut)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bools = [const { AtomicBool::new(false) }; 10];
    ///
    /// let view: &mut [bool] = AtomicBool::get_mut_slice(&mut some_bools);
    /// assert_eq!(view, [false; 10]);
    /// view[..5].copy_from_slice(&[true; 5]);
    ///
    /// std::thread::scope(|s| {
    ///     for t in &some_bools[..5] {
    ///         s.spawn(move || assert_eq!(t.load(Ordering::Relaxed), true));
    ///     }
    ///
    ///     for f in &some_bools[5..] {
    ///         s.spawn(move || assert_eq!(f.load(Ordering::Relaxed), false));
    ///     }
    /// });
    /// ```
    #[inline]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn get_mut_slice(this: &mut [Self]) -> &mut [bool] {
        // SAFETY: the mutable reference guarantees unique ownership.
        unsafe { &mut *(this as *mut [Self] as *mut [bool]) }
    }

    /// Gets atomic access to a `&mut [bool]` slice.
    ///
    /// # Examples
    ///
    /// ```rust,ignore-wasm
    /// #![feature(atomic_from_mut)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bools = [false; 10];
    /// let a = &*AtomicBool::from_mut_slice(&mut some_bools);
    /// std::thread::scope(|s| {
    ///     for i in 0..a.len() {
    ///         s.spawn(move || a[i].store(true, Ordering::Relaxed));
    ///     }
    /// });
    /// assert_eq!(some_bools, [true; 10]);
    /// ```
    #[inline]
    #[cfg(target_has_atomic_equal_alignment = "8")]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn from_mut_slice(v: &mut [bool]) -> &mut [Self] {
        // SAFETY: the mutable reference guarantees unique ownership, and
        // alignment of both `bool` and `Self` is 1.
        unsafe { &mut *(v as *mut [bool] as *mut [Self]) }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let some_bool = AtomicBool::new(true);
    /// assert_eq!(some_bool.into_inner(), true);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    #[rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0")]
    pub const fn into_inner(self) -> bool {
        self.v.into_inner() != 0
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn load(&self, order: Ordering) -> bool {
        // SAFETY: any data races are prevented by atomic intrinsics and the raw
        // pointer passed in is valid because we got it from a reference.
        unsafe { atomic_load(self.v.get(), order) != 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Acquire`] or [`AcqRel`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn store(&self, val: bool, order: Ordering) {
        // SAFETY: any data races are prevented by atomic intrinsics and the raw
        // pointer passed in is valid because we got it from a reference.
        unsafe {
            atomic_store(self.v.get(), val as u8, order);
        }
    }

    /// Stores a value into the bool, returning the previous value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        if EMULATE_ATOMIC_BOOL {
            if val { self.fetch_or(true, order) } else { self.fetch_and(false, order) }
        } else {
            // SAFETY: data races are prevented by atomic intrinsics.
            unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
        }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
    /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
    /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
    /// happens, and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Migrating to `compare_exchange` and `compare_exchange_weak`
    ///
    /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
    /// memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// `compare_and_swap` and `compare_exchange` also differ in their return type. You can use
    /// `compare_exchange(...).unwrap_or_else(|x| x)` to recover the behavior of `compare_and_swap`,
    /// but in most cases it is more idiomatic to check whether the return value is `Ok` or `Err`
    /// rather than to infer success vs failure based on the value that was read.
    ///
    /// During migration, consider whether it makes sense to use `compare_exchange_weak` instead.
    /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
    /// which allows the compiler to generate better assembly code when the compare and swap
    /// is used in a loop.
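    ///
    /// For example, a spin loop written against `compare_and_swap` migrates like
    /// this (a sketch):
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let flag = AtomicBool::new(false);
    ///
    /// // Before: `compare_and_swap` returned the previous value.
    /// // while flag.compare_and_swap(false, true, Ordering::AcqRel) { /* spin */ }
    ///
    /// // After: `compare_exchange_weak` returns a `Result`; retry on `Err`.
    /// while flag
    ///     .compare_exchange_weak(false, true, Ordering::AcqRel, Ordering::Acquire)
    ///     .is_err()
    /// {}
    /// assert!(flag.load(Ordering::Relaxed));
    /// ```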
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[cfg(not(feature = "ferrocene_certified"))]
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[deprecated(
        since = "1.50.0",
        note = "Use `compare_exchange` or `compare_exchange_weak` instead"
    )]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
    /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_exchange(true,
    ///                                       false,
    ///                                       Ordering::Acquire,
    ///                                       Ordering::Relaxed),
    ///            Ok(true));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_exchange(true, true,
    ///                                       Ordering::SeqCst,
    ///                                       Ordering::Acquire),
    ///            Err(false));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    ///
    /// # Considerations
    ///
    /// `compare_exchange` is a [compare-and-swap operation] and thus exhibits the usual downsides
    /// of CAS operations. In particular, a load of the value followed by a successful
    /// `compare_exchange` with the previous load *does not ensure* that other threads have not
    /// changed the value in the interim. This is usually important when the *equality* check in
    /// the `compare_exchange` is being used to check the *identity* of a value, but equality
    /// does not necessarily imply identity. In this case, `compare_exchange` can lead to the
    /// [ABA problem].
    ///
    /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
    /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    #[doc(alias = "compare_and_swap")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn compare_exchange(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        if EMULATE_ATOMIC_BOOL {
            // Pick the strongest ordering from success and failure.
            let order = match (success, failure) {
                (SeqCst, _) => SeqCst,
                (_, SeqCst) => SeqCst,
                (AcqRel, _) => AcqRel,
                (_, AcqRel) => {
                    panic!("there is no such thing as an acquire-release failure ordering")
                }
                (Release, Acquire) => AcqRel,
                (Acquire, _) => Acquire,
                (_, Acquire) => Acquire,
                (Release, Relaxed) => Release,
                (_, Release) => panic!("there is no such thing as a release failure ordering"),
                (Relaxed, Relaxed) => Relaxed,
            };
            let old = if current == new {
                // This is a no-op, but we still need to perform the operation
                // for memory ordering reasons.
                self.fetch_or(false, order)
            } else {
                // This sets the value to the new one and returns the old one.
                self.swap(new, order)
            };
            if old == current { Ok(old) } else { Err(old) }
        } else {
            // SAFETY: data races are prevented by atomic intrinsics.
            match unsafe {
                atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
            } {
                Ok(x) => Ok(x != 0),
                Err(x) => Err(x != 0),
            }
        }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// Unlike [`AtomicBool::compare_exchange`], this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
    /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let val = AtomicBool::new(false);
    ///
    /// let new = true;
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    ///
    /// # Considerations
    ///
    /// `compare_exchange_weak` is a [compare-and-swap operation] and thus exhibits the usual
    /// downsides of CAS operations. In particular, a load of the value followed by a successful
    /// `compare_exchange_weak` with the previous load *does not ensure* that other threads have not
    /// changed the value in the interim. This is usually important when the *equality* check in
    /// the `compare_exchange_weak` is being used to check the *identity* of a value, but equality
    /// does not necessarily imply identity. In this case, `compare_exchange_weak` can lead to the
    /// [ABA problem].
    ///
    /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
    /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    #[doc(alias = "compare_and_swap")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn compare_exchange_weak(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        if EMULATE_ATOMIC_BOOL {
            return self.compare_exchange(current, new, success, failure);
        }

        // SAFETY: data races are prevented by atomic intrinsics.
        match unsafe {
            atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }

    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        // We can't use atomic_nand here because it can result in a bool with
        // an invalid value. This happens because the atomic operation is done
        // with an 8-bit integer internally, which would set the upper 7 bits.
        // So we just use fetch_xor or swap instead.
        if val {
            // !(x & true) == !x
            // We must invert the bool.
            self.fetch_xor(true, order)
        } else {
            // !(x & false) == true
            // We must set the bool to true.
            self.swap(true, order)
        }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "not" with a boolean value.
    ///
    /// Performs a logical "not" operation on the current value, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_not` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_not(Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_not(Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[stable(feature = "atomic_bool_fetch_not", since = "1.81.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_not(&self, order: Ordering) -> bool {
        self.fetch_xor(true, order)
    }
1274
1275 /// Returns a mutable pointer to the underlying [`bool`].
1276 ///
1277 /// Doing non-atomic reads and writes on the resulting boolean can be a data race.
1278 /// This method is mostly useful for FFI, where the function signature may use
1279 /// `*mut bool` instead of `&AtomicBool`.
1280 ///
1281 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
1282 /// atomic types work with interior mutability. All modifications of an atomic change the value
1283 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
1284 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the
1285 /// requirements of the [memory model].
1286 ///
1287 /// # Examples
1288 ///
1289 /// ```ignore (extern-declaration)
1290 /// # fn main() {
1291 /// use std::sync::atomic::AtomicBool;
1292 ///
1293 /// extern "C" {
1294 /// fn my_atomic_op(arg: *mut bool);
1295 /// }
1296 ///
1297 /// let mut atomic = AtomicBool::new(true);
1298 /// unsafe {
1299 /// my_atomic_op(atomic.as_ptr());
1300 /// }
1301 /// # }
1302 /// ```
1303 ///
1304 /// [memory model]: self#memory-model-for-atomic-accesses
1305 #[inline]
1306 #[stable(feature = "atomic_as_ptr", since = "1.70.0")]
1307 #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
1308 #[rustc_never_returns_null_ptr]
1309 pub const fn as_ptr(&self) -> *mut bool {
1310 self.v.get().cast()
1311 }
1312
1313 /// Fetches the value, and applies a function to it that returns an optional
1314 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
1315 /// returned `Some(_)`, else `Err(previous_value)`.
1316 ///
1317 /// Note: This may call the function multiple times if the value has been
1318 /// changed from other threads in the meantime, as long as the function
1319 /// returns `Some(_)`, but the function will have been applied only once to
1320 /// the stored value.
1321 ///
1322 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
1323 /// ordering of this operation. The first describes the required ordering for
1324 /// when the operation finally succeeds while the second describes the
1325 /// required ordering for loads. These correspond to the success and failure
1326 /// orderings of [`AtomicBool::compare_exchange`] respectively.
1327 ///
1328 /// Using [`Acquire`] as success ordering makes the store part of this
1329 /// operation [`Relaxed`], and using [`Release`] makes the final successful
1330 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
1331 /// [`Acquire`] or [`Relaxed`].
1332 ///
1333 /// **Note:** This method is only available on platforms that support atomic
1334 /// operations on `u8`.
1335 ///
1336 /// # Considerations
1337 ///
1338 /// This method is not magic; it is not provided by the hardware, and does not act like a
1339 /// critical section or mutex.
1340 ///
1341 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
1342 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem].
1343 ///
1344 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
1345 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
1346 ///
1347 /// # Examples
1348 ///
1349 /// ```rust
1350 /// use std::sync::atomic::{AtomicBool, Ordering};
1351 ///
1352 /// let x = AtomicBool::new(false);
1353 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false));
1354 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false));
1355 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true));
1356 /// assert_eq!(x.load(Ordering::SeqCst), false);
1357 /// ```
1358 #[inline]
1359 #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
1360 #[cfg(target_has_atomic = "8")]
1361 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1362 pub fn fetch_update<F>(
1363 &self,
1364 set_order: Ordering,
1365 fetch_order: Ordering,
1366 mut f: F,
1367 ) -> Result<bool, bool>
1368 where
1369 F: FnMut(bool) -> Option<bool>,
1370 {
1371 let mut prev = self.load(fetch_order);
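        // CAS loop: retry with the freshly observed value whenever the weak
        // compare-exchange fails, whether due to contention or spuriously.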
1372 while let Some(next) = f(prev) {
1373 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1374 x @ Ok(_) => return x,
1375 Err(next_prev) => prev = next_prev,
1376 }
1377 }
1378 Err(prev)
1379 }
1380
1381 /// Fetches the value, and applies a function to it that returns an optional
1382 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
1383 /// returned `Some(_)`, else `Err(previous_value)`.
1384 ///
1385 /// See also: [`update`](`AtomicBool::update`).
1386 ///
1387 /// Note: This may call the function multiple times if the value has been
1388 /// changed from other threads in the meantime, as long as the function
1389 /// returns `Some(_)`, but the function will have been applied only once to
1390 /// the stored value.
1391 ///
1392 /// `try_update` takes two [`Ordering`] arguments to describe the memory
1393 /// ordering of this operation. The first describes the required ordering for
1394 /// when the operation finally succeeds while the second describes the
1395 /// required ordering for loads. These correspond to the success and failure
1396 /// orderings of [`AtomicBool::compare_exchange`] respectively.
1397 ///
1398 /// Using [`Acquire`] as success ordering makes the store part of this
1399 /// operation [`Relaxed`], and using [`Release`] makes the final successful
1400 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
1401 /// [`Acquire`] or [`Relaxed`].
1402 ///
1403 /// **Note:** This method is only available on platforms that support atomic
1404 /// operations on `u8`.
1405 ///
1406 /// # Considerations
1407 ///
1408 /// This method is not magic; it is not provided by the hardware, and does not act like a
1409 /// critical section or mutex.
1410 ///
1411 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
1412 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem].
1413 ///
1414 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
1415 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
1416 ///
1417 /// # Examples
1418 ///
1419 /// ```rust
1420 /// #![feature(atomic_try_update)]
1421 /// use std::sync::atomic::{AtomicBool, Ordering};
1422 ///
1423 /// let x = AtomicBool::new(false);
1424 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false));
1425 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false));
1426 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true));
1427 /// assert_eq!(x.load(Ordering::SeqCst), false);
1428 /// ```
1429 #[inline]
1430 #[unstable(feature = "atomic_try_update", issue = "135894")]
1431 #[cfg(target_has_atomic = "8")]
1432 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1433 pub fn try_update(
1434 &self,
1435 set_order: Ordering,
1436 fetch_order: Ordering,
1437 f: impl FnMut(bool) -> Option<bool>,
1438 ) -> Result<bool, bool> {
1439 // FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`;
1440 // when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`.
1441 self.fetch_update(set_order, fetch_order, f)
1442 }
1443
1444 /// Fetches the value, and applies a function to it that returns a new value.
1445 /// The new value is stored and the old value is returned.
1446 ///
1447 /// See also: [`try_update`](`AtomicBool::try_update`).
1448 ///
1449 /// Note: This may call the function multiple times if the value has been changed from other threads in
1450 /// the meantime, but the function will have been applied only once to the stored value.
1451 ///
1452 /// `update` takes two [`Ordering`] arguments to describe the memory
1453 /// ordering of this operation. The first describes the required ordering for
1454 /// when the operation finally succeeds while the second describes the
1455 /// required ordering for loads. These correspond to the success and failure
1456 /// orderings of [`AtomicBool::compare_exchange`] respectively.
1457 ///
1458 /// Using [`Acquire`] as success ordering makes the store part
1459 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1460 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
1461 ///
1462 /// **Note:** This method is only available on platforms that support atomic operations on `u8`.
1463 ///
1464 /// # Considerations
1465 ///
1466 /// This method is not magic; it is not provided by the hardware, and does not act like a
1467 /// critical section or mutex.
1468 ///
1469 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
1470 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem].
1471 ///
1472 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
1473 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
1474 ///
1475 /// # Examples
1476 ///
1477 /// ```rust
1478 /// #![feature(atomic_try_update)]
1479 ///
1480 /// use std::sync::atomic::{AtomicBool, Ordering};
1481 ///
1482 /// let x = AtomicBool::new(false);
1483 /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| !x), false);
1484 /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| !x), true);
1485 /// assert_eq!(x.load(Ordering::SeqCst), false);
1486 /// ```
1487 #[inline]
1488 #[unstable(feature = "atomic_try_update", issue = "135894")]
1489 #[cfg(target_has_atomic = "8")]
1490 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1491 pub fn update(
1492 &self,
1493 set_order: Ordering,
1494 fetch_order: Ordering,
1495 mut f: impl FnMut(bool) -> bool,
1496 ) -> bool {
1497 let mut prev = self.load(fetch_order);
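        // Unlike `fetch_update`, `f` always produces a new value, so the loop
        // only terminates once the compare-exchange succeeds.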
1498 loop {
1499 match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
1500 Ok(x) => break x,
1501 Err(next_prev) => prev = next_prev,
1502 }
1503 }
1504 }
1505}
1506
1507#[cfg(target_has_atomic_load_store = "ptr")]
1508#[cfg(not(feature = "ferrocene_certified"))]
1509impl<T> AtomicPtr<T> {
1510 /// Creates a new `AtomicPtr`.
1511 ///
1512 /// # Examples
1513 ///
1514 /// ```
1515 /// use std::sync::atomic::AtomicPtr;
1516 ///
1517 /// let ptr = &mut 5;
1518 /// let atomic_ptr = AtomicPtr::new(ptr);
1519 /// ```
1520 #[inline]
1521 #[stable(feature = "rust1", since = "1.0.0")]
1522 #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
1523 pub const fn new(p: *mut T) -> AtomicPtr<T> {
1524 AtomicPtr { p: UnsafeCell::new(p) }
1525 }
1526
1527 /// Creates a new `AtomicPtr` from a pointer.
1528 ///
1529 /// # Examples
1530 ///
1531 /// ```
1532 /// use std::sync::atomic::{self, AtomicPtr};
1533 ///
1534 /// // Get a pointer to an allocated value
1535 /// let ptr: *mut *mut u8 = Box::into_raw(Box::new(std::ptr::null_mut()));
1536 ///
1537 /// assert!(ptr.cast::<AtomicPtr<u8>>().is_aligned());
1538 ///
1539 /// {
1540 /// // Create an atomic view of the allocated value
1541 /// let atomic = unsafe { AtomicPtr::from_ptr(ptr) };
1542 ///
1543 /// // Use `atomic` for atomic operations, possibly share it with other threads
1544 /// atomic.store(std::ptr::NonNull::dangling().as_ptr(), atomic::Ordering::Relaxed);
1545 /// }
1546 ///
1547 /// // It's ok to non-atomically access the value behind `ptr`,
1548 /// // since the reference to the atomic ended its lifetime in the block above
1549 /// assert!(!unsafe { *ptr }.is_null());
1550 ///
1551 /// // Deallocate the value
1552 /// unsafe { drop(Box::from_raw(ptr)) }
1553 /// ```
1554 ///
1555 /// # Safety
1556 ///
1557 /// * `ptr` must be aligned to `align_of::<AtomicPtr<T>>()` (note that on some platforms this
1558 /// can be bigger than `align_of::<*mut T>()`).
1559 /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`.
1560 /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not
1561 /// allowed to mix conflicting atomic and non-atomic accesses, or atomic accesses of different
1562 /// sizes, without synchronization.
1563 ///
1564 /// [valid]: crate::ptr#safety
1565 /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses
1566 #[inline]
1567 #[stable(feature = "atomic_from_ptr", since = "1.75.0")]
1568 #[rustc_const_stable(feature = "const_atomic_from_ptr", since = "1.84.0")]
1569 pub const unsafe fn from_ptr<'a>(ptr: *mut *mut T) -> &'a AtomicPtr<T> {
1570 // SAFETY: guaranteed by the caller
1571 unsafe { &*ptr.cast() }
1572 }
1573
1574 /// Returns a mutable reference to the underlying pointer.
1575 ///
1576 /// This is safe because the mutable reference guarantees that no other threads are
1577 /// concurrently accessing the atomic data.
1578 ///
1579 /// # Examples
1580 ///
1581 /// ```
1582 /// use std::sync::atomic::{AtomicPtr, Ordering};
1583 ///
1584 /// let mut data = 10;
1585 /// let mut atomic_ptr = AtomicPtr::new(&mut data);
1586 /// let mut other_data = 5;
1587 /// *atomic_ptr.get_mut() = &mut other_data;
1588 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
1589 /// ```
1590 #[inline]
1591 #[stable(feature = "atomic_access", since = "1.15.0")]
1592 pub fn get_mut(&mut self) -> &mut *mut T {
1593 self.p.get_mut()
1594 }
1595
1596 /// Gets atomic access to a pointer.
1597 ///
1598 /// # Examples
1599 ///
1600 /// ```
1601 /// #![feature(atomic_from_mut)]
1602 /// use std::sync::atomic::{AtomicPtr, Ordering};
1603 ///
1604 /// let mut data = 123;
1605 /// let mut some_ptr = &mut data as *mut i32;
1606 /// let a = AtomicPtr::from_mut(&mut some_ptr);
1607 /// let mut other_data = 456;
1608 /// a.store(&mut other_data, Ordering::Relaxed);
1609 /// assert_eq!(unsafe { *some_ptr }, 456);
1610 /// ```
1611 #[inline]
1612 #[cfg(target_has_atomic_equal_alignment = "ptr")]
1613 #[unstable(feature = "atomic_from_mut", issue = "76314")]
1614 pub fn from_mut(v: &mut *mut T) -> &mut Self {
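        // Compile-time assertion that the two alignments match: this
        // zero-length-array pattern only type-checks if the difference is zero.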
1615 let [] = [(); align_of::<AtomicPtr<()>>() - align_of::<*mut ()>()];
1616 // SAFETY:
1617 // - the mutable reference guarantees unique ownership.
1618 // - the alignment of `*mut T` and `Self` is the same on all platforms
1619 // supported by rust, as verified above.
1620 unsafe { &mut *(v as *mut *mut T as *mut Self) }
1621 }
1622
1623 /// Gets non-atomic access to a `&mut [AtomicPtr]` slice.
1624 ///
1625 /// This is safe because the mutable reference guarantees that no other threads are
1626 /// concurrently accessing the atomic data.
1627 ///
1628 /// # Examples
1629 ///
1630 /// ```ignore-wasm
1631 /// #![feature(atomic_from_mut)]
1632 /// use std::ptr::null_mut;
1633 /// use std::sync::atomic::{AtomicPtr, Ordering};
1634 ///
1635 /// let mut some_ptrs = [const { AtomicPtr::new(null_mut::<String>()) }; 10];
1636 ///
1637 /// let view: &mut [*mut String] = AtomicPtr::get_mut_slice(&mut some_ptrs);
1638 /// assert_eq!(view, [null_mut::<String>(); 10]);
1639 /// view
1640 /// .iter_mut()
1641 /// .enumerate()
1642 /// .for_each(|(i, ptr)| *ptr = Box::into_raw(Box::new(format!("iteration#{i}"))));
1643 ///
1644 /// std::thread::scope(|s| {
1645 /// for ptr in &some_ptrs {
1646 /// s.spawn(move || {
1647 /// let ptr = ptr.load(Ordering::Relaxed);
1648 /// assert!(!ptr.is_null());
1649 ///
1650 /// let name = unsafe { Box::from_raw(ptr) };
1651 /// println!("Hello, {name}!");
1652 /// });
1653 /// }
1654 /// });
1655 /// ```
1656 #[inline]
1657 #[unstable(feature = "atomic_from_mut", issue = "76314")]
1658 pub fn get_mut_slice(this: &mut [Self]) -> &mut [*mut T] {
1659 // SAFETY: the mutable reference guarantees unique ownership.
1660 unsafe { &mut *(this as *mut [Self] as *mut [*mut T]) }
1661 }
1662
1663 /// Gets atomic access to a slice of pointers.
1664 ///
1665 /// # Examples
1666 ///
1667 /// ```ignore-wasm
1668 /// #![feature(atomic_from_mut)]
1669 /// use std::ptr::null_mut;
1670 /// use std::sync::atomic::{AtomicPtr, Ordering};
1671 ///
1672 /// let mut some_ptrs = [null_mut::<String>(); 10];
1673 /// let a = &*AtomicPtr::from_mut_slice(&mut some_ptrs);
1674 /// std::thread::scope(|s| {
1675 /// for i in 0..a.len() {
1676 /// s.spawn(move || {
1677 /// let name = Box::new(format!("thread{i}"));
1678 /// a[i].store(Box::into_raw(name), Ordering::Relaxed);
1679 /// });
1680 /// }
1681 /// });
1682 /// for p in some_ptrs {
1683 /// assert!(!p.is_null());
1684 /// let name = unsafe { Box::from_raw(p) };
1685 /// println!("Hello, {name}!");
1686 /// }
1687 /// ```
1688 #[inline]
1689 #[cfg(target_has_atomic_equal_alignment = "ptr")]
1690 #[unstable(feature = "atomic_from_mut", issue = "76314")]
1691 pub fn from_mut_slice(v: &mut [*mut T]) -> &mut [Self] {
1692 // SAFETY:
1693 // - the mutable reference guarantees unique ownership.
1694 // - the alignment of `*mut T` and `Self` is the same on all platforms
1695 // supported by rust, as verified in `from_mut`.
1696 unsafe { &mut *(v as *mut [*mut T] as *mut [Self]) }
1697 }
1698
1699 /// Consumes the atomic and returns the contained value.
1700 ///
1701 /// This is safe because passing `self` by value guarantees that no other threads are
1702 /// concurrently accessing the atomic data.
1703 ///
1704 /// # Examples
1705 ///
1706 /// ```
1707 /// use std::sync::atomic::AtomicPtr;
1708 ///
1709 /// let mut data = 5;
1710 /// let atomic_ptr = AtomicPtr::new(&mut data);
1711 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
1712 /// ```
1713 #[inline]
1714 #[stable(feature = "atomic_access", since = "1.15.0")]
1715 #[rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0")]
1716 pub const fn into_inner(self) -> *mut T {
1717 self.p.into_inner()
1718 }
1719
1720 /// Loads a value from the pointer.
1721 ///
1722 /// `load` takes an [`Ordering`] argument which describes the memory ordering
1723 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1724 ///
1725 /// # Panics
1726 ///
1727 /// Panics if `order` is [`Release`] or [`AcqRel`].
1728 ///
1729 /// # Examples
1730 ///
1731 /// ```
1732 /// use std::sync::atomic::{AtomicPtr, Ordering};
1733 ///
1734 /// let ptr = &mut 5;
1735 /// let some_ptr = AtomicPtr::new(ptr);
1736 ///
1737 /// let value = some_ptr.load(Ordering::Relaxed);
1738 /// ```
1739 #[inline]
1740 #[stable(feature = "rust1", since = "1.0.0")]
1741 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1742 pub fn load(&self, order: Ordering) -> *mut T {
1743 // SAFETY: data races are prevented by atomic intrinsics.
1744 unsafe { atomic_load(self.p.get(), order) }
1745 }
1746
1747 /// Stores a value into the pointer.
1748 ///
1749 /// `store` takes an [`Ordering`] argument which describes the memory ordering
1750 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1751 ///
1752 /// # Panics
1753 ///
1754 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
1755 ///
1756 /// # Examples
1757 ///
1758 /// ```
1759 /// use std::sync::atomic::{AtomicPtr, Ordering};
1760 ///
1761 /// let ptr = &mut 5;
1762 /// let some_ptr = AtomicPtr::new(ptr);
1763 ///
1764 /// let other_ptr = &mut 10;
1765 ///
1766 /// some_ptr.store(other_ptr, Ordering::Relaxed);
1767 /// ```
1768 #[inline]
1769 #[stable(feature = "rust1", since = "1.0.0")]
1770 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1771 pub fn store(&self, ptr: *mut T, order: Ordering) {
1772 // SAFETY: data races are prevented by atomic intrinsics.
1773 unsafe {
1774 atomic_store(self.p.get(), ptr, order);
1775 }
1776 }
1777
1778 /// Stores a value into the pointer, returning the previous value.
1779 ///
1780 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
1781 /// of this operation. All ordering modes are possible. Note that using
1782 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1783 /// using [`Release`] makes the load part [`Relaxed`].
1784 ///
1785 /// **Note:** This method is only available on platforms that support atomic
1786 /// operations on pointers.
1787 ///
1788 /// # Examples
1789 ///
1790 /// ```
1791 /// use std::sync::atomic::{AtomicPtr, Ordering};
1792 ///
1793 /// let ptr = &mut 5;
1794 /// let some_ptr = AtomicPtr::new(ptr);
1795 ///
1796 /// let other_ptr = &mut 10;
1797 ///
1798 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
1799 /// ```
1800 #[inline]
1801 #[stable(feature = "rust1", since = "1.0.0")]
1802 #[cfg(target_has_atomic = "ptr")]
1803 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1804 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
1805 // SAFETY: data races are prevented by atomic intrinsics.
1806 unsafe { atomic_swap(self.p.get(), ptr, order) }
1807 }
1808
1809 /// Stores a value into the pointer if the current value is the same as the `current` value.
1810 ///
1811 /// The return value is always the previous value. If it is equal to `current`, then the value
1812 /// was updated.
1813 ///
1814 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1815 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1816 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1817 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1818 /// happens, and using [`Release`] makes the load part [`Relaxed`].
1819 ///
1820 /// **Note:** This method is only available on platforms that support atomic
1821 /// operations on pointers.
1822 ///
1823 /// # Migrating to `compare_exchange` and `compare_exchange_weak`
1824 ///
1825 /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
1826 /// memory orderings:
1827 ///
1828 /// Original | Success | Failure
1829 /// -------- | ------- | -------
1830 /// Relaxed | Relaxed | Relaxed
1831 /// Acquire | Acquire | Acquire
1832 /// Release | Release | Relaxed
1833 /// AcqRel | AcqRel | Acquire
1834 /// SeqCst | SeqCst | SeqCst
1835 ///
1836 /// `compare_and_swap` and `compare_exchange` also differ in their return type. You can use
1837 /// `compare_exchange(...).unwrap_or_else(|x| x)` to recover the behavior of `compare_and_swap`,
1838 /// but in most cases it is more idiomatic to check whether the return value is `Ok` or `Err`
1839 /// rather than to infer success vs failure based on the value that was read.
1840 ///
1841 /// During migration, consider whether it makes sense to use `compare_exchange_weak` instead.
1842 /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
1843 /// which allows the compiler to generate better assembly code when the compare and swap
1844 /// is used in a loop.
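    ///
    /// For example, a minimal sketch of a direct migration, using the same
    /// `ptr`/`other_ptr` setup as the example below:
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    /// let other_ptr = &mut 10;
    ///
    /// // Before: some_ptr.compare_and_swap(ptr, other_ptr, Ordering::SeqCst)
    /// // After, per the mapping table above:
    /// let prev = some_ptr
    ///     .compare_exchange(ptr, other_ptr, Ordering::SeqCst, Ordering::SeqCst)
    ///     .unwrap_or_else(|x| x);
    /// assert_eq!(prev, ptr as *mut _);
    /// ```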
1845 ///
1846 /// # Examples
1847 ///
1848 /// ```
1849 /// use std::sync::atomic::{AtomicPtr, Ordering};
1850 ///
1851 /// let ptr = &mut 5;
1852 /// let some_ptr = AtomicPtr::new(ptr);
1853 ///
1854 /// let other_ptr = &mut 10;
1855 ///
1856 /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
1857 /// ```
1858 #[inline]
1859 #[stable(feature = "rust1", since = "1.0.0")]
1860 #[deprecated(
1861 since = "1.50.0",
1862 note = "Use `compare_exchange` or `compare_exchange_weak` instead"
1863 )]
1864 #[cfg(target_has_atomic = "ptr")]
1865 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1866 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
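        // Delegate to `compare_exchange`, deriving the failure ordering from
        // `order` exactly as in the migration table above.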
1867 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
1868 Ok(x) => x,
1869 Err(x) => x,
1870 }
1871 }
1872
1873 /// Stores a value into the pointer if the current value is the same as the `current` value.
1874 ///
1875 /// The return value is a result indicating whether the new value was written and containing
1876 /// the previous value. On success this value is guaranteed to be equal to `current`.
1877 ///
1878 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1879 /// ordering of this operation. `success` describes the required ordering for the
1880 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1881 /// `failure` describes the required ordering for the load operation that takes place when
1882 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1883 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1884 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
1885 ///
1886 /// **Note:** This method is only available on platforms that support atomic
1887 /// operations on pointers.
1888 ///
1889 /// # Examples
1890 ///
1891 /// ```
1892 /// use std::sync::atomic::{AtomicPtr, Ordering};
1893 ///
1894 /// let ptr = &mut 5;
1895 /// let some_ptr = AtomicPtr::new(ptr);
1896 ///
1897 /// let other_ptr = &mut 10;
1898 ///
1899 /// let value = some_ptr.compare_exchange(ptr, other_ptr,
1900 /// Ordering::SeqCst, Ordering::Relaxed);
1901 /// ```
1902 ///
1903 /// # Considerations
1904 ///
1905 /// `compare_exchange` is a [compare-and-swap operation] and thus exhibits the usual downsides
1906 /// of CAS operations. In particular, a load of the value followed by a successful
1907 /// `compare_exchange` with the previous load *does not ensure* that other threads have not
1908 /// changed the value in the interim. This is usually important when the *equality* check in
1909 /// the `compare_exchange` is being used to check the *identity* of a value, but equality
1910 /// does not necessarily imply identity. This is a particularly common case for pointers, as
1911 /// a pointer holding the same address does not imply that the same object exists at that
1912 /// address! In this case, `compare_exchange` can lead to the [ABA problem].
1913 ///
1914 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
1915 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
1916 #[inline]
1917 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1918 #[cfg(target_has_atomic = "ptr")]
1919 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1920 pub fn compare_exchange(
1921 &self,
1922 current: *mut T,
1923 new: *mut T,
1924 success: Ordering,
1925 failure: Ordering,
1926 ) -> Result<*mut T, *mut T> {
1927 // SAFETY: data races are prevented by atomic intrinsics.
1928 unsafe { atomic_compare_exchange(self.p.get(), current, new, success, failure) }
1929 }
1930
1931 /// Stores a value into the pointer if the current value is the same as the `current` value.
1932 ///
1933 /// Unlike [`AtomicPtr::compare_exchange`], this function is allowed to spuriously fail even when the
1934 /// comparison succeeds, which can result in more efficient code on some platforms. The
1935 /// return value is a result indicating whether the new value was written and containing the
1936 /// previous value.
1937 ///
1938 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1939 /// ordering of this operation. `success` describes the required ordering for the
1940 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1941 /// `failure` describes the required ordering for the load operation that takes place when
1942 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1943 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1944 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
1945 ///
1946 /// **Note:** This method is only available on platforms that support atomic
1947 /// operations on pointers.
1948 ///
1949 /// # Examples
1950 ///
1951 /// ```
1952 /// use std::sync::atomic::{AtomicPtr, Ordering};
1953 ///
1954 /// let some_ptr = AtomicPtr::new(&mut 5);
1955 ///
1956 /// let new = &mut 10;
1957 /// let mut old = some_ptr.load(Ordering::Relaxed);
1958 /// loop {
1959 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1960 /// Ok(_) => break,
1961 /// Err(x) => old = x,
1962 /// }
1963 /// }
1964 /// ```
1965 ///
1966 /// # Considerations
1967 ///
1968 /// `compare_exchange_weak` is a [compare-and-swap operation] and thus exhibits the usual downsides
1969 /// of CAS operations. In particular, a load of the value followed by a successful
1970 /// `compare_exchange_weak` with the previous load *does not ensure* that other threads have not
1971 /// changed the value in the interim. This is usually important when the *equality* check in
1972 /// the `compare_exchange_weak` is being used to check the *identity* of a value, but equality
1973 /// does not necessarily imply identity. This is a particularly common case for pointers, as
1974 /// a pointer holding the same address does not imply that the same object exists at that
1975 /// address! In this case, `compare_exchange_weak` can lead to the [ABA problem].
1976 ///
1977 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
1978 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
1979 #[inline]
1980 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1981 #[cfg(target_has_atomic = "ptr")]
1982 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1983 pub fn compare_exchange_weak(
1984 &self,
1985 current: *mut T,
1986 new: *mut T,
1987 success: Ordering,
1988 failure: Ordering,
1989 ) -> Result<*mut T, *mut T> {
1990 // SAFETY: This intrinsic is unsafe because it operates on a raw pointer
1991 // but we know for sure that the pointer is valid (we just got it from
1992 // an `UnsafeCell` that we have by reference) and the atomic operation
1993 // itself allows us to safely mutate the `UnsafeCell` contents.
1994 unsafe { atomic_compare_exchange_weak(self.p.get(), current, new, success, failure) }
1995 }
1996
1997 /// Fetches the value, and applies a function to it that returns an optional
1998 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
1999 /// returned `Some(_)`, else `Err(previous_value)`.
2000 ///
2001 /// Note: This may call the function multiple times if the value has been
2002 /// changed from other threads in the meantime, as long as the function
2003 /// returns `Some(_)`, but the function will have been applied only once to
2004 /// the stored value.
2005 ///
2006 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
2007 /// ordering of this operation. The first describes the required ordering for
2008 /// when the operation finally succeeds while the second describes the
2009 /// required ordering for loads. These correspond to the success and failure
2010 /// orderings of [`AtomicPtr::compare_exchange`] respectively.
2011 ///
2012 /// Using [`Acquire`] as success ordering makes the store part of this
2013 /// operation [`Relaxed`], and using [`Release`] makes the final successful
2014 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
2015 /// [`Acquire`] or [`Relaxed`].
2016 ///
2017 /// **Note:** This method is only available on platforms that support atomic
2018 /// operations on pointers.
2019 ///
2020 /// # Considerations
2021 ///
2022 /// This method is not magic; it is not provided by the hardware, and does not act like a
2023 /// critical section or mutex.
2024 ///
2025 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
2026 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem],
2027 /// which is a particularly common pitfall for pointers!
2028 ///
2029 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
2030 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
2031 ///
2032 /// # Examples
2033 ///
2034 /// ```rust
2035 /// use std::sync::atomic::{AtomicPtr, Ordering};
2036 ///
2037 /// let ptr: *mut _ = &mut 5;
2038 /// let some_ptr = AtomicPtr::new(ptr);
2039 ///
2040 /// let new: *mut _ = &mut 10;
2041 /// assert_eq!(some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr));
2042 /// let result = some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
2043 /// if x == ptr {
2044 /// Some(new)
2045 /// } else {
2046 /// None
2047 /// }
2048 /// });
2049 /// assert_eq!(result, Ok(ptr));
2050 /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
2051 /// ```
2052 #[inline]
2053 #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
2054 #[cfg(target_has_atomic = "ptr")]
2055 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2056 pub fn fetch_update<F>(
2057 &self,
2058 set_order: Ordering,
2059 fetch_order: Ordering,
2060 mut f: F,
2061 ) -> Result<*mut T, *mut T>
2062 where
2063 F: FnMut(*mut T) -> Option<*mut T>,
2064 {
2065 let mut prev = self.load(fetch_order);
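        // Retry until `f` declines by returning `None`, or until the weak
        // compare-exchange succeeds.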
2066 while let Some(next) = f(prev) {
2067 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
2068 x @ Ok(_) => return x,
2069 Err(next_prev) => prev = next_prev,
2070 }
2071 }
2072 Err(prev)
2073 }

2074 /// Fetches the value, and applies a function to it that returns an optional
2075 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
2076 /// returned `Some(_)`, else `Err(previous_value)`.
2077 ///
2078 /// See also: [`update`](`AtomicPtr::update`).
2079 ///
2080 /// Note: This may call the function multiple times if the value has been
2081 /// changed from other threads in the meantime, as long as the function
2082 /// returns `Some(_)`, but the function will have been applied only once to
2083 /// the stored value.
2084 ///
2085 /// `try_update` takes two [`Ordering`] arguments to describe the memory
2086 /// ordering of this operation. The first describes the required ordering for
2087 /// when the operation finally succeeds while the second describes the
2088 /// required ordering for loads. These correspond to the success and failure
2089 /// orderings of [`AtomicPtr::compare_exchange`] respectively.
2090 ///
2091 /// Using [`Acquire`] as success ordering makes the store part of this
2092 /// operation [`Relaxed`], and using [`Release`] makes the final successful
2093 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
2094 /// [`Acquire`] or [`Relaxed`].
2095 ///
2096 /// **Note:** This method is only available on platforms that support atomic
2097 /// operations on pointers.
2098 ///
2099 /// # Considerations
2100 ///
2101 /// This method is not magic; it is not provided by the hardware, and does not act like a
2102 /// critical section or mutex.
2103 ///
2104 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
2105 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem],
2106 /// which is a particularly common pitfall for pointers!
2107 ///
2108 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
2109 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
2110 ///
2111 /// # Examples
2112 ///
2113 /// ```rust
2114 /// #![feature(atomic_try_update)]
2115 /// use std::sync::atomic::{AtomicPtr, Ordering};
2116 ///
2117 /// let ptr: *mut _ = &mut 5;
2118 /// let some_ptr = AtomicPtr::new(ptr);
2119 ///
2120 /// let new: *mut _ = &mut 10;
2121 /// assert_eq!(some_ptr.try_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr));
2122 /// let result = some_ptr.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
2123 /// if x == ptr {
2124 /// Some(new)
2125 /// } else {
2126 /// None
2127 /// }
2128 /// });
2129 /// assert_eq!(result, Ok(ptr));
2130 /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
2131 /// ```
2132 #[inline]
2133 #[unstable(feature = "atomic_try_update", issue = "135894")]
2134 #[cfg(target_has_atomic = "ptr")]
2135 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2136 pub fn try_update(
2137 &self,
2138 set_order: Ordering,
2139 fetch_order: Ordering,
2140 f: impl FnMut(*mut T) -> Option<*mut T>,
2141 ) -> Result<*mut T, *mut T> {
2142 // FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`;
2143 // when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`.
2144 self.fetch_update(set_order, fetch_order, f)
2145 }
2146
2147 /// Fetches the value, and applies a function to it that returns a new value.
2148 /// The new value is stored and the old value is returned.
2149 ///
2150 /// See also: [`try_update`](`AtomicPtr::try_update`).
2151 ///
2152 /// Note: This may call the function multiple times if the value has been changed from other threads in
2153 /// the meantime, but the function will have been applied only once to the stored value.
2154 ///
2155 /// `update` takes two [`Ordering`] arguments to describe the memory
2156 /// ordering of this operation. The first describes the required ordering for
2157 /// when the operation finally succeeds while the second describes the
2158 /// required ordering for loads. These correspond to the success and failure
2159 /// orderings of [`AtomicPtr::compare_exchange`] respectively.
2160 ///
2161 /// Using [`Acquire`] as success ordering makes the store part
2162 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
2163 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
2164 ///
2165 /// **Note:** This method is only available on platforms that support atomic
2166 /// operations on pointers.
2167 ///
2168 /// # Considerations
2169 ///
2170 /// This method is not magic; it is not provided by the hardware, and does not act like a
2171 /// critical section or mutex.
2172 ///
2173 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
2174 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem],
2175 /// which is a particularly common pitfall for pointers!
2176 ///
2177 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
2178 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
2179 ///
2180 /// # Examples
2181 ///
2182 /// ```rust
2183 /// #![feature(atomic_try_update)]
2184 ///
2185 /// use std::sync::atomic::{AtomicPtr, Ordering};
2186 ///
2187 /// let ptr: *mut _ = &mut 5;
2188 /// let some_ptr = AtomicPtr::new(ptr);
2189 ///
2190 /// let new: *mut _ = &mut 10;
2191 /// let result = some_ptr.update(Ordering::SeqCst, Ordering::SeqCst, |_| new);
2192 /// assert_eq!(result, ptr);
2193 /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
2194 /// ```
2195 #[inline]
2196 #[unstable(feature = "atomic_try_update", issue = "135894")]
2197 #[cfg(target_has_atomic = "8")]
2198 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2199 pub fn update(
2200 &self,
2201 set_order: Ordering,
2202 fetch_order: Ordering,
2203 mut f: impl FnMut(*mut T) -> *mut T,
2204 ) -> *mut T {
2205 let mut prev = self.load(fetch_order);
2206 loop {
2207 match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
2208 Ok(x) => break x,
2209 Err(next_prev) => prev = next_prev,
2210 }
2211 }
2212 }
2213
2214 /// Offsets the pointer's address by adding `val` (in units of `T`),
2215 /// returning the previous pointer.
2216 ///
2217 /// This is equivalent to using [`wrapping_add`] to atomically perform the
2218 /// equivalent of `ptr = ptr.wrapping_add(val);`.
2219 ///
2220 /// This method operates in units of `T`, which means that it cannot be used
2221 /// to offset the pointer by an amount which is not a multiple of
2222 /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
2223 /// work with a deliberately misaligned pointer. In such cases, you may use
2224 /// the [`fetch_byte_add`](Self::fetch_byte_add) method instead.
2225 ///
2226 /// `fetch_ptr_add` takes an [`Ordering`] argument which describes the
2227 /// memory ordering of this operation. All ordering modes are possible. Note
2228 /// that using [`Acquire`] makes the store part of this operation
2229 /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
2230 ///
2231 /// **Note**: This method is only available on platforms that support atomic
2232 /// operations on [`AtomicPtr`].
2233 ///
2234 /// [`wrapping_add`]: pointer::wrapping_add
2235 ///
2236 /// # Examples
2237 ///
2238 /// ```
2239 /// use core::sync::atomic::{AtomicPtr, Ordering};
2240 ///
2241 /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
2242 /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0);
2243 /// // Note: units of `size_of::<i64>()`.
2244 /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
2245 /// ```
2246 #[inline]
2247 #[cfg(target_has_atomic = "ptr")]
2248 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2249 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2250 pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
2251 self.fetch_byte_add(val.wrapping_mul(size_of::<T>()), order)
2252 }
2253
2254 /// Offsets the pointer's address by subtracting `val` (in units of `T`),
2255 /// returning the previous pointer.
2256 ///
2257 /// This is equivalent to using [`wrapping_sub`] to atomically perform the
2258 /// equivalent of `ptr = ptr.wrapping_sub(val);`.
2259 ///
2260 /// This method operates in units of `T`, which means that it cannot be used
2261 /// to offset the pointer by an amount which is not a multiple of
2262 /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
2263 /// work with a deliberately misaligned pointer. In such cases, you may use
2264 /// the [`fetch_byte_sub`](Self::fetch_byte_sub) method instead.
2265 ///
2266 /// `fetch_ptr_sub` takes an [`Ordering`] argument which describes the memory
2267 /// ordering of this operation. All ordering modes are possible. Note that
2268 /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
2269 /// and using [`Release`] makes the load part [`Relaxed`].
2270 ///
2271 /// **Note**: This method is only available on platforms that support atomic
2272 /// operations on [`AtomicPtr`].
2273 ///
2274 /// [`wrapping_sub`]: pointer::wrapping_sub
2275 ///
2276 /// # Examples
2277 ///
2278 /// ```
2279 /// use core::sync::atomic::{AtomicPtr, Ordering};
2280 ///
2281 /// let array = [1i32, 2i32];
2282 /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
2283 ///
2284 /// assert!(core::ptr::eq(
2285 /// atom.fetch_ptr_sub(1, Ordering::Relaxed),
2286 /// &array[1],
2287 /// ));
2288 /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
2289 /// ```
2290 #[inline]
2291 #[cfg(target_has_atomic = "ptr")]
2292 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2293 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2294 pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
2295 self.fetch_byte_sub(val.wrapping_mul(size_of::<T>()), order)
2296 }
2297
2298 /// Offsets the pointer's address by adding `val` *bytes*, returning the
2299 /// previous pointer.
2300 ///
2301 /// This is equivalent to using [`wrapping_byte_add`] to atomically
2302 /// perform `ptr = ptr.wrapping_byte_add(val)`.
2303 ///
2304 /// `fetch_byte_add` takes an [`Ordering`] argument which describes the
2305 /// memory ordering of this operation. All ordering modes are possible. Note
2306 /// that using [`Acquire`] makes the store part of this operation
2307 /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
2308 ///
2309 /// **Note**: This method is only available on platforms that support atomic
2310 /// operations on [`AtomicPtr`].
2311 ///
2312 /// [`wrapping_byte_add`]: pointer::wrapping_byte_add
2313 ///
2314 /// # Examples
2315 ///
2316 /// ```
2317 /// use core::sync::atomic::{AtomicPtr, Ordering};
2318 ///
2319 /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
2320 /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0);
2321 /// // Note: in units of bytes, not `size_of::<i64>()`.
2322 /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
2323 /// ```
2324 #[inline]
2325 #[cfg(target_has_atomic = "ptr")]
2326 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2327 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2328 pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
2329 // SAFETY: data races are prevented by atomic intrinsics.
2330 unsafe { atomic_add(self.p.get(), val, order).cast() }
2331 }
2332
2333 /// Offsets the pointer's address by subtracting `val` *bytes*, returning the
2334 /// previous pointer.
2335 ///
2336 /// This is equivalent to using [`wrapping_byte_sub`] to atomically
2337 /// perform `ptr = ptr.wrapping_byte_sub(val)`.
2338 ///
2339 /// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
2340 /// memory ordering of this operation. All ordering modes are possible. Note
2341 /// that using [`Acquire`] makes the store part of this operation
2342 /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
2343 ///
2344 /// **Note**: This method is only available on platforms that support atomic
2345 /// operations on [`AtomicPtr`].
2346 ///
2347 /// [`wrapping_byte_sub`]: pointer::wrapping_byte_sub
2348 ///
2349 /// # Examples
2350 ///
2351 /// ```
2352 /// use core::sync::atomic::{AtomicPtr, Ordering};
2353 ///
2354 /// let mut arr = [0i64, 1];
2355 /// let atom = AtomicPtr::<i64>::new(&raw mut arr[1]);
2356 /// assert_eq!(atom.fetch_byte_sub(8, Ordering::Relaxed).addr(), (&raw const arr[1]).addr());
2357 /// assert_eq!(atom.load(Ordering::Relaxed).addr(), (&raw const arr[0]).addr());
2358 /// ```
2359 #[inline]
2360 #[cfg(target_has_atomic = "ptr")]
2361 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2362 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2363 pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
2364 // SAFETY: data races are prevented by atomic intrinsics.
2365 unsafe { atomic_sub(self.p.get(), val, order).cast() }
2366 }
2367
2368 /// Performs a bitwise "or" operation on the address of the current pointer,
2369 /// and the argument `val`, and stores a pointer with provenance of the
2370 /// current pointer and the resulting address.
2371 ///
2372 /// This is equivalent to using [`map_addr`] to atomically perform
2373 /// `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged
2374 /// pointer schemes to atomically set tag bits.
2375 ///
2376 /// **Caveat**: This operation returns the previous value. To compute the
2377 /// stored value without losing provenance, you may use [`map_addr`]. For
2378 /// example: `a.fetch_or(val).map_addr(|a| a | val)`.
2379 ///
2380 /// `fetch_or` takes an [`Ordering`] argument which describes the memory
2381 /// ordering of this operation. All ordering modes are possible. Note that
2382 /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
2383 /// and using [`Release`] makes the load part [`Relaxed`].
2384 ///
2385 /// **Note**: This method is only available on platforms that support atomic
2386 /// operations on [`AtomicPtr`].
2387 ///
2388 /// For more on the semantics of pointer addresses and provenance, see
2389 /// the [module documentation for `ptr`][crate::ptr].
2391 ///
2392 /// [`map_addr`]: pointer::map_addr
2393 ///
2394 /// # Examples
2395 ///
2396 /// ```
2397 /// use core::sync::atomic::{AtomicPtr, Ordering};
2398 ///
2399 /// let pointer = &mut 3i64 as *mut i64;
2400 ///
2401 /// let atom = AtomicPtr::<i64>::new(pointer);
2402 /// // Tag the bottom bit of the pointer.
2403 /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 0);
2404 /// // Extract and untag.
2405 /// let tagged = atom.load(Ordering::Relaxed);
2406 /// assert_eq!(tagged.addr() & 1, 1);
2407 /// assert_eq!(tagged.map_addr(|p| p & !1), pointer);
2408 /// ```
2409 #[inline]
2410 #[cfg(target_has_atomic = "ptr")]
2411 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2412 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2413 pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
2414 // SAFETY: data races are prevented by atomic intrinsics.
2415 unsafe { atomic_or(self.p.get(), val, order).cast() }
2416 }
2417
2418 /// Performs a bitwise "and" operation on the address of the current
2419 /// pointer, and the argument `val`, and stores a pointer with provenance of
2420 /// the current pointer and the resulting address.
2421 ///
2422 /// This is equivalent to using [`map_addr`] to atomically perform
2423 /// `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged
2424 /// pointer schemes to atomically unset tag bits.
2425 ///
2426 /// **Caveat**: This operation returns the previous value. To compute the
2427 /// stored value without losing provenance, you may use [`map_addr`]. For
2428 /// example: `a.fetch_and(val).map_addr(|a| a & val)`.
2429 ///
2430 /// `fetch_and` takes an [`Ordering`] argument which describes the memory
2431 /// ordering of this operation. All ordering modes are possible. Note that
2432 /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
2433 /// and using [`Release`] makes the load part [`Relaxed`].
2434 ///
2435 /// **Note**: This method is only available on platforms that support atomic
2436 /// operations on [`AtomicPtr`].
2437 ///
2438 /// For more on the semantics of pointer addresses and provenance, see
2439 /// the [module documentation for `ptr`][crate::ptr].
2441 ///
2442 /// [`map_addr`]: pointer::map_addr
2443 ///
2444 /// # Examples
2445 ///
2446 /// ```
2447 /// use core::sync::atomic::{AtomicPtr, Ordering};
2448 ///
2449 /// let pointer = &mut 3i64 as *mut i64;
2450 /// // A tagged pointer
2451 /// let atom = AtomicPtr::<i64>::new(pointer.map_addr(|a| a | 1));
2452 /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 1);
2453 /// // Untag, and extract the previously tagged pointer.
2454 /// let untagged = atom.fetch_and(!1, Ordering::Relaxed)
2455 /// .map_addr(|a| a & !1);
2456 /// assert_eq!(untagged, pointer);
2457 /// ```
2458 #[inline]
2459 #[cfg(target_has_atomic = "ptr")]
2460 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2461 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2462 pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
2463 // SAFETY: data races are prevented by atomic intrinsics.
2464 unsafe { atomic_and(self.p.get(), val, order).cast() }
2465 }
2466
2467 /// Performs a bitwise "xor" operation on the address of the current
2468 /// pointer, and the argument `val`, and stores a pointer with provenance of
2469 /// the current pointer and the resulting address.
2470 ///
2471 /// This is equivalent to using [`map_addr`] to atomically perform
2472 /// `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged
2473 /// pointer schemes to atomically toggle tag bits.
2474 ///
2475 /// **Caveat**: This operation returns the previous value. To compute the
2476 /// stored value without losing provenance, you may use [`map_addr`]. For
2477 /// example: `a.fetch_xor(val).map_addr(|a| a ^ val)`.
2478 ///
2479 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory
2480 /// ordering of this operation. All ordering modes are possible. Note that
2481 /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
2482 /// and using [`Release`] makes the load part [`Relaxed`].
2483 ///
2484 /// **Note**: This method is only available on platforms that support atomic
2485 /// operations on [`AtomicPtr`].
2486 ///
2487 /// For more on the semantics of pointer addresses and provenance, see
2488 /// the [module documentation for `ptr`][crate::ptr].
2490 ///
2491 /// [`map_addr`]: pointer::map_addr
2492 ///
2493 /// # Examples
2494 ///
2495 /// ```
2496 /// use core::sync::atomic::{AtomicPtr, Ordering};
2497 ///
2498 /// let pointer = &mut 3i64 as *mut i64;
2499 /// let atom = AtomicPtr::<i64>::new(pointer);
2500 ///
2501 /// // Toggle a tag bit on the pointer.
2502 /// atom.fetch_xor(1, Ordering::Relaxed);
2503 /// assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1);
2504 /// ```
2505 #[inline]
2506 #[cfg(target_has_atomic = "ptr")]
2507 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2508 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2509 pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
2510 // SAFETY: data races are prevented by atomic intrinsics.
2511 unsafe { atomic_xor(self.p.get(), val, order).cast() }
2512 }
2513
2514 /// Returns a mutable pointer to the underlying pointer.
2515 ///
2516 /// Doing non-atomic reads and writes on the resulting pointer can be a data race.
2517 /// This method is mostly useful for FFI, where the function signature may use
2518 /// `*mut *mut T` instead of `&AtomicPtr<T>`.
2519 ///
2520 /// Returning a `*mut` pointer from a shared reference to this atomic is safe because the
2521 /// atomic types work with interior mutability. All modifications of an atomic change the value
2522 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
2523 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the
2524 /// requirements of the [memory model].
2525 ///
2526 /// # Examples
2527 ///
2528 /// ```ignore (extern-declaration)
2529 /// use std::sync::atomic::AtomicPtr;
2530 ///
2531 /// extern "C" {
2532 /// fn my_atomic_op(arg: *mut *mut u32);
2533 /// }
2534 ///
2535 /// let mut value = 17;
2536 /// let atomic = AtomicPtr::new(&mut value);
2537 ///
2538 /// // SAFETY: Safe as long as `my_atomic_op` is atomic.
2539 /// unsafe {
2540 /// my_atomic_op(atomic.as_ptr());
2541 /// }
2542 /// ```
2543 ///
2544 /// [memory model]: self#memory-model-for-atomic-accesses
2545 #[inline]
2546 #[stable(feature = "atomic_as_ptr", since = "1.70.0")]
2547 #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
2548 #[rustc_never_returns_null_ptr]
2549 pub const fn as_ptr(&self) -> *mut *mut T {
2550 self.p.get()
2551 }
2552}
2553
2554#[cfg(target_has_atomic_load_store = "8")]
2555#[stable(feature = "atomic_bool_from", since = "1.24.0")]
2556#[rustc_const_unstable(feature = "const_convert", issue = "143773")]
2557#[cfg(not(feature = "ferrocene_certified"))]
2558impl const From<bool> for AtomicBool {
2559 /// Converts a `bool` into an `AtomicBool`.
2560 ///
2561 /// # Examples
2562 ///
2563 /// ```
2564 /// use std::sync::atomic::AtomicBool;
2565 /// let atomic_bool = AtomicBool::from(true);
2566 /// assert_eq!(format!("{atomic_bool:?}"), "true")
2567 /// ```
2568 #[inline]
2569 fn from(b: bool) -> Self {
2570 Self::new(b)
2571 }
2572}
2573
2574#[cfg(target_has_atomic_load_store = "ptr")]
2575#[stable(feature = "atomic_from", since = "1.23.0")]
2576#[rustc_const_unstable(feature = "const_convert", issue = "143773")]
2577#[cfg(not(feature = "ferrocene_certified"))]
2578impl<T> const From<*mut T> for AtomicPtr<T> {
2579 /// Converts a `*mut T` into an `AtomicPtr<T>`.
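    ///
    /// # Examples
    ///
    /// A minimal sketch of the conversion:
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let mut value = 5;
    /// let atomic_ptr = AtomicPtr::from(&mut value as *mut i32);
    /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
    /// ```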
2580 #[inline]
2581 fn from(p: *mut T) -> Self {
2582 Self::new(p)
2583 }
2584}
2585
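// Expands to the `yes` tokens for the 8-bit integer types (`u8`/`i8`) and to
// the `no` tokens for every other type; used to vary the generated docs.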
2586#[allow(unused_macros)] // This macro ends up being unused on some architectures.
2587macro_rules! if_8_bit {
2588 (u8, $( yes = [$($yes:tt)*], )? $( no = [$($no:tt)*], )? ) => { concat!("", $($($yes)*)?) };
2589 (i8, $( yes = [$($yes:tt)*], )? $( no = [$($no:tt)*], )? ) => { concat!("", $($($yes)*)?) };
2590 ($_:ident, $( yes = [$($yes:tt)*], )? $( no = [$($no:tt)*], )? ) => { concat!("", $($($no)*)?) };
2591}
2592
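// Generates an atomic integer type (e.g. `AtomicUsize`) together with its
// trait impls and methods; the meta arguments thread the stability, cfg, and
// doc attributes through to each generated type.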
2593#[cfg(target_has_atomic_load_store)]
2594macro_rules! atomic_int {
2595 ($cfg_cas:meta,
2596 $cfg_align:meta,
2597 $stable:meta,
2598 $stable_cxchg:meta,
2599 $stable_debug:meta,
2600 $stable_access:meta,
2601 $stable_from:meta,
2602 $stable_nand:meta,
2603 $const_stable_new:meta,
2604 $const_stable_into_inner:meta,
2605 $diagnostic_item:meta,
2606 $s_int_type:literal,
2607 $extra_feature:expr,
2608 $min_fn:ident, $max_fn:ident,
2609 $align:expr,
2610 $int_type:ident $atomic_type:ident) => {
2611 /// An integer type which can be safely shared between threads.
2612 ///
2613 /// This type has the same
2614 #[doc = if_8_bit!(
2615 $int_type,
2616 yes = ["size, alignment, and bit validity"],
2617 no = ["size and bit validity"],
2618 )]
2619 /// as the underlying integer type, [`
2620 #[doc = $s_int_type]
2621 /// `].
2622 #[doc = if_8_bit! {
2623 $int_type,
2624 no = [
2625 "However, the alignment of this type is always equal to its ",
2626 "size, even on targets where [`", $s_int_type, "`] has a ",
2627 "lesser alignment."
2628 ],
2629 }]
2630 ///
2631 /// For more about the differences between atomic types and
2632 /// non-atomic types as well as information about the portability of
2633 /// this type, please see the [module-level documentation].
2634 ///
2635 /// **Note:** This type is only available on platforms that support
2636 /// atomic loads and stores of [`
2637 #[doc = $s_int_type]
2638 /// `].
2639 ///
2640 /// [module-level documentation]: crate::sync::atomic
2641 #[$stable]
2642 #[$diagnostic_item]
2643 #[repr(C, align($align))]
2644 pub struct $atomic_type {
2645 v: UnsafeCell<$int_type>,
2646 }
2647
2648 #[$stable]
2649 impl Default for $atomic_type {
2650 #[inline]
2651 fn default() -> Self {
2652 Self::new(Default::default())
2653 }
2654 }
2655
2656 #[$stable_from]
2657 #[rustc_const_unstable(feature = "const_convert", issue = "143773")]
2658 impl const From<$int_type> for $atomic_type {
2659 #[doc = concat!("Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`.")]
2660 #[inline]
2661 fn from(v: $int_type) -> Self { Self::new(v) }
2662 }
2663
2664 #[$stable_debug]
2665 #[cfg(not(feature = "ferrocene_certified"))]
2666 impl fmt::Debug for $atomic_type {
2667 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2668 fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
2669 }
2670 }
2671
2672 // Send is implicitly implemented.
2673 #[$stable]
2674 unsafe impl Sync for $atomic_type {}
2675
2676 impl $atomic_type {
2677 /// Creates a new atomic integer.
2678 ///
2679 /// # Examples
2680 ///
2681 /// ```
2682 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
2683 ///
2684 #[doc = concat!("let atomic_forty_two = ", stringify!($atomic_type), "::new(42);")]
2685 /// ```
2686 #[inline]
2687 #[$stable]
2688 #[$const_stable_new]
2689 #[must_use]
2690 pub const fn new(v: $int_type) -> Self {
2691 Self { v: UnsafeCell::new(v) }
2692 }
2693
2694 /// Creates a new reference to an atomic integer from a pointer.
2695 ///
2696 /// # Examples
2697 ///
2698 /// ```
2699 #[doc = concat!($extra_feature, "use std::sync::atomic::{self, ", stringify!($atomic_type), "};")]
2700 ///
2701 /// // Get a pointer to an allocated value
2702 #[doc = concat!("let ptr: *mut ", stringify!($int_type), " = Box::into_raw(Box::new(0));")]
2703 ///
2704 #[doc = concat!("assert!(ptr.cast::<", stringify!($atomic_type), ">().is_aligned());")]
2705 ///
2706 /// {
2707 /// // Create an atomic view of the allocated value
2708 // SAFETY: this is a doc comment, tidy, it can't hurt you (also guaranteed by the construction of `ptr` and the assert above)
2709 #[doc = concat!(" let atomic = unsafe {", stringify!($atomic_type), "::from_ptr(ptr) };")]
2710 ///
2711 /// // Use `atomic` for atomic operations, possibly share it with other threads
2712 /// atomic.store(1, atomic::Ordering::Relaxed);
2713 /// }
2714 ///
2715 /// // It's ok to non-atomically access the value behind `ptr`,
2716 /// // since the reference to the atomic ended its lifetime in the block above
2717 /// assert_eq!(unsafe { *ptr }, 1);
2718 ///
2719 /// // Deallocate the value
2720 /// unsafe { drop(Box::from_raw(ptr)) }
2721 /// ```
2722 ///
2723 /// # Safety
2724 ///
2725 /// * `ptr` must be aligned to
2726 #[doc = concat!(" `align_of::<", stringify!($atomic_type), ">()`")]
2727 #[doc = if_8_bit!{
2728 $int_type,
2729 yes = [
2730 " (note that this is always true, since `align_of::<",
2731 stringify!($atomic_type), ">() == 1`)."
2732 ],
2733 no = [
2734 " (note that on some platforms this can be bigger than `align_of::<",
2735 stringify!($int_type), ">()`)."
2736 ],
2737 }]
2738 /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`.
2739 /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not
2740 /// allowed to mix conflicting atomic and non-atomic accesses, or atomic accesses of different
2741 /// sizes, without synchronization.
2742 ///
2743 /// [valid]: crate::ptr#safety
2744 /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses
2745 #[inline]
2746 #[stable(feature = "atomic_from_ptr", since = "1.75.0")]
2747 #[rustc_const_stable(feature = "const_atomic_from_ptr", since = "1.84.0")]
2748 pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type {
2749 // SAFETY: guaranteed by the caller
2750 unsafe { &*ptr.cast() }
2751 }
2752
2754 /// Returns a mutable reference to the underlying integer.
2755 ///
2756 /// This is safe because the mutable reference guarantees that no other threads are
2757 /// concurrently accessing the atomic data.
2758 ///
2759 /// # Examples
2760 ///
2761 /// ```
2762 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2763 ///
2764 #[doc = concat!("let mut some_var = ", stringify!($atomic_type), "::new(10);")]
2765 /// assert_eq!(*some_var.get_mut(), 10);
2766 /// *some_var.get_mut() = 5;
2767 /// assert_eq!(some_var.load(Ordering::SeqCst), 5);
2768 /// ```
2769 #[inline]
2770 #[$stable_access]
2771 pub fn get_mut(&mut self) -> &mut $int_type {
2772 self.v.get_mut()
2773 }
2774
2775 #[doc = concat!("Get atomic access to a `&mut ", stringify!($int_type), "`.")]
2776 ///
2777 #[doc = if_8_bit! {
2778 $int_type,
2779 no = [
2780 "**Note:** This function is only available on targets where `",
2781 stringify!($atomic_type), "` has the same alignment as `", stringify!($int_type), "`."
2782 ],
2783 }]
2784 ///
2785 /// # Examples
2786 ///
2787 /// ```
2788 /// #![feature(atomic_from_mut)]
2789 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2790 ///
2791 /// let mut some_int = 123;
2792 #[doc = concat!("let a = ", stringify!($atomic_type), "::from_mut(&mut some_int);")]
2793 /// a.store(100, Ordering::Relaxed);
2794 /// assert_eq!(some_int, 100);
2795 /// ```
2797 #[inline]
2798 #[$cfg_align]
2799 #[unstable(feature = "atomic_from_mut", issue = "76314")]
2800 pub fn from_mut(v: &mut $int_type) -> &mut Self {
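// Compile-time alignment check: the empty-array pattern only matches a
// zero-length array, so this compiles only when `align_of::<Self>()`
// equals `align_of::<$int_type>()`.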
2801 let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
2802 // SAFETY:
2803 // - the mutable reference guarantees unique ownership.
2804 // - the alignment of `$int_type` and `Self` is the
2805 // same, as promised by $cfg_align and verified above.
2806 unsafe { &mut *(v as *mut $int_type as *mut Self) }
2807 }
2808
2809 #[doc = concat!("Get non-atomic access to a `&mut [", stringify!($atomic_type), "]` slice")]
2810 ///
2811 /// This is safe because the mutable reference guarantees that no other threads are
2812 /// concurrently accessing the atomic data.
2813 ///
2814 /// # Examples
2815 ///
2816 /// ```ignore-wasm
2817 /// #![feature(atomic_from_mut)]
2818 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2819 ///
2820 #[doc = concat!("let mut some_ints = [const { ", stringify!($atomic_type), "::new(0) }; 10];")]
2821 ///
2822 #[doc = concat!("let view: &mut [", stringify!($int_type), "] = ", stringify!($atomic_type), "::get_mut_slice(&mut some_ints);")]
2823 /// assert_eq!(view, [0; 10]);
2824 /// view
2825 /// .iter_mut()
2826 /// .enumerate()
2827 /// .for_each(|(idx, int)| *int = idx as _);
2828 ///
2829 /// std::thread::scope(|s| {
2830 /// some_ints
2831 /// .iter()
2832 /// .enumerate()
2833 /// .for_each(|(idx, int)| {
2834 /// s.spawn(move || assert_eq!(int.load(Ordering::Relaxed), idx as _));
2835 /// })
2836 /// });
2837 /// ```
2838 #[inline]
2839 #[unstable(feature = "atomic_from_mut", issue = "76314")]
2840 pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
2841 // SAFETY: the mutable reference guarantees unique ownership.
2842 unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
2843 }
2844
2845 #[doc = concat!("Get atomic access to a `&mut [", stringify!($int_type), "]` slice.")]
2846 ///
2847 /// # Examples
2848 ///
2849 /// ```ignore-wasm
2850 /// #![feature(atomic_from_mut)]
2851 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2852 ///
2853 /// let mut some_ints = [0; 10];
2854 #[doc = concat!("let a = &*", stringify!($atomic_type), "::from_mut_slice(&mut some_ints);")]
2855 /// std::thread::scope(|s| {
2856 /// for i in 0..a.len() {
2857 /// s.spawn(move || a[i].store(i as _, Ordering::Relaxed));
2858 /// }
2859 /// });
2860 /// for (i, n) in some_ints.into_iter().enumerate() {
2861 /// assert_eq!(i, n as usize);
2862 /// }
2863 /// ```
2864 #[inline]
2865 #[$cfg_align]
2866 #[unstable(feature = "atomic_from_mut", issue = "76314")]
2867 pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
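// Same compile-time alignment check as in `from_mut` above.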
2868 let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
2869 // SAFETY:
2870 // - the mutable reference guarantees unique ownership.
2871 // - the alignment of `$int_type` and `Self` is the
2872 // same, as promised by $cfg_align and verified above.
2873 unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) }
2874 }
2875
2876 /// Consumes the atomic and returns the contained value.
2877 ///
2878 /// This is safe because passing `self` by value guarantees that no other threads are
2879 /// concurrently accessing the atomic data.
2880 ///
2881 /// # Examples
2882 ///
2883 /// ```
2884 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
2885 ///
2886 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
2887 /// assert_eq!(some_var.into_inner(), 5);
2888 /// ```
2889 #[inline]
2890 #[$stable_access]
2891 #[$const_stable_into_inner]
2892 pub const fn into_inner(self) -> $int_type {
2893 self.v.into_inner()
2894 }
2895
2896 /// Loads a value from the atomic integer.
2897 ///
2898 /// `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
2899 /// Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
2900 ///
2901 /// # Panics
2902 ///
2903 /// Panics if `order` is [`Release`] or [`AcqRel`].
2904 ///
2905 /// # Examples
2906 ///
2907 /// ```
2908 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2909 ///
2910 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
2911 ///
2912 /// assert_eq!(some_var.load(Ordering::Relaxed), 5);
2913 /// ```
2914 #[inline]
2915 #[$stable]
2916 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2917 pub fn load(&self, order: Ordering) -> $int_type {
2918 // SAFETY: data races are prevented by atomic intrinsics.
2919 unsafe { atomic_load(self.v.get(), order) }
2920 }
2921
2922 /// Stores a value into the atomic integer.
2923 ///
2924 /// `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
2925 /// Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
2926 ///
2927 /// # Panics
2928 ///
2929 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
2930 ///
2931 /// # Examples
2932 ///
2933 /// ```
2934 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2935 ///
2936 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
2937 ///
2938 /// some_var.store(10, Ordering::Relaxed);
2939 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
2940 /// ```
2941 #[inline]
2942 #[$stable]
2943 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2944 pub fn store(&self, val: $int_type, order: Ordering) {
2945 // SAFETY: data races are prevented by atomic intrinsics.
2946 unsafe { atomic_store(self.v.get(), val, order); }
2947 }
2948
2949 /// Stores a value into the atomic integer, returning the previous value.
2950 ///
2951 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
2952 /// of this operation. All ordering modes are possible. Note that using
2953 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
2954 /// using [`Release`] makes the load part [`Relaxed`].
2955 ///
2956 /// **Note**: This method is only available on platforms that support atomic operations on
2957 #[doc = concat!("[`", $s_int_type, "`].")]
2958 ///
2959 /// # Examples
2960 ///
2961 /// ```
2962 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2963 ///
2964 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
2965 ///
2966 /// assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
2967 /// ```
2968 #[inline]
2969 #[$stable]
2970 #[$cfg_cas]
2971 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2972 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
2973 // SAFETY: data races are prevented by atomic intrinsics.
2974 unsafe { atomic_swap(self.v.get(), val, order) }
2975 }
2976
2977 /// Stores a value into the atomic integer if the current value is the same as
2978 /// the `current` value.
2979 ///
2980 /// The return value is always the previous value. If it is equal to `current`, then the
2981 /// value was updated.
2982 ///
2983 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
2984 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
2985 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
2986 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
2987 /// happens, and using [`Release`] makes the load part [`Relaxed`].
2988 ///
2989 /// **Note**: This method is only available on platforms that support atomic operations on
2990 #[doc = concat!("[`", $s_int_type, "`].")]
2991 ///
2992 /// # Migrating to `compare_exchange` and `compare_exchange_weak`
2993 ///
2994 /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
2995 /// memory orderings:
2996 ///
2997 /// Original | Success | Failure
2998 /// -------- | ------- | -------
2999 /// Relaxed | Relaxed | Relaxed
3000 /// Acquire | Acquire | Acquire
3001 /// Release | Release | Relaxed
3002 /// AcqRel | AcqRel | Acquire
3003 /// SeqCst | SeqCst | SeqCst
3004 ///
3005 /// `compare_and_swap` and `compare_exchange` also differ in their return type. You can use
3006 /// `compare_exchange(...).unwrap_or_else(|x| x)` to recover the behavior of `compare_and_swap`,
3007 /// but in most cases it is more idiomatic to check whether the return value is `Ok` or `Err`
3008 /// rather than to infer success vs failure based on the value that was read.
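///
/// For example, a `compare_and_swap` call using [`AcqRel`] ordering could be
/// migrated as in the following sketch:
///
/// ```
#[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
///
#[doc = concat!("let a = ", stringify!($atomic_type), "::new(5);")]
/// // Previously: `let prev = a.compare_and_swap(5, 10, Ordering::AcqRel);`
/// let prev = a.compare_exchange(5, 10, Ordering::AcqRel, Ordering::Acquire)
///     .unwrap_or_else(|x| x);
/// assert_eq!(prev, 5);
/// ```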
3009 ///
3010 /// During migration, consider whether it makes sense to use `compare_exchange_weak` instead.
3011 /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
3012 /// which allows the compiler to generate better assembly code when the compare and swap
3013 /// is used in a loop.
3014 ///
3015 /// # Examples
3016 ///
3017 /// ```
3018 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3019 ///
3020 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
3021 ///
3022 /// assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
3023 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
3024 ///
3025 /// assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
3026 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
3027 /// ```
3028 #[cfg(not(feature = "ferrocene_certified"))]
3029 #[inline]
3030 #[$stable]
3031 #[deprecated(
3032 since = "1.50.0",
3033 note = "Use `compare_exchange` or `compare_exchange_weak` instead")
3034 ]
3035 #[$cfg_cas]
3036 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3037 pub fn compare_and_swap(&self,
3038 current: $int_type,
3039 new: $int_type,
3040 order: Ordering) -> $int_type {
3041 match self.compare_exchange(current,
3042 new,
3043 order,
3044 strongest_failure_ordering(order)) {
3045 Ok(x) => x,
3046 Err(x) => x,
3047 }
3048 }
3049
3050 /// Stores a value into the atomic integer if the current value is the same as
3051 /// the `current` value.
3052 ///
3053 /// The return value is a result indicating whether the new value was written and
3054 /// containing the previous value. On success this value is guaranteed to be equal to
3055 /// `current`.
3056 ///
3057 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
3058 /// ordering of this operation. `success` describes the required ordering for the
3059 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
3060 /// `failure` describes the required ordering for the load operation that takes place when
3061 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
3062 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
3063 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
3064 ///
3065 /// **Note**: This method is only available on platforms that support atomic operations on
3066 #[doc = concat!("[`", $s_int_type, "`].")]
3067 ///
3068 /// # Examples
3069 ///
3070 /// ```
3071 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3072 ///
3073 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
3074 ///
3075 /// assert_eq!(some_var.compare_exchange(5, 10,
3076 /// Ordering::Acquire,
3077 /// Ordering::Relaxed),
3078 /// Ok(5));
3079 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
3080 ///
3081 /// assert_eq!(some_var.compare_exchange(6, 12,
3082 /// Ordering::SeqCst,
3083 /// Ordering::Acquire),
3084 /// Err(10));
3085 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
3086 /// ```
3087 ///
3088 /// # Considerations
3089 ///
3090 /// `compare_exchange` is a [compare-and-swap operation] and thus exhibits the usual downsides
3091 /// of CAS operations. In particular, a load of the value followed by a successful
3092 /// `compare_exchange` with the previous load *does not ensure* that other threads have not
3093 /// changed the value in the interim! This is usually important when the *equality* check in
3094 /// the `compare_exchange` is being used to check the *identity* of a value, but equality
3095 /// does not necessarily imply identity. This is a particularly common case for pointers, as
3096 /// a pointer holding the same address does not imply that the same object exists at that
3097 /// address! In this case, `compare_exchange` can lead to the [ABA problem].
3098 ///
3099 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
3100 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
3101 #[inline]
3102 #[$stable_cxchg]
3103 #[$cfg_cas]
3104 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3105 pub fn compare_exchange(&self,
3106 current: $int_type,
3107 new: $int_type,
3108 success: Ordering,
3109 failure: Ordering) -> Result<$int_type, $int_type> {
3110 // SAFETY: data races are prevented by atomic intrinsics.
3111 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
3112 }
3113
3114 /// Stores a value into the atomic integer if the current value is the same as
3115 /// the `current` value.
3116 ///
3117 #[doc = concat!("Unlike [`", stringify!($atomic_type), "::compare_exchange`],")]
3118 /// this function is allowed to spuriously fail even
3119 /// when the comparison succeeds, which can result in more efficient code on some
3120 /// platforms. The return value is a result indicating whether the new value was
3121 /// written and containing the previous value.
3122 ///
3123 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
3124 /// ordering of this operation. `success` describes the required ordering for the
3125 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
3126 /// `failure` describes the required ordering for the load operation that takes place when
3127 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
3128 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
3129 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
3130 ///
3131 /// **Note**: This method is only available on platforms that support atomic operations on
3132 #[doc = concat!("[`", $s_int_type, "`].")]
3133 ///
3134 /// # Examples
3135 ///
3136 /// ```
3137 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3138 ///
3139 #[doc = concat!("let val = ", stringify!($atomic_type), "::new(4);")]
3140 ///
3141 /// let mut old = val.load(Ordering::Relaxed);
3142 /// loop {
3143 /// let new = old * 2;
3144 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
3145 /// Ok(_) => break,
3146 /// Err(x) => old = x,
3147 /// }
3148 /// }
3149 /// ```
3150 ///
3151 /// # Considerations
3152 ///
3153 /// `compare_exchange_weak` is a [compare-and-swap operation] and thus exhibits the usual downsides
3154 /// of CAS operations. In particular, a load of the value followed by a successful
3155 /// `compare_exchange_weak` with the previous load *does not ensure* that other threads have not
3156 /// changed the value in the interim. This is usually important when the *equality* check in
3157 /// the `compare_exchange_weak` is being used to check the *identity* of a value, but equality
3158 /// does not necessarily imply identity. This is a particularly common case for pointers, as
3159 /// a pointer holding the same address does not imply that the same object exists at that
3160 /// address! In this case, `compare_exchange_weak` can lead to the [ABA problem].
3161 ///
3162 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
3163 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
3164 #[inline]
3165 #[$stable_cxchg]
3166 #[$cfg_cas]
3167 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3168 pub fn compare_exchange_weak(&self,
3169 current: $int_type,
3170 new: $int_type,
3171 success: Ordering,
3172 failure: Ordering) -> Result<$int_type, $int_type> {
3173 // SAFETY: data races are prevented by atomic intrinsics.
3174 unsafe {
3175 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
3176 }
3177 }
3178
3179 /// Adds to the current value, returning the previous value.
3180 ///
3181 /// This operation wraps around on overflow.
3182 ///
3183 /// `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
3184 /// of this operation. All ordering modes are possible. Note that using
3185 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3186 /// using [`Release`] makes the load part [`Relaxed`].
3187 ///
3188 /// **Note**: This method is only available on platforms that support atomic operations on
3189 #[doc = concat!("[`", $s_int_type, "`].")]
3190 ///
3191 /// # Examples
3192 ///
3193 /// ```
3194 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3195 ///
3196 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0);")]
3197 /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
3198 /// assert_eq!(foo.load(Ordering::SeqCst), 10);
3199 /// ```
3200 #[inline]
3201 #[$stable]
3202 #[$cfg_cas]
3203 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3204 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
3205 // SAFETY: data races are prevented by atomic intrinsics.
3206 unsafe { atomic_add(self.v.get(), val, order) }
3207 }
3208
3209 /// Subtracts from the current value, returning the previous value.
3210 ///
3211 /// This operation wraps around on overflow.
3212 ///
3213 /// `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
3214 /// of this operation. All ordering modes are possible. Note that using
3215 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3216 /// using [`Release`] makes the load part [`Relaxed`].
3217 ///
3218 /// **Note**: This method is only available on platforms that support atomic operations on
3219 #[doc = concat!("[`", $s_int_type, "`].")]
3220 ///
3221 /// # Examples
3222 ///
3223 /// ```
3224 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3225 ///
3226 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(20);")]
3227 /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
3228 /// assert_eq!(foo.load(Ordering::SeqCst), 10);
3229 /// ```
3230 #[inline]
3231 #[$stable]
3232 #[$cfg_cas]
3233 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3234 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
3235 // SAFETY: data races are prevented by atomic intrinsics.
3236 unsafe { atomic_sub(self.v.get(), val, order) }
3237 }
3238
3239 /// Bitwise "and" with the current value.
3240 ///
3241 /// Performs a bitwise "and" operation on the current value and the argument `val`, and
3242 /// sets the new value to the result.
3243 ///
3244 /// Returns the previous value.
3245 ///
3246 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
3247 /// of this operation. All ordering modes are possible. Note that using
3248 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3249 /// using [`Release`] makes the load part [`Relaxed`].
3250 ///
3251 /// **Note**: This method is only available on platforms that support atomic operations on
3252 #[doc = concat!("[`", $s_int_type, "`].")]
3253 ///
3254 /// # Examples
3255 ///
3256 /// ```
3257 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3258 ///
3259 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
3260 /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
3261 /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
3262 /// ```
3263 #[inline]
3264 #[$stable]
3265 #[$cfg_cas]
3266 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3267 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
3268 // SAFETY: data races are prevented by atomic intrinsics.
3269 unsafe { atomic_and(self.v.get(), val, order) }
3270 }
3271
3272 /// Bitwise "nand" with the current value.
3273 ///
3274 /// Performs a bitwise "nand" operation on the current value and the argument `val`, and
3275 /// sets the new value to the result.
3276 ///
3277 /// Returns the previous value.
3278 ///
3279 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
3280 /// of this operation. All ordering modes are possible. Note that using
3281 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3282 /// using [`Release`] makes the load part [`Relaxed`].
3283 ///
3284 /// **Note**: This method is only available on platforms that support atomic operations on
3285 #[doc = concat!("[`", $s_int_type, "`].")]
3286 ///
3287 /// # Examples
3288 ///
3289 /// ```
3290 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3291 ///
3292 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0x13);")]
3293 /// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
3294 /// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
3295 /// ```
3296 #[inline]
3297 #[$stable_nand]
3298 #[$cfg_cas]
3299 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3300 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
3301 // SAFETY: data races are prevented by atomic intrinsics.
3302 unsafe { atomic_nand(self.v.get(), val, order) }
3303 }
3304
3305 /// Bitwise "or" with the current value.
3306 ///
3307 /// Performs a bitwise "or" operation on the current value and the argument `val`, and
3308 /// sets the new value to the result.
3309 ///
3310 /// Returns the previous value.
3311 ///
3312 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
3313 /// of this operation. All ordering modes are possible. Note that using
3314 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3315 /// using [`Release`] makes the load part [`Relaxed`].
3316 ///
3317 /// **Note**: This method is only available on platforms that support atomic operations on
3318 #[doc = concat!("[`", $s_int_type, "`].")]
3319 ///
3320 /// # Examples
3321 ///
3322 /// ```
3323 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3324 ///
3325 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
3326 /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
3327 /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
3328 /// ```
3329 #[inline]
3330 #[$stable]
3331 #[$cfg_cas]
3332 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3333 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
3334 // SAFETY: data races are prevented by atomic intrinsics.
3335 unsafe { atomic_or(self.v.get(), val, order) }
3336 }
3337
3338 /// Bitwise "xor" with the current value.
3339 ///
3340 /// Performs a bitwise "xor" operation on the current value and the argument `val`, and
3341 /// sets the new value to the result.
3342 ///
3343 /// Returns the previous value.
3344 ///
3345 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
3346 /// of this operation. All ordering modes are possible. Note that using
3347 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3348 /// using [`Release`] makes the load part [`Relaxed`].
3349 ///
3350 /// **Note**: This method is only available on platforms that support atomic operations on
3351 #[doc = concat!("[`", $s_int_type, "`].")]
3352 ///
3353 /// # Examples
3354 ///
3355 /// ```
3356 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3357 ///
3358 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
3359 /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
3360 /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
3361 /// ```
3362 #[inline]
3363 #[$stable]
3364 #[$cfg_cas]
3365 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3366 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
3367 // SAFETY: data races are prevented by atomic intrinsics.
3368 unsafe { atomic_xor(self.v.get(), val, order) }
3369 }
3370
3371 /// Fetches the value, and applies a function to it that returns an optional
3372 /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
3373 /// `Err(previous_value)`.
3374 ///
3375 /// Note: This may call the function multiple times if the value has been changed from other threads in
3376 /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
3377 /// only once to the stored value.
3378 ///
3379 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
3380 /// The first describes the required ordering for when the operation finally succeeds while the second
3381 /// describes the required ordering for loads. These correspond to the success and failure orderings of
3382 #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange`]")]
3383 /// respectively.
3384 ///
3385 /// Using [`Acquire`] as success ordering makes the store part
3386 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
3387 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
3388 ///
3389 /// **Note**: This method is only available on platforms that support atomic operations on
3390 #[doc = concat!("[`", $s_int_type, "`].")]
3391 ///
3392 /// # Considerations
3393 ///
3394 /// This method is not magic; it is not provided by the hardware, and does not act like a
3395 /// critical section or mutex.
3396 ///
3397 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
3398 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem]
3399 /// if this atomic integer is an index or more generally if knowledge of only the *bitwise value*
3400 /// of the atomic is not in and of itself sufficient to ensure any required preconditions.
3401 ///
3402 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
3403 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
3404 ///
3405 /// # Examples
3406 ///
3407 /// ```rust
3408 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3409 ///
3410 #[doc = concat!("let x = ", stringify!($atomic_type), "::new(7);")]
3411 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
3412 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
3413 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
3414 /// assert_eq!(x.load(Ordering::SeqCst), 9);
3415 /// ```
3416 #[inline]
3417 #[stable(feature = "no_more_cas", since = "1.45.0")]
3418 #[$cfg_cas]
3419 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3420 pub fn fetch_update<F>(&self,
3421 set_order: Ordering,
3422 fetch_order: Ordering,
3423 mut f: F) -> Result<$int_type, $int_type>
3424 where F: FnMut($int_type) -> Option<$int_type> {
3425 let mut prev = self.load(fetch_order);
3426 while let Some(next) = f(prev) {
3427 // Ferrocene annotation: Both arms of this match expression are covered, which
3428 // means that the scrutinee expression itself must have been evaluated in either
3429 // case.
3430 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
3431 x @ Ok(_) => return x,
3432 Err(next_prev) => prev = next_prev
3433 }
3434 }
3435 Err(prev)
3436 }
3437
3438 /// Fetches the value, and applies a function to it that returns an optional
3439 /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
3440 /// `Err(previous_value)`.
3441 ///
3442 #[doc = concat!("See also: [`update`](`", stringify!($atomic_type), "::update`).")]
3443 ///
3444 /// Note: This may call the function multiple times if the value has been changed from other threads in
3445 /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
3446 /// only once to the stored value.
3447 ///
3448 /// `try_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
3449 /// The first describes the required ordering for when the operation finally succeeds while the second
3450 /// describes the required ordering for loads. These correspond to the success and failure orderings of
3451 #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange`]")]
3452 /// respectively.
3453 ///
3454 /// Using [`Acquire`] as success ordering makes the store part
3455 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
3456 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
3457 ///
3458 /// **Note**: This method is only available on platforms that support atomic operations on
3459 #[doc = concat!("[`", $s_int_type, "`].")]
3460 ///
3461 /// # Considerations
3462 ///
3463 /// This method is not magic; it is not provided by the hardware, and does not act like a
3464 /// critical section or mutex.
3465 ///
3466 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
3467 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem]
3468 /// if this atomic integer is an index or more generally if knowledge of only the *bitwise value*
3469 /// of the atomic is not in and of itself sufficient to ensure any required preconditions.
3470 ///
3471 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
3472 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
3473 ///
3474 /// # Examples
3475 ///
3476 /// ```rust
3477 /// #![feature(atomic_try_update)]
3478 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3479 ///
3480 #[doc = concat!("let x = ", stringify!($atomic_type), "::new(7);")]
3481 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
3482 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
3483 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
3484 /// assert_eq!(x.load(Ordering::SeqCst), 9);
3485 /// ```
3486 #[inline]
3487 #[unstable(feature = "atomic_try_update", issue = "135894")]
3488 #[$cfg_cas]
3489 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3490 pub fn try_update(
3491 &self,
3492 set_order: Ordering,
3493 fetch_order: Ordering,
3494 f: impl FnMut($int_type) -> Option<$int_type>,
3495 ) -> Result<$int_type, $int_type> {
3496 // FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`;
3497 // when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`.
3498 self.fetch_update(set_order, fetch_order, f)
3499 }
3500
3501 /// Fetches the value, and applies a function to it that returns a new value.
3502 /// The new value is stored and the old value is returned.
3503 ///
3504 #[doc = concat!("See also: [`try_update`](`", stringify!($atomic_type), "::try_update`).")]
3505 ///
3506 /// Note: This may call the function multiple times if the value has been changed from other threads in
3507 /// the meantime, but the function will have been applied only once to the stored value.
3508 ///
3509 /// `update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
3510 /// The first describes the required ordering for when the operation finally succeeds while the second
3511 /// describes the required ordering for loads. These correspond to the success and failure orderings of
3512 #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange`]")]
3513 /// respectively.
3514 ///
3515 /// Using [`Acquire`] as success ordering makes the store part
3516 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
3517 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
3518 ///
3519 /// **Note**: This method is only available on platforms that support atomic operations on
3520 #[doc = concat!("[`", $s_int_type, "`].")]
3521 ///
3522 /// # Considerations
3523 ///
3525 /// This method is not magic; it is not provided by the hardware, and does not act like a
3526 /// critical section or mutex.
3527 ///
3528 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
3529 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem]
3530 /// if this atomic integer is an index or more generally if knowledge of only the *bitwise value*
3531 /// of the atomic is not in and of itself sufficient to ensure any required preconditions.
3532 ///
3533 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
3534 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
3535 ///
3536 /// # Examples
3537 ///
3538 /// ```rust
3539 /// #![feature(atomic_try_update)]
3540 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3541 ///
3542 #[doc = concat!("let x = ", stringify!($atomic_type), "::new(7);")]
3543 /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| x + 1), 7);
3544 /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| x + 1), 8);
3545 /// assert_eq!(x.load(Ordering::SeqCst), 9);
3546 /// ```
3547 #[inline]
3548 #[unstable(feature = "atomic_try_update", issue = "135894")]
3549 #[$cfg_cas]
3550 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3551 pub fn update(
3552 &self,
3553 set_order: Ordering,
3554 fetch_order: Ordering,
3555 mut f: impl FnMut($int_type) -> $int_type,
3556 ) -> $int_type {
3557 let mut prev = self.load(fetch_order);
3558 loop {
3559 match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
3560 Ok(x) => break x,
3561 Err(next_prev) => prev = next_prev,
3562 }
3563 }
3564 }
3565
3566 /// Maximum with the current value.
3567 ///
3568 /// Finds the maximum of the current value and the argument `val`, and
3569 /// sets the new value to the result.
3570 ///
3571 /// Returns the previous value.
3572 ///
3573 /// `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
3574 /// of this operation. All ordering modes are possible. Note that using
3575 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3576 /// using [`Release`] makes the load part [`Relaxed`].
3577 ///
3578 /// **Note**: This method is only available on platforms that support atomic operations on
3579 #[doc = concat!("[`", $s_int_type, "`].")]
3580 ///
3581 /// # Examples
3582 ///
3583 /// ```
3584 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3585 ///
3586 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
3587 /// assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
3588 /// assert_eq!(foo.load(Ordering::SeqCst), 42);
3589 /// ```
3590 ///
3591 /// If you want to obtain the maximum value in one step, you can use the following:
3592 ///
3593 /// ```
3594 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3595 ///
3596 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
3597 /// let bar = 42;
3598 /// let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
3599 /// assert_eq!(max_foo, 42);
3600 /// ```
3601 #[inline]
3602 #[stable(feature = "atomic_min_max", since = "1.45.0")]
3603 #[$cfg_cas]
3604 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3605 pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
3606 // SAFETY: data races are prevented by atomic intrinsics.
3607 unsafe { $max_fn(self.v.get(), val, order) }
3608 }
3609
3610 /// Minimum with the current value.
3611 ///
3612 /// Finds the minimum of the current value and the argument `val`, and
3613 /// sets the new value to the result.
3614 ///
3615 /// Returns the previous value.
3616 ///
3617 /// `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
3618 /// of this operation. All ordering modes are possible. Note that using
3619 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3620 /// using [`Release`] makes the load part [`Relaxed`].
3621 ///
3622 /// **Note**: This method is only available on platforms that support atomic operations on
3623 #[doc = concat!("[`", $s_int_type, "`].")]
3624 ///
3625 /// # Examples
3626 ///
3627 /// ```
3628 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3629 ///
3630 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
3631 /// assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
3632 /// assert_eq!(foo.load(Ordering::Relaxed), 23);
3633 /// assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
3634 /// assert_eq!(foo.load(Ordering::Relaxed), 22);
3635 /// ```
3636 ///
3637 /// If you want to obtain the minimum value in one step, you can use the following:
3638 ///
3639 /// ```
3640 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3641 ///
3642 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
3643 /// let bar = 12;
3644 /// let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
3645 /// assert_eq!(min_foo, 12);
3646 /// ```
3647 #[inline]
3648 #[stable(feature = "atomic_min_max", since = "1.45.0")]
3649 #[$cfg_cas]
3650 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3651 pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
3652 // SAFETY: data races are prevented by atomic intrinsics.
3653 unsafe { $min_fn(self.v.get(), val, order) }
3654 }
3655
3656 /// Returns a mutable pointer to the underlying integer.
3657 ///
3658 /// Doing non-atomic reads and writes on the resulting integer can be a data race.
3659 /// This method is mostly useful for FFI, where the function signature may use
3660 #[doc = concat!("`*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.")]
3661 ///
3662 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
3663 /// atomic types work with interior mutability. All modifications of an atomic change the value
3664 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
3665 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the
3666 /// requirements of the [memory model].
3667 ///
3668 /// # Examples
3669 ///
3670 /// ```ignore (extern-declaration)
3671 /// # fn main() {
3672 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
3673 ///
3674 /// extern "C" {
3675 #[doc = concat!(" fn my_atomic_op(arg: *mut ", stringify!($int_type), ");")]
3676 /// }
3677 ///
3678 #[doc = concat!("let atomic = ", stringify!($atomic_type), "::new(1);")]
3679 ///
3680 /// // SAFETY: Safe as long as `my_atomic_op` is atomic.
3681 /// unsafe {
3682 /// my_atomic_op(atomic.as_ptr());
3683 /// }
3684 /// # }
3685 /// ```
3686 ///
3687 /// [memory model]: self#memory-model-for-atomic-accesses
3688 #[inline]
3689 #[stable(feature = "atomic_as_ptr", since = "1.70.0")]
3690 #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
3691 #[rustc_never_returns_null_ptr]
3692 pub const fn as_ptr(&self) -> *mut $int_type {
3693 self.v.get()
3694 }
3695 }
3696 }
3697}
3698
3699#[cfg(target_has_atomic_load_store = "8")]
3700#[cfg(not(feature = "ferrocene_certified"))]
3701atomic_int! {
3702 cfg(target_has_atomic = "8"),
3703 cfg(target_has_atomic_equal_alignment = "8"),
3704 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3705 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3706 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3707 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3708 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3709 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3710 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3711 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3712 rustc_diagnostic_item = "AtomicI8",
3713 "i8",
3714 "",
3715 atomic_min, atomic_max,
3716 1,
3717 i8 AtomicI8
3718}
3719#[cfg(target_has_atomic_load_store = "8")]
3720#[cfg(not(feature = "ferrocene_certified"))]
3721atomic_int! {
3722 cfg(target_has_atomic = "8"),
3723 cfg(target_has_atomic_equal_alignment = "8"),
3724 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3725 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3726 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3727 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3728 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3729 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3730 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3731 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3732 rustc_diagnostic_item = "AtomicU8",
3733 "u8",
3734 "",
3735 atomic_umin, atomic_umax,
3736 1,
3737 u8 AtomicU8
3738}
3739#[cfg(target_has_atomic_load_store = "16")]
3740#[cfg(not(feature = "ferrocene_certified"))]
3741atomic_int! {
3742 cfg(target_has_atomic = "16"),
3743 cfg(target_has_atomic_equal_alignment = "16"),
3744 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3745 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3746 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3747 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3748 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3749 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3750 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3751 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3752 rustc_diagnostic_item = "AtomicI16",
3753 "i16",
3754 "",
3755 atomic_min, atomic_max,
3756 2,
3757 i16 AtomicI16
3758}
3759#[cfg(target_has_atomic_load_store = "16")]
3760#[cfg(not(feature = "ferrocene_certified"))]
3761atomic_int! {
3762 cfg(target_has_atomic = "16"),
3763 cfg(target_has_atomic_equal_alignment = "16"),
3764 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3765 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3766 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3767 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3768 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3769 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3770 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3771 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3772 rustc_diagnostic_item = "AtomicU16",
3773 "u16",
3774 "",
3775 atomic_umin, atomic_umax,
3776 2,
3777 u16 AtomicU16
3778}
3779#[cfg(target_has_atomic_load_store = "32")]
3780#[cfg(not(feature = "ferrocene_certified"))]
3781atomic_int! {
3782 cfg(target_has_atomic = "32"),
3783 cfg(target_has_atomic_equal_alignment = "32"),
3784 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3785 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3786 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3787 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3788 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3789 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3790 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3791 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3792 rustc_diagnostic_item = "AtomicI32",
3793 "i32",
3794 "",
3795 atomic_min, atomic_max,
3796 4,
3797 i32 AtomicI32
3798}
3799#[cfg(target_has_atomic_load_store = "32")]
3800atomic_int! {
3801 cfg(target_has_atomic = "32"),
3802 cfg(target_has_atomic_equal_alignment = "32"),
3803 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3804 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3805 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3806 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3807 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3808 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3809 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3810 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3811 rustc_diagnostic_item = "AtomicU32",
3812 "u32",
3813 "",
3814 atomic_umin, atomic_umax,
3815 4,
3816 u32 AtomicU32
3817}
3818#[cfg(target_has_atomic_load_store = "64")]
3819#[cfg(not(feature = "ferrocene_certified"))]
3820atomic_int! {
3821 cfg(target_has_atomic = "64"),
3822 cfg(target_has_atomic_equal_alignment = "64"),
3823 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3824 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3825 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3826 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3827 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3828 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3829 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3830 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3831 rustc_diagnostic_item = "AtomicI64",
3832 "i64",
3833 "",
3834 atomic_min, atomic_max,
3835 8,
3836 i64 AtomicI64
3837}
3838#[cfg(target_has_atomic_load_store = "64")]
3839#[cfg(not(feature = "ferrocene_certified"))]
3840atomic_int! {
3841 cfg(target_has_atomic = "64"),
3842 cfg(target_has_atomic_equal_alignment = "64"),
3843 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3844 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3845 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3846 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3847 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3848 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3849 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3850 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3851 rustc_diagnostic_item = "AtomicU64",
3852 "u64",
3853 "",
3854 atomic_umin, atomic_umax,
3855 8,
3856 u64 AtomicU64
3857}
3858#[cfg(target_has_atomic_load_store = "128")]
3859#[cfg(not(feature = "ferrocene_certified"))]
3860atomic_int! {
3861 cfg(target_has_atomic = "128"),
3862 cfg(target_has_atomic_equal_alignment = "128"),
3863 unstable(feature = "integer_atomics", issue = "99069"),
3864 unstable(feature = "integer_atomics", issue = "99069"),
3865 unstable(feature = "integer_atomics", issue = "99069"),
3866 unstable(feature = "integer_atomics", issue = "99069"),
3867 unstable(feature = "integer_atomics", issue = "99069"),
3868 unstable(feature = "integer_atomics", issue = "99069"),
3869 rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
3870 rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
3871 rustc_diagnostic_item = "AtomicI128",
3872 "i128",
3873 "#![feature(integer_atomics)]\n\n",
3874 atomic_min, atomic_max,
3875 16,
3876 i128 AtomicI128
3877}
3878#[cfg(target_has_atomic_load_store = "128")]
3879#[cfg(not(feature = "ferrocene_certified"))]
3880atomic_int! {
3881 cfg(target_has_atomic = "128"),
3882 cfg(target_has_atomic_equal_alignment = "128"),
3883 unstable(feature = "integer_atomics", issue = "99069"),
3884 unstable(feature = "integer_atomics", issue = "99069"),
3885 unstable(feature = "integer_atomics", issue = "99069"),
3886 unstable(feature = "integer_atomics", issue = "99069"),
3887 unstable(feature = "integer_atomics", issue = "99069"),
3888 unstable(feature = "integer_atomics", issue = "99069"),
3889 rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
3890 rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
3891 rustc_diagnostic_item = "AtomicU128",
3892 "u128",
3893 "#![feature(integer_atomics)]\n\n",
3894 atomic_umin, atomic_umax,
3895 16,
3896 u128 AtomicU128
3897}
3898
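// Instantiates `AtomicIsize`/`AtomicUsize` (and their deprecated `*_INIT`
// constants) with the alignment that matches each target pointer width.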
3899#[cfg(target_has_atomic_load_store = "ptr")]
3900#[cfg(not(feature = "ferrocene_certified"))]
3901macro_rules! atomic_int_ptr_sized {
3902 ( $($target_pointer_width:literal $align:literal)* ) => { $(
3903 #[cfg(target_pointer_width = $target_pointer_width)]
3904 atomic_int! {
3905 cfg(target_has_atomic = "ptr"),
3906 cfg(target_has_atomic_equal_alignment = "ptr"),
3907 stable(feature = "rust1", since = "1.0.0"),
3908 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
3909 stable(feature = "atomic_debug", since = "1.3.0"),
3910 stable(feature = "atomic_access", since = "1.15.0"),
3911 stable(feature = "atomic_from", since = "1.23.0"),
3912 stable(feature = "atomic_nand", since = "1.27.0"),
3913 rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
3914 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3915 rustc_diagnostic_item = "AtomicIsize",
3916 "isize",
3917 "",
3918 atomic_min, atomic_max,
3919 $align,
3920 isize AtomicIsize
3921 }
3922 #[cfg(target_pointer_width = $target_pointer_width)]
3923 atomic_int! {
3924 cfg(target_has_atomic = "ptr"),
3925 cfg(target_has_atomic_equal_alignment = "ptr"),
3926 stable(feature = "rust1", since = "1.0.0"),
3927 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
3928 stable(feature = "atomic_debug", since = "1.3.0"),
3929 stable(feature = "atomic_access", since = "1.15.0"),
3930 stable(feature = "atomic_from", since = "1.23.0"),
3931 stable(feature = "atomic_nand", since = "1.27.0"),
3932 rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
3933 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3934 rustc_diagnostic_item = "AtomicUsize",
3935 "usize",
3936 "",
3937 atomic_umin, atomic_umax,
3938 $align,
3939 usize AtomicUsize
3940 }
3941
3942 /// An [`AtomicIsize`] initialized to `0`.
3943 #[cfg(target_pointer_width = $target_pointer_width)]
3944 #[stable(feature = "rust1", since = "1.0.0")]
3945 #[deprecated(
3946 since = "1.34.0",
3947 note = "the `new` function is now preferred",
3948 suggestion = "AtomicIsize::new(0)",
3949 )]
3950 pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
3951
3952 /// An [`AtomicUsize`] initialized to `0`.
3953 #[cfg(target_pointer_width = $target_pointer_width)]
3954 #[stable(feature = "rust1", since = "1.0.0")]
3955 #[deprecated(
3956 since = "1.34.0",
3957 note = "the `new` function is now preferred",
3958 suggestion = "AtomicUsize::new(0)",
3959 )]
3960 pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);
3961 )* };
3962}
3963
3964#[cfg(target_has_atomic_load_store = "ptr")]
3965#[cfg(not(feature = "ferrocene_certified"))]
3966atomic_int_ptr_sized! {
3967 "16" 2
3968 "32" 4
3969 "64" 8
3970}
3971
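// Returns the strongest failure ordering permitted for a `compare_exchange`
// whose success ordering is `order` (cf. the migration table in the
// `compare_and_swap` docs above).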
#[cfg(not(feature = "ferrocene_certified"))]
#[inline]
#[cfg(target_has_atomic)]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        Release => Relaxed,
        Relaxed => Relaxed,
        SeqCst => SeqCst,
        Acquire => Acquire,
        AcqRel => Acquire,
    }
}

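/// Atomically stores `val` into `*dst` (like C11 `atomic_store_explicit`).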
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
    // SAFETY: the caller must uphold the safety contract for `atomic_store`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_store::<T, { AO::Relaxed }>(dst, val),
            Release => intrinsics::atomic_store::<T, { AO::Release }>(dst, val),
            SeqCst => intrinsics::atomic_store::<T, { AO::SeqCst }>(dst, val),
            Acquire => panic!("there is no such thing as an acquire store"),
            AcqRel => panic!("there is no such thing as an acquire-release store"),
        }
    }
}

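/// Atomically loads the value at `*dst` (like C11 `atomic_load_explicit`).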
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_load`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_load::<T, { AO::Relaxed }>(dst),
            Acquire => intrinsics::atomic_load::<T, { AO::Acquire }>(dst),
            SeqCst => intrinsics::atomic_load::<T, { AO::SeqCst }>(dst),
            Release => panic!("there is no such thing as a release load"),
            AcqRel => panic!("there is no such thing as an acquire-release load"),
        }
    }
}

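/// Stores `val` into `*dst`, returning the previous value (like C11 `atomic_exchange`).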
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_swap`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xchg::<T, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_xchg::<T, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_xchg::<T, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_xchg::<T, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_xchg::<T, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Returns the previous value (like __sync_fetch_and_add).
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_add<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_add`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xadd::<T, U, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_xadd::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_xadd::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_xadd::<T, U, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_xadd::<T, U, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Returns the previous value (like __sync_fetch_and_sub).
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_sub<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_sub`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xsub::<T, U, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_xsub::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_xsub::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_xsub::<T, U, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_xsub::<T, U, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Publicly exposed for stdarch; nobody else should use this.
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
#[unstable(feature = "core_intrinsics", issue = "none")]
#[doc(hidden)]
pub unsafe fn atomic_compare_exchange<T: Copy>(
    dst: *mut T,
    old: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
    let (val, ok) = unsafe {
        match (success, failure) {
            (Relaxed, Relaxed) => {
                intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::Relaxed }>(dst, old, new)
            }
            (Relaxed, Acquire) => {
                intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::Acquire }>(dst, old, new)
            }
            (Relaxed, SeqCst) => {
                intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::SeqCst }>(dst, old, new)
            }
            (Acquire, Relaxed) => {
                intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::Relaxed }>(dst, old, new)
            }
            (Acquire, Acquire) => {
                intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::Acquire }>(dst, old, new)
            }
            (Acquire, SeqCst) => {
                intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::SeqCst }>(dst, old, new)
            }
            (Release, Relaxed) => {
                intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::Relaxed }>(dst, old, new)
            }
            (Release, Acquire) => {
                intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::Acquire }>(dst, old, new)
            }
            (Release, SeqCst) => {
                intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::SeqCst }>(dst, old, new)
            }
            (AcqRel, Relaxed) => {
                intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::Relaxed }>(dst, old, new)
            }
            (AcqRel, Acquire) => {
                intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::Acquire }>(dst, old, new)
            }
            (AcqRel, SeqCst) => {
                intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::SeqCst }>(dst, old, new)
            }
            (SeqCst, Relaxed) => {
                intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::Relaxed }>(dst, old, new)
            }
            (SeqCst, Acquire) => {
                intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::Acquire }>(dst, old, new)
            }
            (SeqCst, SeqCst) => {
                intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::SeqCst }>(dst, old, new)
            }
            (_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
            (_, Release) => panic!("there is no such thing as a release failure ordering"),
        }
    };
    // Ferrocene annotation: Both branches of this conditional are covered, which means that the
    // `ok` boolean must have been evaluated in either case.
    if ok {
        //
        Ok(val)
    } else {
        //
        Err(val)
    }
}

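/// Like [`atomic_compare_exchange`], but is allowed to fail spuriously even when the
/// comparison succeeds (like C11 `atomic_compare_exchange_weak`).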
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_compare_exchange_weak<T: Copy>(
    dst: *mut T,
    old: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
    let (val, ok) = unsafe {
        match (success, failure) {
            (Relaxed, Relaxed) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::Relaxed }>(dst, old, new)
            }
            (Relaxed, Acquire) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::Acquire }>(dst, old, new)
            }
            (Relaxed, SeqCst) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::SeqCst }>(dst, old, new)
            }
            (Acquire, Relaxed) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::Relaxed }>(dst, old, new)
            }
            (Acquire, Acquire) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::Acquire }>(dst, old, new)
            }
            (Acquire, SeqCst) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::SeqCst }>(dst, old, new)
            }
            (Release, Relaxed) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::Relaxed }>(dst, old, new)
            }
            (Release, Acquire) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::Acquire }>(dst, old, new)
            }
            (Release, SeqCst) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::SeqCst }>(dst, old, new)
            }
            (AcqRel, Relaxed) => {
                intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::Relaxed }>(dst, old, new)
            }
            (AcqRel, Acquire) => {
                intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::Acquire }>(dst, old, new)
            }
            (AcqRel, SeqCst) => {
                intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::SeqCst }>(dst, old, new)
            }
            (SeqCst, Relaxed) => {
                intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::Relaxed }>(dst, old, new)
            }
            (SeqCst, Acquire) => {
                intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::Acquire }>(dst, old, new)
            }
            (SeqCst, SeqCst) => {
                intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::SeqCst }>(dst, old, new)
            }
            (_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
            (_, Release) => panic!("there is no such thing as a release failure ordering"),
        }
    };
    // Ferrocene annotation: Both branches of this conditional are covered, which means that the
    // `ok` boolean must have been evaluated in either case.
    if ok {
        //
        Ok(val)
    } else {
        //
        Err(val)
    }
}

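/// Returns the previous value (like __sync_fetch_and_and).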
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_and<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_and`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_and::<T, U, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_and::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_and::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_and::<T, U, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_and::<T, U, { AO::SeqCst }>(dst, val),
        }
    }
}

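/// Returns the previous value (like __sync_fetch_and_nand).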
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_nand<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_nand`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_nand::<T, U, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_nand::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_nand::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_nand::<T, U, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_nand::<T, U, { AO::SeqCst }>(dst, val),
        }
    }
}

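/// Returns the previous value (like __sync_fetch_and_or).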
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_or<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_or`
    unsafe {
        match order {
            SeqCst => intrinsics::atomic_or::<T, U, { AO::SeqCst }>(dst, val),
            Acquire => intrinsics::atomic_or::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_or::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_or::<T, U, { AO::AcqRel }>(dst, val),
            Relaxed => intrinsics::atomic_or::<T, U, { AO::Relaxed }>(dst, val),
        }
    }
}

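/// Returns the previous value (like __sync_fetch_and_xor).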
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_xor<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_xor`
    unsafe {
        match order {
            SeqCst => intrinsics::atomic_xor::<T, U, { AO::SeqCst }>(dst, val),
            Acquire => intrinsics::atomic_xor::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_xor::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_xor::<T, U, { AO::AcqRel }>(dst, val),
            Relaxed => intrinsics::atomic_xor::<T, U, { AO::Relaxed }>(dst, val),
        }
    }
}

/// Updates `*dst` to the max value of `val` and the old value (signed comparison)
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
#[cfg(not(feature = "ferrocene_certified"))]
unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_max`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_max::<T, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_max::<T, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_max::<T, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_max::<T, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_max::<T, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Updates `*dst` to the min value of `val` and the old value (signed comparison)
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
#[cfg(not(feature = "ferrocene_certified"))]
unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_min`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_min::<T, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_min::<T, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_min::<T, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_min::<T, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_min::<T, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Updates `*dst` to the max value of `val` and the old value (unsigned comparison)
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_umax`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_umax::<T, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_umax::<T, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_umax::<T, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_umax::<T, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_umax::<T, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Updates `*dst` to the min value of `val` and the old value (unsigned comparison)
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_umin`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_umin::<T, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_umin::<T, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_umin::<T, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_umin::<T, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_umin::<T, { AO::SeqCst }>(dst, val),
        }
    }
}

/// An atomic fence.
///
/// Fences create synchronization between themselves and atomic operations or fences in other
/// threads. To achieve this, a fence prevents the compiler and CPU from reordering certain types of
/// memory operations around it.
///
/// A fence 'A' which has (at least) [`Release`] ordering semantics synchronizes
/// with a fence 'B' with (at least) [`Acquire`] semantics if and only if there
/// exist operations X and Y, both operating on some atomic object 'm', such
/// that A is sequenced before X, Y is sequenced before B, and Y observes
/// the change to m. This provides a happens-before dependence between A and B.
///
/// ```text
/// Thread 1                                          Thread 2
///
/// fence(Release);      A --------------
/// m.store(3, Relaxed); X ---------    |
///                                |    |
///                                |    |
///                                -------------> Y  if m.load(Relaxed) == 3 {
///                                     |-------> B      fence(Acquire);
///                                                      ...
///                                                  }
/// ```
///
/// Note that in the example above, it is crucial that the accesses to `m` are atomic. Fences cannot
/// be used to establish synchronization among non-atomic accesses in different threads. However,
/// thanks to the happens-before relationship between A and B, any non-atomic accesses that
/// happen-before A are now also properly synchronized with any non-atomic accesses that
/// happen-after B.
///
/// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
/// with a fence.
///
/// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
/// and [`Release`] semantics, participates in the global program order of the
/// other [`SeqCst`] operations and/or fences.
///
/// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::fence;
/// use std::sync::atomic::Ordering;
///
/// // A mutual exclusion primitive based on a spinlock.
/// pub struct Mutex {
///     flag: AtomicBool,
/// }
///
/// impl Mutex {
///     pub fn new() -> Mutex {
///         Mutex {
///             flag: AtomicBool::new(false),
///         }
///     }
///
///     pub fn lock(&self) {
///         // Wait until the old value is `false`.
///         while self
///             .flag
///             .compare_exchange_weak(false, true, Ordering::Relaxed, Ordering::Relaxed)
///             .is_err()
///         {}
///         // This fence synchronizes-with the store in `unlock`.
///         fence(Ordering::Acquire);
///     }
///
///     pub fn unlock(&self) {
///         self.flag.store(false, Ordering::Release);
///     }
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "fence"]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
#[cfg(not(feature = "ferrocene_certified"))]
pub fn fence(order: Ordering) {
    // SAFETY: using an atomic fence is safe.
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence::<{ AO::Acquire }>(),
            Release => intrinsics::atomic_fence::<{ AO::Release }>(),
            AcqRel => intrinsics::atomic_fence::<{ AO::AcqRel }>(),
            SeqCst => intrinsics::atomic_fence::<{ AO::SeqCst }>(),
            Relaxed => panic!("there is no such thing as a relaxed fence"),
        }
    }
}

4445/// A "compiler-only" atomic fence.
4446///
4447/// Like [`fence`], this function establishes synchronization with other atomic operations and
4448/// fences. However, unlike [`fence`], `compiler_fence` only establishes synchronization with
4449/// operations *in the same thread*. This may at first sound rather useless, since code within a
4450/// thread is typically already totally ordered and does not need any further synchronization.
4451/// However, there are cases where code can run on the same thread without being ordered:
4452/// - The most common case is that of a *signal handler*: a signal handler runs in the same thread
4453/// as the code it interrupted, but it is not ordered with respect to that code. `compiler_fence`
4454/// can be used to establish synchronization between a thread and its signal handler, the same way
4455/// that `fence` can be used to establish synchronization across threads.
4456/// - Similar situations can arise in embedded programming with interrupt handlers, or in custom
4457/// implementations of preemptive green threads. In general, `compiler_fence` can establish
4458/// synchronization with code that is guaranteed to run on the same hardware CPU.
4459///
4460/// See [`fence`] for how a fence can be used to achieve synchronization. Note that just like
4461/// [`fence`], synchronization still requires atomic operations to be used in both threads -- it is
4462/// not possible to perform synchronization entirely with fences and non-atomic operations.
4463///
4464/// `compiler_fence` does not emit any machine code, but restricts the kinds of memory re-ordering
4465/// the compiler is allowed to do. `compiler_fence` corresponds to [`atomic_signal_fence`] in C and
4466/// C++.
4467///
4468/// [`atomic_signal_fence`]: https://en.cppreference.com/w/cpp/atomic/atomic_signal_fence
4469///
4470/// # Panics
4471///
4472/// Panics if `order` is [`Relaxed`].
4473///
4474/// # Examples
4475///
4476/// Without the two `compiler_fence` calls, the read of `IMPORTANT_VARIABLE` in `signal_handler`
4477/// is *undefined behavior* due to a data race, despite everything happening in a single thread.
4478/// This is because the signal handler is considered to run concurrently with its associated
4479/// thread, and explicit synchronization is required to pass data between a thread and its
4480/// signal handler. The code below uses two `compiler_fence` calls to establish the usual
4481/// release-acquire synchronization pattern (see [`fence`] for an image).
4482///
4483/// ```
4484/// use std::sync::atomic::AtomicBool;
4485/// use std::sync::atomic::Ordering;
4486/// use std::sync::atomic::compiler_fence;
4487///
4488/// static mut IMPORTANT_VARIABLE: usize = 0;
4489/// static IS_READY: AtomicBool = AtomicBool::new(false);
4490///
4491/// fn main() {
4492/// unsafe { IMPORTANT_VARIABLE = 42 };
4493/// // Marks earlier writes as being released with future relaxed stores.
4494/// compiler_fence(Ordering::Release);
4495/// IS_READY.store(true, Ordering::Relaxed);
4496/// }
4497///
4498/// fn signal_handler() {
4499/// if IS_READY.load(Ordering::Relaxed) {
4500/// // Acquires writes that were released with relaxed stores that we read from.
4501/// compiler_fence(Ordering::Acquire);
4502/// assert_eq!(unsafe { IMPORTANT_VARIABLE }, 42);
4503/// }
4504/// }
4505/// ```
4506#[inline]
4507#[stable(feature = "compiler_fences", since = "1.21.0")]
4508#[rustc_diagnostic_item = "compiler_fence"]
4509#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
4510#[cfg(not(feature = "ferrocene_certified"))]
4511pub fn compiler_fence(order: Ordering) {
4512 // SAFETY: using an atomic fence is safe.
4513 unsafe {
4514 match order {
4515 Acquire => intrinsics::atomic_singlethreadfence::<{ AO::Acquire }>(),
4516 Release => intrinsics::atomic_singlethreadfence::<{ AO::Release }>(),
4517 AcqRel => intrinsics::atomic_singlethreadfence::<{ AO::AcqRel }>(),
4518 SeqCst => intrinsics::atomic_singlethreadfence::<{ AO::SeqCst }>(),
4519 Relaxed => panic!("there is no such thing as a relaxed fence"),
4520 }
4521 }
4522}
4523
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
#[cfg(not(feature = "ferrocene_certified"))]
impl fmt::Debug for AtomicBool {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
    }
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
#[cfg(not(feature = "ferrocene_certified"))]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
    }
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "atomic_pointer", since = "1.24.0")]
#[cfg(not(feature = "ferrocene_certified"))]
impl<T> fmt::Pointer for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&self.load(Ordering::Relaxed), f)
    }
}

/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
///
/// This function is deprecated in favor of [`hint::spin_loop`].
///
/// [`hint::spin_loop`]: crate::hint::spin_loop
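///
/// # Examples
///
/// A minimal migration sketch; new code should call [`hint::spin_loop`] directly:
///
/// ```
/// // Busy-wait for a bounded number of iterations, hinting the CPU each time.
/// for _ in 0..100 {
///     std::hint::spin_loop();
/// }
/// ```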
#[inline]
#[stable(feature = "spin_loop_hint", since = "1.24.0")]
#[deprecated(since = "1.51.0", note = "use hint::spin_loop instead")]
#[cfg(not(feature = "ferrocene_certified"))]
pub fn spin_loop_hint() {
    spin_loop()
}