alloc/sync.rs

#![stable(feature = "rust1", since = "1.0.0")]

//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][Arc] documentation for more details.
//!
//! **Note**: This module is only available on platforms that support atomic
//! loads and stores of pointers. This may be detected at compile time using
//! `#[cfg(target_has_atomic = "ptr")]`.

use core::any::Any;
use core::cell::CloneFromCell;
#[cfg(not(no_global_oom_handling))]
use core::clone::TrivialClone;
use core::clone::{CloneToUninit, UseCloned};
use core::cmp::Ordering;
use core::hash::{Hash, Hasher};
use core::intrinsics::abort;
#[cfg(not(no_global_oom_handling))]
use core::iter;
use core::marker::{PhantomData, Unsize};
use core::mem::{self, ManuallyDrop, align_of_val_raw};
use core::num::NonZeroUsize;
use core::ops::{CoerceUnsized, Deref, DerefMut, DerefPure, DispatchFromDyn, LegacyReceiver};
#[cfg(not(no_global_oom_handling))]
use core::ops::{Residual, Try};
use core::panic::{RefUnwindSafe, UnwindSafe};
use core::pin::{Pin, PinCoerceUnsized};
use core::ptr::{self, NonNull};
#[cfg(not(no_global_oom_handling))]
use core::slice::from_raw_parts_mut;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use core::sync::atomic::{self, Atomic};
use core::{borrow, fmt, hint};

#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{AllocError, Allocator, Global, Layout};
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::rc::is_dangling;
#[cfg(not(no_global_oom_handling))]
use crate::string::String;
#[cfg(not(no_global_oom_handling))]
use crate::vec::Vec;

/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program (although not
/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
/// Trying to go above it might call a `panic` (if not actually going above it).
///
/// This is a global invariant, and also applies when using a compare-exchange loop.
///
/// See comment in `Arc::clone`.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// The error in case either counter reaches above `MAX_REFCOUNT`, and we can `panic` safely.
#[cfg_attr(feature = "ferrocene_certified_runtime", expect(dead_code))]
const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";

#[cfg(not(sanitize = "thread"))]
macro_rules! acquire {
    ($x:expr) => {
        atomic::fence(Acquire)
    };
}

// ThreadSanitizer does not support memory fences. To avoid false positive
// reports in Arc / Weak implementation use atomic loads for synchronization
// instead.
#[cfg(sanitize = "thread")]
macro_rules! acquire {
    ($x:expr) => {
        $x.load(Acquire)
    };
}

/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
/// Reference Counted'.
///
/// The type `Arc<T>` provides shared ownership of a value of type `T`,
/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
/// a new `Arc` instance, which points to the same allocation on the heap as the
/// source `Arc`, while increasing a reference count. When the last `Arc`
/// pointer to a given allocation is destroyed, the value stored in that allocation (often
/// referred to as "inner value") is also dropped.
///
/// Shared references in Rust disallow mutation by default, and `Arc` is no
/// exception: you cannot generally obtain a mutable reference to something
/// inside an `Arc`. If you do need to mutate through an `Arc`, you have several options:
///
/// 1. Use interior mutability with synchronization primitives like [`Mutex`][mutex],
///    [`RwLock`][rwlock], or one of the [`Atomic`][atomic] types.
///
/// 2. Use clone-on-write semantics with [`Arc::make_mut`] which provides efficient mutation
///    without requiring interior mutability. This approach clones the data only when
///    needed (when there are multiple references) and can be more efficient when mutations
///    are infrequent.
///
/// 3. Use [`Arc::get_mut`] when you know your `Arc` is not shared (has a reference count of 1),
///    which provides direct mutable access to the inner value without any cloning.
///
/// ```
/// use std::sync::Arc;
///
/// let mut data = Arc::new(vec![1, 2, 3]);
///
/// // This will clone the vector only if there are other references to it
/// Arc::make_mut(&mut data).push(4);
///
/// assert_eq!(*data, vec![1, 2, 3, 4]);
/// ```
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of pointers, which includes all platforms that support
/// the `std` crate but not all those which only support [`alloc`](crate).
/// This may be detected at compile time using `#[cfg(target_has_atomic = "ptr")]`.
///
/// ## Thread Safety
///
/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
/// counting. This means that it is thread-safe. The disadvantage is that
/// atomic operations are more expensive than ordinary memory accesses. If you
/// are not sharing reference-counted allocations between threads, consider using
/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
/// However, a library might choose `Arc<T>` in order to give library consumers
/// more flexibility.
///
/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
/// data, but it doesn't add thread safety to its data. Consider
/// <code>Arc<[RefCell\<T>]></code>. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
/// [`Send`], <code>Arc<[RefCell\<T>]></code> would be as well. But then we'd have a problem:
/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
/// non-atomic operations.
///
/// In the end, this means that you may need to pair `Arc<T>` with some sort of
/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
///
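/// For example, a counter shared across threads can be wrapped in a
/// [`Mutex`][mutex] inside an `Arc` (a minimal sketch; the names are
/// illustrative):
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
/// let mut handles = Vec::new();
///
/// for _ in 0..4 {
///     let counter = Arc::clone(&counter);
///     handles.push(thread::spawn(move || {
///         *counter.lock().unwrap() += 1;
///     }));
/// }
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
///
/// assert_eq!(*counter.lock().unwrap(), 4);
/// ```
///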
/// ## Breaking cycles with `Weak`
///
/// The [`downgrade`][downgrade] method can be used to create a non-owning
/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
/// already been dropped. In other words, `Weak` pointers do not keep the value
/// inside the allocation alive; however, they *do* keep the allocation
/// (the backing store for the value) alive.
///
/// A cycle between `Arc` pointers will never be deallocated. For this reason,
/// [`Weak`] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
/// pointers from children back to their parents.
///
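/// A minimal sketch of that parent/child shape (the `Node` type here is
/// illustrative, not part of the API):
///
/// ```
/// # #![allow(dead_code)]
/// use std::sync::{Arc, Mutex, Weak};
///
/// struct Node {
///     parent: Mutex<Weak<Node>>,
///     children: Mutex<Vec<Arc<Node>>>,
/// }
///
/// let parent = Arc::new(Node {
///     parent: Mutex::new(Weak::new()),
///     children: Mutex::new(Vec::new()),
/// });
/// let child = Arc::new(Node {
///     parent: Mutex::new(Arc::downgrade(&parent)),
///     children: Mutex::new(Vec::new()),
/// });
/// parent.children.lock().unwrap().push(child);
///
/// // The child's `Weak` back-pointer does not keep the parent alive, so
/// // dropping `parent` frees the whole tree instead of leaking a cycle.
/// ```
///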
/// # Cloning references
///
/// Creating a new reference from an existing reference-counted pointer is done using the
/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
///
/// ```
/// use std::sync::Arc;
/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
/// // The two syntaxes below are equivalent.
/// let a = foo.clone();
/// let b = Arc::clone(&foo);
/// // a, b, and foo are all Arcs that point to the same memory location
/// ```
///
/// ## `Deref` behavior
///
/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`] trait),
/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
/// functions, called using [fully qualified syntax]:
///
/// ```
/// use std::sync::Arc;
///
/// let my_arc = Arc::new(());
/// let my_weak = Arc::downgrade(&my_arc);
/// ```
///
/// `Arc<T>`'s implementations of traits like `Clone` may also be called using
/// fully qualified syntax. Some people prefer to use fully qualified syntax,
/// while others prefer using method-call syntax.
///
/// ```
/// use std::sync::Arc;
///
/// let arc = Arc::new(());
/// // Method-call syntax
/// let arc2 = arc.clone();
/// // Fully qualified syntax
/// let arc3 = Arc::clone(&arc);
/// ```
///
/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
/// already been dropped.
///
/// [`Rc<T>`]: crate::rc::Rc
/// [clone]: Clone::clone
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: core::sync::atomic
/// [downgrade]: Arc::downgrade
/// [upgrade]: Weak::upgrade
/// [RefCell\<T>]: core::cell::RefCell
/// [`RefCell<T>`]: core::cell::RefCell
/// [`std::sync`]: ../../std/sync/index.html
/// [`Arc::clone(&from)`]: Arc::clone
/// [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
///
/// # Examples
///
/// Sharing some immutable data between threads:
///
/// ```
/// use std::sync::Arc;
/// use std::thread;
///
/// let five = Arc::new(5);
///
/// for _ in 0..10 {
///     let five = Arc::clone(&five);
///
///     thread::spawn(move || {
///         println!("{five:?}");
///     });
/// }
/// ```
///
/// Sharing a mutable [`AtomicUsize`]:
///
/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize "sync::atomic::AtomicUsize"
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::thread;
///
/// let val = Arc::new(AtomicUsize::new(5));
///
/// for _ in 0..10 {
///     let val = Arc::clone(&val);
///
///     thread::spawn(move || {
///         let v = val.fetch_add(1, Ordering::Relaxed);
///         println!("{v:?}");
///     });
/// }
/// ```
///
/// See the [`rc` documentation][rc_examples] for more examples of reference
/// counting in general.
///
/// [rc_examples]: crate::rc#examples
#[doc(search_unbox)]
#[rustc_diagnostic_item = "Arc"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_insignificant_dtor]
pub struct Arc<
    T: ?Sized,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    ptr: NonNull<ArcInner<T>>,
    phantom: PhantomData<ArcInner<T>>,
    alloc: A,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Arc<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Arc<T, A> {}

#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: RefUnwindSafe + ?Sized, A: Allocator + UnwindSafe> UnwindSafe for Arc<T, A> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Arc<U, A>> for Arc<T, A> {}

#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}

// SAFETY: `Arc::clone` doesn't access any `Cell`s which could contain the `Arc` being cloned.
#[unstable(feature = "cell_get_cloned", issue = "145329")]
unsafe impl<T: ?Sized> CloneFromCell for Arc<T> {}

impl<T: ?Sized> Arc<T> {
    unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
        unsafe { Self::from_inner_in(ptr, Global) }
    }

    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
        unsafe { Self::from_ptr_in(ptr, Global) }
    }
}

impl<T: ?Sized, A: Allocator> Arc<T, A> {
    #[inline]
    fn into_inner_with_allocator(this: Self) -> (NonNull<ArcInner<T>>, A) {
        let this = mem::ManuallyDrop::new(this);
        (this.ptr, unsafe { ptr::read(&this.alloc) })
    }

    #[inline]
    unsafe fn from_inner_in(ptr: NonNull<ArcInner<T>>, alloc: A) -> Self {
        Self { ptr, phantom: PhantomData, alloc }
    }

    #[inline]
    unsafe fn from_ptr_in(ptr: *mut ArcInner<T>, alloc: A) -> Self {
        unsafe { Self::from_inner_in(NonNull::new_unchecked(ptr), alloc) }
    }
}

/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
/// managed allocation.
///
/// The allocation is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an <code>[Option]<[Arc]\<T>></code>.
///
/// Since a `Weak` reference does not count towards ownership, it will not
/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
/// guarantees about the value still being present. Thus it may return [`None`]
/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
/// itself (the backing store) from being deallocated.
///
/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
/// prevent circular references between [`Arc`] pointers, since mutual owning references
/// would never allow either [`Arc`] to be dropped. For example, a tree could
/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
/// pointers from children back to their parents.
///
/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
///
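/// # Examples
///
/// A minimal sketch of `downgrade` and `upgrade` (illustrative only):
///
/// ```
/// use std::sync::Arc;
///
/// let strong = Arc::new(5);
/// let weak = Arc::downgrade(&strong);
///
/// // While a strong reference exists, `upgrade` returns `Some`.
/// assert_eq!(weak.upgrade().map(|arc| *arc), Some(5));
///
/// // Once the last strong reference is gone, `upgrade` returns `None`.
/// drop(strong);
/// assert!(weak.upgrade().is_none());
/// ```
///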
/// [`upgrade`]: Weak::upgrade
#[stable(feature = "arc_weak", since = "1.4.0")]
#[rustc_diagnostic_item = "ArcWeak"]
pub struct Weak<
    T: ?Sized,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    // This is a `NonNull` to allow optimizing the size of this type in enums,
    // but it is not necessarily a valid pointer.
    // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
    // to allocate space on the heap. That's not a value a real pointer
    // will ever have because ArcInner has alignment at least 2.
    ptr: NonNull<ArcInner<T>>,
    alloc: A,
}

#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Weak<T, A> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Weak<T, A> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Weak<U, A>> for Weak<T, A> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}

// SAFETY: `Weak::clone` doesn't access any `Cell`s which could contain the `Weak` being cloned.
#[unstable(feature = "cell_get_cloned", issue = "145329")]
unsafe impl<T: ?Sized> CloneFromCell for Weak<T> {}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized, A: Allocator> fmt::Debug for Weak<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "(Weak)")
    }
}

// This is repr(C) to future-proof against possible field-reordering, which
// would interfere with otherwise safe [into|from]_raw() of transmutable
// inner types.
// Unlike RcInner, repr(align(2)) is not strictly required because atomic types
// have alignment equal to their size, but we use it for consistency and clarity.
#[repr(C, align(2))]
struct ArcInner<T: ?Sized> {
    strong: Atomic<usize>,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: Atomic<usize>,

    data: T,
}

/// Calculate layout for `ArcInner<T>` using the inner value's layout
fn arcinner_layout_for_value_layout(layout: Layout) -> Layout {
    // Calculate layout using the given value layout.
    // Previously, layout was calculated on the expression
    // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
    // reference (see #54908).
    Layout::new::<ArcInner<()>>().extend(layout).unwrap().0.pad_to_align()
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = Box::new(ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        });
        unsafe { Self::from_inner(Box::leak(x).into()) }
    }

    /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
    /// to allow you to construct a `T` which holds a weak pointer to itself.
    ///
    /// Generally, a structure circularly referencing itself, either directly or
    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
    /// Using this function, you get access to the weak pointer during the
    /// initialization of `T`, before the `Arc<T>` is created, such that you can
    /// clone and store it inside the `T`.
    ///
    /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
    /// then calls your closure, giving it a `Weak<T>` to this allocation,
    /// and only afterwards completes the construction of the `Arc<T>` by placing
    /// the `T` returned from your closure into the allocation.
    ///
    /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
    /// returns, calling [`upgrade`] on the weak reference inside your closure will
    /// fail and result in a `None` value.
    ///
    /// # Panics
    ///
    /// If `data_fn` panics, the panic is propagated to the caller, and the
    /// temporary [`Weak<T>`] is dropped normally.
    ///
    /// # Example
    ///
    /// ```
    /// # #![allow(dead_code)]
    /// use std::sync::{Arc, Weak};
    ///
    /// struct Gadget {
    ///     me: Weak<Gadget>,
    /// }
    ///
    /// impl Gadget {
    ///     /// Constructs a reference counted Gadget.
    ///     fn new() -> Arc<Self> {
    ///         // `me` is a `Weak<Gadget>` pointing at the new allocation of the
    ///         // `Arc` we're constructing.
    ///         Arc::new_cyclic(|me| {
    ///             // Create the actual struct here.
    ///             Gadget { me: me.clone() }
    ///         })
    ///     }
    ///
    ///     /// Returns a reference counted pointer to Self.
    ///     fn me(&self) -> Arc<Self> {
    ///         self.me.upgrade().unwrap()
    ///     }
    /// }
    /// ```
    /// [`upgrade`]: Weak::upgrade
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "arc_new_cyclic", since = "1.60.0")]
    pub fn new_cyclic<F>(data_fn: F) -> Arc<T>
    where
        F: FnOnce(&Weak<T>) -> T,
    {
        Self::new_cyclic_in(data_fn, Global)
    }

    /// Constructs a new `Arc` with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use]
    pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate(layout),
                <*mut u8>::cast,
            ))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::new_zeroed();
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0)
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_zeroed_alloc", since = "1.92.0")]
    #[must_use]
    pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate_zeroed(layout),
                <*mut u8>::cast,
            ))
        }
    }

    /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
    /// `data` will be pinned in memory and unable to be moved.
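    ///
    /// # Examples
    ///
    /// A minimal sketch of pinning a value (illustrative only):
    ///
    /// ```
    /// use std::pin::Pin;
    /// use std::sync::Arc;
    ///
    /// let pinned: Pin<Arc<u32>> = Arc::pin(5);
    /// assert_eq!(*pinned, 5);
    /// ```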
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "pin", since = "1.33.0")]
    #[must_use]
    pub fn pin(data: T) -> Pin<Arc<T>> {
        unsafe { Pin::new_unchecked(Arc::new(data)) }
    }

    /// Constructs a new `Pin<Arc<T>>`, returning an error if allocation fails.
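    ///
    /// # Examples
    ///
    /// A minimal sketch (illustrative only):
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::sync::Arc;
    ///
    /// let pinned = Arc::try_pin(5)?;
    /// assert_eq!(*pinned, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```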
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_pin(data: T) -> Result<Pin<Arc<T>>, AllocError> {
        unsafe { Ok(Pin::new_unchecked(Arc::try_new(data)?)) }
    }

    /// Constructs a new `Arc<T>`, returning an error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::sync::Arc;
    ///
    /// let five = Arc::try_new(5)?;
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new(data: T) -> Result<Arc<T>, AllocError> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = Box::try_new(ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        })?;
        unsafe { Ok(Self::from_inner(Box::leak(x).into())) }
    }

    /// Constructs a new `Arc` with uninitialized contents, returning an error
    /// if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::try_new_uninit()?;
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_new_uninit() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate(layout),
                <*mut u8>::cast,
            )?))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, returning an error if allocation fails.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::try_new_zeroed()?;
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_new_zeroed() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate_zeroed(layout),
                <*mut u8>::cast,
            )?))
        }
    }

    /// Maps the value in an `Arc`, reusing the allocation if possible.
    ///
    /// `f` is called on a reference to the value in the `Arc`, and the result is returned, also in
    /// an `Arc`.
    ///
    /// Note: this is an associated function, which means that you have
    /// to call it as `Arc::map(a, f)` instead of `a.map(f)`. This
    /// is so that there is no conflict with a method on the inner type.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(smart_pointer_try_map)]
    ///
    /// use std::sync::Arc;
    ///
    /// let r = Arc::new(7);
    /// let new = Arc::map(r, |i| i + 7);
    /// assert_eq!(*new, 14);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "smart_pointer_try_map", issue = "144419")]
    pub fn map<U>(this: Self, f: impl FnOnce(&T) -> U) -> Arc<U> {
        if size_of::<T>() == size_of::<U>()
            && align_of::<T>() == align_of::<U>()
            && Arc::is_unique(&this)
        {
            unsafe {
                let ptr = Arc::into_raw(this);
                let value = ptr.read();
                let mut allocation = Arc::from_raw(ptr.cast::<mem::MaybeUninit<U>>());

                Arc::get_mut_unchecked(&mut allocation).write(f(&value));
                allocation.assume_init()
            }
        } else {
            Arc::new(f(&*this))
        }
    }

    /// Attempts to map the value in an `Arc`, reusing the allocation if possible.
    ///
    /// `f` is called on a reference to the value in the `Arc`, and if the operation succeeds, the
    /// result is returned, also in an `Arc`.
    ///
    /// Note: this is an associated function, which means that you have
    /// to call it as `Arc::try_map(a, f)` instead of `a.try_map(f)`. This
    /// is so that there is no conflict with a method on the inner type.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(smart_pointer_try_map)]
    ///
    /// use std::sync::Arc;
    ///
    /// let b = Arc::new(7);
    /// let new = Arc::try_map(b, |&i| u32::try_from(i)).unwrap();
    /// assert_eq!(*new, 7);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "smart_pointer_try_map", issue = "144419")]
    pub fn try_map<R>(
        this: Self,
        f: impl FnOnce(&T) -> R,
    ) -> <R::Residual as Residual<Arc<R::Output>>>::TryType
    where
        R: Try,
        R::Residual: Residual<Arc<R::Output>>,
    {
        if size_of::<T>() == size_of::<R::Output>()
            && align_of::<T>() == align_of::<R::Output>()
            && Arc::is_unique(&this)
        {
            unsafe {
                let ptr = Arc::into_raw(this);
                let value = ptr.read();
                let mut allocation = Arc::from_raw(ptr.cast::<mem::MaybeUninit<R::Output>>());

                Arc::get_mut_unchecked(&mut allocation).write(f(&value)?);
                try { allocation.assume_init() }
            }
        } else {
            try { Arc::new(f(&*this)?) }
        }
    }
}

impl<T, A: Allocator> Arc<T, A> {
    /// Constructs a new `Arc<T>` in the provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let five = Arc::new_in(5, System);
    /// ```
    #[inline]
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_in(data: T, alloc: A) -> Arc<T, A> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x = Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(1),
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        );
        let (ptr, alloc) = Box::into_unique(x);
        unsafe { Self::from_inner_in(ptr.into(), alloc) }
    }

    /// Constructs a new `Arc` with uninitialized contents in the provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut five = Arc::<u32, _>::new_uninit_in(System);
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_uninit_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate(layout),
                    <*mut u8>::cast,
                ),
                alloc,
            )
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, in the provided allocator.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let zero = Arc::<u32, _>::new_zeroed_in(System);
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0)
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_zeroed_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate_zeroed(layout),
                    <*mut u8>::cast,
                ),
                alloc,
            )
        }
    }

    /// Constructs a new `Arc<T, A>` in the given allocator while giving you a `Weak<T, A>` to the allocation,
    /// to allow you to construct a `T` which holds a weak pointer to itself.
    ///
    /// Generally, a structure circularly referencing itself, either directly or
    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
    /// Using this function, you get access to the weak pointer during the
    /// initialization of `T`, before the `Arc<T, A>` is created, such that you can
    /// clone and store it inside the `T`.
    ///
    /// `new_cyclic_in` first allocates the managed allocation for the `Arc<T, A>`,
    /// then calls your closure, giving it a `Weak<T, A>` to this allocation,
    /// and only afterwards completes the construction of the `Arc<T, A>` by placing
    /// the `T` returned from your closure into the allocation.
    ///
    /// Since the new `Arc<T, A>` is not fully-constructed until `Arc<T, A>::new_cyclic_in`
    /// returns, calling [`upgrade`] on the weak reference inside your closure will
    /// fail and result in a `None` value.
    ///
    /// # Panics
    ///
    /// If `data_fn` panics, the panic is propagated to the caller, and the
    /// temporary [`Weak<T>`] is dropped normally.
    ///
    /// # Example
    ///
    /// See [`new_cyclic`]
    ///
    /// [`new_cyclic`]: Arc::new_cyclic
    /// [`upgrade`]: Weak::upgrade
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_cyclic_in<F>(data_fn: F, alloc: A) -> Arc<T, A>
    where
        F: FnOnce(&Weak<T, A>) -> T,
    {
        // Construct the inner in the "uninitialized" state with a single
        // weak reference.
        let (uninit_raw_ptr, alloc) = Box::into_raw_with_allocator(Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(0),
                weak: atomic::AtomicUsize::new(1),
                data: mem::MaybeUninit::<T>::uninit(),
            },
            alloc,
        ));
        let uninit_ptr: NonNull<_> = (unsafe { &mut *uninit_raw_ptr }).into();
        let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();

        let weak = Weak { ptr: init_ptr, alloc };

        // It's important we don't give up ownership of the weak pointer, or
        // else the memory might be freed by the time `data_fn` returns. If
        // we really wanted to pass ownership, we could create an additional
        // weak pointer for ourselves, but this would result in additional
        // updates to the weak reference count which might not be necessary
        // otherwise.
        let data = data_fn(&weak);

        // Now we can properly initialize the inner value and turn our weak
        // reference into a strong reference.
        let strong = unsafe {
            let inner = init_ptr.as_ptr();
            ptr::write(&raw mut (*inner).data, data);

            // The above write to the data field must be visible to any threads which
            // observe a non-zero strong count. Therefore we need at least "Release" ordering
            // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
            //
            // "Acquire" ordering is not required. When considering the possible behaviors
            // of `data_fn` we only need to look at what it could do with a reference to a
            // non-upgradeable `Weak`:
            // - It can *clone* the `Weak`, increasing the weak reference count.
            // - It can drop those clones, decreasing the weak reference count (but never to zero).
            //
            // These side effects do not impact us in any way, and no other side effects are
            // possible with safe code alone.
            let prev_value = (*inner).strong.fetch_add(1, Release);
            debug_assert_eq!(prev_value, 0, "No prior strong references should exist");

            // Strong references should collectively own a shared weak reference,
            // so don't run the destructor for our old weak reference.
            // Calling into_raw_with_allocator has the double effect of giving us back the allocator,
            // and forgetting the weak reference.
            let alloc = weak.into_raw_with_allocator().1;

            Arc::from_inner_in(init_ptr, alloc)
        };

        strong
    }

    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator. If `T` does not implement `Unpin`,
    /// then `data` will be pinned in memory and unable to be moved.
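    ///
    /// # Examples
    ///
    /// A minimal sketch (illustrative only):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let pinned = Arc::pin_in(5, System);
    /// assert_eq!(*pinned, 5);
    /// ```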
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn pin_in(data: T, alloc: A) -> Pin<Arc<T, A>>
    where
        A: 'static,
    {
        unsafe { Pin::new_unchecked(Arc::new_in(data, alloc)) }
    }

    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator, returning an error if
    /// allocation fails.
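    ///
    /// # Examples
    ///
    /// A minimal sketch (illustrative only):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let pinned = Arc::try_pin_in(5, System)?;
    /// assert_eq!(*pinned, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```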
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_pin_in(data: T, alloc: A) -> Result<Pin<Arc<T, A>>, AllocError>
    where
        A: 'static,
    {
        unsafe { Ok(Pin::new_unchecked(Arc::try_new_in(data, alloc)?)) }
    }

    /// Constructs a new `Arc<T, A>` in the provided allocator, returning an error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let five = Arc::try_new_in(5, System)?;
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new_in(data: T, alloc: A) -> Result<Arc<T, A>, AllocError> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x = Box::try_new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(1),
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        )?;
        let (ptr, alloc) = Box::into_unique(x);
        Ok(unsafe { Self::from_inner_in(ptr.into(), alloc) })
    }

    /// Constructs a new `Arc` with uninitialized contents, in the provided allocator, returning an
    /// error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut five = Arc::<u32, _>::try_new_uninit_in(System)?;
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new_uninit_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr_in(
                Arc::try_allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate(layout),
                    <*mut u8>::cast,
                )?,
                alloc,
            ))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, in the provided allocator, returning an error if allocation
    /// fails.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let zero = Arc::<u32, _>::try_new_zeroed_in(System)?;
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new_zeroed_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr_in(
                Arc::try_allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate_zeroed(layout),
                    <*mut u8>::cast,
                )?,
                alloc,
            ))
        }
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`] is returned with the same `Arc` that was
    /// passed in.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
    /// keep the `Arc` in the [`Err`] case.
    /// Immediately dropping the [`Err`]-value, as the expression
    /// `Arc::try_unwrap(this).ok()` does, can cause the strong count to
    /// drop to zero and the inner value of the `Arc` to be dropped.
    /// For instance, if two threads execute such an expression in parallel,
    /// there is a race condition (though not one that can cause unsafety):
    /// The threads could first both check whether they own the last instance
    /// in `Arc::try_unwrap`, determine that they both do not, and then both
    /// discard and drop their instance in the call to [`ok`][`Result::ok`].
    /// In this scenario, the value inside the `Arc` is safely destroyed
    /// by exactly one of the threads, but neither thread will ever be able
    /// to use the value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
            return Err(this);
        }

        acquire!(this.inner().strong);

        let this = ManuallyDrop::new(this);
        let elem: T = unsafe { ptr::read(&this.ptr.as_ref().data) };
        let alloc: A = unsafe { ptr::read(&this.alloc) }; // copy the allocator

        // Make a weak pointer to clean up the implicit strong-weak reference
        let _weak = Weak { ptr: this.ptr, alloc };

        Ok(elem)
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, [`None`] is returned and the `Arc` is dropped.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// If `Arc::into_inner` is called on every clone of this `Arc`,
    /// it is guaranteed that exactly one of the calls returns the inner value.
    /// This means in particular that the inner value is not dropped.
    ///
    /// [`Arc::try_unwrap`] is conceptually similar to `Arc::into_inner`, but it
    /// is meant for different use-cases. If used as a direct replacement
    /// for `Arc::into_inner` anyway, such as with the expression
    /// <code>[Arc::try_unwrap]\(this).[ok][Result::ok]()</code>, then it does
    /// **not** give the same guarantee as described in the previous paragraph.
    /// For more information, see the examples below and read the documentation
    /// of [`Arc::try_unwrap`].
    ///
    /// # Examples
    ///
    /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// let y = Arc::clone(&x);
    ///
    /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`:
    /// let x_thread = std::thread::spawn(|| Arc::into_inner(x));
    /// let y_thread = std::thread::spawn(|| Arc::into_inner(y));
    ///
    /// let x_inner_value = x_thread.join().unwrap();
    /// let y_inner_value = y_thread.join().unwrap();
    ///
    /// // One of the threads is guaranteed to receive the inner value:
    /// assert!(matches!(
    ///     (x_inner_value, y_inner_value),
    ///     (None, Some(3)) | (Some(3), None)
    /// ));
    /// // The result could also be `(None, None)` if the threads called
    /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead.
    /// ```
    ///
    /// A more practical example demonstrating the need for `Arc::into_inner`:
    /// ```
    /// use std::sync::Arc;
    ///
    /// // Definition of a simple singly linked list using `Arc`:
    /// #[derive(Clone)]
    /// struct LinkedList<T>(Option<Arc<Node<T>>>);
    /// struct Node<T>(T, Option<Arc<Node<T>>>);
    ///
    /// // Dropping a long `LinkedList<T>` relying on the destructor of `Arc`
    /// // can cause a stack overflow. To prevent this, we can provide a
    /// // manual `Drop` implementation that does the destruction in a loop:
    /// impl<T> Drop for LinkedList<T> {
    ///     fn drop(&mut self) {
    ///         let mut link = self.0.take();
    ///         while let Some(arc_node) = link.take() {
    ///             if let Some(Node(_value, next)) = Arc::into_inner(arc_node) {
    ///                 link = next;
    ///             }
    ///         }
    ///     }
    /// }
    ///
    /// // Implementation of `new` and `push` omitted
    /// impl<T> LinkedList<T> {
    ///     /* ... */
    /// #   fn new() -> Self {
    /// #       LinkedList(None)
    /// #   }
    /// #   fn push(&mut self, x: T) {
    /// #       self.0 = Some(Arc::new(Node(x, self.0.take())));
    /// #   }
    /// }
    ///
    /// // The following code could have still caused a stack overflow
    /// // despite the manual `Drop` impl if that `Drop` impl had used
    /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`.
    ///
    /// // Create a long list and clone it
    /// let mut x = LinkedList::new();
    /// let size = 100000;
    /// # let size = if cfg!(miri) { 100 } else { size };
    /// for i in 0..size {
    ///     x.push(i); // Adds i to the front of x
    /// }
    /// let y = x.clone();
    ///
    /// // Drop the clones in parallel
    /// let x_thread = std::thread::spawn(|| drop(x));
    /// let y_thread = std::thread::spawn(|| drop(y));
    /// x_thread.join().unwrap();
    /// y_thread.join().unwrap();
    /// ```
    #[inline]
    #[stable(feature = "arc_into_inner", since = "1.70.0")]
    pub fn into_inner(this: Self) -> Option<T> {
        // Make sure that the ordinary `Drop` implementation isn’t called as well
        let mut this = mem::ManuallyDrop::new(this);

        // Following the implementation of `drop` and `drop_slow`
        if this.inner().strong.fetch_sub(1, Release) != 1 {
            return None;
        }

        acquire!(this.inner().strong);

        // SAFETY: This mirrors the line
        //
        //     unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
        //
        // in `drop_slow`. Instead of dropping the value behind the pointer,
        // it is read and eventually returned; `ptr::read` has the same
        // safety conditions as `ptr::drop_in_place`.

        let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };
        let alloc = unsafe { ptr::read(&this.alloc) };

        drop(Weak { ptr: this.ptr, alloc });

        Some(inner)
    }
}

impl<T> Arc<[T]> {
    /// Constructs a new atomically reference-counted slice with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// // Deferred initialization:
    /// let data = Arc::get_mut(&mut values).unwrap();
    /// data[0].write(1);
    /// data[1].write(2);
    /// data[2].write(3);
    ///
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use]
    pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
    }

    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
    /// filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
    /// incorrect usage of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let values = Arc::<[u32]>::new_zeroed_slice(3);
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [0, 0, 0])
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_zeroed_alloc", since = "1.92.0")]
    #[must_use]
    pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::array::<T>(len).unwrap(),
                |layout| Global.allocate_zeroed(layout),
                |mem| {
                    ptr::slice_from_raw_parts_mut(mem as *mut T, len)
                        as *mut ArcInner<[mem::MaybeUninit<T>]>
                },
            ))
        }
    }

    /// Converts the reference-counted slice into a reference-counted array.
    ///
    /// This operation does not reallocate; the underlying array of the slice is simply reinterpreted as an array type.
    ///
    /// If `N` is not exactly equal to the length of `self`, then this method returns `None`.
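    ///
    /// # Examples
    ///
    /// A minimal sketch (illustrative only):
    ///
    /// ```
    /// #![feature(alloc_slice_into_array)]
    ///
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[u32]> = Arc::from([1, 2, 3]);
    /// let array: Arc<[u32; 3]> = slice.into_array().unwrap();
    /// assert_eq!(*array, [1, 2, 3]);
    ///
    /// // A mismatched length yields `None`.
    /// let slice: Arc<[u32]> = Arc::from([1, 2, 3]);
    /// assert!(slice.into_array::<4>().is_none());
    /// ```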
1318    #[unstable(feature = "alloc_slice_into_array", issue = "148082")]
1319    #[inline]
1320    #[must_use]
1321    pub fn into_array<const N: usize>(self) -> Option<Arc<[T; N]>> {
1322        if self.len() == N {
1323            let ptr = Self::into_raw(self) as *const [T; N];
1324
1325            // SAFETY: The underlying array of a slice has the exact same layout as an actual array `[T; N]` if `N` is equal to the slice's length.
1326            let me = unsafe { Arc::from_raw(ptr) };
1327            Some(me)
1328        } else {
1329            None
1330        }
1331    }
1332}
1333
1334impl<T, A: Allocator> Arc<[T], A> {
1335    /// Constructs a new atomically reference-counted slice with uninitialized contents in the
1336    /// provided allocator.
1337    ///
1338    /// # Examples
1339    ///
1340    /// ```
1341    /// #![feature(get_mut_unchecked)]
1342    /// #![feature(allocator_api)]
1343    ///
1344    /// use std::sync::Arc;
1345    /// use std::alloc::System;
1346    ///
1347    /// let mut values = Arc::<[u32], _>::new_uninit_slice_in(3, System);
1348    ///
1349    /// let values = unsafe {
1350    ///     // Deferred initialization:
1351    ///     Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
1352    ///     Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
1353    ///     Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
1354    ///
1355    ///     values.assume_init()
1356    /// };
1357    ///
1358    /// assert_eq!(*values, [1, 2, 3])
1359    /// ```
1360    #[cfg(not(no_global_oom_handling))]
1361    #[unstable(feature = "allocator_api", issue = "32838")]
1362    #[inline]
1363    pub fn new_uninit_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
1364        unsafe { Arc::from_ptr_in(Arc::allocate_for_slice_in(len, &alloc), alloc) }
1365    }
1366
1367    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
1368    /// filled with `0` bytes, in the provided allocator.
1369    ///
1370    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
1371    /// incorrect usage of this method.
1372    ///
1373    /// # Examples
1374    ///
1375    /// ```
1376    /// #![feature(allocator_api)]
1377    ///
1378    /// use std::sync::Arc;
1379    /// use std::alloc::System;
1380    ///
1381    /// let values = Arc::<[u32], _>::new_zeroed_slice_in(3, System);
1382    /// let values = unsafe { values.assume_init() };
1383    ///
1384    /// assert_eq!(*values, [0, 0, 0])
1385    /// ```
1386    ///
1387    /// [zeroed]: mem::MaybeUninit::zeroed
1388    #[cfg(not(no_global_oom_handling))]
1389    #[unstable(feature = "allocator_api", issue = "32838")]
1390    #[inline]
1391    pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
1392        unsafe {
1393            Arc::from_ptr_in(
1394                Arc::allocate_for_layout(
1395                    Layout::array::<T>(len).unwrap(),
1396                    |layout| alloc.allocate_zeroed(layout),
1397                    |mem| {
1398                        ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len)
1399                            as *mut ArcInner<[mem::MaybeUninit<T>]>
1400                    },
1401                ),
1402                alloc,
1403            )
1404        }
1405    }
1406}
1407
1408impl<T, A: Allocator> Arc<mem::MaybeUninit<T>, A> {
1409    /// Converts to `Arc<T>`.
1410    ///
1411    /// # Safety
1412    ///
1413    /// As with [`MaybeUninit::assume_init`],
1414    /// it is up to the caller to guarantee that the inner value
1415    /// really is in an initialized state.
1416    /// Calling this when the content is not yet fully initialized
1417    /// causes immediate undefined behavior.
1418    ///
1419    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
1420    ///
1421    /// # Examples
1422    ///
1423    /// ```
1424    /// use std::sync::Arc;
1425    ///
1426    /// let mut five = Arc::<u32>::new_uninit();
1427    ///
1428    /// // Deferred initialization:
1429    /// Arc::get_mut(&mut five).unwrap().write(5);
1430    ///
1431    /// let five = unsafe { five.assume_init() };
1432    ///
1433    /// assert_eq!(*five, 5)
1434    /// ```
1435    #[stable(feature = "new_uninit", since = "1.82.0")]
1436    #[must_use = "`self` will be dropped if the result is not used"]
1437    #[inline]
1438    pub unsafe fn assume_init(self) -> Arc<T, A> {
1439        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
1440        unsafe { Arc::from_inner_in(ptr.cast(), alloc) }
1441    }
1442}
1443
1444impl<T: ?Sized + CloneToUninit> Arc<T> {
1445    /// Constructs a new `Arc<T>` with a clone of `value`.
1446    ///
1447    /// # Examples
1448    ///
1449    /// ```
1450    /// #![feature(clone_from_ref)]
1451    /// use std::sync::Arc;
1452    ///
1453    /// let hello: Arc<str> = Arc::clone_from_ref("hello");
1454    /// ```
1455    #[cfg(not(no_global_oom_handling))]
1456    #[unstable(feature = "clone_from_ref", issue = "149075")]
1457    pub fn clone_from_ref(value: &T) -> Arc<T> {
1458        Arc::clone_from_ref_in(value, Global)
1459    }
1460
1461    /// Constructs a new `Arc<T>` with a clone of `value`, returning an error if allocation fails.
1462    ///
1463    /// # Examples
1464    ///
1465    /// ```
1466    /// #![feature(clone_from_ref)]
1467    /// #![feature(allocator_api)]
1468    /// use std::sync::Arc;
1469    ///
1470    /// let hello: Arc<str> = Arc::try_clone_from_ref("hello")?;
1471    /// # Ok::<(), std::alloc::AllocError>(())
1472    /// ```
1473    #[unstable(feature = "clone_from_ref", issue = "149075")]
1474    //#[unstable(feature = "allocator_api", issue = "32838")]
1475    pub fn try_clone_from_ref(value: &T) -> Result<Arc<T>, AllocError> {
1476        Arc::try_clone_from_ref_in(value, Global)
1477    }
1478}
1479
1480impl<T: ?Sized + CloneToUninit, A: Allocator> Arc<T, A> {
1481    /// Constructs a new `Arc<T>` with a clone of `value` in the provided allocator.
1482    ///
1483    /// # Examples
1484    ///
1485    /// ```
1486    /// #![feature(clone_from_ref)]
1487    /// #![feature(allocator_api)]
1488    /// use std::sync::Arc;
1489    /// use std::alloc::System;
1490    ///
1491    /// let hello: Arc<str, System> = Arc::clone_from_ref_in("hello", System);
1492    /// ```
1493    #[cfg(not(no_global_oom_handling))]
1494    #[unstable(feature = "clone_from_ref", issue = "149075")]
1495    //#[unstable(feature = "allocator_api", issue = "32838")]
1496    pub fn clone_from_ref_in(value: &T, alloc: A) -> Arc<T, A> {
1497        // `in_progress` drops the allocation if we panic before finishing initializing it.
1498        let mut in_progress: UniqueArcUninit<T, A> = UniqueArcUninit::new(value, alloc);
1499
1500        // Initialize with clone of value.
1501        let initialized_clone = unsafe {
1502            // Clone. If the clone panics, `in_progress` is dropped, cleaning up the allocation.
1503            value.clone_to_uninit(in_progress.data_ptr().cast());
1504            // Cast type of pointer, now that it is initialized.
1505            in_progress.into_arc()
1506        };
1507
1508        initialized_clone
1509    }
1510
1511    /// Constructs a new `Arc<T>` with a clone of `value` in the provided allocator, returning an error if allocation fails.
1512    ///
1513    /// # Examples
1514    ///
1515    /// ```
1516    /// #![feature(clone_from_ref)]
1517    /// #![feature(allocator_api)]
1518    /// use std::sync::Arc;
1519    /// use std::alloc::System;
1520    ///
1521    /// let hello: Arc<str, System> = Arc::try_clone_from_ref_in("hello", System)?;
1522    /// # Ok::<(), std::alloc::AllocError>(())
1523    /// ```
1524    #[unstable(feature = "clone_from_ref", issue = "149075")]
1525    //#[unstable(feature = "allocator_api", issue = "32838")]
1526    pub fn try_clone_from_ref_in(value: &T, alloc: A) -> Result<Arc<T, A>, AllocError> {
1527        // `in_progress` drops the allocation if we panic before finishing initializing it.
1528        let mut in_progress: UniqueArcUninit<T, A> = UniqueArcUninit::try_new(value, alloc)?;
1529
1530        // Initialize with clone of value.
1531        let initialized_clone = unsafe {
1532            // Clone. If the clone panics, `in_progress` will be dropped and clean up.
1533            value.clone_to_uninit(in_progress.data_ptr().cast());
1534            // Cast type of pointer, now that it is initialized.
1535            in_progress.into_arc()
1536        };
1537
1538        Ok(initialized_clone)
1539    }
1540}
1541
1542impl<T, A: Allocator> Arc<[mem::MaybeUninit<T>], A> {
1543    /// Converts to `Arc<[T]>`.
1544    ///
1545    /// # Safety
1546    ///
1547    /// As with [`MaybeUninit::assume_init`],
1548    /// it is up to the caller to guarantee that the inner value
1549    /// really is in an initialized state.
1550    /// Calling this when the content is not yet fully initialized
1551    /// causes immediate undefined behavior.
1552    ///
1553    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
1554    ///
1555    /// # Examples
1556    ///
1557    /// ```
1558    /// use std::sync::Arc;
1559    ///
1560    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
1561    ///
1562    /// // Deferred initialization:
1563    /// let data = Arc::get_mut(&mut values).unwrap();
1564    /// data[0].write(1);
1565    /// data[1].write(2);
1566    /// data[2].write(3);
1567    ///
1568    /// let values = unsafe { values.assume_init() };
1569    ///
1570    /// assert_eq!(*values, [1, 2, 3])
1571    /// ```
1572    #[stable(feature = "new_uninit", since = "1.82.0")]
1573    #[must_use = "`self` will be dropped if the result is not used"]
1574    #[inline]
1575    pub unsafe fn assume_init(self) -> Arc<[T], A> {
1576        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
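        // `MaybeUninit<T>` and `T` have the same layout, so the pointer cast
        // below only changes the type of the slice elements.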
1577        unsafe { Arc::from_ptr_in(ptr.as_ptr() as _, alloc) }
1578    }
1579}
1580
1581impl<T: ?Sized> Arc<T> {
1582    /// Constructs an `Arc<T>` from a raw pointer.
1583    ///
1584    /// The raw pointer must have been previously returned by a call to
1585    /// [`Arc<U>::into_raw`][into_raw] with the following requirements:
1586    ///
1587    /// * If `U` is sized, it must have the same size and alignment as `T`. This
1588    ///   is trivially true if `U` is `T`.
1589    /// * If `U` is unsized, its data pointer must have the same size and
1590    ///   alignment as `T`. This is trivially true if `Arc<U>` was constructed
1591    ///   through `Arc<T>` and then converted to `Arc<U>` through an [unsized
1592    ///   coercion].
1593    ///
1594    /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
1595    /// and alignment, this is basically like transmuting references of
1596    /// different types. See [`mem::transmute`][transmute] for more information
1597    /// on what restrictions apply in this case.
1598    ///
1599    /// The raw pointer must point to a block of memory allocated by the global allocator.
1600    ///
1601    /// The user of `from_raw` has to make sure a specific value of `T` is only
1602    /// dropped once.
1603    ///
1604    /// This function is unsafe because improper use may lead to memory unsafety,
1605    /// even if the returned `Arc<T>` is never accessed.
1606    ///
1607    /// [into_raw]: Arc::into_raw
1608    /// [transmute]: core::mem::transmute
1609    /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
1610    ///
1611    /// # Examples
1612    ///
1613    /// ```
1614    /// use std::sync::Arc;
1615    ///
1616    /// let x = Arc::new("hello".to_owned());
1617    /// let x_ptr = Arc::into_raw(x);
1618    ///
1619    /// unsafe {
1620    ///     // Convert back to an `Arc` to prevent leak.
1621    ///     let x = Arc::from_raw(x_ptr);
1622    ///     assert_eq!(&*x, "hello");
1623    ///
1624    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
1625    /// }
1626    ///
1627    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1628    /// ```
1629    ///
1630    /// Convert a slice back into its original array:
1631    ///
1632    /// ```
1633    /// use std::sync::Arc;
1634    ///
1635    /// let x: Arc<[u32]> = Arc::new([1, 2, 3]);
1636    /// let x_ptr: *const [u32] = Arc::into_raw(x);
1637    ///
1638    /// unsafe {
1639    ///     let x: Arc<[u32; 3]> = Arc::from_raw(x_ptr.cast::<[u32; 3]>());
1640    ///     assert_eq!(&*x, &[1, 2, 3]);
1641    /// }
1642    /// ```
1643    #[inline]
1644    #[stable(feature = "rc_raw", since = "1.17.0")]
1645    pub unsafe fn from_raw(ptr: *const T) -> Self {
1646        unsafe { Arc::from_raw_in(ptr, Global) }
1647    }
1648
1649    /// Consumes the `Arc`, returning the wrapped pointer.
1650    ///
1651    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
1652    /// [`Arc::from_raw`].
1653    ///
1654    /// # Examples
1655    ///
1656    /// ```
1657    /// use std::sync::Arc;
1658    ///
1659    /// let x = Arc::new("hello".to_owned());
1660    /// let x_ptr = Arc::into_raw(x);
1661    /// assert_eq!(unsafe { &*x_ptr }, "hello");
1662    /// # // Prevent leaks for Miri.
1663    /// # drop(unsafe { Arc::from_raw(x_ptr) });
1664    /// ```
1665    #[must_use = "losing the pointer will leak memory"]
1666    #[stable(feature = "rc_raw", since = "1.17.0")]
1667    #[rustc_never_returns_null_ptr]
1668    pub fn into_raw(this: Self) -> *const T {
1669        let this = ManuallyDrop::new(this);
1670        Self::as_ptr(&*this)
1671    }
1672
1673    /// Increments the strong reference count on the `Arc<T>` associated with the
1674    /// provided pointer by one.
1675    ///
1676    /// # Safety
1677    ///
1678    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1679    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1680    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1681    /// least 1) for the duration of this method, and `ptr` must point to a block of memory
1682    /// allocated by the global allocator.
1683    ///
1684    /// [from_raw_in]: Arc::from_raw_in
1685    ///
1686    /// # Examples
1687    ///
1688    /// ```
1689    /// use std::sync::Arc;
1690    ///
1691    /// let five = Arc::new(5);
1692    ///
1693    /// unsafe {
1694    ///     let ptr = Arc::into_raw(five);
1695    ///     Arc::increment_strong_count(ptr);
1696    ///
1697    ///     // This assertion is deterministic because we haven't shared
1698    ///     // the `Arc` between threads.
1699    ///     let five = Arc::from_raw(ptr);
1700    ///     assert_eq!(2, Arc::strong_count(&five));
1701    /// #   // Prevent leaks for Miri.
1702    /// #   Arc::decrement_strong_count(ptr);
1703    /// }
1704    /// ```
1705    #[inline]
1706    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
1707    pub unsafe fn increment_strong_count(ptr: *const T) {
1708        unsafe { Arc::increment_strong_count_in(ptr, Global) }
1709    }
1710
1711    /// Decrements the strong reference count on the `Arc<T>` associated with the
1712    /// provided pointer by one.
1713    ///
1714    /// # Safety
1715    ///
1716    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1717    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1718    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1719    /// least 1) when invoking this method, and `ptr` must point to a block of memory
1720    /// allocated by the global allocator. This method can be used to release the final
1721    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1722    /// released.
1723    ///
1724    /// [from_raw_in]: Arc::from_raw_in
1725    ///
1726    /// # Examples
1727    ///
1728    /// ```
1729    /// use std::sync::Arc;
1730    ///
1731    /// let five = Arc::new(5);
1732    ///
1733    /// unsafe {
1734    ///     let ptr = Arc::into_raw(five);
1735    ///     Arc::increment_strong_count(ptr);
1736    ///
1737    ///     // These assertions are deterministic because we haven't shared
1738    ///     // the `Arc` between threads.
1739    ///     let five = Arc::from_raw(ptr);
1740    ///     assert_eq!(2, Arc::strong_count(&five));
1741    ///     Arc::decrement_strong_count(ptr);
1742    ///     assert_eq!(1, Arc::strong_count(&five));
1743    /// }
1744    /// ```
1745    #[inline]
1746    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
1747    pub unsafe fn decrement_strong_count(ptr: *const T) {
1748        unsafe { Arc::decrement_strong_count_in(ptr, Global) }
1749    }
1750}
1751
1752impl<T: ?Sized, A: Allocator> Arc<T, A> {
1753    /// Returns a reference to the underlying allocator.
1754    ///
1755    /// Note: this is an associated function, which means that you have
1756    /// to call it as `Arc::allocator(&a)` instead of `a.allocator()`. This
1757    /// is so that there is no conflict with a method on the inner type.
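    ///
    /// # Examples
    ///
    /// A minimal usage sketch (assumes the unstable `allocator_api` feature):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let a = Arc::new_in(5, System);
    /// let _alloc: &System = Arc::allocator(&a);
    /// ```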
1758    #[inline]
1759    #[unstable(feature = "allocator_api", issue = "32838")]
1760    pub fn allocator(this: &Self) -> &A {
1761        &this.alloc
1762    }
1763
1764    /// Consumes the `Arc`, returning the wrapped pointer and allocator.
1765    ///
1766    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
1767    /// [`Arc::from_raw_in`].
1768    ///
1769    /// # Examples
1770    ///
1771    /// ```
1772    /// #![feature(allocator_api)]
1773    /// use std::sync::Arc;
1774    /// use std::alloc::System;
1775    ///
1776    /// let x = Arc::new_in("hello".to_owned(), System);
1777    /// let (ptr, alloc) = Arc::into_raw_with_allocator(x);
1778    /// assert_eq!(unsafe { &*ptr }, "hello");
1779    /// let x = unsafe { Arc::from_raw_in(ptr, alloc) };
1780    /// assert_eq!(&*x, "hello");
1781    /// ```
1782    #[must_use = "losing the pointer will leak memory"]
1783    #[unstable(feature = "allocator_api", issue = "32838")]
1784    pub fn into_raw_with_allocator(this: Self) -> (*const T, A) {
1785        let this = mem::ManuallyDrop::new(this);
1786        let ptr = Self::as_ptr(&this);
1787        // Safety: `this` is ManuallyDrop so the allocator will not be double-dropped
1788        let alloc = unsafe { ptr::read(&this.alloc) };
1789        (ptr, alloc)
1790    }
1791
1792    /// Provides a raw pointer to the data.
1793    ///
1794    /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
1795    /// as long as there are strong counts in the `Arc`.
1796    ///
1797    /// # Examples
1798    ///
1799    /// ```
1800    /// use std::sync::Arc;
1801    ///
1802    /// let x = Arc::new("hello".to_owned());
1803    /// let y = Arc::clone(&x);
1804    /// let x_ptr = Arc::as_ptr(&x);
1805    /// assert_eq!(x_ptr, Arc::as_ptr(&y));
1806    /// assert_eq!(unsafe { &*x_ptr }, "hello");
1807    /// ```
1808    #[must_use]
1809    #[stable(feature = "rc_as_ptr", since = "1.45.0")]
1810    #[rustc_never_returns_null_ptr]
1811    pub fn as_ptr(this: &Self) -> *const T {
1812        let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
1813
1814        // SAFETY: This cannot go through Deref::deref or ArcInnerPtr::inner because
1815        // this is required to retain raw/mut provenance such that e.g. `get_mut` can
1816        // write through the pointer after the Arc is recovered through `from_raw`.
1817        unsafe { &raw mut (*ptr).data }
1818    }
1819
1820    /// Constructs an `Arc<T, A>` from a raw pointer.
1821    ///
1822    /// The raw pointer must have been previously returned by a call to [`Arc<U,
1823    /// A>::into_raw`][into_raw] with the following requirements:
1824    ///
1825    /// * If `U` is sized, it must have the same size and alignment as `T`. This
1826    ///   is trivially true if `U` is `T`.
1827    /// * If `U` is unsized, its data pointer must have the same size and
1828    ///   alignment as `T`. This is trivially true if `Arc<U>` was constructed
1829    ///   through `Arc<T>` and then converted to `Arc<U>` through an [unsized
1830    ///   coercion].
1831    ///
1832    /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
1833    /// and alignment, this is basically like transmuting references of
1834    /// different types. See [`mem::transmute`][transmute] for more information
1835    /// on what restrictions apply in this case.
1836    ///
1837    /// The raw pointer must point to a block of memory allocated by `alloc`.
1838    ///
1839    /// The user of `from_raw` has to make sure a specific value of `T` is only
1840    /// dropped once.
1841    ///
1842    /// This function is unsafe because improper use may lead to memory unsafety,
1843    /// even if the returned `Arc<T>` is never accessed.
1844    ///
1845    /// [into_raw]: Arc::into_raw
1846    /// [transmute]: core::mem::transmute
1847    /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
1848    ///
1849    /// # Examples
1850    ///
1851    /// ```
1852    /// #![feature(allocator_api)]
1853    ///
1854    /// use std::sync::Arc;
1855    /// use std::alloc::System;
1856    ///
1857    /// let x = Arc::new_in("hello".to_owned(), System);
1858    /// let (x_ptr, alloc) = Arc::into_raw_with_allocator(x);
1859    ///
1860    /// unsafe {
1861    ///     // Convert back to an `Arc` to prevent leak.
1862    ///     let x = Arc::from_raw_in(x_ptr, System);
1863    ///     assert_eq!(&*x, "hello");
1864    ///
1865    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
1866    /// }
1867    ///
1868    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1869    /// ```
1870    ///
1871    /// Convert a slice back into its original array:
1872    ///
1873    /// ```
1874    /// #![feature(allocator_api)]
1875    ///
1876    /// use std::sync::Arc;
1877    /// use std::alloc::System;
1878    ///
1879    /// let x: Arc<[u32], _> = Arc::new_in([1, 2, 3], System);
1880    /// let x_ptr: *const [u32] = Arc::into_raw_with_allocator(x).0;
1881    ///
1882    /// unsafe {
1883    ///     let x: Arc<[u32; 3], _> = Arc::from_raw_in(x_ptr.cast::<[u32; 3]>(), System);
1884    ///     assert_eq!(&*x, &[1, 2, 3]);
1885    /// }
1886    /// ```
1887    #[inline]
1888    #[unstable(feature = "allocator_api", issue = "32838")]
1889    pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
1890        unsafe {
1891            let offset = data_offset(ptr);
1892
1893            // Reverse the offset to find the original ArcInner.
1894            let arc_ptr = ptr.byte_sub(offset) as *mut ArcInner<T>;
1895
1896            Self::from_ptr_in(arc_ptr, alloc)
1897        }
1898    }
1899
1900    /// Creates a new [`Weak`] pointer to this allocation.
1901    ///
1902    /// # Examples
1903    ///
1904    /// ```
1905    /// use std::sync::Arc;
1906    ///
1907    /// let five = Arc::new(5);
1908    ///
1909    /// let weak_five = Arc::downgrade(&five);
1910    /// ```
1911    #[must_use = "this returns a new `Weak` pointer, \
1912                  without modifying the original `Arc`"]
1913    #[stable(feature = "arc_weak", since = "1.4.0")]
1914    pub fn downgrade(this: &Self) -> Weak<T, A>
1915    where
1916        A: Clone,
1917    {
1918        // This Relaxed is OK because we're checking the value in the CAS
1919        // below.
1920        let mut cur = this.inner().weak.load(Relaxed);
1921
1922        loop {
1923            // check if the weak counter is currently "locked"; if so, spin.
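            // (The lock value is `usize::MAX`, written by `is_unique` while it
            // inspects the strong count.)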
1924            if cur == usize::MAX {
1925                hint::spin_loop();
1926                cur = this.inner().weak.load(Relaxed);
1927                continue;
1928            }
1929
1930            // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
1931            assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
1932
1933            // NOTE: this code currently ignores the possibility of overflow
1934            // into usize::MAX; in general both Rc and Arc need to be adjusted
1935            // to deal with overflow.
1936
1937            // Unlike with Clone(), we need this to be an Acquire read to
1938            // synchronize with the write coming from `is_unique`, so that the
1939            // events prior to that write happen before this read.
1940            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
1941                Ok(_) => {
1942                    // Make sure we do not create a dangling Weak
1943                    debug_assert!(!is_dangling(this.ptr.as_ptr()));
1944                    return Weak { ptr: this.ptr, alloc: this.alloc.clone() };
1945                }
1946                Err(old) => cur = old,
1947            }
1948        }
1949    }
1950
1951    /// Gets the number of [`Weak`] pointers to this allocation.
1952    ///
1953    /// # Safety
1954    ///
1955    /// This method by itself is safe, but using it correctly requires extra care.
1956    /// Another thread can change the weak count at any time,
1957    /// including potentially between calling this method and acting on the result.
1958    ///
1959    /// # Examples
1960    ///
1961    /// ```
1962    /// use std::sync::Arc;
1963    ///
1964    /// let five = Arc::new(5);
1965    /// let _weak_five = Arc::downgrade(&five);
1966    ///
1967    /// // This assertion is deterministic because we haven't shared
1968    /// // the `Arc` or `Weak` between threads.
1969    /// assert_eq!(1, Arc::weak_count(&five));
1970    /// ```
1971    #[inline]
1972    #[must_use]
1973    #[stable(feature = "arc_counts", since = "1.15.0")]
1974    pub fn weak_count(this: &Self) -> usize {
1975        let cnt = this.inner().weak.load(Relaxed);
1976        // If the weak count is currently locked, the value of the
1977        // count was 0 just before taking the lock.
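        // Otherwise, subtract one for the implicit weak reference collectively
        // held by all strong references.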
1978        if cnt == usize::MAX { 0 } else { cnt - 1 }
1979    }
1980
1981    /// Gets the number of strong (`Arc`) pointers to this allocation.
1982    ///
1983    /// # Safety
1984    ///
1985    /// This method by itself is safe, but using it correctly requires extra care.
1986    /// Another thread can change the strong count at any time,
1987    /// including potentially between calling this method and acting on the result.
1988    ///
1989    /// # Examples
1990    ///
1991    /// ```
1992    /// use std::sync::Arc;
1993    ///
1994    /// let five = Arc::new(5);
1995    /// let _also_five = Arc::clone(&five);
1996    ///
1997    /// // This assertion is deterministic because we haven't shared
1998    /// // the `Arc` between threads.
1999    /// assert_eq!(2, Arc::strong_count(&five));
2000    /// ```
2001    #[inline]
2002    #[must_use]
2003    #[stable(feature = "arc_counts", since = "1.15.0")]
2004    pub fn strong_count(this: &Self) -> usize {
2005        this.inner().strong.load(Relaxed)
2006    }
2007
2008    /// Increments the strong reference count on the `Arc<T>` associated with the
2009    /// provided pointer by one.
2010    ///
2011    /// # Safety
2012    ///
2013    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
2014    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
2015    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
2016    /// least 1) for the duration of this method, and `ptr` must point to a block of memory
2017    /// allocated by `alloc`.
2018    ///
2019    /// [from_raw_in]: Arc::from_raw_in
2020    ///
2021    /// # Examples
2022    ///
2023    /// ```
2024    /// #![feature(allocator_api)]
2025    ///
2026    /// use std::sync::Arc;
2027    /// use std::alloc::System;
2028    ///
2029    /// let five = Arc::new_in(5, System);
2030    ///
2031    /// unsafe {
2032    ///     let (ptr, _alloc) = Arc::into_raw_with_allocator(five);
2033    ///     Arc::increment_strong_count_in(ptr, System);
2034    ///
2035    ///     // This assertion is deterministic because we haven't shared
2036    ///     // the `Arc` between threads.
2037    ///     let five = Arc::from_raw_in(ptr, System);
2038    ///     assert_eq!(2, Arc::strong_count(&five));
2039    /// #   // Prevent leaks for Miri.
2040    /// #   Arc::decrement_strong_count_in(ptr, System);
2041    /// }
2042    /// ```
2043    #[inline]
2044    #[unstable(feature = "allocator_api", issue = "32838")]
2045    pub unsafe fn increment_strong_count_in(ptr: *const T, alloc: A)
2046    where
2047        A: Clone,
2048    {
2049        // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
2050        let arc = unsafe { mem::ManuallyDrop::new(Arc::from_raw_in(ptr, alloc)) };
2051        // Now increase refcount, but don't drop new refcount either
2052        let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
2053    }
2054
2055    /// Decrements the strong reference count on the `Arc<T>` associated with the
2056    /// provided pointer by one.
2057    ///
2058    /// # Safety
2059    ///
2060    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
2061    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
2062    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
2063    /// least 1) when invoking this method, and `ptr` must point to a block of memory
2064    /// allocated by `alloc`. This method can be used to release the final
2065    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
2066    /// released.
2067    ///
2068    /// [from_raw_in]: Arc::from_raw_in
2069    ///
2070    /// # Examples
2071    ///
2072    /// ```
2073    /// #![feature(allocator_api)]
2074    ///
2075    /// use std::sync::Arc;
2076    /// use std::alloc::System;
2077    ///
2078    /// let five = Arc::new_in(5, System);
2079    ///
2080    /// unsafe {
2081    ///     let (ptr, _alloc) = Arc::into_raw_with_allocator(five);
2082    ///     Arc::increment_strong_count_in(ptr, System);
2083    ///
2084    ///     // These assertions are deterministic because we haven't shared
2085    ///     // the `Arc` between threads.
2086    ///     let five = Arc::from_raw_in(ptr, System);
2087    ///     assert_eq!(2, Arc::strong_count(&five));
2088    ///     Arc::decrement_strong_count_in(ptr, System);
2089    ///     assert_eq!(1, Arc::strong_count(&five));
2090    /// }
2091    /// ```
2092    #[inline]
2093    #[unstable(feature = "allocator_api", issue = "32838")]
2094    pub unsafe fn decrement_strong_count_in(ptr: *const T, alloc: A) {
2095        unsafe { drop(Arc::from_raw_in(ptr, alloc)) };
2096    }
2097
2098    #[inline]
2099    fn inner(&self) -> &ArcInner<T> {
2100        // This unsafety is ok because while this arc is alive we're guaranteed
2101        // that the inner pointer is valid. Furthermore, we know that the
2102        // `ArcInner` structure itself is `Sync` because the inner data is
2103        // `Sync` as well, so we're ok loaning out an immutable pointer to these
2104        // contents.
2105        unsafe { self.ptr.as_ref() }
2106    }
2107
2108    // Non-inlined part of `drop`.
2109    #[inline(never)]
2110    unsafe fn drop_slow(&mut self) {
2111        // Drop the weak ref collectively held by all strong references when this
2112        // variable goes out of scope. This ensures that the memory is deallocated
2113        // even if the destructor of `T` panics.
2114        // Take a reference to `self.alloc` instead of cloning because 1. it'll last long
2115        // enough, and 2. you should be able to drop `Arc`s with unclonable allocators
2116        let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };
2117
2118        // Destroy the data at this time, even though we must not free the box
2119        // allocation itself (there might still be weak pointers lying around).
2120        // We cannot use `get_mut_unchecked` here, because `self.alloc` is borrowed.
2121        unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
2122    }
2123
2124    /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to
2125    /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers.
2126    ///
2127    /// # Examples
2128    ///
2129    /// ```
2130    /// use std::sync::Arc;
2131    ///
2132    /// let five = Arc::new(5);
2133    /// let same_five = Arc::clone(&five);
2134    /// let other_five = Arc::new(5);
2135    ///
2136    /// assert!(Arc::ptr_eq(&five, &same_five));
2137    /// assert!(!Arc::ptr_eq(&five, &other_five));
2138    /// ```
2139    ///
2140    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
2141    #[inline]
2142    #[must_use]
2143    #[stable(feature = "ptr_eq", since = "1.17.0")]
2144    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
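        // `ptr::addr_eq` compares only the data addresses and discards any
        // `dyn Trait` vtable metadata, as documented above.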
2145        ptr::addr_eq(this.ptr.as_ptr(), other.ptr.as_ptr())
2146    }
2147}
2148
2149impl<T: ?Sized> Arc<T> {
2150    /// Allocates an `ArcInner<T>` with sufficient space for
2151    /// a possibly-unsized inner value where the value has the layout provided.
2152    ///
2153    /// The function `mem_to_arcinner` is called with the data pointer
2154    /// and must return a (potentially fat) pointer to the `ArcInner<T>`.
2155    #[cfg(not(no_global_oom_handling))]
2156    unsafe fn allocate_for_layout(
2157        value_layout: Layout,
2158        allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
2159        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
2160    ) -> *mut ArcInner<T> {
2161        let layout = arcinner_layout_for_value_layout(value_layout);
2162
2163        let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout));
2164
2165        unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) }
2166    }
2167
2168    /// Allocates an `ArcInner<T>` with sufficient space for
2169    /// a possibly-unsized inner value where the value has the layout provided,
2170    /// returning an error if allocation fails.
2171    ///
2172    /// The function `mem_to_arcinner` is called with the data pointer
2173    /// and must return a (potentially fat) pointer to the `ArcInner<T>`.
2174    unsafe fn try_allocate_for_layout(
2175        value_layout: Layout,
2176        allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
2177        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
2178    ) -> Result<*mut ArcInner<T>, AllocError> {
2179        let layout = arcinner_layout_for_value_layout(value_layout);
2180
2181        let ptr = allocate(layout)?;
2182
2183        let inner = unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) };
2184
2185        Ok(inner)
2186    }
2187
2188    unsafe fn initialize_arcinner(
2189        ptr: NonNull<[u8]>,
2190        layout: Layout,
2191        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
2192    ) -> *mut ArcInner<T> {
2193        let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
2194        debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout);
2195
2196        unsafe {
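            // Both counters start at 1: the strong count for the `Arc` being
            // created, and the implicit weak reference collectively held by
            // all strong references (see `drop_slow`).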
2197            (&raw mut (*inner).strong).write(atomic::AtomicUsize::new(1));
2198            (&raw mut (*inner).weak).write(atomic::AtomicUsize::new(1));
2199        }
2200
2201        inner
2202    }
2203}
2204
2205impl<T: ?Sized, A: Allocator> Arc<T, A> {
2206    /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
2207    #[inline]
2208    #[cfg(not(no_global_oom_handling))]
2209    unsafe fn allocate_for_ptr_in(ptr: *const T, alloc: &A) -> *mut ArcInner<T> {
2210        // Allocate for the `ArcInner<T>` using the given value.
2211        unsafe {
2212            Arc::allocate_for_layout(
2213                Layout::for_value_raw(ptr),
2214                |layout| alloc.allocate(layout),
2215                |mem| mem.with_metadata_of(ptr as *const ArcInner<T>),
2216            )
2217        }
2218    }
2219
2220    #[cfg(not(no_global_oom_handling))]
2221    fn from_box_in(src: Box<T, A>) -> Arc<T, A> {
2222        unsafe {
2223            let value_size = size_of_val(&*src);
2224            let ptr = Self::allocate_for_ptr_in(&*src, Box::allocator(&src));
2225
2226            // Copy value as bytes
2227            ptr::copy_nonoverlapping(
2228                (&raw const *src) as *const u8,
2229                (&raw mut (*ptr).data) as *mut u8,
2230                value_size,
2231            );
2232
2233            // Free the allocation without dropping its contents
2234            let (bptr, alloc) = Box::into_raw_with_allocator(src);
2235            let src = Box::from_raw_in(bptr as *mut mem::ManuallyDrop<T>, alloc.by_ref());
2236            drop(src);
2237
2238            Self::from_ptr_in(ptr, alloc)
2239        }
2240    }
2241}
2242
2243impl<T> Arc<[T]> {
2244    /// Allocates an `ArcInner<[T]>` with the given length.
2245    #[cfg(not(no_global_oom_handling))]
2246    unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
2247        unsafe {
2248            Self::allocate_for_layout(
2249                Layout::array::<T>(len).unwrap(),
2250                |layout| Global.allocate(layout),
2251                |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2252            )
2253        }
2254    }
2255
2256    /// Copy elements from slice into newly allocated `Arc<[T]>`
2257    ///
2258    /// Unsafe because the caller must either take ownership, bind `T: Copy` or
2259    /// bind `T: TrivialClone`.
2260    #[cfg(not(no_global_oom_handling))]
2261    unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
2262        unsafe {
2263            let ptr = Self::allocate_for_slice(v.len());
2264
2265            ptr::copy_nonoverlapping(v.as_ptr(), (&raw mut (*ptr).data) as *mut T, v.len());
2266
2267            Self::from_ptr(ptr)
2268        }
2269    }
2270
2271    /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
2272    ///
2273    /// Behavior is undefined should the size be wrong.
2274    #[cfg(not(no_global_oom_handling))]
2275    unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Arc<[T]> {
2276        // Panic guard while cloning T elements.
2277        // In the event of a panic, elements that have been written
2278        // into the new ArcInner will be dropped, then the memory freed.
2279        struct Guard<T> {
2280            mem: NonNull<u8>,
2281            elems: *mut T,
2282            layout: Layout,
2283            n_elems: usize,
2284        }
2285
2286        impl<T> Drop for Guard<T> {
2287            fn drop(&mut self) {
2288                unsafe {
2289                    let slice = from_raw_parts_mut(self.elems, self.n_elems);
2290                    ptr::drop_in_place(slice);
2291
2292                    Global.deallocate(self.mem, self.layout);
2293                }
2294            }
2295        }
2296
2297        unsafe {
2298            let ptr = Self::allocate_for_slice(len);
2299
2300            let mem = ptr as *mut _ as *mut u8;
2301            let layout = Layout::for_value_raw(ptr);
2302
2303            // Pointer to first element
2304            let elems = (&raw mut (*ptr).data) as *mut T;
2305
2306            let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
2307
2308            for (i, item) in iter.enumerate() {
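                // Write the element, then bump the guard's count so that a
                // panic from the iterator drops only the initialized prefix.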
2309                ptr::write(elems.add(i), item);
2310                guard.n_elems += 1;
2311            }
2312
2313            // All clear. Forget the guard so it doesn't free the new ArcInner.
2314            mem::forget(guard);
2315
2316            Self::from_ptr(ptr)
2317        }
2318    }
2319}
2320
2321impl<T, A: Allocator> Arc<[T], A> {
2322    /// Allocates an `ArcInner<[T]>` with the given length.
2323    #[inline]
2324    #[cfg(not(no_global_oom_handling))]
2325    unsafe fn allocate_for_slice_in(len: usize, alloc: &A) -> *mut ArcInner<[T]> {
2326        unsafe {
2327            Arc::allocate_for_layout(
2328                Layout::array::<T>(len).unwrap(),
2329                |layout| alloc.allocate(layout),
2330                |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2331            )
2332        }
2333    }
2334}
2335
2336/// Specialization trait used for `From<&[T]>`.
2337#[cfg(not(no_global_oom_handling))]
2338trait ArcFromSlice<T> {
2339    fn from_slice(slice: &[T]) -> Self;
2340}
2341
2342#[cfg(not(no_global_oom_handling))]
2343impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
2344    #[inline]
2345    default fn from_slice(v: &[T]) -> Self {
2346        unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
2347    }
2348}
2349
2350#[cfg(not(no_global_oom_handling))]
2351impl<T: TrivialClone> ArcFromSlice<T> for Arc<[T]> {
2352    #[inline]
2353    fn from_slice(v: &[T]) -> Self {
2354        // SAFETY: `T` implements `TrivialClone`, so this is sound and equivalent
2355        // to the above.
2356        unsafe { Arc::copy_from_slice(v) }
2357    }
2358}
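// E.g. `Arc::<[u32]>::from(&[1, 2, 3][..])` goes through `from_slice`: the
// specialized impl above performs one bitwise copy when `T: TrivialClone`,
// while other `T: Clone` types take the element-by-element `from_iter_exact`
// path.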
2359
2360#[stable(feature = "rust1", since = "1.0.0")]
2361impl<T: ?Sized, A: Allocator + Clone> Clone for Arc<T, A> {
2362    /// Makes a clone of the `Arc` pointer.
2363    ///
2364    /// This creates another pointer to the same allocation, increasing the
2365    /// strong reference count.
2366    ///
2367    /// # Examples
2368    ///
2369    /// ```
2370    /// use std::sync::Arc;
2371    ///
2372    /// let five = Arc::new(5);
2373    ///
2374    /// let _ = Arc::clone(&five);
2375    /// ```
2376    #[inline]
2377    fn clone(&self) -> Arc<T, A> {
2378        // Using a relaxed ordering is alright here, as knowledge of the
2379        // original reference prevents other threads from erroneously deleting
2380        // the object.
2381        //
2382        // As explained in the [Boost documentation][1], increasing the
2383        // reference counter can always be done with memory_order_relaxed: New
2384        // references to an object can only be formed from an existing
2385        // reference, and passing an existing reference from one thread to
2386        // another must already provide any required synchronization.
2387        //
2388        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2389        let old_size = self.inner().strong.fetch_add(1, Relaxed);
2390
2391        // However we need to guard against massive refcounts in case someone is `mem::forget`ing
2392        // Arcs. If we don't do this the count can overflow and users will use-after free. This
2393        // branch will never be taken in any realistic program. We abort because such a program is
2394        // incredibly degenerate, and we don't care to support it.
2395        //
2396        // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
2397        // But we do that check *after* having done the increment, so there is a chance here that
2398        // the worst already happened and we actually do overflow the `usize` counter. However, that
2399        // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
2400        // above and the `abort` below, which seems exceedingly unlikely.
2401        //
2402        // This is a global invariant, and also applies when using a compare-exchange loop to increment
2403        // counters in other methods.
2404        // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
2405        // and then overflow using a few `fetch_add`s.
2406        if old_size > MAX_REFCOUNT {
2407            abort();
2408        }
2409
2410        unsafe { Self::from_inner_in(self.ptr, self.alloc.clone()) }
2411    }
2412}
2413
2414#[unstable(feature = "ergonomic_clones", issue = "132290")]
2415impl<T: ?Sized, A: Allocator + Clone> UseCloned for Arc<T, A> {}
2416
2417#[stable(feature = "rust1", since = "1.0.0")]
2418impl<T: ?Sized, A: Allocator> Deref for Arc<T, A> {
2419    type Target = T;
2420
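    /// Dereferences to a shared reference to the inner value.
    ///
    /// A minimal sketch of what this impl enables:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let s = Arc::new(String::from("hi"));
    /// assert_eq!(&*s, "hi");  // explicit deref
    /// assert_eq!(s.len(), 2); // auto-deref for method calls
    /// ```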
2421    #[inline]
2422    fn deref(&self) -> &T {
2423        &self.inner().data
2424    }
2425}
2426
2427#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2428unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Arc<T, A> {}
2429
2430#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2431unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Weak<T, A> {}
2432
2433#[unstable(feature = "deref_pure_trait", issue = "87121")]
2434unsafe impl<T: ?Sized, A: Allocator> DerefPure for Arc<T, A> {}
2435
2436#[unstable(feature = "legacy_receiver_trait", issue = "none")]
2437impl<T: ?Sized> LegacyReceiver for Arc<T> {}
2438
2439#[cfg(not(no_global_oom_handling))]
2440impl<T: ?Sized + CloneToUninit, A: Allocator + Clone> Arc<T, A> {
2441    /// Makes a mutable reference into the given `Arc`.
2442    ///
2443    /// If there are other `Arc` pointers to the same allocation, then `make_mut` will
2444    /// [`clone`] the inner value to a new allocation to ensure unique ownership.  This is also
2445    /// referred to as clone-on-write.
2446    ///
2447    /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`]
2448    /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not
2449    /// be cloned.
2450    ///
2451    /// See also [`get_mut`], which will fail rather than cloning the inner value
2452    /// or dissociating [`Weak`] pointers.
2453    ///
2454    /// [`clone`]: Clone::clone
2455    /// [`get_mut`]: Arc::get_mut
2456    ///
2457    /// # Examples
2458    ///
2459    /// ```
2460    /// use std::sync::Arc;
2461    ///
2462    /// let mut data = Arc::new(5);
2463    ///
2464    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
2465    /// let mut other_data = Arc::clone(&data); // Won't clone inner data
2466    /// *Arc::make_mut(&mut data) += 1;         // Clones inner data
2467    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
2468    /// *Arc::make_mut(&mut other_data) *= 2;   // Won't clone anything
2469    ///
2470    /// // Now `data` and `other_data` point to different allocations.
2471    /// assert_eq!(*data, 8);
2472    /// assert_eq!(*other_data, 12);
2473    /// ```
2474    ///
2475    /// [`Weak`] pointers will be dissociated:
2476    ///
2477    /// ```
2478    /// use std::sync::Arc;
2479    ///
2480    /// let mut data = Arc::new(75);
2481    /// let weak = Arc::downgrade(&data);
2482    ///
2483    /// assert!(75 == *data);
2484    /// assert!(75 == *weak.upgrade().unwrap());
2485    ///
2486    /// *Arc::make_mut(&mut data) += 1;
2487    ///
2488    /// assert!(76 == *data);
2489    /// assert!(weak.upgrade().is_none());
2490    /// ```
2491    #[inline]
2492    #[stable(feature = "arc_unique", since = "1.4.0")]
2493    pub fn make_mut(this: &mut Self) -> &mut T {
2494        let size_of_val = size_of_val::<T>(&**this);
2495
2496        // Note that we hold both a strong reference and a weak reference.
2497        // Thus, releasing our strong reference only will not, by itself, cause
2498        // the memory to be deallocated.
2499        //
2500        // Use Acquire to ensure that we see any writes to `weak` that happen
2501        // before release writes (i.e., decrements) to `strong`. Since we hold a
2502        // weak count, there's no chance the ArcInner itself could be
2503        // deallocated.
2504        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
2505            // Another strong pointer exists, so we must clone.
2506            *this = Arc::clone_from_ref_in(&**this, this.alloc.clone());
2507        } else if this.inner().weak.load(Relaxed) != 1 {
2508            // Relaxed suffices in the above because this is fundamentally an
2509            // optimization: we are always racing with weak pointers being
2510        // dropped. Worst case, we end up allocating a new Arc unnecessarily.
2511
2512            // We removed the last strong ref, but there are additional weak
2513            // refs remaining. We'll move the contents to a new Arc, and
2514            // invalidate the other weak refs.
2515
2516            // Note that it is not possible for the read of `weak` to yield
2517            // usize::MAX (i.e., locked), since the weak count can only be
2518            // locked by a thread with a strong reference.
2519
2520            // Materialize our own implicit weak pointer, so that it can clean
2521            // up the ArcInner as needed.
2522            let _weak = Weak { ptr: this.ptr, alloc: this.alloc.clone() };
2523
2524            // Can just steal the data, all that's left is Weaks
2525            //
2526            // We don't need panic-protection like the above branch does, but we might as well
2527            // use the same mechanism.
2528            let mut in_progress: UniqueArcUninit<T, A> =
2529                UniqueArcUninit::new(&**this, this.alloc.clone());
2530            unsafe {
2531                // Initialize `in_progress` with a move of `**this`.
2532                // We have to express this in terms of bytes because `T: ?Sized`; there is no
2533                // operation that just copies a value based on its `size_of_val()`.
2534                ptr::copy_nonoverlapping(
2535                    ptr::from_ref(&**this).cast::<u8>(),
2536                    in_progress.data_ptr().cast::<u8>(),
2537                    size_of_val,
2538                );
2539
2540                ptr::write(this, in_progress.into_arc());
2541            }
2542        } else {
2543            // We were the sole reference of either kind; bump back up the
2544            // strong ref count.
2545            this.inner().strong.store(1, Release);
2546        }
2547
2548        // As with `get_mut()`, the unsafety is ok because our reference was
2549        // either unique to begin with, or became one upon cloning the contents.
2550        unsafe { Self::get_mut_unchecked(this) }
2551    }
2552}
2553
2554impl<T: Clone, A: Allocator> Arc<T, A> {
2555    /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the
2556    /// clone.
2557    ///
2558    /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to
2559    /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
2560    ///
2561    /// # Examples
2562    ///
2563    /// ```
2564    /// # use std::{ptr, sync::Arc};
2565    /// let inner = String::from("test");
2566    /// let ptr = inner.as_ptr();
2567    ///
2568    /// let arc = Arc::new(inner);
2569    /// let inner = Arc::unwrap_or_clone(arc);
2570    /// // The inner value was not cloned
2571    /// assert!(ptr::eq(ptr, inner.as_ptr()));
2572    ///
2573    /// let arc = Arc::new(inner);
2574    /// let arc2 = arc.clone();
2575    /// let inner = Arc::unwrap_or_clone(arc);
2576    /// // Because there were 2 references, we had to clone the inner value.
2577    /// assert!(!ptr::eq(ptr, inner.as_ptr()));
2578    /// // `arc2` is the last reference, so when we unwrap it we get back
2579    /// // the original `String`.
2580    /// let inner = Arc::unwrap_or_clone(arc2);
2581    /// assert!(ptr::eq(ptr, inner.as_ptr()));
2582    /// ```
2583    #[inline]
2584    #[stable(feature = "arc_unwrap_or_clone", since = "1.76.0")]
2585    pub fn unwrap_or_clone(this: Self) -> T {
2586        Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
2587    }
2588}
2589
2590impl<T: ?Sized, A: Allocator> Arc<T, A> {
2591    /// Returns a mutable reference into the given `Arc`, if there are
2592    /// no other `Arc` or [`Weak`] pointers to the same allocation.
2593    ///
2594    /// Returns [`None`] otherwise, because it is not safe to
2595    /// mutate a shared value.
2596    ///
2597    /// See also [`make_mut`][make_mut], which will [`clone`][clone]
2598    /// the inner value when there are other `Arc` pointers.
2599    ///
2600    /// [make_mut]: Arc::make_mut
2601    /// [clone]: Clone::clone
2602    ///
2603    /// # Examples
2604    ///
2605    /// ```
2606    /// use std::sync::Arc;
2607    ///
2608    /// let mut x = Arc::new(3);
2609    /// *Arc::get_mut(&mut x).unwrap() = 4;
2610    /// assert_eq!(*x, 4);
2611    ///
2612    /// let _y = Arc::clone(&x);
2613    /// assert!(Arc::get_mut(&mut x).is_none());
2614    /// ```
2615    #[inline]
2616    #[stable(feature = "arc_unique", since = "1.4.0")]
2617    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
2618        if Self::is_unique(this) {
2619            // This unsafety is ok because we're guaranteed that the pointer
2620            // returned is the *only* pointer that will ever be returned to T. Our
2621            // reference count is guaranteed to be 1 at this point, and we required
2622            // the Arc itself to be `mut`, so we're returning the only possible
2623            // reference to the inner data.
2624            unsafe { Some(Arc::get_mut_unchecked(this)) }
2625        } else {
2626            None
2627        }
2628    }
2629
2630    /// Returns a mutable reference into the given `Arc`,
2631    /// without any check.
2632    ///
2633    /// See also [`get_mut`], which is safe and does appropriate checks.
2634    ///
2635    /// [`get_mut`]: Arc::get_mut
2636    ///
2637    /// # Safety
2638    ///
2639    /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then
2640    /// they must not be dereferenced or have active borrows for the duration
2641    /// of the returned borrow, and their inner type must be exactly the same as the
2642    /// inner type of this Arc (including lifetimes). This is trivially the case if no
2643    /// such pointers exist, for example immediately after `Arc::new`.
2644    ///
2645    /// # Examples
2646    ///
2647    /// ```
2648    /// #![feature(get_mut_unchecked)]
2649    ///
2650    /// use std::sync::Arc;
2651    ///
2652    /// let mut x = Arc::new(String::new());
2653    /// unsafe {
2654    ///     Arc::get_mut_unchecked(&mut x).push_str("foo")
2655    /// }
2656    /// assert_eq!(*x, "foo");
2657    /// ```
2658    /// Other `Arc` pointers to the same allocation must be to the same type.
2659    /// ```no_run
2660    /// #![feature(get_mut_unchecked)]
2661    ///
2662    /// use std::sync::Arc;
2663    ///
2664    /// let x: Arc<str> = Arc::from("Hello, world!");
2665    /// let mut y: Arc<[u8]> = x.clone().into();
2666    /// unsafe {
2667    ///     // this is Undefined Behavior, because x's inner type is str, not [u8]
2668    ///     Arc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
2669    /// }
2670    /// println!("{}", &*x); // Invalid UTF-8 in a str
2671    /// ```
2672    /// Other `Arc` pointers to the same allocation must be to the exact same type, including lifetimes.
2673    /// ```no_run
2674    /// #![feature(get_mut_unchecked)]
2675    ///
2676    /// use std::sync::Arc;
2677    ///
2678    /// let x: Arc<&str> = Arc::new("Hello, world!");
2679    /// {
2680    ///     let s = String::from("Oh, no!");
2681    ///     let mut y: Arc<&str> = x.clone();
2682    ///     unsafe {
2683    ///         // this is Undefined Behavior, because x's inner type
2684    ///         // is &'long str, not &'short str
2685    ///         *Arc::get_mut_unchecked(&mut y) = &s;
2686    ///     }
2687    /// }
2688    /// println!("{}", &*x); // Use-after-free
2689    /// ```
2690    #[inline]
2691    #[unstable(feature = "get_mut_unchecked", issue = "63292")]
2692    pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
2693        // We are careful to *not* create a reference covering the "count" fields, as
2694        // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
2695        unsafe { &mut (*this.ptr.as_ptr()).data }
2696    }
2697
2698    /// Determine whether this is the unique reference to the underlying data.
2699    ///
2700    /// Returns `true` if there are no other `Arc` or [`Weak`] pointers to the same allocation;
2701    /// returns `false` otherwise.
2702    ///
2703    /// If this function returns `true`, then it is guaranteed to be safe to call [`get_mut_unchecked`]
2704    /// on this `Arc`, so long as no clones occur in between.
2705    ///
2706    /// # Examples
2707    ///
2708    /// ```
2709    /// #![feature(arc_is_unique)]
2710    ///
2711    /// use std::sync::Arc;
2712    ///
2713    /// let x = Arc::new(3);
2714    /// assert!(Arc::is_unique(&x));
2715    ///
2716    /// let y = Arc::clone(&x);
2717    /// assert!(!Arc::is_unique(&x));
2718    /// drop(y);
2719    ///
2720    /// // Weak references also count, because they could be upgraded at any time.
2721    /// let z = Arc::downgrade(&x);
2722    /// assert!(!Arc::is_unique(&x));
2723    /// ```
2724    ///
2725    /// # Pointer invalidation
2726    ///
2727    /// This function will always return the same value as `Arc::get_mut(arc).is_some()`. However,
2728    /// unlike that operation it does not produce any mutable references to the underlying data,
2729    /// meaning no pointers to the data inside the `Arc` are invalidated by the call. Thus, the
2730    /// following code is valid, even though it would be UB if it used `Arc::get_mut`:
2731    ///
2732    /// ```
2733    /// #![feature(arc_is_unique)]
2734    ///
2735    /// use std::sync::Arc;
2736    ///
2737    /// let arc = Arc::new(5);
2738    /// let pointer: *const i32 = &*arc;
2739    /// assert!(Arc::is_unique(&arc));
2740    /// assert_eq!(unsafe { *pointer }, 5);
2741    /// ```
2742    ///
2743    /// # Atomic orderings
2744    ///
2745    /// Concurrent drops of other `Arc` pointers to the same allocation will synchronize with this
2746    /// call - that is, this call performs an `Acquire` operation on the underlying strong and weak
2747    /// ref counts. This ensures that calling `get_mut_unchecked` is safe.
2748    ///
2749    /// Note that this operation requires locking the weak ref count, so concurrent calls to
2750    /// `downgrade` may spin-loop for a short period of time.
2751    ///
2752    /// [`get_mut_unchecked`]: Self::get_mut_unchecked
2753    #[inline]
2754    #[unstable(feature = "arc_is_unique", issue = "138938")]
2755    pub fn is_unique(this: &Self) -> bool {
2756        // lock the weak pointer count if we appear to be the sole weak pointer
2757        // holder.
2758        //
2759        // The acquire label here ensures a happens-before relationship with any
2760        // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
2761        // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
2762        // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
2763        if this.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
2764            // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
2765            // counter in `drop` -- the only access that happens when any but the last reference
2766            // is being dropped.
2767            let unique = this.inner().strong.load(Acquire) == 1;
2768
2769            // The release write here synchronizes with a read in `downgrade`,
2770            // effectively preventing the above read of `strong` from happening
2771            // after the write.
2772            this.inner().weak.store(1, Release); // release the lock
2773            unique
2774        } else {
2775            false
2776        }
2777    }
2778}
2779
2780#[stable(feature = "rust1", since = "1.0.0")]
2781unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Arc<T, A> {
2782    /// Drops the `Arc`.
2783    ///
2784    /// This will decrement the strong reference count. If the strong reference
2785    /// count reaches zero then the only other references (if any) are
2786    /// [`Weak`], so we `drop` the inner value.
2787    ///
2788    /// # Examples
2789    ///
2790    /// ```
2791    /// use std::sync::Arc;
2792    ///
2793    /// struct Foo;
2794    ///
2795    /// impl Drop for Foo {
2796    ///     fn drop(&mut self) {
2797    ///         println!("dropped!");
2798    ///     }
2799    /// }
2800    ///
2801    /// let foo  = Arc::new(Foo);
2802    /// let foo2 = Arc::clone(&foo);
2803    ///
2804    /// drop(foo);    // Doesn't print anything
2805    /// drop(foo2);   // Prints "dropped!"
2806    /// ```
2807    #[inline]
2808    fn drop(&mut self) {
2809        // Because `fetch_sub` is already atomic, we do not need to synchronize
2810        // with other threads unless we are going to delete the object. This
2811        // same logic applies to the below `fetch_sub` to the `weak` count.
2812        if self.inner().strong.fetch_sub(1, Release) != 1 {
2813            return;
2814        }
2815
2816        // This fence is needed to prevent reordering of use of the data and
2817        // deletion of the data. Because it is marked `Release`, the decreasing
2818        // of the reference count synchronizes with this `Acquire` fence. This
2819        // means that use of the data happens before decreasing the reference
2820        // count, which happens before this fence, which happens before the
2821        // deletion of the data.
2822        //
2823        // As explained in the [Boost documentation][1],
2824        //
2825        // > It is important to enforce any possible access to the object in one
2826        // > thread (through an existing reference) to *happen before* deleting
2827        // > the object in a different thread. This is achieved by a "release"
2828        // > operation after dropping a reference (any access to the object
2829        // > through this reference must obviously happened before), and an
2830        // > "acquire" operation before deleting the object.
2831        //
2832        // In particular, while the contents of an Arc are usually immutable, it's
2833        // possible to have interior writes to something like a Mutex<T>. Since a
2834        // Mutex is not acquired when it is deleted, we can't rely on its
2835        // synchronization logic to make writes in thread A visible to a destructor
2836        // running in thread B.
2837        //
2838        // Also note that the Acquire fence here could probably be replaced with an
2839        // Acquire load, which could improve performance in highly-contended
2840        // situations. See [2].
2841        //
2842        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2843        // [2]: (https://github.com/rust-lang/rust/pull/41714)
2844        acquire!(self.inner().strong);
2845
2846        // Make sure we aren't trying to "drop" the shared static for empty slices
2847        // used by Default::default.
2848        debug_assert!(
2849            !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
2850            "Arcs backed by a static should never reach a strong count of 0. \
2851            Likely decrement_strong_count or from_raw were called too many times.",
2852        );
2853
2854        unsafe {
2855            self.drop_slow();
2856        }
2857    }
2858}
2859
2860impl<A: Allocator> Arc<dyn Any + Send + Sync, A> {
2861    /// Attempts to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
2862    ///
2863    /// # Examples
2864    ///
2865    /// ```
2866    /// use std::any::Any;
2867    /// use std::sync::Arc;
2868    ///
2869    /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
2870    ///     if let Ok(string) = value.downcast::<String>() {
2871    ///         println!("String ({}): {}", string.len(), string);
2872    ///     }
2873    /// }
2874    ///
2875    /// let my_string = "Hello World".to_string();
2876    /// print_if_string(Arc::new(my_string));
2877    /// print_if_string(Arc::new(0i8));
2878    /// ```
2879    #[inline]
2880    #[stable(feature = "rc_downcast", since = "1.29.0")]
2881    pub fn downcast<T>(self) -> Result<Arc<T, A>, Self>
2882    where
2883        T: Any + Send + Sync,
2884    {
2885        if (*self).is::<T>() {
2886            unsafe {
2887                let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2888                Ok(Arc::from_inner_in(ptr.cast(), alloc))
2889            }
2890        } else {
2891            Err(self)
2892        }
2893    }
2894
2895    /// Downcasts the `Arc<dyn Any + Send + Sync>` to a concrete type.
2896    ///
2897    /// For a safe alternative see [`downcast`].
2898    ///
2899    /// # Examples
2900    ///
2901    /// ```
2902    /// #![feature(downcast_unchecked)]
2903    ///
2904    /// use std::any::Any;
2905    /// use std::sync::Arc;
2906    ///
2907    /// let x: Arc<dyn Any + Send + Sync> = Arc::new(1_usize);
2908    ///
2909    /// unsafe {
2910    ///     assert_eq!(*x.downcast_unchecked::<usize>(), 1);
2911    /// }
2912    /// ```
2913    ///
2914    /// # Safety
2915    ///
2916    /// The contained value must be of type `T`. Calling this method
2917    /// with the incorrect type is *undefined behavior*.
    ///
2920    /// [`downcast`]: Self::downcast
2921    #[inline]
2922    #[unstable(feature = "downcast_unchecked", issue = "90850")]
2923    pub unsafe fn downcast_unchecked<T>(self) -> Arc<T, A>
2924    where
2925        T: Any + Send + Sync,
2926    {
2927        unsafe {
2928            let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2929            Arc::from_inner_in(ptr.cast(), alloc)
2930        }
2931    }
2932}
2933
2934impl<T> Weak<T> {
2935    /// Constructs a new `Weak<T>`, without allocating any memory.
2936    /// Calling [`upgrade`] on the return value always gives [`None`].
2937    ///
2938    /// [`upgrade`]: Weak::upgrade
2939    ///
2940    /// # Examples
2941    ///
2942    /// ```
2943    /// use std::sync::Weak;
2944    ///
2945    /// let empty: Weak<i64> = Weak::new();
2946    /// assert!(empty.upgrade().is_none());
2947    /// ```
2948    #[inline]
2949    #[stable(feature = "downgraded_weak", since = "1.10.0")]
2950    #[rustc_const_stable(feature = "const_weak_new", since = "1.73.0")]
2951    #[must_use]
2952    pub const fn new() -> Weak<T> {
2953        Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc: Global }
2954    }
2955}
2956
2957impl<T, A: Allocator> Weak<T, A> {
    /// Constructs a new `Weak<T, A>`, without allocating any memory, but associated with
    /// the provided allocator.
2960    /// Calling [`upgrade`] on the return value always gives [`None`].
2961    ///
2962    /// [`upgrade`]: Weak::upgrade
2963    ///
2964    /// # Examples
2965    ///
2966    /// ```
2967    /// #![feature(allocator_api)]
2968    ///
2969    /// use std::sync::Weak;
2970    /// use std::alloc::System;
2971    ///
2972    /// let empty: Weak<i64, _> = Weak::new_in(System);
2973    /// assert!(empty.upgrade().is_none());
2974    /// ```
2975    #[inline]
2976    #[unstable(feature = "allocator_api", issue = "32838")]
2977    pub fn new_in(alloc: A) -> Weak<T, A> {
2978        Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc }
2979    }
2980}
2981
2982/// Helper type to allow accessing the reference counts without
2983/// making any assertions about the data field.
2984struct WeakInner<'a> {
2985    weak: &'a Atomic<usize>,
2986    strong: &'a Atomic<usize>,
2987}
2988
2989impl<T: ?Sized> Weak<T> {
2990    /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
2991    ///
2992    /// This can be used to safely get a strong reference (by calling [`upgrade`]
    /// later) or to release the weak reference by dropping the `Weak<T>`.
2994    ///
2995    /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2996    /// as these don't own anything; the method still works on them).
2997    ///
2998    /// # Safety
2999    ///
    /// The pointer must have originated from [`into_raw`], must still own its potential
    /// weak reference, and must point to a block of memory allocated by the global allocator.
3002    ///
3003    /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
3004    /// takes ownership of one weak reference currently represented as a raw pointer (the weak
3005    /// count is not modified by this operation) and therefore it must be paired with a previous
    /// call to [`into_raw`].
    ///
    /// # Examples
3008    ///
3009    /// ```
3010    /// use std::sync::{Arc, Weak};
3011    ///
3012    /// let strong = Arc::new("hello".to_owned());
3013    ///
3014    /// let raw_1 = Arc::downgrade(&strong).into_raw();
3015    /// let raw_2 = Arc::downgrade(&strong).into_raw();
3016    ///
3017    /// assert_eq!(2, Arc::weak_count(&strong));
3018    ///
3019    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
3020    /// assert_eq!(1, Arc::weak_count(&strong));
3021    ///
3022    /// drop(strong);
3023    ///
3024    /// // Decrement the last weak count.
3025    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
3026    /// ```
3027    ///
3028    /// [`new`]: Weak::new
3029    /// [`into_raw`]: Weak::into_raw
3030    /// [`upgrade`]: Weak::upgrade
3031    #[inline]
3032    #[stable(feature = "weak_into_raw", since = "1.45.0")]
3033    pub unsafe fn from_raw(ptr: *const T) -> Self {
3034        unsafe { Weak::from_raw_in(ptr, Global) }
3035    }
3036
3037    /// Consumes the `Weak<T>` and turns it into a raw pointer.
3038    ///
3039    /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
3040    /// one weak reference (the weak count is not modified by this operation). It can be turned
3041    /// back into the `Weak<T>` with [`from_raw`].
3042    ///
3043    /// The same restrictions of accessing the target of the pointer as with
3044    /// [`as_ptr`] apply.
3045    ///
3046    /// # Examples
3047    ///
3048    /// ```
3049    /// use std::sync::{Arc, Weak};
3050    ///
3051    /// let strong = Arc::new("hello".to_owned());
3052    /// let weak = Arc::downgrade(&strong);
3053    /// let raw = weak.into_raw();
3054    ///
3055    /// assert_eq!(1, Arc::weak_count(&strong));
3056    /// assert_eq!("hello", unsafe { &*raw });
3057    ///
3058    /// drop(unsafe { Weak::from_raw(raw) });
3059    /// assert_eq!(0, Arc::weak_count(&strong));
3060    /// ```
3061    ///
3062    /// [`from_raw`]: Weak::from_raw
3063    /// [`as_ptr`]: Weak::as_ptr
3064    #[must_use = "losing the pointer will leak memory"]
3065    #[stable(feature = "weak_into_raw", since = "1.45.0")]
3066    pub fn into_raw(self) -> *const T {
3067        ManuallyDrop::new(self).as_ptr()
3068    }
3069}
3070
3071impl<T: ?Sized, A: Allocator> Weak<T, A> {
3072    /// Returns a reference to the underlying allocator.
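    ///
    /// # Examples
    ///
    /// A minimal illustration, mirroring the `Weak::new_in` example above:
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::alloc::System;
    /// use std::sync::Weak;
    ///
    /// let weak: Weak<u8, System> = Weak::new_in(System);
    /// let _allocator: &System = weak.allocator();
    /// ```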
3073    #[inline]
3074    #[unstable(feature = "allocator_api", issue = "32838")]
3075    pub fn allocator(&self) -> &A {
3076        &self.alloc
3077    }
3078
3079    /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
3080    ///
3081    /// The pointer is valid only if there are some strong references. The pointer may be dangling,
3082    /// unaligned or even [`null`] otherwise.
3083    ///
3084    /// # Examples
3085    ///
3086    /// ```
3087    /// use std::sync::Arc;
3088    /// use std::ptr;
3089    ///
3090    /// let strong = Arc::new("hello".to_owned());
3091    /// let weak = Arc::downgrade(&strong);
3092    /// // Both point to the same object
3093    /// assert!(ptr::eq(&*strong, weak.as_ptr()));
3094    /// // The strong here keeps it alive, so we can still access the object.
3095    /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
3096    ///
3097    /// drop(strong);
3098    /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
3099    /// // undefined behavior.
3100    /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
3101    /// ```
3102    ///
3103    /// [`null`]: core::ptr::null "ptr::null"
3104    #[must_use]
3105    #[stable(feature = "weak_into_raw", since = "1.45.0")]
3106    pub fn as_ptr(&self) -> *const T {
3107        let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
3108
3109        if is_dangling(ptr) {
3110            // If the pointer is dangling, we return the sentinel directly. This cannot be
3111            // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
3112            ptr as *const T
3113        } else {
3114            // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
3115            // The payload may be dropped at this point, and we have to maintain provenance,
3116            // so use raw pointer manipulation.
3117            unsafe { &raw mut (*ptr).data }
3118        }
3119    }
3120
3121    /// Consumes the `Weak<T>`, returning the wrapped pointer and allocator.
3122    ///
3123    /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
3124    /// one weak reference (the weak count is not modified by this operation). It can be turned
3125    /// back into the `Weak<T>` with [`from_raw_in`].
3126    ///
3127    /// The same restrictions of accessing the target of the pointer as with
3128    /// [`as_ptr`] apply.
3129    ///
3130    /// # Examples
3131    ///
3132    /// ```
3133    /// #![feature(allocator_api)]
3134    /// use std::sync::{Arc, Weak};
3135    /// use std::alloc::System;
3136    ///
3137    /// let strong = Arc::new_in("hello".to_owned(), System);
3138    /// let weak = Arc::downgrade(&strong);
3139    /// let (raw, alloc) = weak.into_raw_with_allocator();
3140    ///
3141    /// assert_eq!(1, Arc::weak_count(&strong));
3142    /// assert_eq!("hello", unsafe { &*raw });
3143    ///
3144    /// drop(unsafe { Weak::from_raw_in(raw, alloc) });
3145    /// assert_eq!(0, Arc::weak_count(&strong));
3146    /// ```
3147    ///
3148    /// [`from_raw_in`]: Weak::from_raw_in
3149    /// [`as_ptr`]: Weak::as_ptr
3150    #[must_use = "losing the pointer will leak memory"]
3151    #[unstable(feature = "allocator_api", issue = "32838")]
3152    pub fn into_raw_with_allocator(self) -> (*const T, A) {
3153        let this = mem::ManuallyDrop::new(self);
3154        let result = this.as_ptr();
        // SAFETY: `this` is `ManuallyDrop`, so the allocator will not be double-dropped.
3156        let alloc = unsafe { ptr::read(&this.alloc) };
3157        (result, alloc)
3158    }
3159
3160    /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>` in the provided
3161    /// allocator.
3162    ///
3163    /// This can be used to safely get a strong reference (by calling [`upgrade`]
    /// later) or to release the weak reference by dropping the `Weak<T>`.
3165    ///
3166    /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
3167    /// as these don't own anything; the method still works on them).
3168    ///
3169    /// # Safety
3170    ///
    /// The pointer must have originated from [`into_raw`], must still own its potential
    /// weak reference, and must point to a block of memory allocated by `alloc`.
3173    ///
3174    /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
3175    /// takes ownership of one weak reference currently represented as a raw pointer (the weak
3176    /// count is not modified by this operation) and therefore it must be paired with a previous
    /// call to [`into_raw`].
    ///
    /// # Examples
3179    ///
3180    /// ```
3181    /// use std::sync::{Arc, Weak};
3182    ///
3183    /// let strong = Arc::new("hello".to_owned());
3184    ///
3185    /// let raw_1 = Arc::downgrade(&strong).into_raw();
3186    /// let raw_2 = Arc::downgrade(&strong).into_raw();
3187    ///
3188    /// assert_eq!(2, Arc::weak_count(&strong));
3189    ///
3190    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
3191    /// assert_eq!(1, Arc::weak_count(&strong));
3192    ///
3193    /// drop(strong);
3194    ///
3195    /// // Decrement the last weak count.
3196    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
3197    /// ```
3198    ///
3199    /// [`new`]: Weak::new
3200    /// [`into_raw`]: Weak::into_raw
3201    /// [`upgrade`]: Weak::upgrade
3202    #[inline]
3203    #[unstable(feature = "allocator_api", issue = "32838")]
3204    pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
3205        // See Weak::as_ptr for context on how the input pointer is derived.
3206
3207        let ptr = if is_dangling(ptr) {
3208            // This is a dangling Weak.
3209            ptr as *mut ArcInner<T>
3210        } else {
3211            // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
3212            // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
3213            let offset = unsafe { data_offset(ptr) };
3214            // Thus, we reverse the offset to get the whole ArcInner.
3215            // SAFETY: the pointer originated from a Weak, so this offset is safe.
3216            unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> }
3217        };
3218
3219        // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
3220        Weak { ptr: unsafe { NonNull::new_unchecked(ptr) }, alloc }
3221    }
3222}
3223
3224impl<T: ?Sized, A: Allocator> Weak<T, A> {
3225    /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
3226    /// dropping of the inner value if successful.
3227    ///
3228    /// Returns [`None`] if the inner value has since been dropped.
3229    ///
3230    /// # Examples
3231    ///
3232    /// ```
3233    /// use std::sync::Arc;
3234    ///
3235    /// let five = Arc::new(5);
3236    ///
3237    /// let weak_five = Arc::downgrade(&five);
3238    ///
3239    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
3240    /// assert!(strong_five.is_some());
3241    ///
3242    /// // Destroy all strong pointers.
3243    /// drop(strong_five);
3244    /// drop(five);
3245    ///
3246    /// assert!(weak_five.upgrade().is_none());
3247    /// ```
3248    #[must_use = "this returns a new `Arc`, \
3249                  without modifying the original weak pointer"]
3250    #[stable(feature = "arc_weak", since = "1.4.0")]
3251    pub fn upgrade(&self) -> Option<Arc<T, A>>
3252    where
3253        A: Clone,
3254    {
3255        #[inline]
3256        fn checked_increment(n: usize) -> Option<usize> {
            // Any write of 0 we can observe leaves the field in a permanently zero state.
3258            if n == 0 {
3259                return None;
3260            }
3261            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
3262            assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
3263            Some(n + 1)
3264        }
3265
3266        // We use a CAS loop to increment the strong count instead of a
3267        // fetch_add as this function should never take the reference count
3268        // from zero to one.
3269        //
3270        // Relaxed is fine for the failure case because we don't have any expectations about the new state.
3271        // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
3272        // value can be initialized after `Weak` references have already been created. In that case, we
3273        // expect to observe the fully initialized value.
3274        if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() {
            // SAFETY: the pointer is not dangling (`inner()` returned `Some` above) and the
            // strong count was non-zero, as verified in `checked_increment`.
3276            unsafe { Some(Arc::from_inner_in(self.ptr, self.alloc.clone())) }
3277        } else {
3278            None
3279        }
3280    }
3281
3282    /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
3283    ///
3284    /// If `self` was created using [`Weak::new`], this will return 0.
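    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.strong_count());
    ///
    /// let also_five = Arc::clone(&five);
    /// assert_eq!(2, weak_five.strong_count());
    ///
    /// drop(five);
    /// drop(also_five);
    /// assert_eq!(0, weak_five.strong_count());
    /// ```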
3285    #[must_use]
3286    #[stable(feature = "weak_counts", since = "1.41.0")]
3287    pub fn strong_count(&self) -> usize {
3288        if let Some(inner) = self.inner() { inner.strong.load(Relaxed) } else { 0 }
3289    }
3290
3291    /// Gets an approximation of the number of `Weak` pointers pointing to this
3292    /// allocation.
3293    ///
3294    /// If `self` was created using [`Weak::new`], or if there are no remaining
3295    /// strong pointers, this will return 0.
3296    ///
3297    /// # Accuracy
3298    ///
3299    /// Due to implementation details, the returned value can be off by 1 in
3300    /// either direction when other threads are manipulating any `Arc`s or
3301    /// `Weak`s pointing to the same allocation.
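    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.weak_count());
    ///
    /// // Once all strong pointers are gone, the count reads 0.
    /// drop(five);
    /// assert_eq!(0, weak_five.weak_count());
    /// ```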
3302    #[must_use]
3303    #[stable(feature = "weak_counts", since = "1.41.0")]
3304    pub fn weak_count(&self) -> usize {
3305        if let Some(inner) = self.inner() {
3306            let weak = inner.weak.load(Acquire);
3307            let strong = inner.strong.load(Relaxed);
3308            if strong == 0 {
3309                0
3310            } else {
3311                // Since we observed that there was at least one strong pointer
3312                // after reading the weak count, we know that the implicit weak
3313                // reference (present whenever any strong references are alive)
3314                // was still around when we observed the weak count, and can
3315                // therefore safely subtract it.
3316                weak - 1
3317            }
3318        } else {
3319            0
3320        }
3321    }
3322
    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
    /// (i.e., when this `Weak` was created by `Weak::new`).
3325    #[inline]
3326    fn inner(&self) -> Option<WeakInner<'_>> {
3327        let ptr = self.ptr.as_ptr();
3328        if is_dangling(ptr) {
3329            None
3330        } else {
3331            // We are careful to *not* create a reference covering the "data" field, as
3332            // the field may be mutated concurrently (for example, if the last `Arc`
3333            // is dropped, the data field will be dropped in-place).
3334            Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } })
3335        }
3336    }
3337
    /// Returns `true` if the two `Weak`s point to the same allocation, similar to [`ptr::eq`], or if
    /// both don't point to any allocation (because they were created with `Weak::new()`). However,
    /// this function ignores the metadata of `dyn Trait` pointers.
3341    ///
3342    /// # Notes
3343    ///
    /// Since this compares pointers, two `Weak`s created by `Weak::new()` will compare equal to
    /// each other, even though they don't point to any allocation.
3346    ///
3347    /// # Examples
3348    ///
3349    /// ```
3350    /// use std::sync::Arc;
3351    ///
3352    /// let first_rc = Arc::new(5);
3353    /// let first = Arc::downgrade(&first_rc);
3354    /// let second = Arc::downgrade(&first_rc);
3355    ///
3356    /// assert!(first.ptr_eq(&second));
3357    ///
3358    /// let third_rc = Arc::new(5);
3359    /// let third = Arc::downgrade(&third_rc);
3360    ///
3361    /// assert!(!first.ptr_eq(&third));
3362    /// ```
3363    ///
3364    /// Comparing `Weak::new`.
3365    ///
3366    /// ```
3367    /// use std::sync::{Arc, Weak};
3368    ///
3369    /// let first = Weak::new();
3370    /// let second = Weak::new();
3371    /// assert!(first.ptr_eq(&second));
3372    ///
3373    /// let third_rc = Arc::new(());
3374    /// let third = Arc::downgrade(&third_rc);
3375    /// assert!(!first.ptr_eq(&third));
3376    /// ```
3377    ///
3378    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
3379    #[inline]
3380    #[must_use]
3381    #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
3382    pub fn ptr_eq(&self, other: &Self) -> bool {
3383        ptr::addr_eq(self.ptr.as_ptr(), other.ptr.as_ptr())
3384    }
3385}
3386
3387#[stable(feature = "arc_weak", since = "1.4.0")]
3388impl<T: ?Sized, A: Allocator + Clone> Clone for Weak<T, A> {
3389    /// Makes a clone of the `Weak` pointer that points to the same allocation.
3390    ///
3391    /// # Examples
3392    ///
3393    /// ```
3394    /// use std::sync::{Arc, Weak};
3395    ///
3396    /// let weak_five = Arc::downgrade(&Arc::new(5));
3397    ///
3398    /// let _ = Weak::clone(&weak_five);
3399    /// ```
3400    #[inline]
3401    fn clone(&self) -> Weak<T, A> {
3402        if let Some(inner) = self.inner() {
3403            // See comments in Arc::clone() for why this is relaxed. This can use a
3404            // fetch_add (ignoring the lock) because the weak count is only locked
            // when there are *no other* weak pointers in existence. (So we can't be
3406            // running this code in that case).
3407            let old_size = inner.weak.fetch_add(1, Relaxed);
3408
3409            // See comments in Arc::clone() for why we do this (for mem::forget).
3410            if old_size > MAX_REFCOUNT {
3411                abort();
3412            }
3413        }
3414
3415        Weak { ptr: self.ptr, alloc: self.alloc.clone() }
3416    }
3417}
3418
3419#[unstable(feature = "ergonomic_clones", issue = "132290")]
3420impl<T: ?Sized, A: Allocator + Clone> UseCloned for Weak<T, A> {}
3421
3422#[stable(feature = "downgraded_weak", since = "1.10.0")]
3423impl<T> Default for Weak<T> {
3424    /// Constructs a new `Weak<T>`, without allocating memory.
3425    /// Calling [`upgrade`] on the return value always
3426    /// gives [`None`].
3427    ///
3428    /// [`upgrade`]: Weak::upgrade
3429    ///
3430    /// # Examples
3431    ///
3432    /// ```
3433    /// use std::sync::Weak;
3434    ///
3435    /// let empty: Weak<i64> = Default::default();
3436    /// assert!(empty.upgrade().is_none());
3437    /// ```
3438    fn default() -> Weak<T> {
3439        Weak::new()
3440    }
3441}
3442
3443#[stable(feature = "arc_weak", since = "1.4.0")]
3444unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Weak<T, A> {
3445    /// Drops the `Weak` pointer.
3446    ///
3447    /// # Examples
3448    ///
3449    /// ```
3450    /// use std::sync::{Arc, Weak};
3451    ///
3452    /// struct Foo;
3453    ///
3454    /// impl Drop for Foo {
3455    ///     fn drop(&mut self) {
3456    ///         println!("dropped!");
3457    ///     }
3458    /// }
3459    ///
3460    /// let foo = Arc::new(Foo);
3461    /// let weak_foo = Arc::downgrade(&foo);
3462    /// let other_weak_foo = Weak::clone(&weak_foo);
3463    ///
3464    /// drop(weak_foo);   // Doesn't print anything
3465    /// drop(foo);        // Prints "dropped!"
3466    ///
3467    /// assert!(other_weak_foo.upgrade().is_none());
3468    /// ```
3469    fn drop(&mut self) {
        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings.
3473        //
3474        // It's not necessary to check for the locked state here, because the
3475        // weak count can only be locked if there was precisely one weak ref,
3476        // meaning that drop could only subsequently run ON that remaining weak
3477        // ref, which can only happen after the lock is released.
3478        let inner = if let Some(inner) = self.inner() { inner } else { return };
3479
3480        if inner.weak.fetch_sub(1, Release) == 1 {
3481            acquire!(inner.weak);
3482
3483            // Make sure we aren't trying to "deallocate" the shared static for empty slices
3484            // used by Default::default.
3485            debug_assert!(
3486                !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
3487                "Arc/Weaks backed by a static should never be deallocated. \
3488                Likely decrement_strong_count or from_raw were called too many times.",
3489            );
3490
3491            unsafe {
3492                self.alloc.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()))
3493            }
3494        }
3495    }
3496}
3497
3498#[stable(feature = "rust1", since = "1.0.0")]
3499trait ArcEqIdent<T: ?Sized + PartialEq, A: Allocator> {
3500    fn eq(&self, other: &Arc<T, A>) -> bool;
3501    fn ne(&self, other: &Arc<T, A>) -> bool;
3502}
3503
3504#[stable(feature = "rust1", since = "1.0.0")]
3505impl<T: ?Sized + PartialEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3506    #[inline]
3507    default fn eq(&self, other: &Arc<T, A>) -> bool {
3508        **self == **other
3509    }
3510    #[inline]
3511    default fn ne(&self, other: &Arc<T, A>) -> bool {
3512        **self != **other
3513    }
3514}
3515
/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
/// store large values that are slow to clone but also expensive to compare for equality, so this
/// cost pays off more easily. It's also more likely for two `Arc` clones to point to the same
/// value than for two `&T`s.
3521///
3522/// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
3523#[stable(feature = "rust1", since = "1.0.0")]
3524impl<T: ?Sized + crate::rc::MarkerEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3525    #[inline]
3526    fn eq(&self, other: &Arc<T, A>) -> bool {
3527        Arc::ptr_eq(self, other) || **self == **other
3528    }
3529
3530    #[inline]
3531    fn ne(&self, other: &Arc<T, A>) -> bool {
3532        !Arc::ptr_eq(self, other) && **self != **other
3533    }
3534}
3535
3536#[stable(feature = "rust1", since = "1.0.0")]
3537impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Arc<T, A> {
3538    /// Equality for two `Arc`s.
3539    ///
3540    /// Two `Arc`s are equal if their inner values are equal, even if they are
    /// stored in different allocations.
3542    ///
3543    /// If `T` also implements `Eq` (implying reflexivity of equality),
3544    /// two `Arc`s that point to the same allocation are always equal.
3545    ///
3546    /// # Examples
3547    ///
3548    /// ```
3549    /// use std::sync::Arc;
3550    ///
3551    /// let five = Arc::new(5);
3552    ///
3553    /// assert!(five == Arc::new(5));
3554    /// ```
3555    #[inline]
3556    fn eq(&self, other: &Arc<T, A>) -> bool {
3557        ArcEqIdent::eq(self, other)
3558    }
3559
3560    /// Inequality for two `Arc`s.
3561    ///
3562    /// Two `Arc`s are not equal if their inner values are not equal.
3563    ///
3564    /// If `T` also implements `Eq` (implying reflexivity of equality),
    /// two `Arc`s that point to the same allocation are never unequal.
3566    ///
3567    /// # Examples
3568    ///
3569    /// ```
3570    /// use std::sync::Arc;
3571    ///
3572    /// let five = Arc::new(5);
3573    ///
3574    /// assert!(five != Arc::new(6));
3575    /// ```
3576    #[inline]
3577    fn ne(&self, other: &Arc<T, A>) -> bool {
3578        ArcEqIdent::ne(self, other)
3579    }
3580}
3581
3582#[stable(feature = "rust1", since = "1.0.0")]
3583impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Arc<T, A> {
3584    /// Partial comparison for two `Arc`s.
3585    ///
3586    /// The two are compared by calling `partial_cmp()` on their inner values.
3587    ///
3588    /// # Examples
3589    ///
3590    /// ```
3591    /// use std::sync::Arc;
3592    /// use std::cmp::Ordering;
3593    ///
3594    /// let five = Arc::new(5);
3595    ///
3596    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
3597    /// ```
3598    fn partial_cmp(&self, other: &Arc<T, A>) -> Option<Ordering> {
3599        (**self).partial_cmp(&**other)
3600    }
3601
3602    /// Less-than comparison for two `Arc`s.
3603    ///
3604    /// The two are compared by calling `<` on their inner values.
3605    ///
3606    /// # Examples
3607    ///
3608    /// ```
3609    /// use std::sync::Arc;
3610    ///
3611    /// let five = Arc::new(5);
3612    ///
3613    /// assert!(five < Arc::new(6));
3614    /// ```
3615    fn lt(&self, other: &Arc<T, A>) -> bool {
3616        *(*self) < *(*other)
3617    }
3618
3619    /// 'Less than or equal to' comparison for two `Arc`s.
3620    ///
3621    /// The two are compared by calling `<=` on their inner values.
3622    ///
3623    /// # Examples
3624    ///
3625    /// ```
3626    /// use std::sync::Arc;
3627    ///
3628    /// let five = Arc::new(5);
3629    ///
3630    /// assert!(five <= Arc::new(5));
3631    /// ```
3632    fn le(&self, other: &Arc<T, A>) -> bool {
3633        *(*self) <= *(*other)
3634    }
3635
3636    /// Greater-than comparison for two `Arc`s.
3637    ///
3638    /// The two are compared by calling `>` on their inner values.
3639    ///
3640    /// # Examples
3641    ///
3642    /// ```
3643    /// use std::sync::Arc;
3644    ///
3645    /// let five = Arc::new(5);
3646    ///
3647    /// assert!(five > Arc::new(4));
3648    /// ```
3649    fn gt(&self, other: &Arc<T, A>) -> bool {
3650        *(*self) > *(*other)
3651    }
3652
3653    /// 'Greater than or equal to' comparison for two `Arc`s.
3654    ///
3655    /// The two are compared by calling `>=` on their inner values.
3656    ///
3657    /// # Examples
3658    ///
3659    /// ```
3660    /// use std::sync::Arc;
3661    ///
3662    /// let five = Arc::new(5);
3663    ///
3664    /// assert!(five >= Arc::new(5));
3665    /// ```
3666    fn ge(&self, other: &Arc<T, A>) -> bool {
3667        *(*self) >= *(*other)
3668    }
3669}
3670#[stable(feature = "rust1", since = "1.0.0")]
3671impl<T: ?Sized + Ord, A: Allocator> Ord for Arc<T, A> {
3672    /// Comparison for two `Arc`s.
3673    ///
3674    /// The two are compared by calling `cmp()` on their inner values.
3675    ///
3676    /// # Examples
3677    ///
3678    /// ```
3679    /// use std::sync::Arc;
3680    /// use std::cmp::Ordering;
3681    ///
3682    /// let five = Arc::new(5);
3683    ///
3684    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
3685    /// ```
3686    fn cmp(&self, other: &Arc<T, A>) -> Ordering {
3687        (**self).cmp(&**other)
3688    }
3689}
3690#[stable(feature = "rust1", since = "1.0.0")]
3691impl<T: ?Sized + Eq, A: Allocator> Eq for Arc<T, A> {}
3692
3693#[stable(feature = "rust1", since = "1.0.0")]
3694impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for Arc<T, A> {
3695    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3696        fmt::Display::fmt(&**self, f)
3697    }
3698}
3699
3700#[stable(feature = "rust1", since = "1.0.0")]
3701impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for Arc<T, A> {
3702    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3703        fmt::Debug::fmt(&**self, f)
3704    }
3705}
3706
3707#[stable(feature = "rust1", since = "1.0.0")]
3708impl<T: ?Sized, A: Allocator> fmt::Pointer for Arc<T, A> {
3709    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3710        fmt::Pointer::fmt(&(&raw const **self), f)
3711    }
3712}
3713
3714#[cfg(not(no_global_oom_handling))]
3715#[stable(feature = "rust1", since = "1.0.0")]
3716impl<T: Default> Default for Arc<T> {
3717    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
3718    ///
3719    /// # Examples
3720    ///
3721    /// ```
3722    /// use std::sync::Arc;
3723    ///
3724    /// let x: Arc<i32> = Default::default();
3725    /// assert_eq!(*x, 0);
3726    /// ```
3727    fn default() -> Arc<T> {
3728        unsafe {
3729            Self::from_inner(
3730                Box::leak(Box::write(
3731                    Box::new_uninit(),
3732                    ArcInner {
3733                        strong: atomic::AtomicUsize::new(1),
3734                        weak: atomic::AtomicUsize::new(1),
3735                        data: T::default(),
3736                    },
3737                ))
3738                .into(),
3739            )
3740        }
3741    }
3742}
3743
3744/// Struct to hold the static `ArcInner` used for empty `Arc<str/CStr/[T]>` as
3745/// returned by `Default::default`.
3746///
3747/// Layout notes:
3748/// * `repr(align(16))` so we can use it for `[T]` with `align_of::<T>() <= 16`.
3749/// * `repr(C)` so `inner` is at offset 0 (and thus guaranteed to actually be aligned to 16).
3750/// * `[u8; 1]` (to be initialized with 0) so it can be used for `Arc<CStr>`.
3751#[repr(C, align(16))]
3752struct SliceArcInnerForStatic {
3753    inner: ArcInner<[u8; 1]>,
3754}
3755#[cfg(not(no_global_oom_handling))]
3756const MAX_STATIC_INNER_SLICE_ALIGNMENT: usize = 16;
3757
3758static STATIC_INNER_SLICE: SliceArcInnerForStatic = SliceArcInnerForStatic {
3759    inner: ArcInner {
3760        strong: atomic::AtomicUsize::new(1),
3761        weak: atomic::AtomicUsize::new(1),
3762        data: [0],
3763    },
3764};
3765
3766#[cfg(not(no_global_oom_handling))]
3767#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3768impl Default for Arc<str> {
    /// Creates an empty `str` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
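    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let empty: Arc<str> = Default::default();
    /// assert_eq!("", &*empty);
    /// ```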
3772    #[inline]
3773    fn default() -> Self {
3774        let arc: Arc<[u8]> = Default::default();
3775        debug_assert!(core::str::from_utf8(&*arc).is_ok());
3776        let (ptr, alloc) = Arc::into_inner_with_allocator(arc);
3777        unsafe { Arc::from_ptr_in(ptr.as_ptr() as *mut ArcInner<str>, alloc) }
3778    }
3779}
3780
3781#[cfg(not(no_global_oom_handling))]
3782#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3783impl Default for Arc<core::ffi::CStr> {
    /// Creates an empty `CStr` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
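    ///
    /// # Examples
    ///
    /// ```
    /// use std::ffi::CStr;
    /// use std::sync::Arc;
    ///
    /// let empty: Arc<CStr> = Default::default();
    /// assert!(empty.to_bytes().is_empty());
    /// ```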
3787    #[inline]
3788    fn default() -> Self {
3789        use core::ffi::CStr;
3790        let inner: NonNull<ArcInner<[u8]>> = NonNull::from(&STATIC_INNER_SLICE.inner);
3791        let inner: NonNull<ArcInner<CStr>> =
3792            NonNull::new(inner.as_ptr() as *mut ArcInner<CStr>).unwrap();
3793        // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
3794        let this: mem::ManuallyDrop<Arc<CStr>> =
3795            unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
3796        (*this).clone()
3797    }
3798}
3799
3800#[cfg(not(no_global_oom_handling))]
3801#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3802impl<T> Default for Arc<[T]> {
    /// Creates an empty `[T]` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
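    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let empty: Arc<[i32]> = Default::default();
    /// assert!(empty.is_empty());
    /// ```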
3806    #[inline]
3807    fn default() -> Self {
3808        if align_of::<T>() <= MAX_STATIC_INNER_SLICE_ALIGNMENT {
3809            // We take a reference to the whole struct instead of the ArcInner<[u8; 1]> inside it so
3810            // we don't shrink the range of bytes the ptr is allowed to access under Stacked Borrows.
3811            // (Miri complains on 32-bit targets with Arc<[Align16]> otherwise.)
3812            // (Note that NonNull::from(&STATIC_INNER_SLICE.inner) is fine under Tree Borrows.)
3813            let inner: NonNull<SliceArcInnerForStatic> = NonNull::from(&STATIC_INNER_SLICE);
3814            let inner: NonNull<ArcInner<[T; 0]>> = inner.cast();
3815            // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
3816            let this: mem::ManuallyDrop<Arc<[T; 0]>> =
3817                unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
3818            return (*this).clone();
3819        }
3820
3821        // If T's alignment is too large for the static, make a new unique allocation.
3822        let arr: [T; 0] = [];
3823        Arc::from(arr)
3824    }
3825}
3826
3827#[cfg(not(no_global_oom_handling))]
3828#[stable(feature = "pin_default_impls", since = "1.91.0")]
3829impl<T> Default for Pin<Arc<T>>
3830where
3831    T: ?Sized,
3832    Arc<T>: Default,
3833{
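    /// Creates a new `Pin<Arc<T>>`, with the `Default` value for `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::pin::Pin;
    /// use std::sync::Arc;
    ///
    /// let pinned: Pin<Arc<i32>> = Default::default();
    /// assert_eq!(*pinned, 0);
    /// ```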
3834    #[inline]
3835    fn default() -> Self {
3836        unsafe { Pin::new_unchecked(Arc::<T>::default()) }
3837    }
3838}
3839
3840#[stable(feature = "rust1", since = "1.0.0")]
3841impl<T: ?Sized + Hash, A: Allocator> Hash for Arc<T, A> {
3842    fn hash<H: Hasher>(&self, state: &mut H) {
3843        (**self).hash(state)
3844    }
3845}
3846
3847#[cfg(not(no_global_oom_handling))]
3848#[stable(feature = "from_for_ptrs", since = "1.6.0")]
3849impl<T> From<T> for Arc<T> {
    /// Converts a `T` into an `Arc<T>`.
    ///
    /// The conversion moves the value into a newly allocated `Arc`. It is
    /// equivalent to calling `Arc::new(t)`.
3855    ///
3856    /// # Example
3857    /// ```rust
3858    /// # use std::sync::Arc;
3859    /// let x = 5;
3860    /// let arc = Arc::new(5);
3861    ///
3862    /// assert_eq!(Arc::from(x), arc);
3863    /// ```
3864    fn from(t: T) -> Self {
3865        Arc::new(t)
3866    }
3867}
3868
3869#[cfg(not(no_global_oom_handling))]
3870#[stable(feature = "shared_from_array", since = "1.74.0")]
3871impl<T, const N: usize> From<[T; N]> for Arc<[T]> {
3872    /// Converts a [`[T; N]`](prim@array) into an `Arc<[T]>`.
3873    ///
3874    /// The conversion moves the array into a newly allocated `Arc`.
3875    ///
3876    /// # Example
3877    ///
3878    /// ```
3879    /// # use std::sync::Arc;
3880    /// let original: [i32; 3] = [1, 2, 3];
3881    /// let shared: Arc<[i32]> = Arc::from(original);
3882    /// assert_eq!(&[1, 2, 3], &shared[..]);
3883    /// ```
3884    #[inline]
3885    fn from(v: [T; N]) -> Arc<[T]> {
3886        Arc::<[T; N]>::from(v)
3887    }
3888}
3889
3890#[cfg(not(no_global_oom_handling))]
3891#[stable(feature = "shared_from_slice", since = "1.21.0")]
3892impl<T: Clone> From<&[T]> for Arc<[T]> {
3893    /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
3894    ///
3895    /// # Example
3896    ///
3897    /// ```
3898    /// # use std::sync::Arc;
3899    /// let original: &[i32] = &[1, 2, 3];
3900    /// let shared: Arc<[i32]> = Arc::from(original);
3901    /// assert_eq!(&[1, 2, 3], &shared[..]);
3902    /// ```
3903    #[inline]
3904    fn from(v: &[T]) -> Arc<[T]> {
3905        <Self as ArcFromSlice<T>>::from_slice(v)
3906    }
3907}
3908
3909#[cfg(not(no_global_oom_handling))]
3910#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
3911impl<T: Clone> From<&mut [T]> for Arc<[T]> {
3912    /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
3913    ///
3914    /// # Example
3915    ///
3916    /// ```
3917    /// # use std::sync::Arc;
3918    /// let mut original = [1, 2, 3];
3919    /// let original: &mut [i32] = &mut original;
3920    /// let shared: Arc<[i32]> = Arc::from(original);
3921    /// assert_eq!(&[1, 2, 3], &shared[..]);
3922    /// ```
3923    #[inline]
3924    fn from(v: &mut [T]) -> Arc<[T]> {
3925        Arc::from(&*v)
3926    }
3927}
3928
3929#[cfg(not(no_global_oom_handling))]
3930#[stable(feature = "shared_from_slice", since = "1.21.0")]
3931impl From<&str> for Arc<str> {
3932    /// Allocates a reference-counted `str` and copies `v` into it.
3933    ///
3934    /// # Example
3935    ///
3936    /// ```
3937    /// # use std::sync::Arc;
3938    /// let shared: Arc<str> = Arc::from("eggplant");
3939    /// assert_eq!("eggplant", &shared[..]);
3940    /// ```
3941    #[inline]
3942    fn from(v: &str) -> Arc<str> {
3943        let arc = Arc::<[u8]>::from(v.as_bytes());
3944        unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
3945    }
3946}
3947
3948#[cfg(not(no_global_oom_handling))]
3949#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
3950impl From<&mut str> for Arc<str> {
3951    /// Allocates a reference-counted `str` and copies `v` into it.
3952    ///
3953    /// # Example
3954    ///
3955    /// ```
3956    /// # use std::sync::Arc;
3957    /// let mut original = String::from("eggplant");
3958    /// let original: &mut str = &mut original;
3959    /// let shared: Arc<str> = Arc::from(original);
3960    /// assert_eq!("eggplant", &shared[..]);
3961    /// ```
3962    #[inline]
3963    fn from(v: &mut str) -> Arc<str> {
3964        Arc::from(&*v)
3965    }
3966}
3967
3968#[cfg(not(no_global_oom_handling))]
3969#[stable(feature = "shared_from_slice", since = "1.21.0")]
3970impl From<String> for Arc<str> {
3971    /// Allocates a reference-counted `str` and copies `v` into it.
3972    ///
3973    /// # Example
3974    ///
3975    /// ```
3976    /// # use std::sync::Arc;
3977    /// let unique: String = "eggplant".to_owned();
3978    /// let shared: Arc<str> = Arc::from(unique);
3979    /// assert_eq!("eggplant", &shared[..]);
3980    /// ```
3981    #[inline]
3982    fn from(v: String) -> Arc<str> {
3983        Arc::from(&v[..])
3984    }
3985}
3986
3987#[cfg(not(no_global_oom_handling))]
3988#[stable(feature = "shared_from_slice", since = "1.21.0")]
3989impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Arc<T, A> {
    /// Moves a boxed object to a new, reference-counted allocation.
3991    ///
3992    /// # Example
3993    ///
3994    /// ```
3995    /// # use std::sync::Arc;
3996    /// let unique: Box<str> = Box::from("eggplant");
3997    /// let shared: Arc<str> = Arc::from(unique);
3998    /// assert_eq!("eggplant", &shared[..]);
3999    /// ```
4000    #[inline]
4001    fn from(v: Box<T, A>) -> Arc<T, A> {
4002        Arc::from_box_in(v)
4003    }
4004}
4005
4006#[cfg(not(no_global_oom_handling))]
4007#[stable(feature = "shared_from_slice", since = "1.21.0")]
4008impl<T, A: Allocator + Clone> From<Vec<T, A>> for Arc<[T], A> {
4009    /// Allocates a reference-counted slice and moves `v`'s items into it.
4010    ///
4011    /// # Example
4012    ///
4013    /// ```
4014    /// # use std::sync::Arc;
4015    /// let unique: Vec<i32> = vec![1, 2, 3];
4016    /// let shared: Arc<[i32]> = Arc::from(unique);
4017    /// assert_eq!(&[1, 2, 3], &shared[..]);
4018    /// ```
4019    #[inline]
4020    fn from(v: Vec<T, A>) -> Arc<[T], A> {
4021        unsafe {
4022            let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
4023
4024            let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
4025            ptr::copy_nonoverlapping(vec_ptr, (&raw mut (*rc_ptr).data) as *mut T, len);
4026
4027            // Create a `Vec<T, &A>` with length 0, to deallocate the buffer
4028            // without dropping its contents or the allocator
4029            let _ = Vec::from_raw_parts_in(vec_ptr, 0, cap, &alloc);
4030
4031            Self::from_ptr_in(rc_ptr, alloc)
4032        }
4033    }
4034}
4035
4036#[stable(feature = "shared_from_cow", since = "1.45.0")]
4037impl<'a, B> From<Cow<'a, B>> for Arc<B>
4038where
4039    B: ToOwned + ?Sized,
4040    Arc<B>: From<&'a B> + From<B::Owned>,
4041{
4042    /// Creates an atomically reference-counted pointer from a clone-on-write
4043    /// pointer by copying its content.
4044    ///
4045    /// # Example
4046    ///
4047    /// ```rust
4048    /// # use std::sync::Arc;
4049    /// # use std::borrow::Cow;
4050    /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant");
4051    /// let shared: Arc<str> = Arc::from(cow);
4052    /// assert_eq!("eggplant", &shared[..]);
4053    /// ```
4054    #[inline]
4055    fn from(cow: Cow<'a, B>) -> Arc<B> {
4056        match cow {
4057            Cow::Borrowed(s) => Arc::from(s),
4058            Cow::Owned(s) => Arc::from(s),
4059        }
4060    }
4061}
4062
4063#[stable(feature = "shared_from_str", since = "1.62.0")]
4064impl From<Arc<str>> for Arc<[u8]> {
4065    /// Converts an atomically reference-counted string slice into a byte slice.
4066    ///
4067    /// # Example
4068    ///
4069    /// ```
4070    /// # use std::sync::Arc;
4071    /// let string: Arc<str> = Arc::from("eggplant");
4072    /// let bytes: Arc<[u8]> = Arc::from(string);
4073    /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
4074    /// ```
4075    #[inline]
4076    fn from(rc: Arc<str>) -> Self {
4077        // SAFETY: `str` has the same layout as `[u8]`.
4078        unsafe { Arc::from_raw(Arc::into_raw(rc) as *const [u8]) }
4079    }
4080}
4081
4082#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
4083impl<T, A: Allocator, const N: usize> TryFrom<Arc<[T], A>> for Arc<[T; N], A> {
4084    type Error = Arc<[T], A>;
4085
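    /// Attempts to convert `Arc<[T], A>` into `Arc<[T; N], A>`, succeeding if the slice
    /// contains exactly `N` elements; otherwise, the original `Arc<[T], A>` is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// let array: Arc<[i32; 3]> = Arc::try_from(slice).unwrap();
    /// assert_eq!(*array, [1, 2, 3]);
    ///
    /// // A length mismatch returns the original slice as the error.
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// assert!(Arc::<[i32; 4]>::try_from(slice).is_err());
    /// ```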
4086    fn try_from(boxed_slice: Arc<[T], A>) -> Result<Self, Self::Error> {
4087        if boxed_slice.len() == N {
4088            let (ptr, alloc) = Arc::into_inner_with_allocator(boxed_slice);
4089            Ok(unsafe { Arc::from_inner_in(ptr.cast(), alloc) })
4090        } else {
4091            Err(boxed_slice)
4092        }
4093    }
4094}
4095
4096#[cfg(not(no_global_oom_handling))]
4097#[stable(feature = "shared_from_iter", since = "1.37.0")]
4098impl<T> FromIterator<T> for Arc<[T]> {
4099    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
4100    ///
4101    /// # Performance characteristics
4102    ///
4103    /// ## The general case
4104    ///
4105    /// In the general case, collecting into `Arc<[T]>` is done by first
4106    /// collecting into a `Vec<T>`. That is, when writing the following:
4107    ///
4108    /// ```rust
4109    /// # use std::sync::Arc;
4110    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
4111    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
4112    /// ```
4113    ///
4114    /// this behaves as if we wrote:
4115    ///
4116    /// ```rust
4117    /// # use std::sync::Arc;
4118    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
4119    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
4120    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
4121    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
4122    /// ```
4123    ///
4124    /// This will allocate as many times as needed for constructing the `Vec<T>`
4125    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
4126    ///
4127    /// ## Iterators of known length
4128    ///
4129    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
4130    /// a single allocation will be made for the `Arc<[T]>`. For example:
4131    ///
4132    /// ```rust
4133    /// # use std::sync::Arc;
4134    /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
4135    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
4136    /// ```
4137    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
4138        ToArcSlice::to_arc_slice(iter.into_iter())
4139    }
4140}
4141
4142#[cfg(not(no_global_oom_handling))]
4143/// Specialization trait used for collecting into `Arc<[T]>`.
4144trait ToArcSlice<T>: Iterator<Item = T> + Sized {
4145    fn to_arc_slice(self) -> Arc<[T]>;
4146}
4147
4148#[cfg(not(no_global_oom_handling))]
4149impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
4150    default fn to_arc_slice(self) -> Arc<[T]> {
4151        self.collect::<Vec<T>>().into()
4152    }
4153}
4154
4155#[cfg(not(no_global_oom_handling))]
4156impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
4157    fn to_arc_slice(self) -> Arc<[T]> {
4158        // This is the case for a `TrustedLen` iterator.
4159        let (low, high) = self.size_hint();
4160        if let Some(high) = high {
4161            debug_assert_eq!(
4162                low,
4163                high,
4164                "TrustedLen iterator's size hint is not exact: {:?}",
4165                (low, high)
4166            );
4167
4168            unsafe {
                // SAFETY: `TrustedLen` guarantees (and the debug assertion above checks)
                // that `low` is the exact length of the iterator.
4170                Arc::from_iter_exact(self, low)
4171            }
4172        } else {
4173            // TrustedLen contract guarantees that `upper_bound == None` implies an iterator
4174            // length exceeding `usize::MAX`.
4175            // The default implementation would collect into a vec which would panic.
4176            // Thus we panic here immediately without invoking `Vec` code.
4177            panic!("capacity overflow");
4178        }
4179    }
4180}
4181
4182#[stable(feature = "rust1", since = "1.0.0")]
4183impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Arc<T, A> {
4184    fn borrow(&self) -> &T {
4185        &**self
4186    }
4187}
4188
4189#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
4190impl<T: ?Sized, A: Allocator> AsRef<T> for Arc<T, A> {
4191    fn as_ref(&self) -> &T {
4192        &**self
4193    }
4194}
4195
4196#[stable(feature = "pin", since = "1.33.0")]
4197impl<T: ?Sized, A: Allocator> Unpin for Arc<T, A> {}
4198
4199/// Gets the offset within an `ArcInner` for the payload behind a pointer.
4200///
4201/// # Safety
4202///
4203/// The pointer must point to (and have valid metadata for) a previously
4204/// valid instance of T, but the T is allowed to be dropped.
4205unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> usize {
4206    // Align the unsized value to the end of the ArcInner.
4207    // Because ArcInner is repr(C), it will always be the last field in memory.
4208    // SAFETY: since the only unsized types possible are slices, trait objects,
4209    // and extern types, the input safety requirement is currently enough to
4210    // satisfy the requirements of align_of_val_raw; this is an implementation
4211    // detail of the language that must not be relied upon outside of std.
4212    unsafe { data_offset_align(align_of_val_raw(ptr)) }
4213}
4214
4215#[inline]
4216fn data_offset_align(align: usize) -> usize {
4217    let layout = Layout::new::<ArcInner<()>>();
4218    layout.size() + layout.padding_needed_for(align)
4219}
4220
4221/// A unique owning pointer to an [`ArcInner`] **that does not imply the contents are initialized,**
4222/// but will deallocate it (without dropping the value) when dropped.
4223///
4224/// This is a helper for [`Arc::make_mut()`] to ensure correct cleanup on panic.
4225struct UniqueArcUninit<T: ?Sized, A: Allocator> {
4226    ptr: NonNull<ArcInner<T>>,
4227    layout_for_value: Layout,
4228    alloc: Option<A>,
4229}
4230
4231impl<T: ?Sized, A: Allocator> UniqueArcUninit<T, A> {
4232    /// Allocates an ArcInner with layout suitable to contain `for_value` or a clone of it.
4233    #[cfg(not(no_global_oom_handling))]
4234    fn new(for_value: &T, alloc: A) -> UniqueArcUninit<T, A> {
4235        let layout = Layout::for_value(for_value);
4236        let ptr = unsafe {
4237            Arc::allocate_for_layout(
4238                layout,
4239                |layout_for_arcinner| alloc.allocate(layout_for_arcinner),
4240                |mem| mem.with_metadata_of(ptr::from_ref(for_value) as *const ArcInner<T>),
4241            )
4242        };
4243        Self { ptr: NonNull::new(ptr).unwrap(), layout_for_value: layout, alloc: Some(alloc) }
4244    }
4245
4246    /// Allocates an ArcInner with layout suitable to contain `for_value` or a clone of it,
4247    /// returning an error if allocation fails.
4248    fn try_new(for_value: &T, alloc: A) -> Result<UniqueArcUninit<T, A>, AllocError> {
4249        let layout = Layout::for_value(for_value);
4250        let ptr = unsafe {
4251            Arc::try_allocate_for_layout(
4252                layout,
4253                |layout_for_arcinner| alloc.allocate(layout_for_arcinner),
4254                |mem| mem.with_metadata_of(ptr::from_ref(for_value) as *const ArcInner<T>),
4255            )?
4256        };
4257        Ok(Self { ptr: NonNull::new(ptr).unwrap(), layout_for_value: layout, alloc: Some(alloc) })
4258    }
4259
4260    /// Returns the pointer to be written into to initialize the [`Arc`].
4261    fn data_ptr(&mut self) -> *mut T {
4262        let offset = data_offset_align(self.layout_for_value.align());
4263        unsafe { self.ptr.as_ptr().byte_add(offset) as *mut T }
4264    }
4265
4266    /// Upgrade this into a normal [`Arc`].
4267    ///
4268    /// # Safety
4269    ///
4270    /// The data must have been initialized (by writing to [`Self::data_ptr()`]).
4271    unsafe fn into_arc(self) -> Arc<T, A> {
4272        let mut this = ManuallyDrop::new(self);
4273        let ptr = this.ptr.as_ptr();
4274        let alloc = this.alloc.take().unwrap();
4275
4276        // SAFETY: The pointer is valid as per `UniqueArcUninit::new`, and the caller is responsible
4277        // for having initialized the data.
4278        unsafe { Arc::from_ptr_in(ptr, alloc) }
4279    }
4280}
4281
4282#[cfg(not(no_global_oom_handling))]
4283impl<T: ?Sized, A: Allocator> Drop for UniqueArcUninit<T, A> {
4284    fn drop(&mut self) {
4285        // SAFETY:
4286        // * new() produced a pointer safe to deallocate.
4287        // * We own the pointer unless into_arc() was called, which forgets us.
4288        unsafe {
4289            self.alloc.take().unwrap().deallocate(
4290                self.ptr.cast(),
4291                arcinner_layout_for_value_layout(self.layout_for_value),
4292            );
4293        }
4294    }
4295}
4296
4297#[stable(feature = "arc_error", since = "1.52.0")]
4298impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> {
4299    #[allow(deprecated)]
4300    fn cause(&self) -> Option<&dyn core::error::Error> {
4301        core::error::Error::cause(&**self)
4302    }
4303
4304    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
4305        core::error::Error::source(&**self)
4306    }
4307
4308    fn provide<'a>(&'a self, req: &mut core::error::Request<'a>) {
4309        core::error::Error::provide(&**self, req);
4310    }
4311}

/// A uniquely owned [`Arc`].
///
/// This represents an `Arc` that is known to be uniquely owned -- that is, to have exactly one
/// strong reference. Multiple weak pointers can be created, but attempts to upgrade those to
/// strong references will fail unless the `UniqueArc` they point to has been converted into a
/// regular `Arc`.
///
/// Because it is uniquely owned, the contents of a `UniqueArc` can be freely mutated. A common
/// use case is for an object to be mutable during its initialization phase and then become
/// immutable when it is converted into a normal `Arc`.
///
/// This can be used as a flexible way to create cyclic data structures, as in the example below.
///
/// ```
/// #![feature(unique_rc_arc)]
/// use std::sync::{Arc, Weak, UniqueArc};
///
/// struct Gadget {
///     me: Weak<Gadget>,
/// }
///
/// fn create_gadget() -> Option<Arc<Gadget>> {
///     let mut rc = UniqueArc::new(Gadget {
///         me: Weak::new(),
///     });
///     rc.me = UniqueArc::downgrade(&rc);
///     Some(UniqueArc::into_arc(rc))
/// }
///
/// create_gadget().unwrap();
/// ```
///
/// An advantage of using `UniqueArc` over [`Arc::new_cyclic`] to build cyclic data structures is
/// that [`Arc::new_cyclic`]'s `data_fn` parameter cannot be async or return a [`Result`]. As shown
/// in the previous example, `UniqueArc` allows for more flexibility in the construction of cyclic
/// data, including fallible or async constructors.
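///
/// For example, a constructor that can fail can keep its natural `Result` shape. The following
/// is a minimal sketch (the fallible `parse` step is hypothetical, chosen for illustration):
///
/// ```
/// #![feature(unique_rc_arc)]
/// use std::num::ParseIntError;
/// use std::sync::{Arc, UniqueArc, Weak};
///
/// struct Node {
///     value: u32,
///     me: Weak<Node>,
/// }
///
/// fn create_node(input: &str) -> Result<Arc<Node>, ParseIntError> {
///     // The fallible step fits naturally before the allocation.
///     let value = input.parse()?;
///     let mut node = UniqueArc::new(Node { value, me: Weak::new() });
///     node.me = UniqueArc::downgrade(&node);
///     Ok(UniqueArc::into_arc(node))
/// }
///
/// assert_eq!(create_node("17").unwrap().value, 17);
/// assert!(create_node("not a number").is_err());
/// ```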
#[unstable(feature = "unique_rc_arc", issue = "112566")]
pub struct UniqueArc<
    T: ?Sized,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    ptr: NonNull<ArcInner<T>>,
    // Define the ownership of `ArcInner<T>` for drop-check
    _marker: PhantomData<ArcInner<T>>,
    // Invariance is necessary for soundness: once other `Weak`
    // references exist, we already have a form of shared mutability!
    _marker2: PhantomData<*mut T>,
    alloc: A,
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
// #[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<UniqueArc<U, A>>
    for UniqueArc<T, A>
{
}

// #[unstable(feature = "unique_rc_arc", issue = "112566")]
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<UniqueArc<U>> for UniqueArc<T> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for UniqueArc<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for UniqueArc<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> fmt::Pointer for UniqueArc<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(&raw const **self), f)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for UniqueArc<T, A> {
    fn borrow(&self) -> &T {
        &**self
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> borrow::BorrowMut<T> for UniqueArc<T, A> {
    fn borrow_mut(&mut self) -> &mut T {
        &mut **self
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> AsRef<T> for UniqueArc<T, A> {
    fn as_ref(&self) -> &T {
        &**self
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> AsMut<T> for UniqueArc<T, A> {
    fn as_mut(&mut self) -> &mut T {
        &mut **self
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> Unpin for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for UniqueArc<T, A> {
    /// Equality for two `UniqueArc`s.
    ///
    /// Two `UniqueArc`s are equal if their inner values are equal.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five == UniqueArc::new(5));
    /// ```
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        PartialEq::eq(&**self, &**other)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for UniqueArc<T, A> {
    /// Partial comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    /// use std::cmp::Ordering;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&UniqueArc::new(6)));
    /// ```
    #[inline(always)]
    fn partial_cmp(&self, other: &UniqueArc<T, A>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five < UniqueArc::new(6));
    /// ```
    #[inline(always)]
    fn lt(&self, other: &UniqueArc<T, A>) -> bool {
        **self < **other
    }

    /// 'Less than or equal to' comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five <= UniqueArc::new(5));
    /// ```
    #[inline(always)]
    fn le(&self, other: &UniqueArc<T, A>) -> bool {
        **self <= **other
    }

    /// Greater-than comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five > UniqueArc::new(4));
    /// ```
    #[inline(always)]
    fn gt(&self, other: &UniqueArc<T, A>) -> bool {
        **self > **other
    }

    /// 'Greater than or equal to' comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five >= UniqueArc::new(5));
    /// ```
    #[inline(always)]
    fn ge(&self, other: &UniqueArc<T, A>) -> bool {
        **self >= **other
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Ord, A: Allocator> Ord for UniqueArc<T, A> {
    /// Comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    /// use std::cmp::Ordering;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert_eq!(Ordering::Less, five.cmp(&UniqueArc::new(6)));
    /// ```
    #[inline]
    fn cmp(&self, other: &UniqueArc<T, A>) -> Ordering {
        (**self).cmp(&**other)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Eq, A: Allocator> Eq for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Hash, A: Allocator> Hash for UniqueArc<T, A> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state);
    }
}

impl<T> UniqueArc<T, Global> {
    /// Creates a new `UniqueArc`.
    ///
    /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
    /// these weak references will fail until the `UniqueArc` has been converted into an [`Arc`].
    /// After converting the `UniqueArc` into an [`Arc`], any weak references created beforehand will
    /// point to the new [`Arc`].
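    ///
    /// # Examples
    ///
    /// A minimal sketch of that flow:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new(5);
    /// let weak = UniqueArc::downgrade(&unique);
    /// // The value is still uniquely owned, so upgrading fails.
    /// assert!(weak.upgrade().is_none());
    ///
    /// let shared = UniqueArc::into_arc(unique);
    /// // After the conversion, the earlier weak reference can be upgraded.
    /// assert_eq!(*weak.upgrade().unwrap(), 5);
    /// assert_eq!(*shared, 5);
    /// ```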
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn new(value: T) -> Self {
        Self::new_in(value, Global)
    }

    /// Maps the value in a `UniqueArc`, reusing the allocation if possible.
    ///
    /// The value is moved out of the `UniqueArc` and passed to `f`, and the result is returned,
    /// also in a `UniqueArc`.
    ///
    /// Note: this is an associated function, which means that you have
    /// to call it as `UniqueArc::map(u, f)` instead of `u.map(f)`. This
    /// is so that there is no conflict with a method on the inner type.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(smart_pointer_try_map)]
    /// #![feature(unique_rc_arc)]
    ///
    /// use std::sync::UniqueArc;
    ///
    /// let r = UniqueArc::new(7);
    /// let new = UniqueArc::map(r, |i| i + 7);
    /// assert_eq!(*new, 14);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "smart_pointer_try_map", issue = "144419")]
    pub fn map<U>(this: Self, f: impl FnOnce(T) -> U) -> UniqueArc<U> {
        if size_of::<T>() == size_of::<U>()
            && align_of::<T>() == align_of::<U>()
            && UniqueArc::weak_count(&this) == 0
        {
            // The layouts match and no weak pointers observe the allocation, so the
            // result can be written in place over the old value.
            unsafe {
                let ptr = UniqueArc::into_raw(this);
                let value = ptr.read();
                let mut allocation = UniqueArc::from_raw(ptr.cast::<mem::MaybeUninit<U>>());

                allocation.write(f(value));
                allocation.assume_init()
            }
        } else {
            // Otherwise, fall back to a fresh allocation.
            UniqueArc::new(f(UniqueArc::unwrap(this)))
        }
    }

    /// Attempts to map the value in a `UniqueArc`, reusing the allocation if possible.
    ///
    /// The value is moved out of the `UniqueArc` and passed to `f`, and if the operation succeeds,
    /// the result is returned, also in a `UniqueArc`.
    ///
    /// Note: this is an associated function, which means that you have
    /// to call it as `UniqueArc::try_map(u, f)` instead of `u.try_map(f)`. This
    /// is so that there is no conflict with a method on the inner type.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(smart_pointer_try_map)]
    /// #![feature(unique_rc_arc)]
    ///
    /// use std::sync::UniqueArc;
    ///
    /// let b = UniqueArc::new(7);
    /// let new = UniqueArc::try_map(b, u32::try_from).unwrap();
    /// assert_eq!(*new, 7);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "smart_pointer_try_map", issue = "144419")]
    pub fn try_map<R>(
        this: Self,
        f: impl FnOnce(T) -> R,
    ) -> <R::Residual as Residual<UniqueArc<R::Output>>>::TryType
    where
        R: Try,
        R::Residual: Residual<UniqueArc<R::Output>>,
    {
        if size_of::<T>() == size_of::<R::Output>()
            && align_of::<T>() == align_of::<R::Output>()
            && UniqueArc::weak_count(&this) == 0
        {
            // As in `map`: matching layouts and no weak pointers, so the allocation
            // can be reused for the successful result.
            unsafe {
                let ptr = UniqueArc::into_raw(this);
                let value = ptr.read();
                let mut allocation = UniqueArc::from_raw(ptr.cast::<mem::MaybeUninit<R::Output>>());

                allocation.write(f(value)?);
                try { allocation.assume_init() }
            }
        } else {
            try { UniqueArc::new(f(UniqueArc::unwrap(this))?) }
        }
    }

    #[cfg(not(no_global_oom_handling))]
    fn unwrap(this: Self) -> T {
        let this = ManuallyDrop::new(this);
        // Move the value out; `ManuallyDrop` prevents it from being dropped twice.
        let val: T = unsafe { ptr::read(&**this) };

        // Dropping this `Weak` releases the implicit weak reference held by the
        // `UniqueArc`, freeing the allocation if no other weak references remain.
        let _weak = Weak { ptr: this.ptr, alloc: Global };

        val
    }
}

impl<T: ?Sized> UniqueArc<T> {
    #[cfg(not(no_global_oom_handling))]
    unsafe fn from_raw(ptr: *const T) -> Self {
        let offset = unsafe { data_offset(ptr) };

        // Reverse the offset to find the original ArcInner.
        let rc_ptr = unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> };

        Self {
            ptr: unsafe { NonNull::new_unchecked(rc_ptr) },
            _marker: PhantomData,
            _marker2: PhantomData,
            alloc: Global,
        }
    }

    #[cfg(not(no_global_oom_handling))]
    fn into_raw(this: Self) -> *const T {
        let this = ManuallyDrop::new(this);
        Self::as_ptr(&*this)
    }
}

impl<T, A: Allocator> UniqueArc<T, A> {
    /// Creates a new `UniqueArc` in the provided allocator.
    ///
    /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
    /// these weak references will fail until the `UniqueArc` has been converted into an [`Arc`].
    /// After converting the `UniqueArc` into an [`Arc`], any weak references created beforehand will
    /// point to the new [`Arc`].
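    ///
    /// # Examples
    ///
    /// A minimal sketch (using the `System` allocator, which requires the `allocator_api`
    /// feature):
    ///
    /// ```
    /// #![feature(unique_rc_arc, allocator_api)]
    /// use std::alloc::System;
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new_in(5, System);
    /// assert_eq!(*unique, 5);
    /// ```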
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    // #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_in(data: T, alloc: A) -> Self {
        let (ptr, alloc) = Box::into_unique(Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(0),
                // Keep one implicit weak reference so that the allocation stays
                // valid even if all explicitly created weak pointers are dropped.
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        ));
        Self { ptr: ptr.into(), _marker: PhantomData, _marker2: PhantomData, alloc }
    }
}

impl<T: ?Sized, A: Allocator> UniqueArc<T, A> {
    /// Converts the `UniqueArc` into a regular [`Arc`].
    ///
    /// This consumes the `UniqueArc` and returns a regular [`Arc`] containing the inner value.
    ///
    /// Any weak references created before this method is called can now be upgraded to strong
    /// references.
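    ///
    /// # Examples
    ///
    /// A minimal sketch of the mutate-then-share pattern:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let mut unique = UniqueArc::new(String::from("hello"));
    /// unique.push_str(", world"); // mutable while uniquely owned
    /// let shared = UniqueArc::into_arc(unique); // now shareable (and immutable)
    /// assert_eq!(&*shared, "hello, world");
    /// ```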
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn into_arc(this: Self) -> Arc<T, A> {
        let this = ManuallyDrop::new(this);

        // Move the allocator out.
        // SAFETY: `this.alloc` will not be accessed again, nor dropped because it is in
        // a `ManuallyDrop`.
        let alloc: A = unsafe { ptr::read(&this.alloc) };

        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        unsafe {
            // Convert our weak reference into a strong reference
            (*this.ptr.as_ptr()).strong.store(1, Release);
            Arc::from_inner_in(this.ptr, alloc)
        }
    }

    #[cfg(not(no_global_oom_handling))]
    fn weak_count(this: &Self) -> usize {
        // Subtract the implicit weak reference held by the `UniqueArc` itself.
        this.inner().weak.load(Acquire) - 1
    }

    #[cfg(not(no_global_oom_handling))]
    fn inner(&self) -> &ArcInner<T> {
        // SAFETY: while this UniqueArc is alive we're guaranteed that the inner pointer is valid.
        unsafe { self.ptr.as_ref() }
    }

    #[cfg(not(no_global_oom_handling))]
    fn as_ptr(this: &Self) -> *const T {
        let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);

        // SAFETY: This cannot go through Deref::deref or UniqueArc::inner because
        // this is required to retain raw/mut provenance such that e.g. `get_mut` can
        // write through the pointer after the `UniqueArc` is recovered through `from_raw`.
        unsafe { &raw mut (*ptr).data }
    }

    #[inline]
    #[cfg(not(no_global_oom_handling))]
    fn into_inner_with_allocator(this: Self) -> (NonNull<ArcInner<T>>, A) {
        let this = mem::ManuallyDrop::new(this);
        (this.ptr, unsafe { ptr::read(&this.alloc) })
    }

    #[inline]
    #[cfg(not(no_global_oom_handling))]
    unsafe fn from_inner_in(ptr: NonNull<ArcInner<T>>, alloc: A) -> Self {
        Self { ptr, _marker: PhantomData, _marker2: PhantomData, alloc }
    }
}

impl<T: ?Sized, A: Allocator + Clone> UniqueArc<T, A> {
    /// Creates a new weak reference to the `UniqueArc`.
    ///
    /// Attempting to upgrade this weak reference will fail until the `UniqueArc` has been
    /// converted to an [`Arc`] using [`UniqueArc::into_arc`].
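    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new(5);
    /// let weak = UniqueArc::downgrade(&unique);
    /// // Upgrading fails while the value is still uniquely owned.
    /// assert!(weak.upgrade().is_none());
    /// ```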
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn downgrade(this: &Self) -> Weak<T, A> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object or converting the object to a normal `Arc<T, A>`.
        //
        // Note that we don't need to test if the weak counter is locked because there
        // are no operations like `Arc::get_mut` or `Arc::make_mut` that will lock
        // the weak counter.
        //
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        let old_size = unsafe { (*this.ptr.as_ptr()).weak.fetch_add(1, Relaxed) };

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            abort();
        }

        Weak { ptr: this.ptr, alloc: this.alloc.clone() }
    }
}

#[cfg(not(no_global_oom_handling))]
impl<T, A: Allocator> UniqueArc<mem::MaybeUninit<T>, A> {
    /// Converts to `UniqueArc<T, A>`.
    ///
    /// # Safety
    ///
    /// The contained value must have been fully initialized.
    unsafe fn assume_init(self) -> UniqueArc<T, A> {
        let (ptr, alloc) = UniqueArc::into_inner_with_allocator(self);
        unsafe { UniqueArc::from_inner_in(ptr.cast(), alloc) }
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> Deref for UniqueArc<T, A> {
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        unsafe { &self.ptr.as_ref().data }
    }
}

// #[unstable(feature = "unique_rc_arc", issue = "112566")]
#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
unsafe impl<T: ?Sized> PinCoerceUnsized for UniqueArc<T> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> DerefMut for UniqueArc<T, A> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: This pointer was allocated at creation time so we know it is valid. We know we
        // have unique ownership and therefore it's safe to make a mutable reference because
        // `UniqueArc` owns the only strong reference to itself.
        // We also need to be careful to only create a mutable reference to the `data` field,
        // as a mutable reference to the entire `ArcInner` would assert uniqueness over the
        // ref count fields too, invalidating any attempt by `Weak`s to access the ref count.
        unsafe { &mut (*self.ptr.as_ptr()).data }
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
// #[unstable(feature = "deref_pure_trait", issue = "87121")]
unsafe impl<T: ?Sized, A: Allocator> DerefPure for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for UniqueArc<T, A> {
    fn drop(&mut self) {
        // See `Arc::drop_slow` which drops an `Arc` with a strong count of 0.
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };

        unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
    }
}

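/// Forwards every [`Allocator`] method to the pointed-to allocator, so one allocator can be
/// shared by several collections. A minimal sketch (using the `System` allocator, which requires
/// the `allocator_api` feature):
///
/// ```
/// #![feature(allocator_api)]
/// use std::alloc::System;
/// use std::sync::Arc;
///
/// let alloc = Arc::new(System);
/// let mut v = Vec::new_in(alloc.clone());
/// v.push(1);
/// assert_eq!(v, [1]);
/// ```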
#[unstable(feature = "allocator_api", issue = "32838")]
unsafe impl<T: ?Sized + Allocator, A: Allocator> Allocator for Arc<T, A> {
    #[inline]
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        (**self).allocate(layout)
    }

    #[inline]
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        (**self).allocate_zeroed(layout)
    }

    #[inline]
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        // SAFETY: the safety contract must be upheld by the caller
        unsafe { (**self).deallocate(ptr, layout) }
    }

    #[inline]
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: the safety contract must be upheld by the caller
        unsafe { (**self).grow(ptr, old_layout, new_layout) }
    }

    #[inline]
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: the safety contract must be upheld by the caller
        unsafe { (**self).grow_zeroed(ptr, old_layout, new_layout) }
    }

    #[inline]
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: the safety contract must be upheld by the caller
        unsafe { (**self).shrink(ptr, old_layout, new_layout) }
    }
}