rustc_arena/
lib.rs

//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements several kinds of arena.
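//!
//! A brief usage sketch (not a doctest; see the items below for the actual API):
//!
//! ```ignore (illustrative)
//! let typed = TypedArena::<String>::default();
//! let s: &mut String = typed.alloc(String::from("lives until the arena is dropped"));
//!
//! let dropless = DroplessArena::default();
//! let n: &mut u32 = dropless.alloc(42); // `u32: Copy`, so no destructor is needed.
//! ```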

// tidy-alphabetical-start
#![allow(clippy::mut_from_ref)] // Arena allocators are one place where this pattern is fine.
#![allow(internal_features)]
#![cfg_attr(test, feature(test))]
#![deny(unsafe_op_in_unsafe_fn)]
#![doc(
    html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
    test(no_crate_inject, attr(deny(warnings)))
)]
#![doc(rust_logo)]
#![feature(core_intrinsics)]
#![feature(decl_macro)]
#![feature(dropck_eyepatch)]
#![feature(maybe_uninit_slice)]
#![feature(rustc_attrs)]
#![feature(rustdoc_internals)]
#![warn(unreachable_pub)]
// tidy-alphabetical-end

use std::alloc::Layout;
use std::cell::{Cell, RefCell};
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr::{self, NonNull};
use std::{cmp, intrinsics, slice};

use smallvec::SmallVec;

/// This calls the passed function while ensuring it won't be inlined into the caller.
#[inline(never)]
#[cold]
fn outline<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

struct ArenaChunk<T = u8> {
    /// The raw storage for the arena chunk.
    storage: NonNull<[MaybeUninit<T>]>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

unsafe impl<#[may_dangle] T> Drop for ArenaChunk<T> {
    fn drop(&mut self) {
        unsafe { drop(Box::from_raw(self.storage.as_mut())) }
    }
}

impl<T> ArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> ArenaChunk<T> {
        ArenaChunk {
            storage: NonNull::from(Box::leak(Box::new_uninit_slice(capacity))),
            entries: 0,
        }
    }

    /// Destroys this arena chunk.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `len` elements of this chunk have been initialized.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<T> takes linear time.
        if mem::needs_drop::<T>() {
            // SAFETY: The caller must ensure that `len` elements of this chunk have
            // been initialized.
            unsafe {
                let slice = self.storage.as_mut();
                slice[..len].assume_init_drop();
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&mut self) -> *mut T {
        self.storage.as_ptr() as *mut T
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
            if mem::size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                ptr::without_provenance_mut(!0)
            } else {
                self.start().add(self.storage.len())
            }
        }
    }
}

// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
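// As a worked example (a sketch, assuming `size_of::<T>() == 8`): the first
// chunk of a `TypedArena<T>` holds 4096 / 8 = 512 elements, later chunks hold
// 1024, 2048, ... elements, and the doubling stops once a chunk reaches
// 2 MiB / 8 = 262_144 elements (`grow` may still exceed this to satisfy a
// single large allocation).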
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;

/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
}

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
            _own: PhantomData,
        }
    }
}

impl<T> TypedArena<T> {
    /// Allocates an object in the `TypedArena`, returning a reference to it.
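    ///
    /// A minimal usage sketch (not a doctest):
    ///
    /// ```ignore (illustrative)
    /// let arena = TypedArena::default();
    /// let v: &mut Vec<u32> = arena.alloc(vec![1, 2, 3]);
    /// v.push(4);
    /// ```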
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            if mem::size_of::<T>() == 0 {
                self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let ptr = self.ptr.get();
                // Advance the pointer.
                self.ptr.set(self.ptr.get().add(1));
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.get().addr() - self.ptr.get().addr();
        let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    #[inline]
    fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(mem::size_of::<T>() != 0);
        assert!(len != 0);

        // Ensure the current chunk can fit `len` objects.
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }

        let start_ptr = self.ptr.get();
        // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
        // `len` elements.
        unsafe { self.ptr.set(start_ptr.add(len)) };
        start_ptr
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
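    ///
    /// A hypothetical reentrant call that this must support (illustrative, not a doctest):
    /// the closure passed to `map` allocates into the same arena that is being filled.
    ///
    /// ```ignore (illustrative)
    /// let arena = TypedArena::default();
    /// let refs = arena.alloc_from_iter((0..4).map(|i| *arena.alloc(i) + 1));
    /// ```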
    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        // Despite the similarity with `DroplessArena`, we cannot reuse its fast case. The reason
        // is subtle: these arenas are reentrant. In other words, `iter` may very well be holding a
        // reference to `self` and adding elements to the arena during iteration.
        //
        // For this reason, if we pre-allocated any space for the elements of this iterator, we'd
        // have to track how many of them have actually been initialized, else we might
        // accidentally drop uninitialized memory if something panics or if the iterator produces
        // fewer elements than expected.
        //
        // So we collect all the elements beforehand, which takes care of reentrancy and panic
        // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
        // doesn't need to be hyper-optimized.
        assert!(mem::size_of::<T>() != 0);

        let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
        if vec.is_empty() {
            return &mut [];
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        let start_ptr = self.alloc_raw_slice(len);
        unsafe {
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
            let elem_size = cmp::max(1, mem::size_of::<T>());
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // If a type is `!needs_drop`, we don't need to keep track of how many elements
                // the chunk stores - the field will be ignored anyway.
                if mem::needs_drop::<T>() {
                    // FIXME: this should *likely* use `offset_from`, but more
                    // investigation is needed (including running tests in miri).
                    let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
                    last_chunk.entries = used_bytes / mem::size_of::<T>();
                }

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE / elem_size;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::<T>::new(new_cap);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    // Drops the contents of the last chunk. The last chunk may be only partially
    // filled, unlike all other chunks.
    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start().addr();
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get().addr();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if mem::size_of::<T>() == 0 {
            // `T` is a ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
            end - start
        } else {
            // FIXME: this should *likely* use `offset_from`, but more
            // investigation is needed (including running tests in miri).
            (end - start) / mem::size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}

unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // Box handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}

#[inline(always)]
fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    val & !(align - 1)
}

#[inline(always)]
fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1)
}
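// For example, `align_down(13, 8) == 8` and `align_up(13, 8) == 16`; values
// that are already aligned are returned unchanged.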

// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
const DROPLESS_ALIGNMENT: usize = mem::align_of::<usize>();

/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
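///
/// A brief usage sketch (not a doctest):
///
/// ```ignore (illustrative)
/// let arena = DroplessArena::default();
/// let n: &mut u32 = arena.alloc(7);
/// let xs: &mut [u8] = arena.alloc_slice(&[1, 2, 3]);
/// let s: &str = arena.alloc_str("hello");
/// ```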
pub struct DroplessArena {
    /// A pointer to the start of the free space.
    start: Cell<*mut u8>,

    /// A pointer to the end of free space.
    ///
    /// The allocation proceeds downwards from the end of the chunk towards the
    /// start. (This is slightly simpler and faster than allocating upwards,
    /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
    /// When this pointer crosses the start pointer, a new chunk is allocated.
    ///
    /// This is kept aligned to DROPLESS_ALIGNMENT.
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk>>,
}

unsafe impl Send for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            // We set both `start` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            start: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
        }
    }
}

impl DroplessArena {
    #[inline(never)]
    #[cold]
    fn grow(&self, layout: Layout) {
        // Add some padding so we can align `self.end` while
        // still fitting in a `layout` allocation.
        let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;

        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // There is no need to update `last_chunk.entries` because that
                // field isn't used by `DroplessArena`.

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
            self.start.set(chunk.start());

            // Align the end to DROPLESS_ALIGNMENT.
            let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);

            // Make sure we don't go past `start`. This should not happen since the allocation
            // should be at least DROPLESS_ALIGNMENT - 1 bytes.
            debug_assert!(chunk.start().addr() <= end);

            self.end.set(chunk.end().with_addr(end));

            chunks.push(chunk);
        }
    }

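    // A worked sketch of the downward bump in `alloc_raw` below, with made-up
    // addresses on a 64-bit target (so `DROPLESS_ALIGNMENT == 8`): with
    // `start == 0x1000`, `end == 0x1040`, and a request of size 8 / align 8,
    // `bytes = align_up(8, 8) = 8`; the candidate end is
    // `align_down(0x1040 - 8, 8) = 0x1038`, which is still `>= start`, so the
    // allocation returns 0x1038 and `self.end` is updated to it.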
    #[inline]
    pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
        assert!(layout.size() != 0);

        // This loop executes once or twice: if allocation fails the first
        // time, the `grow` ensures it will succeed the second time.
        loop {
            let start = self.start.get().addr();
            let old_end = self.end.get();
            let end = old_end.addr();

            // Align allocated bytes so that `self.end` stays aligned to
            // DROPLESS_ALIGNMENT.
            let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);

            // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT.
            unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };

            if let Some(sub) = end.checked_sub(bytes) {
                let new_end = align_down(sub, layout.align());
                if start <= new_end {
                    let new_end = old_end.with_addr(new_end);
                    // `new_end` is aligned to DROPLESS_ALIGNMENT because `align_down`
                    // preserves alignment: both `end` and `bytes` are already
                    // aligned to DROPLESS_ALIGNMENT.
                    self.end.set(new_end);
                    return new_end;
                }
            }

            // No free space left. Allocate a new chunk to satisfy the request.
            // On failure the grow will panic or abort.
            self.grow(layout);
        }
    }

    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());
        assert!(mem::size_of::<T>() != 0);

        let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`, returning a mutable
    /// reference to it. Will panic if passed a zero-sized type.
    ///
    /// Panics:
    ///
    ///  - Zero-sized types
    ///  - Zero-length slices
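    ///
    /// A minimal sketch (not a doctest, assuming `arena` is a `DroplessArena`):
    ///
    /// ```ignore (illustrative)
    /// let lengths: &mut [usize] = arena.alloc_slice(&[1usize, 2, 3]);
    /// ```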
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
        assert!(mem::size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;

        unsafe {
            mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
            slice::from_raw_parts_mut(mem, slice.len())
        }
    }

    /// Used by `Lift` to check whether this slice is allocated
    /// in this arena.
    #[inline]
    pub fn contains_slice<T>(&self, slice: &[T]) -> bool {
        for chunk in self.chunks.borrow_mut().iter_mut() {
            let ptr = slice.as_ptr().cast::<u8>().cast_mut();
            if chunk.start() <= ptr && chunk.end() >= ptr {
                return true;
            }
        }
        false
    }

    /// Allocates a string slice that is copied into the `DroplessArena`, returning a
    /// reference to it. Will panic if passed an empty string.
    ///
    /// Panics:
    ///
    ///  - Zero-length string
    #[inline]
    pub fn alloc_str(&self, string: &str) -> &str {
        let slice = self.alloc_slice(string.as_bytes());

        // SAFETY: the result has a copy of the same valid UTF-8 bytes.
        unsafe { std::str::from_utf8_unchecked(slice) }
    }

    /// # Safety
    ///
    /// The caller must ensure that `mem` is valid for writes up to `size_of::<T>() * len` bytes,
    /// and that the memory stays allocated and not shared for the lifetime of `self`. This must
    /// hold even if `iter.next()` allocates onto `self`.
    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators
        loop {
            // SAFETY: The caller must ensure that `mem` is valid for writes up to
            // `size_of::<T>() * len`.
            unsafe {
                match iter.next() {
                    Some(value) if i < len => mem.add(i).write(value),
                    Some(_) | None => {
                        // We only return as many items as the iterator gave us, even
                        // though it was supposed to give us `len`
                        return slice::from_raw_parts_mut(mem, i);
                    }
                }
            }
            i += 1;
        }
    }

    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        // Warning: this function is reentrant: `iter` could hold a reference to `&self` and
        // allocate additional elements while we're iterating.
        let iter = iter.into_iter();
        assert!(mem::size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator expects to produce here.
                let len = min;

                if len == 0 {
                    return &mut [];
                }

                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
                // SAFETY: `write_from_iter` doesn't touch `self`. It only touches the slice we just
                // reserved. If the iterator panics or doesn't output `len` elements, this will
                // leave some unallocated slots in the arena, which is fine because we do not call
                // `drop`.
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => {
                outline(move || -> &mut [T] {
                    // Takes care of reentrancy.
                    let mut vec: SmallVec<[_; 8]> = iter.collect();
                    if vec.is_empty() {
                        return &mut [];
                    }
                    // Move the content to the arena by copying it and then forgetting
                    // the content of the SmallVec
                    unsafe {
                        let len = vec.len();
                        let start_ptr =
                            self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
                        vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
                        vec.set_len(0);
                        slice::from_raw_parts_mut(start_ptr, len)
                    }
                })
            }
        }
    }
}

/// Declare an `Arena` containing one dropless arena and many typed arenas (the
/// types of the typed arenas are specified by the arguments).
///
/// There are three cases of interest.
/// - Types that are `Copy`: these need not be specified in the arguments. They
///   will use the `DroplessArena`.
/// - Types that are `!Copy` and `!Drop`: these must be specified in the
///   arguments. An empty `TypedArena` will be created for each one, but the
///   `DroplessArena` will always be used and the `TypedArena` will stay empty.
///   This is odd but harmless, because an empty arena allocates no memory.
/// - Types that are `!Copy` and `Drop`: these must be specified in the
///   arguments. The `TypedArena` will be used for them.
///
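/// A hypothetical invocation (illustrative, not a doctest; `MyDropType` and the
/// field name `my_drop_type` are made up, and the leading `[]` is just a token
/// tree consumed by the `$a:tt` matcher):
///
/// ```ignore (illustrative)
/// rustc_arena::declare_arena!([
///     [] my_drop_type: MyDropType,
/// ]);
/// ```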
#[rustc_macro_transparency = "semitransparent"]
pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
    #[derive(Default)]
    pub struct Arena<'tcx> {
        pub dropless: $crate::DroplessArena,
        $($name: $crate::TypedArena<$ty>,)*
    }

    pub trait ArenaAllocatable<'tcx, C = rustc_arena::IsNotCopy>: Sized {
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self;
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self];
    }

    // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
    impl<'tcx, T: Copy> ArenaAllocatable<'tcx, rustc_arena::IsCopy> for T {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
            arena.dropless.alloc(self)
        }
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self] {
            arena.dropless.alloc_from_iter(iter)
        }
    }
    $(
        impl<'tcx> ArenaAllocatable<'tcx, rustc_arena::IsNotCopy> for $ty {
            #[inline]
            fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc(self)
                } else {
                    arena.$name.alloc(self)
                }
            }

            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter(
                arena: &'tcx Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'tcx mut [Self] {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc_from_iter(iter)
                } else {
                    arena.$name.alloc_from_iter(iter)
                }
            }
        }
    )*

    impl<'tcx> Arena<'tcx> {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&'tcx self, value: T) -> &mut T {
            value.allocate_on(self)
        }

        // Any type that impls `Copy` can have its slices arena-allocated in the `DroplessArena`.
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
            if value.is_empty() {
                return &mut [];
            }
            self.dropless.alloc_slice(value)
        }

        #[inline]
        pub fn alloc_str(&self, string: &str) -> &str {
            if string.is_empty() {
                return "";
            }
            self.dropless.alloc_str(string)
        }

        #[allow(clippy::mut_from_ref)]
        pub fn alloc_from_iter<T: ArenaAllocatable<'tcx, C>, C>(
            &'tcx self,
            iter: impl ::std::iter::IntoIterator<Item = T>,
        ) -> &mut [T] {
            T::allocate_from_iter(self, iter)
        }
    }
}

// Marker types that let us give different behaviour for arenas allocating
// `Copy` types vs `!Copy` types.
pub struct IsCopy;
pub struct IsNotCopy;

#[cfg(test)]
mod tests;