core/iter/adapters/step_by.rs

use crate::intrinsics;
#[cfg(not(feature = "ferrocene_certified"))]
use crate::iter::{TrustedLen, TrustedRandomAccess, from_fn};
use crate::num::NonZero;
use crate::ops::{Range, Try};

// Ferrocene addition: imports for certified subset
#[cfg(feature = "ferrocene_certified")]
#[rustfmt::skip]
use crate::iter::from_fn;

/// An iterator for stepping iterators by a custom amount.
///
/// This `struct` is created by the [`step_by`] method on [`Iterator`]. See
/// its documentation for more.
///
/// [`step_by`]: Iterator::step_by
/// [`Iterator`]: trait.Iterator.html
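///
/// # Example
///
/// The first element of the underlying iterator is always yielded, then every
/// `step`-th element after it (a small illustrative example):
///
/// ```
/// let mut iter = (0..10).step_by(3);
///
/// assert_eq!(iter.next(), Some(0));
/// assert_eq!(iter.next(), Some(3));
/// assert_eq!(iter.next(), Some(6));
/// assert_eq!(iter.next(), Some(9));
/// assert_eq!(iter.next(), None);
/// ```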
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "iterator_step_by", since = "1.28.0")]
#[cfg_attr(not(feature = "ferrocene_certified"), derive(Clone, Debug))]
pub struct StepBy<I> {
    /// This field is guaranteed to be preprocessed by the specialized `SpecRangeSetup::setup`
    /// in the constructor.
    /// For most iterators that processing is a no-op, but for Range<{integer}> types it is lossy
    /// which means the inner iterator cannot be returned to user code.
    /// Additionally this type-dependent preprocessing means specialized implementations
    /// cannot be used interchangeably.
    iter: I,
    /// This field is `step - 1`, aka the correct amount to pass to `nth` when iterating.
    /// It MUST NOT be `usize::MAX`, as `unsafe` code depends on being able to add one
    /// without the risk of overflow.  (This is important so that length calculations
    /// don't need to check for division-by-zero, for example.)
    step_minus_one: usize,
    first_take: bool,
}

impl<I> StepBy<I> {
    #[inline]
    pub(in crate::iter) fn new(iter: I, step: usize) -> StepBy<I> {
        assert!(step != 0);
        let iter = <I as SpecRangeSetup<I>>::setup(iter, step);
        StepBy { iter, step_minus_one: step - 1, first_take: true }
    }

    /// The `step` that was originally passed to `Iterator::step_by(step)`,
    /// aka `self.step_minus_one + 1`.
    #[inline]
    fn original_step(&self) -> NonZero<usize> {
        // SAFETY: By type invariant, `step_minus_one` cannot be `MAX`, which
        // means the addition cannot overflow and the result cannot be zero.
        unsafe { NonZero::new_unchecked(intrinsics::unchecked_add(self.step_minus_one, 1)) }
    }
}

#[stable(feature = "iterator_step_by", since = "1.28.0")]
impl<I> Iterator for StepBy<I>
where
    I: Iterator,
{
    type Item = I::Item;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        self.spec_next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.spec_size_hint()
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        self.spec_nth(n)
    }

    fn try_fold<Acc, F, R>(&mut self, acc: Acc, f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>,
    {
        self.spec_try_fold(acc, f)
    }

    #[inline]
    fn fold<Acc, F>(self, acc: Acc, f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        self.spec_fold(acc, f)
    }
}

#[cfg(not(feature = "ferrocene_certified"))]
impl<I> StepBy<I>
where
    I: ExactSizeIterator,
{
    // The zero-based index, counted from the end of the iterator, of the
    // last element. Used in the `DoubleEndedIterator` implementation.
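    // A worked example (illustrative): for an inner iterator of length 7 with
    // step 3, forward iteration yields the elements at indices 0, 3 and 6, so
    // before anything has been taken (`first_take`) the last element sits at
    // back index 0: `rem == 7 % 3 == 1`, giving `rem - 1 == 0`. For length 6
    // and step 3 the last yielded element is at index 3, i.e. back index 2,
    // matching the `rem == 0` branch (`step_minus_one == 2`).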
    fn next_back_index(&self) -> usize {
        let rem = self.iter.len() % self.original_step();
        if self.first_take { if rem == 0 { self.step_minus_one } else { rem - 1 } } else { rem }
    }
}

#[cfg(not(feature = "ferrocene_certified"))]
#[stable(feature = "double_ended_step_by_iterator", since = "1.38.0")]
impl<I> DoubleEndedIterator for StepBy<I>
where
    I: DoubleEndedIterator + ExactSizeIterator,
{
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        self.spec_next_back()
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        self.spec_nth_back(n)
    }

    fn try_rfold<Acc, F, R>(&mut self, init: Acc, f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>,
    {
        self.spec_try_rfold(init, f)
    }

    #[inline]
    fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
    where
        Self: Sized,
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        self.spec_rfold(init, f)
    }
}

// StepBy can only make the iterator shorter, so the len will still fit.
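// For example, (0..10).step_by(3) yields the four elements 0, 3, 6 and 9, so
// its len (4) is no larger than the inner iterator's len (10).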
#[cfg(not(feature = "ferrocene_certified"))]
#[stable(feature = "iterator_step_by", since = "1.28.0")]
impl<I> ExactSizeIterator for StepBy<I> where I: ExactSizeIterator {}

// SAFETY: This adapter is shortening. TrustedLen requires the upper bound to be calculated correctly.
// These requirements can only be satisfied when the inner iterator's upper bound is never `None`.
// `I: TrustedRandomAccess` happens to provide this guarantee while `I: TrustedLen` would not.
// This also covers the Range specializations since the ranges also implement TRA.
#[cfg(not(feature = "ferrocene_certified"))]
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<I> TrustedLen for StepBy<I> where I: Iterator + TrustedRandomAccess {}

trait SpecRangeSetup<T> {
    fn setup(inner: T, step: usize) -> T;
}

impl<T> SpecRangeSetup<T> for T {
    #[inline]
    default fn setup(inner: T, _step: usize) -> T {
        inner
    }
}

/// Specialization trait to optimize `StepBy<Range<{integer}>>` iteration.
///
/// # Safety
///
/// Technically this is safe to implement (look ma, no unsafe!), but in reality
/// a lot of unsafe code relies on ranges over integers being correct.
///
/// For correctness *all* public StepBy methods must be specialized
/// because `setup` drastically alters the meaning of the struct fields so that mixing
/// different implementations would lead to incorrect results.
unsafe trait StepByImpl<I> {
    type Item;

    fn spec_next(&mut self) -> Option<Self::Item>;

    fn spec_size_hint(&self) -> (usize, Option<usize>);

    fn spec_nth(&mut self, n: usize) -> Option<Self::Item>;

    fn spec_try_fold<Acc, F, R>(&mut self, acc: Acc, f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>;

    fn spec_fold<Acc, F>(self, acc: Acc, f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc;
}

/// Specialization trait for double-ended iteration.
///
/// See also: `StepByImpl`
///
/// # Safety
///
/// The specializations must be implemented together with `StepByImpl`
/// where applicable. I.e. if `StepBy` does support backwards iteration
/// for a given iterator and that is specialized for forward iteration then
/// it must also be specialized for backwards iteration.
#[cfg(not(feature = "ferrocene_certified"))]
unsafe trait StepByBackImpl<I> {
    type Item;

    fn spec_next_back(&mut self) -> Option<Self::Item>
    where
        I: DoubleEndedIterator + ExactSizeIterator;

    fn spec_nth_back(&mut self, n: usize) -> Option<Self::Item>
    where
        I: DoubleEndedIterator + ExactSizeIterator;

    fn spec_try_rfold<Acc, F, R>(&mut self, init: Acc, f: F) -> R
    where
        I: DoubleEndedIterator + ExactSizeIterator,
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>;

    fn spec_rfold<Acc, F>(self, init: Acc, f: F) -> Acc
    where
        I: DoubleEndedIterator + ExactSizeIterator,
        F: FnMut(Acc, Self::Item) -> Acc;
}

unsafe impl<I: Iterator> StepByImpl<I> for StepBy<I> {
    type Item = I::Item;

    #[inline]
    default fn spec_next(&mut self) -> Option<I::Item> {
        let step_size = if self.first_take { 0 } else { self.step_minus_one };
        self.first_take = false;
        self.iter.nth(step_size)
    }

    #[inline]
    default fn spec_size_hint(&self) -> (usize, Option<usize>) {
        #[inline]
        fn first_size(step: NonZero<usize>) -> impl Fn(usize) -> usize {
            move |n| if n == 0 { 0 } else { 1 + (n - 1) / step }
        }

        #[inline]
        fn other_size(step: NonZero<usize>) -> impl Fn(usize) -> usize {
            move |n| n / step
        }
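
        // Illustrative check of the helpers above: with `step == 3` and 10 inner
        // elements remaining, `first_size` gives `1 + 9 / 3 == 4` (elements at
        // offsets 0, 3, 6 and 9), while after the first element has been taken
        // `other_size` gives `9 / 3 == 3` further elements.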

        let (low, high) = self.iter.size_hint();

        if self.first_take {
            let f = first_size(self.original_step());
            (f(low), high.map(f))
        } else {
            let f = other_size(self.original_step());
            (f(low), high.map(f))
        }
    }

    #[inline]
    default fn spec_nth(&mut self, mut n: usize) -> Option<I::Item> {
        if self.first_take {
            self.first_take = false;
            let first = self.iter.next();
            if n == 0 {
                return first;
            }
            n -= 1;
        }
        // `n` and `self.step_minus_one` are indices; we need to add 1 to get the number of
        // elements. When calling `.nth`, we need to subtract 1 again to convert back to an index.
        let mut step = self.original_step().get();
        // `n + 1` could overflow, so if `n` is `usize::MAX` we advance by `step` elements up
        // front (via `.nth(step - 1)`) instead of adding one.
        if n == usize::MAX {
            self.iter.nth(step - 1);
        } else {
            n += 1;
        }

        // overflow handling
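        // `n * step` is the total number of inner elements still to be consumed, but the
        // product may overflow a `usize`. Each pass below consumes the larger of
        // `div_n * n` and `div_step * step` elements (both products are guaranteed to
        // fit) and shrinks `step` or `n` accordingly, so the remaining distance is still
        // `n * step`; once that product fits, the target element is returned directly.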
        loop {
            let mul = n.checked_mul(step);
            {
                if intrinsics::likely(mul.is_some()) {
                    return self.iter.nth(mul.unwrap() - 1);
                }
            }
            let div_n = usize::MAX / n;
            let div_step = usize::MAX / step;
            let nth_n = div_n * n;
            let nth_step = div_step * step;
            let nth = if nth_n > nth_step {
                step -= div_n;
                nth_n
            } else {
                n -= div_step;
                nth_step
            };
            self.iter.nth(nth - 1);
        }
    }

    default fn spec_try_fold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>,
    {
        #[inline]
        fn nth<I: Iterator>(
            iter: &mut I,
            step_minus_one: usize,
        ) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth(step_minus_one)
        }

        if self.first_take {
            self.first_take = false;
            match self.iter.next() {
                None => return try { acc },
                Some(x) => acc = f(acc, x)?,
            }
        }
        from_fn(nth(&mut self.iter, self.step_minus_one)).try_fold(acc, f)
    }

    default fn spec_fold<Acc, F>(mut self, mut acc: Acc, mut f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        #[inline]
        fn nth<I: Iterator>(
            iter: &mut I,
            step_minus_one: usize,
        ) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth(step_minus_one)
        }

        if self.first_take {
            self.first_take = false;
            match self.iter.next() {
                None => return acc,
                Some(x) => acc = f(acc, x),
            }
        }
        from_fn(nth(&mut self.iter, self.step_minus_one)).fold(acc, f)
    }
}

#[cfg(not(feature = "ferrocene_certified"))]
unsafe impl<I: DoubleEndedIterator + ExactSizeIterator> StepByBackImpl<I> for StepBy<I> {
    type Item = I::Item;

    #[inline]
    default fn spec_next_back(&mut self) -> Option<Self::Item> {
        self.iter.nth_back(self.next_back_index())
    }

    #[inline]
    default fn spec_nth_back(&mut self, n: usize) -> Option<I::Item> {
        // `self.iter.nth_back(usize::MAX)` does the right thing here when `n`
        // is out of bounds because the length of `self.iter` does not exceed
        // `usize::MAX` (because `I: ExactSizeIterator`) and `nth_back` is
        // zero-indexed
        let n = n.saturating_mul(self.original_step().get()).saturating_add(self.next_back_index());
        self.iter.nth_back(n)
    }

    default fn spec_try_rfold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Output = Acc>,
    {
        #[inline]
        fn nth_back<I: DoubleEndedIterator>(
            iter: &mut I,
            step_minus_one: usize,
        ) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth_back(step_minus_one)
        }

        match self.next_back() {
            None => try { init },
            Some(x) => {
                let acc = f(init, x)?;
                from_fn(nth_back(&mut self.iter, self.step_minus_one)).try_fold(acc, f)
            }
        }
    }

    #[inline]
    default fn spec_rfold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
    where
        Self: Sized,
        F: FnMut(Acc, I::Item) -> Acc,
    {
        #[inline]
        fn nth_back<I: DoubleEndedIterator>(
            iter: &mut I,
            step_minus_one: usize,
        ) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth_back(step_minus_one)
        }

        match self.next_back() {
            None => init,
            Some(x) => {
                let acc = f(init, x);
                from_fn(nth_back(&mut self.iter, self.step_minus_one)).fold(acc, f)
            }
        }
    }
}

/// For these implementations, `SpecRangeSetup` calculates the number
/// of iterations that will be needed and stores that in `iter.end`.
///
/// The various iterator implementations then rely on that to not need
/// overflow checking, letting loops just be counted instead.
///
/// These only work for unsigned types, and will need to be reworked
/// if you want to use it to specialize on signed types.
///
/// Currently these are only implemented for integers up to `usize` due to
/// correctness issues around `ExactSizeIterator` impls on 16-bit platforms.
/// And since `ExactSizeIterator` is a prerequisite for backwards iteration,
/// and backwards and forwards iteration must be specialized consistently,
/// that makes the situation complicated enough that it's not covered for now.
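///
/// As a concrete illustration of the `setup` transformation below: for
/// `(0u8..250).step_by(100)` the inner length is 250, so `setup` stores
/// `250usize.div_ceil(100) == 3` in `r.end`, and the specialized `spec_next`
/// then yields `0`, `100` and `200` while counting `end` down to zero.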
macro_rules! spec_int_ranges {
    ($($t:ty)*) => ($(

        const _: () = assert!(usize::BITS >= <$t>::BITS);

        impl SpecRangeSetup<Range<$t>> for Range<$t> {
            #[inline]
            fn setup(mut r: Range<$t>, step: usize) -> Range<$t> {
                let inner_len = r.size_hint().0;
                // If step exceeds $t::MAX, then the count will be at most 1 and
                // thus always fit into $t.
                let yield_count = inner_len.div_ceil(step);
                // Turn the range end into an iteration counter
                r.end = yield_count as $t;
                r
            }
        }

        unsafe impl StepByImpl<Range<$t>> for StepBy<Range<$t>> {
            #[inline]
            fn spec_next(&mut self) -> Option<$t> {
                // If a step size larger than the type has been specified, fall back to
                // `$t::MAX`, in which case `remaining` will be at most 1.
                let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
                let remaining = self.iter.end;
                if remaining > 0 {
                    let val = self.iter.start;
                    // this can only overflow during the last step, after which the value
                    // will not be used
                    self.iter.start = val.wrapping_add(step);
                    self.iter.end = remaining - 1;
                    Some(val)
                } else {
                    None
                }
            }

            #[inline]
            fn spec_size_hint(&self) -> (usize, Option<usize>) {
                let remaining = self.iter.end as usize;
                (remaining, Some(remaining))
            }

            // The methods below are all copied from the Iterator trait default impls.
            // We have to repeat them here so that the specialization overrides the StepByImpl defaults

            #[inline]
            fn spec_nth(&mut self, n: usize) -> Option<Self::Item> {
                self.advance_by(n).ok()?;
                self.next()
            }

            #[inline]
            fn spec_try_fold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
                where
                    F: FnMut(Acc, Self::Item) -> R,
                    R: Try<Output = Acc>
            {
                let mut accum = init;
                while let Some(x) = self.next() {
                    accum = f(accum, x)?;
                }
                try { accum }
            }

            #[inline]
            fn spec_fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
                where
                    F: FnMut(Acc, Self::Item) -> Acc
            {
                // If a step size larger than the type has been specified, fall back to
                // `$t::MAX`, in which case `remaining` will be at most 1.
                let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
                let remaining = self.iter.end;
                let mut acc = init;
                let mut val = self.iter.start;
                for _ in 0..remaining {
                    acc = f(acc, val);
                    // this can only overflow during the last step, after which the value
                    // will no longer be used
                    val = val.wrapping_add(step);
                }
                acc
            }
        }
    )*)
}

#[cfg(not(feature = "ferrocene_certified"))]
macro_rules! spec_int_ranges_r {
    ($($t:ty)*) => ($(
        const _: () = assert!(usize::BITS >= <$t>::BITS);

        unsafe impl StepByBackImpl<Range<$t>> for StepBy<Range<$t>> {

            #[inline]
            fn spec_next_back(&mut self) -> Option<Self::Item> {
                let step = self.original_step().get() as $t;
                let remaining = self.iter.end;
                if remaining > 0 {
                    let start = self.iter.start;
                    self.iter.end = remaining - 1;
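                    // The value below is the furthest remaining element that forward
                    // iteration would still yield, so it stays within the original range
                    // and the arithmetic cannot overflow `$t`. (If the requested step
                    // exceeds `$t::MAX`, `remaining` is at most 1 and the product is 0.)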
                    Some(start + step * (remaining - 1))
                } else {
                    None
                }
            }

            // The methods below are all copied from the Iterator trait default impls.
            // We have to repeat them here so that the specialization overrides the StepByBackImpl defaults

            #[inline]
            fn spec_nth_back(&mut self, n: usize) -> Option<Self::Item> {
                if self.advance_back_by(n).is_err() {
                    return None;
                }
                self.next_back()
            }

            #[inline]
            fn spec_try_rfold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
            where
                F: FnMut(Acc, Self::Item) -> R,
                R: Try<Output = Acc>
            {
                let mut accum = init;
                while let Some(x) = self.next_back() {
                    accum = f(accum, x)?;
                }
                try { accum }
            }

            #[inline]
            fn spec_rfold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
            where
                F: FnMut(Acc, Self::Item) -> Acc
            {
                let mut accum = init;
                while let Some(x) = self.next_back() {
                    accum = f(accum, x);
                }
                accum
            }
        }
    )*)
}

#[cfg(target_pointer_width = "64")]
spec_int_ranges!(u8 u16 u32 u64 usize);
// DoubleEndedIterator requires ExactSizeIterator, which isn't implemented for Range<u64>
#[cfg(not(feature = "ferrocene_certified"))]
#[cfg(target_pointer_width = "64")]
spec_int_ranges_r!(u8 u16 u32 usize);

#[cfg(target_pointer_width = "32")]
spec_int_ranges!(u8 u16 u32 usize);
#[cfg(not(feature = "ferrocene_certified"))]
#[cfg(target_pointer_width = "32")]
spec_int_ranges_r!(u8 u16 u32 usize);

#[cfg(target_pointer_width = "16")]
spec_int_ranges!(u8 u16 usize);
#[cfg(not(feature = "ferrocene_certified"))]
#[cfg(target_pointer_width = "16")]
spec_int_ranges_r!(u8 u16 usize);