// rust-lang/rust: library/test/src/lib.rs
1//! Support code for rustc's built in unit-test and micro-benchmarking
2//! framework.
3//!
4//! Almost all user code will only be interested in `Bencher` and
5//! `black_box`. All other interactions (such as writing tests and
6//! benchmarks themselves) should be done via the `#[test]` and
7//! `#[bench]` attributes.
8//!
9//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more
10//! details.
11
12// Currently, not much of this is meant for users. It is intended to
13// support the simplest interface possible for representing and
14// running tests while providing a base that other test frameworks may
15// build off of.
16
17#![unstable(feature = "test", issue = "50297")]
18#![doc(test(attr(deny(warnings))))]
19#![doc(rust_logo)]
20#![feature(rustdoc_internals)]
21#![feature(file_buffered)]
22#![feature(internal_output_capture)]
23#![feature(io_const_error)]
24#![feature(staged_api)]
25#![feature(process_exitcode_internals)]
26#![feature(panic_can_unwind)]
27#![cfg_attr(test, feature(test))]
28#![feature(thread_spawn_hook)]
29#![allow(internal_features)]
30#![warn(rustdoc::unescaped_backticks)]
31#![warn(unreachable_pub)]
32
33pub use cli::TestOpts;
34
35pub use self::ColorConfig::*;
36pub use self::bench::{Bencher, black_box};
37pub use self::console::run_tests_console;
38pub use self::options::{ColorConfig, Options, OutputFormat, RunIgnored, ShouldPanic};
39pub use self::types::TestName::*;
40pub use self::types::*;
41
// Module to be used by rustc to compile tests in libtest
pub mod test {
    // Narrow re-export surface consumed by the harness code that
    // `rustc --test` generates; user code should not rely on this module.
    pub use crate::bench::Bencher;
    pub use crate::cli::{TestOpts, parse_opts};
    pub use crate::helpers::metrics::{Metric, MetricMap};
    pub use crate::options::{Options, RunIgnored, RunStrategy, ShouldPanic};
    pub use crate::test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
    pub use crate::time::{TestExecTime, TestTimeOptions};
    pub use crate::types::{
        DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc,
        TestDescAndFn, TestId, TestList, TestListOrder, TestName, TestType,
    };
    pub use crate::{assert_test_result, filter_tests, run_test, test_main, test_main_static};
}
56
57use std::collections::VecDeque;
58use std::io::prelude::Write;
59use std::mem::ManuallyDrop;
60use std::panic::{self, AssertUnwindSafe, PanicHookInfo, catch_unwind};
61use std::process::{self, Command, Termination};
62use std::sync::mpsc::{Sender, channel};
63use std::sync::{Arc, Mutex};
64use std::time::{Duration, Instant};
65use std::{env, io, thread};
66
67pub mod bench;
68mod cli;
69mod console;
70mod event;
71mod formatters;
72mod helpers;
73mod options;
74pub mod stats;
75mod term;
76mod test_result;
77mod time;
78mod types;
79
80#[cfg(test)]
81mod tests;
82
83use core::any::Any;
84
85use event::{CompletedTest, TestEvent};
86use helpers::concurrency::get_concurrency;
87use helpers::shuffle::{get_shuffle_seed, shuffle_tests};
88use options::RunStrategy;
89use test_result::*;
90use time::TestExecTime;
91
/// Process exit code to be used to indicate test failures.
pub const ERROR_EXIT_CODE: i32 = 101;

// Env var naming the single test a spawned secondary process should run
// (set by `spawn_test_subprocess`, consumed by `test_main_static_abort`).
const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE";
// Env var telling the secondary process that benchmarks are being
// benchmarked (i.e. should not be converted into plain tests).
const SECONDARY_TEST_BENCH_BENCHMARKS_VAR: &str = "__RUST_TEST_BENCH_BENCHMARKS";
97
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
    // Delegate to the callback-taking variant with a no-op exit callback.
    test_main_with_exit_callback(args, tests, options, || {})
}
103
104pub fn test_main_with_exit_callback<F: FnOnce()>(
105    args: &[String],
106    tests: Vec<TestDescAndFn>,
107    options: Option<Options>,
108    exit_callback: F,
109) {
110    let tests = TestList::new(tests, TestListOrder::Unsorted);
111    test_main_inner(args, tests, options, exit_callback)
112}
113
114fn test_main_inner<F: FnOnce()>(
115    args: &[String],
116    tests: TestList,
117    options: Option<Options>,
118    exit_callback: F,
119) {
120    let mut opts = match cli::parse_opts(args) {
121        Some(Ok(o)) => o,
122        Some(Err(msg)) => {
123            eprintln!("error: {msg}");
124            process::exit(ERROR_EXIT_CODE);
125        }
126        None => return,
127    };
128    if let Some(options) = options {
129        opts.options = options;
130    }
131    if opts.list {
132        if let Err(e) = console::list_tests_console(&opts, tests) {
133            eprintln!("error: io error when listing tests: {e:?}");
134            process::exit(ERROR_EXIT_CODE);
135        }
136    } else {
137        if !opts.nocapture {
138            // If we encounter a non-unwinding panic, flush any captured output from the current test,
139            // and stop capturing output to ensure that the non-unwinding panic message is visible.
140            // We also acquire the locks for both output streams to prevent output from other threads
141            // from interleaving with the panic message or appearing after it.
142            let builtin_panic_hook = panic::take_hook();
143            let hook = Box::new({
144                move |info: &'_ PanicHookInfo<'_>| {
145                    if !info.can_unwind() {
146                        std::mem::forget(std::io::stderr().lock());
147                        let mut stdout = ManuallyDrop::new(std::io::stdout().lock());
148                        if let Some(captured) = io::set_output_capture(None) {
149                            if let Ok(data) = captured.lock() {
150                                let _ = stdout.write_all(&data);
151                                let _ = stdout.flush();
152                            }
153                        }
154                    }
155                    builtin_panic_hook(info);
156                }
157            });
158            panic::set_hook(hook);
159            // Use a thread spawning hook to make new threads inherit output capturing.
160            std::thread::add_spawn_hook(|_| {
161                // Get and clone the output capture of the current thread.
162                let output_capture = io::set_output_capture(None);
163                io::set_output_capture(output_capture.clone());
164                // Set the output capture of the new thread.
165                || {
166                    io::set_output_capture(output_capture);
167                }
168            });
169        }
170        let res = console::run_tests_console(&opts, tests);
171        // Prevent Valgrind from reporting reachable blocks in users' unit tests.
172        drop(panic::take_hook());
173        exit_callback();
174        match res {
175            Ok(true) => {}
176            Ok(false) => process::exit(ERROR_EXIT_CODE),
177            Err(e) => {
178                eprintln!("error: io error when listing tests: {e:?}");
179                process::exit(ERROR_EXIT_CODE);
180            }
181        }
182    }
183}
184
185/// A variant optimized for invocation with a static test vector.
186/// This will panic (intentionally) when fed any dynamic tests.
187///
188/// This is the entry point for the main function generated by `rustc --test`
189/// when panic=unwind.
190pub fn test_main_static(tests: &[&TestDescAndFn]) {
191    let args = env::args().collect::<Vec<_>>();
192    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
193    // Tests are sorted by name at compile time by mk_tests_slice.
194    let tests = TestList::new(owned_tests, TestListOrder::Sorted);
195    test_main_inner(&args, tests, None, || {})
196}
197
/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// Runs tests in panic=abort mode, which involves spawning subprocesses for
/// tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=abort.
pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
    // If we're being run in SpawnedSecondary mode, run the test here. run_test
    // will then exit the process.
    if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
        // SAFETY(review): presumably sound because we are at the very start of
        // the secondary process, before any other threads exist — confirm.
        unsafe {
            env::remove_var(SECONDARY_TEST_INVOKER_VAR);
        }

        // Convert benchmarks to tests if we're not benchmarking.
        let mut tests = tests.iter().map(make_owned_test).collect::<Vec<_>>();
        if env::var(SECONDARY_TEST_BENCH_BENCHMARKS_VAR).is_ok() {
            unsafe {
                env::remove_var(SECONDARY_TEST_BENCH_BENCHMARKS_VAR);
            }
        } else {
            tests = convert_benchmarks_to_tests(tests);
        };

        // The secondary process runs exactly one test, identified by the
        // name placed in the environment by `spawn_test_subprocess`.
        let test = tests
            .into_iter()
            .find(|test| test.desc.name.as_slice() == name)
            .unwrap_or_else(|| panic!("couldn't find a test with the provided name '{name}'"));
        let TestDescAndFn { desc, testfn } = test;
        match testfn.into_runnable() {
            Runnable::Test(runnable_test) => {
                if runnable_test.is_dynamic() {
                    panic!("only static tests are supported");
                }
                // Never returns: the result is reported via the exit status.
                run_test_in_spawned_subprocess(desc, runnable_test);
            }
            Runnable::Bench(_) => {
                panic!("benchmarks should not be executed into child processes")
            }
        }
    }

    // Primary process: run the full console driver with panic_abort set so
    // each test is executed in its own subprocess.
    let args = env::args().collect::<Vec<_>>();
    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
    // Tests are sorted by name at compile time by mk_tests_slice.
    let tests = TestList::new(owned_tests, TestListOrder::Sorted);
    test_main_inner(&args, tests, Some(Options::new().panic_abort(true)), || {})
}
248
249/// Clones static values for putting into a dynamic vector, which test_main()
250/// needs to hand out ownership of tests to parallel test runners.
251///
252/// This will panic when fed any dynamic tests, because they cannot be cloned.
253fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
254    match test.testfn {
255        StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
256        StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
257        _ => panic!("non-static tests passed to test::test_main_static"),
258    }
259}
260
261/// Public API used by rustdoc to display the `total` and `compilation` times in the expected
262/// format.
263pub fn print_merged_doctests_times(args: &[String], total_time: f64, compilation_time: f64) {
264    let opts = match cli::parse_opts(args) {
265        Some(Ok(o)) => o,
266        Some(Err(msg)) => {
267            eprintln!("error: {msg}");
268            process::exit(ERROR_EXIT_CODE);
269        }
270        None => return,
271    };
272    let mut formatter = console::get_formatter(&opts, 0);
273    formatter.write_merged_doctests_times(total_time, compilation_time).unwrap();
274}
275
276/// Invoked when unit tests terminate. Returns `Result::Err` if the test is
277/// considered a failure. By default, invokes `report()` and checks for a `0`
278/// result.
279pub fn assert_test_result<T: Termination>(result: T) -> Result<(), String> {
280    let code = result.report().to_i32();
281    if code == 0 {
282        Ok(())
283    } else {
284        Err(format!(
285            "the test returned a termination value with a non-zero status code \
286             ({code}) which indicates a failure"
287        ))
288    }
289}
290
// Tests split into regular tests and benchmarks, each entry tagged with a
// `TestId` that is unique across *both* lists (ids come from one counter).
struct FilteredTests {
    tests: Vec<(TestId, TestDescAndFn)>,
    benches: Vec<(TestId, TestDescAndFn)>,
    // Next id to hand out; bumped by every `add_*` call.
    next_id: usize,
}

impl FilteredTests {
    fn add_bench(&mut self, desc: TestDesc, testfn: TestFn) {
        let test = TestDescAndFn { desc, testfn };
        self.benches.push((TestId(self.next_id), test));
        self.next_id += 1;
    }
    fn add_test(&mut self, desc: TestDesc, testfn: TestFn) {
        let test = TestDescAndFn { desc, testfn };
        self.tests.push((TestId(self.next_id), test));
        self.next_id += 1;
    }
    // Combined count of tests and benchmarks (== ids handed out so far).
    fn total_len(&self) -> usize {
        self.tests.len() + self.benches.len()
    }
}
312
/// Filters and runs the given tests (and then the benchmarks, if enabled),
/// reporting progress through `notify_about_test_event`.
///
/// Tests run serially when the effective concurrency is 1, otherwise up to
/// `concurrency` tests run on worker threads at once; benchmarks always run
/// serially at the end. Returns `Err` only when the notification callback
/// itself fails.
pub fn run_tests<F>(
    opts: &TestOpts,
    tests: TestList,
    mut notify_about_test_event: F,
) -> io::Result<()>
where
    F: FnMut(TestEvent) -> io::Result<()>,
{
    use std::collections::HashMap;
    use std::hash::{BuildHasherDefault, DefaultHasher};
    use std::sync::mpsc::RecvTimeoutError;

    // Handle to a test in flight; `join_handle` is `None` when the test ran
    // synchronously (e.g. platforms without thread support).
    struct RunningTest {
        join_handle: Option<thread::JoinHandle<()>>,
    }

    impl RunningTest {
        fn join(self, completed_test: &mut CompletedTest) {
            if let Some(join_handle) = self.join_handle {
                if let Err(_) = join_handle.join() {
                    // The thread panicked *after* sending its result over the
                    // channel; downgrade a reported success to a failure.
                    if let TrOk = completed_test.result {
                        completed_test.result =
                            TrFailedMsg("panicked after reporting success".to_string());
                    }
                }
            }
        }
    }

    // Use a deterministic hasher
    type TestMap = HashMap<TestId, RunningTest, BuildHasherDefault<DefaultHasher>>;

    // One entry per started test, queued in start order; `timeout` is the
    // absolute deadline after which the test is reported as slow.
    struct TimeoutEntry {
        id: TestId,
        desc: TestDesc,
        timeout: Instant,
    }

    let tests_len = tests.tests.len();

    let mut filtered = FilteredTests { tests: Vec::new(), benches: Vec::new(), next_id: 0 };

    let mut filtered_tests = filter_tests(opts, tests);
    if !opts.bench_benchmarks {
        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    }

    // Partition the surviving entries into tests and benchmarks, padding
    // names so console output lines up.
    for test in filtered_tests {
        let mut desc = test.desc;
        desc.name = desc.name.with_padding(test.testfn.padding());

        match test.testfn {
            DynBenchFn(_) | StaticBenchFn(_) => {
                filtered.add_bench(desc, test.testfn);
            }
            testfn => {
                filtered.add_test(desc, testfn);
            }
        };
    }

    let filtered_out = tests_len - filtered.total_len();
    let event = TestEvent::TeFilteredOut(filtered_out);
    notify_about_test_event(event)?;

    let shuffle_seed = get_shuffle_seed(opts);

    let event = TestEvent::TeFiltered(filtered.total_len(), shuffle_seed);
    notify_about_test_event(event)?;

    let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);

    let mut remaining = filtered.tests;
    if let Some(shuffle_seed) = shuffle_seed {
        shuffle_tests(shuffle_seed, &mut remaining);
    }
    // Store the tests in a VecDeque so we can efficiently remove the first element to run the
    // tests in the order they were passed (unless shuffled).
    let mut remaining = VecDeque::from(remaining);
    let mut pending = 0;

    let (tx, rx) = channel::<CompletedTest>();
    let run_strategy = if opts.options.panic_abort && !opts.force_run_in_process {
        RunStrategy::SpawnPrimary
    } else {
        RunStrategy::InProcess
    };

    let mut running_tests: TestMap = HashMap::default();
    let mut timeout_queue: VecDeque<TimeoutEntry> = VecDeque::new();

    // Pops every queue entry whose deadline has passed. An entry only counts
    // as timed out if its test is still running; entries for already-finished
    // tests are silently discarded.
    fn get_timed_out_tests(
        running_tests: &TestMap,
        timeout_queue: &mut VecDeque<TimeoutEntry>,
    ) -> Vec<TestDesc> {
        let now = Instant::now();
        let mut timed_out = Vec::new();
        while let Some(timeout_entry) = timeout_queue.front() {
            if now < timeout_entry.timeout {
                break;
            }
            let timeout_entry = timeout_queue.pop_front().unwrap();
            if running_tests.contains_key(&timeout_entry.id) {
                timed_out.push(timeout_entry.desc);
            }
        }
        timed_out
    }

    // Time remaining until the earliest queued deadline (saturating at zero),
    // or `None` when no deadlines are pending.
    fn calc_timeout(timeout_queue: &VecDeque<TimeoutEntry>) -> Option<Duration> {
        timeout_queue.front().map(|&TimeoutEntry { timeout: next_timeout, .. }| {
            let now = Instant::now();
            if next_timeout >= now { next_timeout - now } else { Duration::new(0, 0) }
        })
    }

    if concurrency == 1 {
        // Serial path: run each test to completion before starting the next.
        while !remaining.is_empty() {
            let (id, test) = remaining.pop_front().unwrap();
            let event = TestEvent::TeWait(test.desc.clone());
            notify_about_test_event(event)?;
            let join_handle = run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
            // Wait for the test to complete.
            let mut completed_test = rx.recv().unwrap();
            RunningTest { join_handle }.join(&mut completed_test);

            let fail_fast = match completed_test.result {
                TrIgnored | TrOk | TrBench(_) => false,
                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
            };

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;

            if fail_fast {
                return Ok(());
            }
        }
    } else {
        while pending > 0 || !remaining.is_empty() {
            // Top up the pool of in-flight tests to the concurrency limit.
            while pending < concurrency && !remaining.is_empty() {
                let (id, test) = remaining.pop_front().unwrap();
                let timeout = time::get_default_test_timeout();
                let desc = test.desc.clone();

                let event = TestEvent::TeWait(desc.clone());
                notify_about_test_event(event)?; //here no pad
                let join_handle =
                    run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
                running_tests.insert(id, RunningTest { join_handle });
                timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
                pending += 1;
            }

            // Wait for one result, emitting TeTimeout events for any test
            // that blows past its deadline while we wait.
            let mut res;
            loop {
                if let Some(timeout) = calc_timeout(&timeout_queue) {
                    res = rx.recv_timeout(timeout);
                    for test in get_timed_out_tests(&running_tests, &mut timeout_queue) {
                        let event = TestEvent::TeTimeout(test);
                        notify_about_test_event(event)?;
                    }

                    match res {
                        Err(RecvTimeoutError::Timeout) => {
                            // Result is not yet ready, continue waiting.
                        }
                        _ => {
                            // We've got a result, stop the loop.
                            break;
                        }
                    }
                } else {
                    res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
                    break;
                }
            }

            let mut completed_test = res.unwrap();
            let running_test = running_tests.remove(&completed_test.id).unwrap();
            running_test.join(&mut completed_test);

            let fail_fast = match completed_test.result {
                TrIgnored | TrOk | TrBench(_) => false,
                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
            };

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
            pending -= 1;

            if fail_fast {
                // Prevent remaining test threads from panicking
                std::mem::forget(rx);
                return Ok(());
            }
        }
    }

    if opts.bench_benchmarks {
        // All benchmarks run at the end, in serial.
        for (id, b) in filtered.benches {
            let event = TestEvent::TeWait(b.desc.clone());
            notify_about_test_event(event)?;
            let join_handle = run_test(opts, false, id, b, run_strategy, tx.clone());
            // Wait for the test to complete.
            let mut completed_test = rx.recv().unwrap();
            RunningTest { join_handle }.join(&mut completed_test);

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
        }
    }
    Ok(())
}
528
529pub fn filter_tests(opts: &TestOpts, tests: TestList) -> Vec<TestDescAndFn> {
530    let TestList { tests, order } = tests;
531    let mut filtered = tests;
532
533    // Remove tests that don't match the test filter.
534    if !opts.filters.is_empty() {
535        if opts.filter_exact && order == TestListOrder::Sorted {
536            // Let's say that `f` is the number of filters and `n` is the number
537            // of tests.
538            //
539            // The test array is sorted by name (guaranteed by the caller via
540            // TestListOrder::Sorted), so use binary search for O(f log n)
541            // exact-match lookups instead of an O(n) linear scan.
542            //
543            // This is important for Miri, where the interpreted execution makes
544            // the linear scan very expensive.
545            filtered = filter_exact_match(filtered, &opts.filters);
546        } else {
547            filtered.retain(|test| {
548                let test_name = test.desc.name.as_slice();
549                opts.filters.iter().any(|filter| {
550                    if opts.filter_exact {
551                        test_name == filter.as_str()
552                    } else {
553                        test_name.contains(filter.as_str())
554                    }
555                })
556            });
557        }
558    }
559
560    // Skip tests that match any of the skip filters
561    //
562    // After exact positive filtering above, the filtered set is small, so a
563    // linear scan is acceptable even under Miri.
564    if !opts.skip.is_empty() {
565        filtered.retain(|test| {
566            let name = test.desc.name.as_slice();
567            !opts.skip.iter().any(|sf| {
568                if opts.filter_exact { name == sf.as_str() } else { name.contains(sf.as_str()) }
569            })
570        });
571    }
572
573    // Excludes #[should_panic] tests
574    if opts.exclude_should_panic {
575        filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
576    }
577
578    // maybe unignore tests
579    match opts.run_ignored {
580        RunIgnored::Yes => {
581            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
582        }
583        RunIgnored::Only => {
584            filtered.retain(|test| test.desc.ignore);
585            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
586        }
587        RunIgnored::No => {}
588    }
589
590    filtered
591}
592
593/// Extract tests whose names exactly match one of the given `filters`, using
594/// binary search on the (assumed sorted) test list.
595fn filter_exact_match(mut tests: Vec<TestDescAndFn>, filters: &[String]) -> Vec<TestDescAndFn> {
596    // Binary search for each filter in the sorted test list.
597    let mut indexes: Vec<usize> = filters
598        .iter()
599        .filter_map(|f| tests.binary_search_by(|t| t.desc.name.as_slice().cmp(f.as_str())).ok())
600        .collect();
601    indexes.sort_unstable();
602    indexes.dedup();
603
604    // Extract matching tests. Process indexes in descending order so that
605    // swap_remove (which replaces the removed element with the last) does not
606    // invalidate indexes we haven't visited yet.
607    let mut result = Vec::with_capacity(indexes.len());
608    for &idx in indexes.iter().rev() {
609        result.push(tests.swap_remove(idx));
610    }
611    // Reverse to restore the original sorted order, since we extracted the
612    // matching tests in descending index order.
613    result.reverse();
614    result
615}
616
617pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
618    // convert benchmarks to tests, if we're not benchmarking them
619    tests
620        .into_iter()
621        .map(|x| {
622            let testfn = match x.testfn {
623                DynBenchFn(benchfn) => DynBenchAsTestFn(benchfn),
624                StaticBenchFn(benchfn) => StaticBenchAsTestFn(benchfn),
625                f => f,
626            };
627            TestDescAndFn { desc: x.desc, testfn }
628        })
629        .collect()
630}
631
/// Runs a single test or benchmark, delivering its `CompletedTest` over
/// `monitor_ch`.
///
/// Returns a join handle when the test was spawned onto its own thread, and
/// `None` when it completed synchronously (ignored tests, benchmarks,
/// thread-less platforms, or a hit thread limit).
pub fn run_test(
    opts: &TestOpts,
    force_ignore: bool,
    id: TestId,
    test: TestDescAndFn,
    strategy: RunStrategy,
    monitor_ch: Sender<CompletedTest>,
) -> Option<thread::JoinHandle<()>> {
    let TestDescAndFn { desc, testfn } = test;

    // Emscripten can catch panics but other wasm targets cannot
    let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No
        && (cfg!(target_family = "wasm") || cfg!(target_os = "zkvm"))
        && !cfg!(target_os = "emscripten");

    // Report ignored tests immediately without spawning anything.
    if force_ignore || desc.ignore || ignore_because_no_process_support {
        let message = CompletedTest::new(id, desc, TrIgnored, None, Vec::new());
        monitor_ch.send(message).unwrap();
        return None;
    }

    match testfn.into_runnable() {
        Runnable::Test(runnable_test) => {
            // A dynamic test closure cannot be re-invoked from a subprocess.
            if runnable_test.is_dynamic() {
                match strategy {
                    RunStrategy::InProcess => (),
                    _ => panic!("Cannot run dynamic test fn out-of-process"),
                };
            }

            let name = desc.name.clone();
            let nocapture = opts.nocapture;
            let time_options = opts.time_options;
            let bench_benchmarks = opts.bench_benchmarks;

            let runtest = move || match strategy {
                RunStrategy::InProcess => run_test_in_process(
                    id,
                    desc,
                    nocapture,
                    time_options.is_some(),
                    runnable_test,
                    monitor_ch,
                    time_options,
                ),
                RunStrategy::SpawnPrimary => spawn_test_subprocess(
                    id,
                    desc,
                    nocapture,
                    time_options.is_some(),
                    monitor_ch,
                    time_options,
                    bench_benchmarks,
                ),
            };

            // If the platform is single-threaded we're just going to run
            // the test synchronously, regardless of the concurrency
            // level.
            let supports_threads = !cfg!(target_os = "emscripten")
                && !cfg!(target_family = "wasm")
                && !cfg!(target_os = "zkvm");
            if supports_threads {
                let cfg = thread::Builder::new().name(name.as_slice().to_owned());
                // The Arc<Mutex<Option<_>>> wrapper lets the fallback path
                // below reclaim and invoke the closure if spawning fails.
                let mut runtest = Arc::new(Mutex::new(Some(runtest)));
                let runtest2 = runtest.clone();
                match cfg.spawn(move || runtest2.lock().unwrap().take().unwrap()()) {
                    Ok(handle) => Some(handle),
                    Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
                        // `ErrorKind::WouldBlock` means hitting the thread limit on some
                        // platforms, so run the test synchronously here instead.
                        Arc::get_mut(&mut runtest).unwrap().get_mut().unwrap().take().unwrap()();
                        None
                    }
                    #[allow(unused_variables)]
                    Err(e) => panic!("failed to spawn thread to run test: {e}"),
                }
            } else {
                runtest();
                None
            }
        }
        Runnable::Bench(runnable_bench) => {
            // Benchmarks aren't expected to panic, so we run them all in-process.
            runnable_bench.run(id, &desc, &monitor_ch, opts.nocapture);
            None
        }
    }
}
721
/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
///
/// NOTE(review): presumably matched by symbol name by std's backtrace
/// shortening, hence `#[inline(never)]` — do not rename; confirm against
/// std's backtrace code.
#[inline(never)]
fn __rust_begin_short_backtrace<T, F: FnOnce() -> T>(f: F) -> T {
    let result = f();

    // prevent this frame from being tail-call optimised away
    black_box(result)
}
730
/// Runs `runnable_test` on the current thread, capturing its output and any
/// panic, then sends the finished `CompletedTest` over `monitor_ch`.
fn run_test_in_process(
    id: TestId,
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    runnable_test: RunnableTest,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
) {
    // Buffer for capturing standard I/O
    let data = Arc::new(Mutex::new(Vec::new()));

    if !nocapture {
        io::set_output_capture(Some(data.clone()));
    }

    let start = report_time.then(Instant::now);
    // Catch the unwind so a panicking test is turned into a result rather
    // than tearing down the runner thread.
    let result = fold_err(catch_unwind(AssertUnwindSafe(|| runnable_test.run())));
    let exec_time = start.map(|start| {
        let duration = start.elapsed();
        TestExecTime(duration)
    });

    // Stop capturing before reporting, so runner output goes to the console.
    io::set_output_capture(None);

    // Determine whether the test passed or failed, by comparing its panic
    // payload (if any) with its `ShouldPanic` value, and by checking for
    // fatal timeout.
    let test_result =
        calc_result(&desc, result.err().as_deref(), time_opts.as_ref(), exec_time.as_ref());
    // Recover the captured output even if the panicking test poisoned the lock.
    let stdout = data.lock().unwrap_or_else(|e| e.into_inner()).to_vec();
    let message = CompletedTest::new(id, desc, test_result, exec_time, stdout);
    monitor_ch.send(message).unwrap();
}
765
/// Flattens a `catch_unwind`-style result whose success payload is itself a
/// `Result`, boxing an inner `Err(e)` so both failure paths share the
/// panic-payload error type `Box<dyn Any + Send>`.
fn fold_err<T, E>(
    result: Result<Result<T, E>, Box<dyn Any + Send>>,
) -> Result<T, Box<dyn Any + Send>>
where
    E: Send + 'static,
{
    // Outer Err (a panic payload) passes straight through; an inner Err is
    // boxed into the same payload type.
    result.and_then(|inner| inner.map_err(|e| Box::new(e) as Box<dyn Any + Send>))
}
778
/// Re-runs the current executable as a child process that executes exactly
/// one test (selected via `SECONDARY_TEST_INVOKER_VAR`), then reports the
/// child's combined output and exit-status-derived result over `monitor_ch`.
fn spawn_test_subprocess(
    id: TestId,
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
    bench_benchmarks: bool,
) {
    let (result, test_output, exec_time) = (|| {
        // args[0] is taken to be the path of the test binary itself.
        let args = env::args().collect::<Vec<_>>();
        let current_exe = &args[0];

        let mut command = Command::new(current_exe);
        command.env(SECONDARY_TEST_INVOKER_VAR, desc.name.as_slice());
        if bench_benchmarks {
            command.env(SECONDARY_TEST_BENCH_BENCHMARKS_VAR, "1");
        }
        if nocapture {
            // With --nocapture the child writes straight to our console.
            command.stdout(process::Stdio::inherit());
            command.stderr(process::Stdio::inherit());
        }

        let start = report_time.then(Instant::now);
        let output = match command.output() {
            Ok(out) => out,
            Err(e) => {
                // Spawning failed: synthesize a failure whose "output" is
                // the spawn error itself.
                let err = format!("Failed to spawn {} as child for test: {:?}", args[0], e);
                return (TrFailed, err.into_bytes(), None);
            }
        };
        let exec_time = start.map(|start| {
            let duration = start.elapsed();
            TestExecTime(duration)
        });

        // Merge the child's stdout and stderr into one buffer, separated by
        // a delimiter line naming the test.
        let std::process::Output { stdout, stderr, status } = output;
        let mut test_output = stdout;
        formatters::write_stderr_delimiter(&mut test_output, &desc.name);
        test_output.extend_from_slice(&stderr);

        let result =
            get_result_from_exit_code(&desc, status, time_opts.as_ref(), exec_time.as_ref());
        (result, test_output, exec_time)
    })();

    let message = CompletedTest::new(id, desc, result, exec_time, test_output);
    monitor_ch.send(message).unwrap();
}
828
/// Body of the secondary (spawned) test process: runs the single selected
/// test and terminates the process — `exit(TR_OK)` on success, `abort()` on
/// failure. Never returns.
fn run_test_in_spawned_subprocess(desc: TestDesc, runnable_test: RunnableTest) -> ! {
    let builtin_panic_hook = panic::take_hook();
    // Shared terminator used both by the panic hook (test panicked) and by
    // the fallthrough below (test returned normally).
    let record_result = Arc::new(move |panic_info: Option<&'_ PanicHookInfo<'_>>| {
        let test_result = calc_result(&desc, panic_info.map(|info| info.payload()), None, None);

        // We don't support serializing TrFailedMsg, so just
        // print the message out to stderr.
        if let TrFailedMsg(msg) = &test_result {
            eprintln!("{msg}");
        }

        if let Some(info) = panic_info {
            builtin_panic_hook(info);
        }

        // The parent process reads the verdict back from the exit status.
        if let TrOk = test_result {
            process::exit(test_result::TR_OK);
        } else {
            process::abort();
        }
    });
    let record_result2 = record_result.clone();
    panic::set_hook(Box::new(move |info| record_result2(Some(info))));
    #[allow(unused_variables)]
    if let Err(message) = runnable_test.run() {
        // Route a returned error through the panic hook installed above.
        panic!("{}", message);
    }
    record_result(None);
    unreachable!("panic=abort callback should have exited the process")
}