//! Write your own tests and benchmarks that look and behave like built-in tests!
//!
//! This is a simple and small test harness that mimics the original `libtest`
//! (used by `cargo test`/`rustc --test`). That means: all output looks pretty
//! much like `cargo test` and most CLI arguments are understood and used. With
//! that plumbing work out of the way, your test runner can focus on the actual
//! testing.
//!
//! For a small real-world example, see [`examples/tidy.rs`][1].
//!
//! [1]: https://github.com/LukasKalbertodt/libtest-mimic/blob/master/examples/tidy.rs
//!
//! # Usage
//!
//! To use this, you most likely want to add a manual `[[test]]` section to
//! `Cargo.toml` and set `harness = false`. For example:
//!
//! ```toml
//! [[test]]
//! name = "mytest"
//! path = "tests/mytest.rs"
//! harness = false
//! ```
//!
//! And in `tests/mytest.rs` you would call [`run`] in the `main` function:
//!
//! ```no_run
//! use libtest_mimic::{Arguments, Trial};
//!
//! // Parse command line arguments
//! let args = Arguments::from_args();
//!
//! // Create a list of tests and/or benchmarks (in this case: two dummy tests).
//! let tests = vec![
//!     Trial::test("succeeding_test", move || Ok(())),
//!     Trial::test("failing_test", move || Err("Woops".into())),
//! ];
//!
//! // Run all tests and exit the application appropriately.
//! libtest_mimic::run(&args, tests).exit();
//! ```
//!
//! In a real test runner, the closures would of course perform actual checks
//! rather than returning `Ok` or `Err` directly. See [`Trial::test`] for more
//! information on how to define a test. You can list all your tests manually,
//! but in many cases it is useful to generate them, e.g. one test per file in
//! a directory, as shown below.
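//!
//! For example, generating one test per file in a directory could look like
//! this (a minimal sketch; `tests/inputs` and the per-file check are
//! placeholders):
//!
//! ```no_run
//! use libtest_mimic::{Arguments, Trial};
//!
//! let args = Arguments::from_args();
//!
//! let mut tests = Vec::new();
//! for entry in std::fs::read_dir("tests/inputs").unwrap() {
//!     let path = entry.unwrap().path();
//!     let name = path.display().to_string();
//!     tests.push(Trial::test(name, move || {
//!         // Placeholder check: the file must be readable UTF-8.
//!         std::fs::read_to_string(&path)?;
//!         Ok(())
//!     }));
//! }
//!
//! libtest_mimic::run(&args, tests).exit();
//! ```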
//!
//! You can then execute `cargo test --test mytest` to run these tests. To see
//! the CLI arguments supported by this crate, run `cargo test --test mytest -- -h`.
//!
//!
//! # Known limitations and differences to the official test harness
//!
//! `libtest-mimic` works on a best-effort basis: it tries to be as close to
//! `libtest` as possible, but there are differences for a variety of reasons.
//! For example, some rarely used features might not be implemented, some
//! features are extremely difficult to implement, and removing minor,
//! unimportant differences is just not worth the hassle.
//!
//! Some of the notable differences:
//!
//! - Output capture and `--nocapture`: simply not supported. The official
//!   `libtest` uses internal `std` functions to temporarily redirect output.
//!   `libtest-mimic` cannot use those. See [this issue][capture] for more
//!   information.
//! - `--format=json|junit`: these output formats are not supported.
//!
//! [capture]: https://github.com/LukasKalbertodt/libtest-mimic/issues/9

use std::{process, sync::mpsc, fmt, time::Instant};

mod args;
mod printer;

use printer::Printer;
use threadpool::ThreadPool;

pub use crate::args::{Arguments, ColorSetting, FormatSetting};


/// A single test or benchmark.
///
/// `libtest` often treats benchmarks as "tests", which is a bit confusing. So
/// in this library, it is called a "trial".
///
/// A trial is created via [`Trial::test`] or [`Trial::bench`]. The trial's
/// `name` is printed and used for filtering. The `runner` is called when the
/// test/benchmark is executed to determine its outcome. If `runner` panics,
/// the trial is considered "failed". If you need the behavior of
/// `#[should_panic]`, you have to catch the panic yourself. You likely want to
/// compare the panic payload to an expected value anyway.
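///
/// For example, a `#[should_panic]`-like check can be written by catching the
/// panic manually (a minimal sketch; the panicking code and the expected
/// message are placeholders):
///
/// ```
/// use libtest_mimic::Trial;
///
/// let trial = Trial::test("panics_on_empty_input", || {
///     let result = std::panic::catch_unwind(|| {
///         panic!("empty input"); // stand-in for the code under test
///     });
///     match result {
///         Err(e) if e.downcast_ref::<&str>() == Some(&"empty input") => Ok(()),
///         Err(_) => Err("panicked with unexpected payload".into()),
///         Ok(_) => Err("expected a panic, but none occurred".into()),
///     }
/// });
/// ```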
pub struct Trial {
    runner: Box<dyn FnOnce(bool) -> Outcome + Send>,
    info: TestInfo,
}

impl Trial {
    /// Creates a (non-benchmark) test with the given name and runner.
    ///
    /// The runner returning `Ok(())` is interpreted as the test passing. If the
    /// runner returns `Err(_)`, the test is considered failed.
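    ///
    /// For example (a minimal sketch; the parsing check stands in for real
    /// test logic):
    ///
    /// ```
    /// use libtest_mimic::Trial;
    ///
    /// let trial = Trial::test("parses_valid_number", || {
    ///     match "42".parse::<i32>() {
    ///         Ok(42) => Ok(()),
    ///         Ok(n) => Err(format!("expected 42, got {n}").into()),
    ///         Err(e) => Err(e.into()),
    ///     }
    /// });
    /// ```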
    pub fn test<R>(name: impl Into<String>, runner: R) -> Self
    where
        R: FnOnce() -> Result<(), Failed> + Send + 'static,
    {
        Self {
            runner: Box::new(move |_test_mode| match runner() {
                Ok(()) => Outcome::Passed,
                Err(failed) => Outcome::Failed(failed),
            }),
            info: TestInfo {
                name: name.into(),
                kind: String::new(),
                is_ignored: false,
                is_bench: false,
            },
        }
    }

    /// Creates a benchmark with the given name and runner.
    ///
    /// If the runner's parameter `test_mode` is `true`, the runner function
    /// should run all code just once, without measuring, just to make sure it
    /// does not panic. If the parameter is `false`, it should perform the
    /// actual benchmark. If `test_mode` is `true`, you may return `Ok(None)`,
    /// but if it's `false`, you have to return a `Measurement`, or else the
    /// benchmark is considered a failure.
    ///
    /// `test_mode` is `true` if neither `--bench` nor `--test` is set, and
    /// `false` when `--bench` is set. If `--test` is set, benchmarks are not
    /// run at all. (The two flags cannot be set at the same time.)
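    ///
    /// For example (a minimal sketch: the timing loop is a placeholder and not
    /// a statistically sound benchmark; `variance` is simply left at 0):
    ///
    /// ```
    /// use libtest_mimic::{Measurement, Trial};
    /// use std::time::Instant;
    ///
    /// let bench = Trial::bench("sum_to_1000", |test_mode| {
    ///     let work = || (0u64..1000).sum::<u64>();
    ///     if test_mode {
    ///         // Just make sure the code runs without panicking.
    ///         work();
    ///         Ok(None)
    ///     } else {
    ///         let iterations: u64 = 1000;
    ///         let start = Instant::now();
    ///         for _ in 0..iterations {
    ///             work();
    ///         }
    ///         let avg = start.elapsed().as_nanos() as u64 / iterations;
    ///         Ok(Some(Measurement { avg, variance: 0 }))
    ///     }
    /// });
    /// ```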
    pub fn bench<R>(name: impl Into<String>, runner: R) -> Self
    where
        R: FnOnce(bool) -> Result<Option<Measurement>, Failed> + Send + 'static,
    {
        Self {
            runner: Box::new(move |test_mode| match runner(test_mode) {
                Err(failed) => Outcome::Failed(failed),
                Ok(_) if test_mode => Outcome::Passed,
                Ok(Some(measurement)) => Outcome::Measured(measurement),
                Ok(None) => {
                    Outcome::Failed("bench runner returned `Ok(None)` in bench mode".into())
                }
            }),
            info: TestInfo {
                name: name.into(),
                kind: String::new(),
                is_ignored: false,
                is_bench: true,
            },
        }
    }

    /// Sets the "kind" of this test/benchmark. If this string is not
    /// empty, it is printed in brackets before the test name (e.g.
    /// `test [my-kind] test_name`). (Default: *empty*)
    ///
    /// This is the only extension to the original libtest.
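    ///
    /// For example (the kind and name are placeholders):
    ///
    /// ```
    /// use libtest_mimic::Trial;
    ///
    /// // Printed as `test [fmt] src/lib.rs ...`.
    /// let trial = Trial::test("src/lib.rs", || Ok(()))
    ///     .with_kind("fmt");
    /// ```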
    pub fn with_kind(self, kind: impl Into<String>) -> Self {
        Self {
            info: TestInfo {
                kind: kind.into(),
                ..self.info
            },
            ..self
        }
    }

    /// Sets whether or not this test is considered "ignored". (Default: `false`)
    ///
    /// With the built-in test suite, you can annotate tests with `#[ignore]` to
    /// not execute them by default (for example because they take a long time
    /// or require a special environment). If the `--ignored` flag is set,
    /// ignored tests are executed, too.
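    ///
    /// For example:
    ///
    /// ```
    /// use libtest_mimic::Trial;
    ///
    /// // Skipped by default; executed when `--ignored` or
    /// // `--include-ignored` is passed.
    /// let trial = Trial::test("expensive_test", || Ok(()))
    ///     .with_ignored_flag(true);
    /// ```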
    pub fn with_ignored_flag(self, is_ignored: bool) -> Self {
        Self {
            info: TestInfo {
                is_ignored,
                ..self.info
            },
            ..self
        }
    }

    /// Returns the name of this trial.
    pub fn name(&self) -> &str {
        &self.info.name
    }

    /// Returns the kind of this trial. If you have not set a kind, this is an
    /// empty string.
    pub fn kind(&self) -> &str {
        &self.info.kind
    }

    /// Returns whether this trial has been marked as *ignored*.
    pub fn has_ignored_flag(&self) -> bool {
        self.info.is_ignored
    }

    /// Returns `true` iff this trial is a test (as opposed to a benchmark).
    pub fn is_test(&self) -> bool {
        !self.info.is_bench
    }

    /// Returns `true` iff this trial is a benchmark (as opposed to a test).
    pub fn is_bench(&self) -> bool {
        self.info.is_bench
    }
}

impl fmt::Debug for Trial {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        struct OpaqueRunner;
        impl fmt::Debug for OpaqueRunner {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.write_str("<runner>")
            }
        }

        f.debug_struct("Trial")
            .field("runner", &OpaqueRunner)
            .field("name", &self.info.name)
            .field("kind", &self.info.kind)
            .field("is_ignored", &self.info.is_ignored)
            .field("is_bench", &self.info.is_bench)
            .finish()
    }
}

#[derive(Debug)]
struct TestInfo {
    name: String,
    kind: String,
    is_ignored: bool,
    is_bench: bool,
}

/// Output of a benchmark.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Measurement {
    /// Average time in ns.
    pub avg: u64,

    /// Variance in ns.
    pub variance: u64,
}

/// Indicates that a test/benchmark has failed. Optionally carries a message.
///
/// You usually want to use the `From` impl of this type, which allows you to
/// convert any `T: fmt::Display` (e.g. `String`, `&str`, ...) into `Failed`.
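///
/// For example:
///
/// ```
/// use libtest_mimic::Failed;
///
/// let failed: Failed = "assertion failed: foo was empty".into();
/// assert_eq!(failed.message(), Some("assertion failed: foo was empty"));
/// ```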
#[derive(Debug, Clone)]
pub struct Failed {
    msg: Option<String>,
}

impl Failed {
    /// Creates an instance without message.
    pub fn without_message() -> Self {
        Self { msg: None }
    }

    /// Returns the message of this instance.
    pub fn message(&self) -> Option<&str> {
        self.msg.as_deref()
    }
}

impl<M: std::fmt::Display> From<M> for Failed {
    fn from(msg: M) -> Self {
        Self {
            msg: Some(msg.to_string()),
        }
    }
}


/// The outcome of performing a test/benchmark.
#[derive(Debug, Clone)]
enum Outcome {
    /// The test passed.
    Passed,

    /// The test or benchmark failed.
    Failed(Failed),

    /// The test or benchmark was ignored.
    Ignored,

    /// The benchmark was successfully run.
    Measured(Measurement),
}
/// Contains information about the entire test run. Is returned by [`run`].
///
/// This type is marked as `#[must_use]`. Usually, you just call
/// [`exit()`][Conclusion::exit] on the result of `run` to exit the application
/// with the correct exit code. But you can also store this value and inspect
/// its data.
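///
/// For example, to inspect the results instead of exiting immediately (a
/// minimal sketch with a single dummy test):
///
/// ```no_run
/// use libtest_mimic::{Arguments, Trial};
///
/// let args = Arguments::from_args();
/// let conclusion = libtest_mimic::run(&args, vec![Trial::test("foo", || Ok(()))]);
/// eprintln!("{} passed, {} failed", conclusion.num_passed, conclusion.num_failed);
/// conclusion.exit_if_failed();
/// ```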
#[derive(Clone, Debug, PartialEq, Eq)]
#[must_use = "Call `exit()` or `exit_if_failed()` to set the correct return code"]
pub struct Conclusion {
    /// Number of tests and benchmarks that were filtered out (either by the
    /// filter-in pattern or by `--skip` arguments).
    pub num_filtered_out: u64,

    /// Number of passed tests.
    pub num_passed: u64,

    /// Number of failed tests and benchmarks.
    pub num_failed: u64,

    /// Number of ignored tests and benchmarks.
    pub num_ignored: u64,

    /// Number of benchmarks that successfully ran.
    pub num_measured: u64,
}

impl Conclusion {
    /// Exits the application with an appropriate error code (0 if all tests
    /// have passed, 101 if there have been failures).
    pub fn exit(&self) -> ! {
        self.exit_if_failed();
        process::exit(0);
    }

    /// Exits the application with error code 101 if there were any failures.
    /// Otherwise, returns normally.
    pub fn exit_if_failed(&self) {
        if self.has_failed() {
            process::exit(101)
        }
    }

    /// Returns whether there have been any failures.
    pub fn has_failed(&self) -> bool {
        self.num_failed > 0
    }

    fn empty() -> Self {
        Self {
            num_filtered_out: 0,
            num_passed: 0,
            num_failed: 0,
            num_ignored: 0,
            num_measured: 0,
        }
    }
}

impl Arguments {
    /// Returns `true` if the given test should be ignored.
    fn is_ignored(&self, test: &Trial) -> bool {
        (test.info.is_ignored && !self.ignored && !self.include_ignored)
            || (test.info.is_bench && self.test)
            || (!test.info.is_bench && self.bench)
    }

    fn is_filtered_out(&self, test: &Trial) -> bool {
        let test_name = &test.info.name;

        // If a filter was specified, apply it.
        if let Some(filter) = &self.filter {
            match self.exact {
                true if test_name != filter => return true,
                false if !test_name.contains(filter) => return true,
                _ => {}
            };
        }

        // If any skip patterns were specified, test against all of them.
        for skip_filter in &self.skip {
            match self.exact {
                true if test_name == skip_filter => return true,
                false if test_name.contains(skip_filter) => return true,
                _ => {}
            }
        }

        if self.ignored && !test.info.is_ignored {
            return true;
        }

        false
    }
}

/// Runs all given tests.
///
/// This is the central function of this crate. It provides the framework for
/// the testing harness. It does all the printing and housekeeping.
///
/// The returned value contains useful information about the test run. See
/// [`Conclusion`] for more details. If `--list` was specified, a list is
/// printed and a dummy `Conclusion` is returned.
pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion {
    let start_instant = Instant::now();
    let mut conclusion = Conclusion::empty();

    // Apply filtering.
    if args.filter.is_some() || !args.skip.is_empty() || args.ignored {
        let len_before = tests.len() as u64;
        tests.retain(|test| !args.is_filtered_out(test));
        conclusion.num_filtered_out = len_before - tests.len() as u64;
    }
    let tests = tests;

    // Create the printer which is used for all output.
    let mut printer = printer::Printer::new(args, &tests);

    // If `--list` is specified, just print the list and return.
    if args.list {
        printer.print_list(&tests, args.ignored);
        return Conclusion::empty();
    }

    // Print the number of tests.
    printer.print_title(tests.len() as u64);

    let mut failed_tests = Vec::new();
    let mut handle_outcome = |outcome: Outcome, test: TestInfo, printer: &mut Printer| {
        printer.print_single_outcome(&outcome);

        // Handle the outcome.
        match outcome {
            Outcome::Passed => conclusion.num_passed += 1,
            Outcome::Failed(failed) => {
                failed_tests.push((test, failed.msg));
                conclusion.num_failed += 1;
            },
            Outcome::Ignored => conclusion.num_ignored += 1,
            Outcome::Measured(_) => conclusion.num_measured += 1,
        }
    };

    // Execute all tests.
    let test_mode = !args.bench;
    if args.test_threads == Some(1) {
        // Run tests sequentially in the main thread.
        for test in tests {
            // Print `test foo    ...`, run the test, then print the outcome
            // on the same line.
            printer.print_test(&test.info);
            let outcome = if args.is_ignored(&test) {
                Outcome::Ignored
            } else {
                run_single(test.runner, test_mode)
            };
            handle_outcome(outcome, test.info, &mut printer);
        }
    } else {
        // Run tests in a thread pool.
        let pool = ThreadPool::default();
        let (sender, receiver) = mpsc::channel();

        let num_tests = tests.len();
        for test in tests {
            if args.is_ignored(&test) {
                sender.send((Outcome::Ignored, test.info)).unwrap();
            } else {
                let sender = sender.clone();
                pool.execute(move || {
                    // It's fine to ignore the result of sending. If the
                    // receiver has hung up, everything will wind down soon
                    // anyway.
                    let outcome = run_single(test.runner, test_mode);
                    let _ = sender.send((outcome, test.info));
                });
            }
        }

        for (outcome, test_info) in receiver.iter().take(num_tests) {
            // In multithreaded mode, we only print the start of the line
            // after the test has run, as otherwise it would lead to terribly
            // interleaved output.
            printer.print_test(&test_info);
            handle_outcome(outcome, test_info, &mut printer);
        }
    }

    // Print failures if there were any, and the final summary.
    if !failed_tests.is_empty() {
        printer.print_failures(&failed_tests);
    }

    printer.print_summary(&conclusion, start_instant.elapsed());

    conclusion
}

/// Runs the given runner, catching any panics and treating them as a failed test.
fn run_single(runner: Box<dyn FnOnce(bool) -> Outcome + Send>, test_mode: bool) -> Outcome {
    use std::panic::{catch_unwind, AssertUnwindSafe};

    catch_unwind(AssertUnwindSafe(move || runner(test_mode))).unwrap_or_else(|e| {
        // The `panic` information is just an `Any` object representing the
        // value the panic was invoked with. For most panics (which use
        // `panic!` like `println!`), this is either `&str` or `String`.
        let payload = e.downcast_ref::<String>()
            .map(|s| s.as_str())
            .or(e.downcast_ref::<&str>().map(|s| *s));

        let msg = match payload {
            Some(payload) => format!("test panicked: {payload}"),
            None => "test panicked".to_string(),
        };
        Outcome::Failed(msg.into())
    })
}