1 # -*- Mode: cperl; cperl-indent-level: 4 -*-
6 use Test
::Harness
::Straps
;
7 use Test
::Harness
::Assert
;
16 @ISA @EXPORT @EXPORT_OK
17 $Verbose $Switches $Debug
18 $verbose $switches $debug
27 eval q{use Time::HiRes 'time'};
28 $has_time_hires = !$@;
33 Test::Harness - Run Perl standard test scripts with statistics
43 # Backwards compatibility for exportable variable names.
45 *switches
= *Switches
;
48 $ENV{HARNESS_ACTIVE
} = 1;
49 $ENV{HARNESS_VERSION
} = $VERSION;
53 delete $ENV{HARNESS_ACTIVE
};
54 delete $ENV{HARNESS_VERSION
};
57 my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR
};
59 # Stolen from Params::Util
61 (defined $_[0] and ! ref $_[0] and $_[0] =~ m/^[^\W\d]\w*(?:::\w+)*$/s) ? $_[0] : undef;
65 if ( $ENV{HARNESS_STRAPS_CLASS
} ) {
66 die 'Set HARNESS_STRAP_CLASS, singular, not HARNESS_STRAPS_CLASS';
68 my $HARNESS_STRAP_CLASS = $ENV{HARNESS_STRAP_CLASS
} || 'Test::Harness::Straps';
69 if ( $HARNESS_STRAP_CLASS =~ /\.pm$/ ) {
70 # "Class" is actually a filename, that should return the
71 # class name as its true return value.
72 $HARNESS_STRAP_CLASS = require $HARNESS_STRAP_CLASS;
73 if ( !_CLASS
($HARNESS_STRAP_CLASS) ) {
74 die "HARNESS_STRAP_CLASS '$HARNESS_STRAP_CLASS' is not a valid class name";
78 # It is a class name within the current @INC
79 if ( !_CLASS
($HARNESS_STRAP_CLASS) ) {
80 die "HARNESS_STRAP_CLASS '$HARNESS_STRAP_CLASS' is not a valid class name";
82 eval "require $HARNESS_STRAP_CLASS";
85 if ( !$HARNESS_STRAP_CLASS->isa('Test::Harness::Straps') ) {
86 die "HARNESS_STRAP_CLASS '$HARNESS_STRAP_CLASS' must be a Test::Harness::Straps subclass";
89 $Strap = $HARNESS_STRAP_CLASS->new;
# Accessor for the shared Test::Harness::Straps instance (or configured
# subclass) that analyzes every test file run by this harness.
sub strap {
    return $Strap;
}
94 @EXPORT = qw(&runtests);
95 @EXPORT_OK = qw(&execute_tests $verbose $switches);
97 $Verbose = $ENV{HARNESS_VERBOSE
} || 0;
98 $Debug = $ENV{HARNESS_DEBUG
} || 0;
100 $Columns = $ENV{HARNESS_COLUMNS
} || $ENV{COLUMNS
} || 80;
101 $Columns--; # Some shells have trouble with a full line of text.
102 $Timer = $ENV{HARNESS_TIMER
} || 0;
108 runtests(@test_files);
112 B<STOP!> If all you want to do is write a test script, consider
113 using Test::Simple. Test::Harness is the module that reads the
114 output from Test::Simple, Test::More and other modules based on
115 Test::Builder. You don't need to know about Test::Harness to use
118 Test::Harness runs tests and expects output from the test in a
119 certain format. That format is called TAP, the Test Anything
120 Protocol. It is defined in L<Test::Harness::TAP>.
122 C<Test::Harness::runtests(@tests)> runs all the testscripts named
123 as arguments and checks standard output for the expected strings
126 The F<prove> utility is a thin wrapper around Test::Harness.
130 Test::Harness will honor the C<-T> or C<-t> in the #! line on your
131 test files. So if you begin a test with:
135 the test will be run with taint mode on.
137 =head2 Configuration variables.
139 These variables can be used to configure the behavior of
140 Test::Harness. They are exported on request.
144 =item C<$Test::Harness::Verbose>
146 The package variable C<$Test::Harness::Verbose> is exportable and can be
147 used to let C<runtests()> display the standard output of the script
148 without altering the behavior otherwise. The F<prove> utility's C<-v>
151 =item C<$Test::Harness::switches>
153 The package variable C<$Test::Harness::switches> is exportable and can be
154 used to set perl command line options used for running the test
155 script(s). The default value is C<-w>. It overrides C<HARNESS_PERL_SWITCHES>.
157 =item C<$Test::Harness::Timer>
159 If set to true, and C<Time::HiRes> is available, print elapsed seconds
160 after each test file.
167 When tests fail, analyze the summary report:
169 t/base..............ok
170 t/nonumbers.........ok
171 t/ok................ok
172 t/test-harness......ok
173 t/waterloo..........dubious
174 Test returned status 3 (wstat 768, 0x300)
175 DIED. FAILED tests 1, 3, 5, 7, 9, 11, 13, 15, 17, 19
176 Failed 10/20 tests, 50.00% okay
177 Failed Test Stat Wstat Total Fail List of Failed
178 ---------------------------------------------------------------
179 t/waterloo.t 3 768 20 10 1 3 5 7 9 11 13 15 17 19
180 Failed 1/5 test scripts, 80.00% okay. 10/44 subtests failed, 77.27% okay.
182 Everything passed but F<t/waterloo.t>. It failed 10 of 20 tests and
183 exited with non-zero status indicating something dubious happened.
185 The columns in the summary report mean:
191 The test file which failed.
195 If the test exited with non-zero, this is its exit status.
199 The wait status of the test.
203 Total number of tests expected to run.
207 Number which failed, either from "not ok" or because they never ran.
209 =item B<List of Failed>
211 A list of the tests which failed. Successive failures may be
212 abbreviated (i.e. 15-20 to indicate that tests 15, 16, 17, 18, 19 and
220 The following functions are available.
222 =head2 runtests( @test_files )
224 This runs all the given I<@test_files> and divines whether they passed
225 or failed based on their output to STDOUT (details above). It prints
226 out each individual test which failed along with a summary report and
227 and how long it all took.
229 It returns true if everything was ok. Otherwise it will C<die()> with
230 one of the messages in the DIAGNOSTICS section.
239 my ($tot, $failedtests,$todo_passed) = execute_tests
(tests
=> \
@tests);
240 print get_results
($tot, $failedtests,$todo_passed);
242 my $ok = _all_ok
($tot);
244 assert
(($ok xor keys %$failedtests),
245 q{ok status jives with $failedtests});
248 die("Failed $tot->{bad}/$tot->{tests} test programs. " .
249 "@{[$tot->{max} - $tot->{ok}]}/$tot->{max} subtests failed.\n");
255 # my $ok = _all_ok(\%tot);
256 # Tells you if this test run is overall successful or not.
261 return $tot->{bad
} == 0 && ($tot->{max
} || $tot->{skipped
}) ? 1 : 0;
264 # Returns all the files in a directory. This is shorthand for backwards
265 # compatibility on systems where C<glob()> doesn't work right.
271 my @f = readdir DIRH
;
277 =head2 execute_tests( tests => \@test_files, out => \*FH )
279 Runs all the given C<@test_files> (just like C<runtests()>) but
280 doesn't generate the final report. During testing, progress
281 information will be written to the currently selected output
282 filehandle (usually C<STDOUT>), or to the filehandle given by the
283 C<out> parameter. The I<out> is optional.
285 Returns a list of two values, C<$total> and C<$failed>, describing the
286 results. C<$total> is a hash ref summary of all the tests run. Its
287 keys and values are this:
289 bonus Number of individual todo tests unexpectedly passed
290 max Number of individual tests ran
291 ok Number of individual tests passed
292 sub_skipped Number of individual tests skipped
293 todo Number of individual todo tests
295 files Number of test files ran
296 good Number of test files passed
297 bad Number of test files failed
298 tests Number of test files originally given
299 skipped Number of test files skipped
301 If C<< $total->{bad} == 0 >> and C<< $total->{max} > 0 >>, you've
302 got a successful test.
304 C<$failed> is a hash ref of all the test scripts that failed. Each key
305 is the name of a test script, each value is another hash representing
306 how that script failed. Its keys are these:
308 name Name of the test which failed
309 estat Script's exit value
310 wstat Script's wait status
311 max Number of individual tests
312 failed Number which failed
313 canon List of tests which failed (as string).
315 C<$failed> should be empty if everything passed.
321 my @tests = @{$args{tests
}};
322 my $out = $args{out
} || select();
324 # We allow filehandles that are symbolic refs
327 _autoflush
(\
*STDERR
);
340 tests
=> scalar @tests,
348 @dir_files = _globdir
$Files_In_Dir if defined $Files_In_Dir;
349 my $run_start_time = new Benchmark
;
351 my $width = _leader_width
(@tests);
352 foreach my $tfile (@tests) {
353 $Last_ML_Print = 0; # so each test prints at least once
354 my($leader, $ml) = _mk_leader
($tfile, $width);
361 $Strap->{_seen_header
} = 0;
362 if ( $Test::Harness
::Debug
) {
363 print $out "# Running: ", $Strap->_command_line($tfile), "\n";
365 my $test_start_time = $Timer ? time : 0;
366 my $results = $Strap->analyze_file($tfile) or
367 do { warn $Strap->{error
}, "\n"; next };
370 $elapsed = time - $test_start_time;
371 if ( $has_time_hires ) {
372 $elapsed = sprintf( " %8d ms", $elapsed*1000 );
375 $elapsed = sprintf( " %8s s", $elapsed ? $elapsed : "<1" );
382 # state of the current test.
383 my @failed = grep { !$results->details->[$_-1]{ok
} }
384 1..@{$results->details};
385 my @todo_pass = grep { $results->details->[$_-1]{actual_ok
} &&
386 $results->details->[$_-1]{type
} eq 'todo' }
387 1..@{$results->details};
391 'next' => $Strap->{'next'},
392 max
=> $results->max,
394 todo_pass
=> \
@todo_pass,
395 todo
=> $results->todo,
396 bonus
=> $results->bonus,
397 skipped
=> $results->skip,
398 skip_reason
=> $results->skip_reason,
399 skip_all
=> $Strap->{skip_all
},
403 $tot{bonus
} += $results->bonus;
404 $tot{max
} += $results->max;
405 $tot{ok
} += $results->ok;
406 $tot{todo
} += $results->todo;
407 $tot{sub_skipped
} += $results->skip;
409 my $estatus = $results->exit;
410 my $wstatus = $results->wait;
412 if ( $results->passing ) {
413 # XXX Combine these first two
414 if ($test{max
} and $test{skipped
} + $test{bonus
}) {
416 push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
419 my ($txt, $canon) = _canondetail
($test{todo
},0,'TODO passed',
420 @{$test{todo_pass
}});
421 $todo_passed{$tfile} = {
424 failed
=> $test{bonus
},
430 push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded\n$txt");
432 print $out "$test{ml}ok$elapsed\n ".join(', ', @msg)."\n";
434 elsif ( $test{max
} ) {
435 print $out "$test{ml}ok$elapsed\n";
437 elsif ( defined $test{skip_all
} and length $test{skip_all
} ) {
438 print $out "skipped\n all skipped: $test{skip_all}\n";
442 print $out "skipped\n all skipped: no reason given\n";
448 # List unrun tests as failures.
449 if ($test{'next'} <= $test{max
}) {
450 push @{$test{failed
}}, $test{'next'}..$test{max
};
452 # List overruns as failures.
454 my $details = $results->details;
455 foreach my $overrun ($test{max
}+1..@$details) {
456 next unless ref $details->[$overrun-1];
457 push @{$test{failed
}}, $overrun
462 $failedtests{$tfile} = _dubious_return
(\
%test, \
%tot,
464 $failedtests{$tfile}{name
} = $tfile;
466 elsif ( $results->seen ) {
467 if (@{$test{failed
}} and $test{max
}) {
468 my ($txt, $canon) = _canondetail
($test{max
},$test{skipped
},'Failed',
470 print $out "$test{ml}$txt";
471 $failedtests{$tfile} = { canon
=> $canon,
473 failed
=> scalar @{$test{failed
}},
480 print $out "Don't know which tests failed: got $test{ok} ok, ".
481 "expected $test{max}\n";
482 $failedtests{$tfile} = { canon
=> '??',
493 print $out "FAILED before any test output arrived\n";
495 $failedtests{$tfile} = { canon
=> '??',
505 if (defined $Files_In_Dir) {
506 my @new_dir_files = _globdir
$Files_In_Dir;
507 if (@new_dir_files != @dir_files) {
509 @f{@new_dir_files} = (1) x
@new_dir_files;
510 delete @f{@dir_files};
511 my @f = sort keys %f;
512 print $out "LEAKED FILES: @f\n";
513 @dir_files = @new_dir_files;
517 $tot{bench
} = timediff
(new Benchmark
, $run_start_time);
519 $Strap->_restore_PERL5LIB;
521 return(\
%tot, \
%failedtests, \
%todo_passed);
524 # Turns on autoflush for the handle passed
526 my $flushy_fh = shift;
527 my $old_fh = select $flushy_fh;
532 =for private _mk_leader
534 my($leader, $ml) = _mk_leader($test_file, $width);
536 Generates the 't/foo........' leader for the given C<$test_file> as well
537 as a similar version which will overwrite the current line (by use of
538 \r and such). C<$ml> may be empty if Test::Harness doesn't think you're
541 The C<$width> is the width of the "yada/blah.." string.
546 my($te, $width) = @_;
551 $te =~ s/^.*\.t\./\[.t./s;
553 my $leader = "$te" . '.' x
($width - length($te));
556 if ( -t STDOUT
and not $ENV{HARNESS_NOTTY
} and not $Verbose ) {
557 $ml = "\r" . (' ' x
77) . "\r$leader"
560 return($leader, $ml);
563 =for private _leader_width
565 my($width) = _leader_width(@test_files);
567 Calculates how wide the leader should be based on the length of the
576 my $suf = /\.(\w+)$/ ? $1 : '';
578 my $suflen = length $suf;
579 $maxlen = $len if $len > $maxlen;
580 $maxsuflen = $suflen if $suflen > $maxsuflen;
582 # + 3 : we want three dots between the test name and the "ok"
583 return $maxlen + 3 - $maxsuflen;
588 my $failedtests = shift;
589 my $todo_passed = shift;
593 my $bonusmsg = _bonusmsg
($tot);
596 $out .= "All tests successful$bonusmsg.\n";
598 my($fmt_top, $fmt) = _create_fmts
("Passed TODO",$todo_passed);
599 # Now write to formats
600 $out .= swrite
( $fmt_top );
601 for my $script (sort keys %{$todo_passed||{}}) {
602 my $Curtest = $todo_passed->{$script};
603 $out .= swrite
( $fmt, @{ $Curtest }{qw(name estat wstat max failed canon)} );
607 elsif (!$tot->{tests
}){
608 die "FAILED--no tests were run for some reason.\n";
610 elsif (!$tot->{max
}) {
611 my $blurb = $tot->{tests
}==1 ? "script" : "scripts";
612 die "FAILED--$tot->{tests} test $blurb could be run, ".
613 "alas--no output ever seen\n";
616 my $subresults = sprintf( " %d/%d subtests failed.",
617 $tot->{max
} - $tot->{ok
}, $tot->{max
} );
619 my($fmt_top, $fmt1, $fmt2) = _create_fmts
("Failed Test",$failedtests);
621 # Now write to formats
622 $out .= swrite
( $fmt_top );
623 for my $script (sort keys %$failedtests) {
624 my $Curtest = $failedtests->{$script};
625 $out .= swrite
( $fmt1, @{ $Curtest }{qw(name estat wstat max failed canon)} );
626 $out .= swrite
( $fmt2, $Curtest->{canon
} );
629 $bonusmsg =~ s/^,\s*//;
630 $out .= "$bonusmsg.\n" if $bonusmsg;
631 $out .= "Failed $tot->{bad}/$tot->{tests} test scripts.$subresults\n";
635 $out .= sprintf("Files=%d, Tests=%d, %s\n",
636 $tot->{files
}, $tot->{max
}, timestr
($tot->{bench
}, 'nop'));
643 formline($format,@_);
651 header
=> \
&header_handler
,
652 test
=> \
&test_handler
,
653 bailout
=> \
&bailout_handler
,
656 $Strap->set_callback(\
&strap_callback
);
658 my($self, $line, $type, $totals) = @_;
659 print $line if $Verbose;
661 my $meth = $Handlers{$type};
662 $meth->($self, $line, $type, $totals) if $meth;
667 my($self, $line, $type, $totals) = @_;
669 warn "Test header seen more than once!\n" if $self->{_seen_header
};
671 $self->{_seen_header
}++;
673 warn "1..M can only appear at the beginning or end of tests\n"
674 if $totals->seen && ($totals->max < $totals->seen);
678 my($self, $line, $type, $totals) = @_;
680 my $curr = $totals->seen;
681 my $next = $self->{'next'};
682 my $max = $totals->max;
683 my $detail = $totals->details->[-1];
685 if( $detail->{ok
} ) {
686 _print_ml_less
("ok $curr/$max");
688 if( $detail->{type
} eq 'skip' ) {
689 $totals->set_skip_reason( $detail->{reason
} )
690 unless defined $totals->skip_reason;
691 $totals->set_skip_reason( 'various reasons' )
692 if $totals->skip_reason ne $detail->{reason
};
696 _print_ml
("NOK $curr/$max");
699 if( $curr > $next ) {
700 print "Test output counter mismatch [test $curr]\n";
702 elsif( $curr < $next ) {
703 print "Confused test output: test $curr answered after ".
704 "test ", $next - 1, "\n";
709 sub bailout_handler
{
710 my($self, $line, $type, $totals) = @_;
712 die "FAILED--Further testing stopped" .
713 ($self->{bailout_reason
} ? ": $self->{bailout_reason}\n" : ".\n");
718 print join '', $ML, @_ if $ML;
722 # Print updates only once per second.
724 my $now = CORE
::time;
725 if ( $Last_ML_Print != $now ) {
727 $Last_ML_Print = $now;
735 $bonusmsg = (" ($tot->{bonus} subtest".($tot->{bonus
} > 1 ? 's' : '').
736 " UNEXPECTEDLY SUCCEEDED)")
739 if ($tot->{skipped
}) {
740 $bonusmsg .= ", $tot->{skipped} test"
741 . ($tot->{skipped
} != 1 ? 's' : '');
742 if ($tot->{sub_skipped
}) {
743 $bonusmsg .= " and $tot->{sub_skipped} subtest"
744 . ($tot->{sub_skipped
} != 1 ? 's' : '');
746 $bonusmsg .= ' skipped';
748 elsif ($tot->{sub_skipped
}) {
749 $bonusmsg .= ", $tot->{sub_skipped} subtest"
750 . ($tot->{sub_skipped
} != 1 ? 's' : '')
756 # Test program go boom.
757 sub _dubious_return
{
758 my($test, $tot, $estatus, $wstatus) = @_;
763 printf "$test->{ml}dubious\n\tTest returned status $estatus ".
764 "(wstat %d, 0x%x)\n",
766 print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
771 if ($test->{'next'} == $test->{max
} + 1 and not @{$test->{failed
}}) {
772 print "\tafter all the subtests completed successfully\n";
773 $failed = 0; # But we do not set $canon!
776 push @{$test->{failed
}}, $test->{'next'}..$test->{max
};
777 $failed = @{$test->{failed
}};
778 (my $txt, $canon) = _canondetail
($test->{max
},$test->{skipped
},'Failed',@{$test->{failed
}});
783 return { canon
=> $canon, max
=> $test->{max
} || '??',
785 estat
=> $estatus, wstat
=> $wstatus,
791 my $failed_str = shift;
792 my $failedtests = shift;
794 my ($type) = split /\s/,$failed_str;
795 my $short = substr($type,0,4);
796 my $total = $short eq 'Pass' ? 'TODOs' : 'Total';
797 my $middle_str = " Stat Wstat $total $short ";
798 my $list_str = "List of $type";
800 # Figure out our longest name string for formatting purposes.
801 my $max_namelen = length($failed_str);
802 foreach my $script (keys %$failedtests) {
803 my $namelen = length $failedtests->{$script}->{name
};
804 $max_namelen = $namelen if $namelen > $max_namelen;
807 my $list_len = $Columns - length($middle_str) - $max_namelen;
808 if ($list_len < length($list_str)) {
809 $list_len = length($list_str);
810 $max_namelen = $Columns - length($middle_str) - $list_len;
811 if ($max_namelen < length($failed_str)) {
812 $max_namelen = length($failed_str);
813 $Columns = $max_namelen + length($middle_str) + $list_len;
817 my $fmt_top = sprintf("%-${max_namelen}s", $failed_str)
823 my $fmt1 = "@" . "<" x
($max_namelen - 1)
824 . " @>> @>>>> @>>>> @>>> "
825 . "^" . "<" x
($list_len - 1) . "\n";
826 my $fmt2 = "~~" . " " x
($Columns - $list_len - 2) . "^"
827 . "<" x
($list_len - 1) . "\n";
829 return($fmt_top, $fmt1, $fmt2);
838 @detail = sort {$a <=> $b} grep !$seen{$_}++, @detail;
839 my $detail = @detail;
843 my $last = $min = shift @detail;
845 my $uc_type = uc($type);
847 for (@detail, $detail[-1]) { # don't forget the last one
848 if ($_ > $last+1 || $_ == $last) {
849 push @canon, ($min == $last) ? $last : "$min-$last";
855 push @result, "$uc_type tests
@canon\n";
856 $canon = join ' ', @canon;
859 push @result, "$uc_type test
$last\n";
863 return (join("", @result), $canon)
865 push @result, "\t$type $detail/$max tests
, ";
867 push @result, sprintf("%.2f",100*(1-$detail/$max)), "% okay
";
870 push @result, "?% okay
";
872 my $ender = 's' x ($skipped > 1);
874 my $good = $max - $detail - $skipped;
875 my $skipmsg = " (less
$skipped skipped test
$ender: $good okay
, ";
877 my $goodper = sprintf("%.2f",100*($good/$max));
878 $skipmsg .= "$goodper%)";
883 push @result, $skipmsg;
886 my $txt = join "", @result;
887 return ($txt, $canon);
896 C<&runtests> is exported by Test::Harness by default.
898 C<&execute_tests>, C<$verbose>, C<$switches> and C<$debug> are
899 exported upon request.
905 =item C<All tests successful.\nFiles=%d, Tests=%d, %s>
907 If all tests are successful some statistics about the performance are
910 =item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
912 For any single script that has failing subtests statistics like the
915 =item C<Test returned status %d (wstat %d)>
917 Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
918 and C<$?> are printed in a message similar to the above.
920 =item C<Failed 1 test, %.2f%% okay. %s>
922 =item C<Failed %d/%d tests, %.2f%% okay. %s>
924 If not all tests were successful, the script dies with one of the
927 =item C<FAILED--Further testing stopped: %s>
929 If a single subtest decides that further testing will not make sense,
930 the script dies with this message.
934 =head1 ENVIRONMENT VARIABLES THAT TEST::HARNESS SETS
936 Test::Harness sets these before executing the individual tests.
940 =item C<HARNESS_ACTIVE>
942 This is set to a true value. It allows the tests to determine if they
943 are being executed through the harness or by any other means.
945 =item C<HARNESS_VERSION>
947 This is the version of Test::Harness.
951 =head1 ENVIRONMENT VARIABLES THAT AFFECT TEST::HARNESS
955 =item C<HARNESS_COLUMNS>
957 This value will be used for the width of the terminal. If it is not
958 set then it will default to C<COLUMNS>. If this is not set, it will
959 default to 80. Note that users of Bourne-sh based shells will need to
960 C<export COLUMNS> for this module to use that variable.
962 =item C<HARNESS_COMPILE_TEST>
964 When true it will make harness attempt to compile the test using
965 C<perlcc> before running it.
967 B<NOTE> This currently only works when sitting in the perl source
970 =item C<HARNESS_DEBUG>
972 If true, Test::Harness will print debugging information about itself as
973 it runs the tests. This is different from C<HARNESS_VERBOSE>, which prints
974 the output from the test being run. Setting C<$Test::Harness::Debug> will
975 override this, or you can use the C<-d> switch in the F<prove> utility.
977 =item C<HARNESS_FILELEAK_IN_DIR>
979 When set to the name of a directory, harness will check after each
980 test whether new files appeared in that directory, and report them as
982 LEAKED FILES: scr.tmp 0 my.db
984 If relative, directory name is with respect to the current directory at
985 the moment runtests() was called. Putting absolute path into
986 C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
988 =item C<HARNESS_NOTTY>
990 When set to a true value, forces it to behave as though STDOUT were
991 not a console. You may need to set this if you don't want harness to
992 output more frequent progress messages using carriage returns. Some
993 consoles may not handle carriage returns properly (which results in a
994 somewhat messy output).
996 =item C<HARNESS_PERL>
998 Usually your tests will be run by C<$^X>, the currently-executing Perl.
999 However, you may want to have it run by a different executable, such as
1000 a threading perl, or a different version.
1002 If you're using the F<prove> utility, you can use the C<--perl> switch.
1004 =item C<HARNESS_PERL_SWITCHES>
1006 Its value will be prepended to the switches used to invoke perl on
1007 each test. For example, setting C<HARNESS_PERL_SWITCHES> to C<-W> will
1008 run all tests with all warnings enabled.
1010 =item C<HARNESS_TIMER>
1012 Setting this to true will make the harness display the number of
1013 milliseconds each test took. You can also use F<prove>'s C<--timer>
1016 =item C<HARNESS_VERBOSE>
1018 If true, Test::Harness will output the verbose results of running
1019 its tests. Setting C<$Test::Harness::verbose> will override this,
1020 or you can use the C<-v> switch in the F<prove> utility.
1026 =item C<HARNESS_STRAP_CLASS>
1028 Defines the Test::Harness::Straps subclass to use. The value may either
1029 be a filename or a class name.
1031 If HARNESS_STRAP_CLASS is a class name, the class must be in C<@INC>
1032 like any other class.
1034 If HARNESS_STRAP_CLASS is a filename, the .pm file must return the name
1035 of the class, instead of the canonical "1".
1041 Here's how Test::Harness tests itself
1043 $ cd ~/src/devel/Test-Harness
1044 $ perl -Mblib -e 'use Test::Harness qw(&runtests $verbose);
1045 $verbose=0; runtests
@ARGV;' t/*.t
1046 Using /home/schwern/src/devel/Test-Harness/blib
1047 t/base..............ok
1048 t/nonumbers.........ok
1049 t/ok................ok
1050 t/test-harness......ok
1051 All tests successful.
1052 Files=4, Tests=24, 2 wallclock secs ( 0.61 cusr + 0.41 csys = 1.02 CPU)
1056 The included F<prove> utility for running test scripts from the command line,
1057 L<Test> and L<Test::Simple> for writing test scripts, L<Benchmark> for
1058 the underlying timing routines, and L<Devel::Cover> for test coverage
1063 Provide a way of running tests quietly (ie. no printing) for automated
1064 validation of tests. This will probably take the form of a version
1065 of runtests() which rather than printing its output returns raw data
1066 on the state of the tests. (Partially done in Test::Harness::Straps)
1068 Document the format.
1070 Fix HARNESS_COMPILE_TEST without breaking its core usage.
1072 Figure a way to report test names in the failure summary.
1074 Rework the test summary so long test names are not truncated as badly.
1075 (Partially done with new skip test styles)
1077 Add option for coverage analysis.
1081 Implement Straps total_results()
1085 Completely redo the print summary code.
1087 Straps->analyze_file() not taint clean, don't know
if it can be
1089 Fix that damned VMS nit
.
1091 Add a test
for verbose
.
1093 Change internal list of test results to a hash
.
1095 Fix stats display
when there
's an overrun.
1097 Fix so perls with spaces in the filename work.
1099 Keep whittling away at _run_all_tests()
1101 Clean up how the summary is printed. Get rid of those damned formats.
1105 Please report any bugs or feature requests to
1106 C<bug-test-harness at rt.cpan.org>, or through the web interface at
1107 L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Test-Harness>.
1108 I will be notified, and then you'll automatically be notified of progress on
1109 your bug as I make changes
.
1113 You can find documentation for this module with the F<perldoc> command.
1115 perldoc Test::Harness
1117 You can get docs for F<prove> with
1121 You can also look for information at:
1125 =item * AnnoCPAN: Annotated CPAN documentation
1127 L<http://annocpan.org/dist/Test-Harness>
1129 =item * CPAN Ratings
1131 L<http://cpanratings.perl.org/d/Test-Harness>
1133 =item * RT: CPAN's request tracker
1135 L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=Test-Harness>
1139 L<http://search.cpan.org/dist/Test-Harness>
1145 The source code repository for Test::Harness is at
1146 L<http://svn.perl.org/modules/Test-Harness>.
1150 Either Tim Bunce or Andreas Koenig, we don't know. What we know for
1151 sure is, that it was inspired by Larry Wall's F<TEST> script that came
1152 with perl distributions for ages. Numerous anonymous contributors
1153 exist. Andreas Koenig held the torch for many years, and then
1156 Current maintainer is Andy Lester C<< <andy at petdance.com> >>.
1161 by Michael G Schwern C<< <schwern at pobox.com> >>,
1162 Andy Lester C<< <andy at petdance.com> >>.
1164 This program is free software; you can redistribute it and/or
1165 modify it under the same terms as Perl itself.
1167 See L<http://www.perl.com/perl/misc/Artistic.html>.