Commit b2a5d5aa authored by Alexandre Duret-Lutz

ltlcheck: add options --json and --csv

* src/bin/ltlcheck.cc: Add option --json and --csv.
* src/bin/man/ltlcheck.x: Adjust examples.
* src/tgbatest/ltlcheck.test: Do not output any statistic.
* src/tgbatest/ltlcheck2.test: Output both JSON and CSV stats.
parent fc374c07
......@@ -51,6 +51,7 @@
#include "misc/formater.hh"
#include "tgbaalgos/stats.hh"
#include "tgbaalgos/isdet.hh"
#include "misc/escape.hh"
// Disable handling of timeout on systems that miss kill() or alarm().
// For instance MinGW.
......@@ -74,6 +75,8 @@ Exit status:\n\
#define OPT_STATES 1
#define OPT_DENSITY 2
#define OPT_JSON 3
#define OPT_CSV 4
static const argp_option options[] =
{
......@@ -104,6 +107,12 @@ static const argp_option options[] =
"probability, between 0.0 and 1.0, to add a transition between "
"two states (0.1 by default)", 0 },
/**************************************************/
{ 0, 0, 0, 0, "Statistics ouput:", 5 },
{ "json", OPT_JSON, "FILENAME", OPTION_ARG_OPTIONAL,
"output statistics as JSON in FILENAME or on standard output", 0 },
{ "csv", OPT_CSV, "FILENAME", OPTION_ARG_OPTIONAL,
"output statistics as CSV in FILENAME or on standard output", 0 },
/**************************************************/
{ 0, 0, 0, 0, "Miscellaneous options:", -1 },
{ 0, 0, 0, 0, 0, 0 }
};
......@@ -118,6 +127,9 @@ spot::bdd_dict dict;
unsigned states = 200;
float density = 0.1;
unsigned timeout = 0;
const char* json_output = 0;
const char* csv_output = 0;
bool want_stats = false;
std::vector<char*> translators;
bool global_error_flag = false;
......@@ -147,17 +159,17 @@ struct statistics
static void
fields(std::ostream& os)
{
os << (" \"states\",\n"
" \"edges\",\n"
" \"transitions\",\n"
" \"acc\",\n"
" \"scc\",\n"
" \"nondetstates\",\n"
" \"nondeterministic\",\n"
" \"time\",\n"
" \"product_states\",\n"
" \"product_transitions\",\n"
" \"product_scc\"");
os << (" \"states\","
" \"edges\","
" \"transitions\","
" \"acc\","
" \"scc\","
" \"nondetstates\","
" \"nondeterministic\","
" \"time\","
" \"product_states\","
" \"product_transitions\","
" \"product_scc\"");
}
void
......@@ -239,9 +251,17 @@ parse_opt(int key, char* arg, struct argp_state*)
<< "on your platform" << std::endl;
#endif
break;
case OPT_CSV:
want_stats = true;
csv_output = arg ? arg : "-";
break;
case OPT_DENSITY:
density = to_probability(arg);
break;
case OPT_JSON:
want_stats = true;
json_output = arg ? arg : "-";
break;
case OPT_STATES:
states = to_pos_int(arg);
break;
......@@ -540,7 +560,7 @@ namespace
case printable_result_filename::Lbtt:
{
std::string error;
std::fstream f(output.val().c_str());
std::ifstream f(output.val().c_str());
if (!f)
{
global_error() << "Cannot open " << output.val()
......@@ -562,7 +582,7 @@ namespace
}
}
// Compute statistics.
if (res)
if (res && want_stats)
{
statistics* st = &(*fstats)[translator_num];
st->ok = true;
......@@ -788,10 +808,13 @@ namespace
pos_map[i] = sm;
// Statistics
(*pstats)[i].product_scc = sm->scc_count();
spot::tgba_statistics s = spot::stats_reachable(p);
(*pstats)[i].product_states = s.states;
(*pstats)[i].product_transitions = s.transitions;
if (want_stats)
{
(*pstats)[i].product_scc = sm->scc_count();
spot::tgba_statistics s = spot::stats_reachable(p);
(*pstats)[i].product_states = s.states;
(*pstats)[i].product_transitions = s.transitions;
}
}
for (size_t i = 0; i < m; ++i)
if (neg[i])
......@@ -803,10 +826,13 @@ namespace
neg_map[i] = sm;
// Statistics
(*nstats)[i].product_scc = sm->scc_count();
spot::tgba_statistics s = spot::stats_reachable(p);
(*nstats)[i].product_states = s.states;
(*nstats)[i].product_transitions = s.transitions;
if (want_stats)
{
(*nstats)[i].product_scc = sm->scc_count();
spot::tgba_statistics s = spot::stats_reachable(p);
(*nstats)[i].product_states = s.states;
(*nstats)[i].product_transitions = s.transitions;
}
}
// cross-comparison test
......@@ -848,37 +874,95 @@ namespace
}
static void
print_stats_json()
print_stats_csv(const char* filename)
{
std::ofstream* outfile = 0;
std::ostream* out;
if (!strncmp(filename, "-", 2))
{
out = &std::cout;
}
else
{
out = outfile = new std::ofstream(filename);
if (!*outfile)
error(2, errno, "cannot open '%s'", filename);
}
unsigned ntrans = translators.size();
unsigned rounds = vstats.size();
if (!rounds)
return;
assert(rounds == formulas.size());
*out << "\"formula\", \"tool\", ";
statistics::fields(*out);
*out << "\n";
for (unsigned r = 0; r < rounds; ++r)
for (unsigned t = 0; t < ntrans; ++t)
if (vstats[r][t].ok)
{
*out << "\"";
spot::escape_str(*out, formulas[r]);
*out << "\", \"";
spot::escape_str(*out, translators[t]);
*out << "\", ";
vstats[r][t].to_csv(*out);
*out << "\n";
}
delete outfile;
}
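For reference, the following minimal sketch (not Spot code, using a hypothetical, cut-down statistics struct) shows how the static fields() helper and the per-run to_csv() method pair up: fields() prints the quoted column names once for the header line, and to_csv() prints one matching row of values, exactly as print_stats_csv() does above with the escaped formula and translator prepended.

// Sketch only: a reduced stand-in for the statistics struct.
#include <iostream>

struct stats_sketch
{
  unsigned states;
  unsigned edges;
  double time;

  // Quoted column names, kept in the same order as to_csv() below.
  static void
  fields(std::ostream& os)
  {
    os << "\"states\", \"edges\", \"time\"";
  }

  // One row of values matching fields().
  void
  to_csv(std::ostream& os) const
  {
    os << states << ", " << edges << ", " << time;
  }
};

int
main()
{
  stats_sketch st = { 4, 7, 0.02 };
  std::cout << "\"formula\", \"tool\", ";
  stats_sketch::fields(std::cout);          // header line
  std::cout << "\n\"GFa\", \"ltl2tgba\", ";
  st.to_csv(std::cout);                     // one data row
  std::cout << "\n";
}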
static void
print_stats_json(const char* filename)
{
std::ofstream* outfile = 0;
std::ostream* out;
if (!strncmp(filename, "-", 2))
{
out = &std::cout;
}
else
{
out = outfile = new std::ofstream(filename);
if (!*outfile)
error(2, errno, "cannot open '%s'", filename);
}
unsigned ntrans = translators.size();
unsigned rounds = vstats.size();
assert(rounds == formulas.size());
std::cout << "{\n tools: [\n \"" << translators[0];
*out << "{\n \"tools\": [\n \"";
spot::escape_str(*out, translators[0]);
for (unsigned t = 1; t < ntrans; ++t)
std::cout << "\",\n \"" << translators[t];
std::cout << "\"\n ],\n inputs: [\n \"" << formulas[0];
{
*out << "\",\n \"";
spot::escape_str(*out, translators[t]);
}
*out << "\"\n ],\n \"inputs\": [\n \"";
spot::escape_str(*out, formulas[0]);
for (unsigned r = 1; r < rounds; ++r)
std::cout << "\",\n \"" << formulas[r];
std::cout << ("\"\n ],\n fields: [\n \"input\",\n"
" \"tool\",\n");
statistics::fields(std::cout);
std::cout << "\n ],\n results: [";
{
*out << "\",\n \"";
spot::escape_str(*out, formulas[r]);
}
*out << ("\"\n ],\n \"fields\": [\n \"input\", \"tool\",");
statistics::fields(*out);
*out << "\n ],\n \"results\": [";
bool notfirst = false;
for (unsigned r = 0; r < rounds; ++r)
for (unsigned t = 0; t < ntrans; ++t)
if (vstats[r][t].ok)
{
if (notfirst)
std::cout << ",";
*out << ",";
notfirst = true;
std::cout << "\n [ " << r << ", " << t << ", ";
vstats[r][t].to_csv(std::cout);
std::cout << " ]";
*out << "\n [ " << r << ", " << t << ", ";
vstats[r][t].to_csv(*out);
*out << " ]";
}
std::cout << "\n ]\n}\n";
*out << "\n ]\n}\n";
delete outfile;
}
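Both print_stats_csv() and print_stats_json() select their destination the same way: the filename "-" stands for standard output, anything else is opened as a file, and the single delete at the end is a no-op when writing to std::cout. A minimal sketch of that idiom (not Spot code; note that an open failure shows up in the stream state, not in the pointer returned by new):

// Sketch only: write either to std::cout or to a named file.
#include <cstring>
#include <fstream>
#include <iostream>

int
main(int argc, char** argv)
{
  const char* filename = argc > 1 ? argv[1] : "-";
  std::ofstream* outfile = 0;
  std::ostream* out;
  if (!std::strcmp(filename, "-"))
    {
      out = &std::cout;
    }
  else
    {
      out = outfile = new std::ofstream(filename);
      if (!*outfile)            // test the stream, not the pointer
        {
          std::cerr << "cannot open '" << filename << "'\n";
          return 2;
        }
    }
  *out << "\"formula\", \"tool\", \"states\"\n";  // e.g. a CSV header line
  delete outfile;                                 // null (hence a no-op) for std::cout
}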
int
......@@ -905,6 +989,25 @@ main(int argc, char** argv)
if (p.run())
return 2;
print_stats_json();
if (formulas.empty())
{
error(2, 0, "no formula to translate");
}
else
{
if (global_error_flag)
std::cerr
<< ("error: some error was detected during the above runs,\n"
" please search for 'error:' messages in the above trace.")
<< std::endl;
else
std::cerr << "no problem detected" << std::endl;
}
if (json_output)
print_stats_json(json_output);
if (csv_output)
print_stats_csv(csv_output);
return global_error_flag;
}
......@@ -5,21 +5,34 @@
The following commands compare never claims produced by ltl2tgba(1)
and spin(1) on 100 random formulas, using a timeout of 2 minutes. A
trace of the execution of the two tools, including any potential issue
detected, is reported on standard error, while statistics gathered
(normally sent to standard output) are redicted to \f(CWresults.json\fR.
detected, is reported on standard error, while statistics are
written to \f(CWresults.json\fR.
\f(CW% randltl -n100 --tree-size=20..30 a b c | \e
.br
ltlcheck -T120 'ltl2tgba -s %f > %N' 'spin -f %s > %N' > results.json\fR
.nf
% randltl \-n100 \-\-tree\-size=20..30 a b c | \e
ltlcheck \-T120 'ltl2tgba \-s %f >%N' 'spin \-f %s >%N' \-\-json=results.json
.fi
.LP
The next command compares lbt, ltl3ba, and ltl2tgba(1) on a set of
formulas saved in file \f(CWinput.ltl\fR. Statistis are again
redirected to \f(CWresults.json\fR. Note the use of \f(CW%L\fR to
indicate that the LBT formula should be written into a file, and
\f(CW%T\fR to read the output in LBTT's format (which is a superset of
the format output by LBT).
formulas saved in file \f(CWinput.ltl\fR. Statistics are again written
as CSV into \f(CWresults.csv\fR. Note the use of \f(CW%L\fR to
indicate that the formula passed to lbt should be written into a file
in LBT's format, and \f(CW%T\fR to read the output in LBTT's format
(which is a superset of the format output by LBT).
\f(CW% ltlcheck -F input.ltl 'lbt < %L > %T' 'ltl2tgba --lbtt %f > %T'\fR
.nf
% ltlcheck \-F input.ltl \-\-csv=results.csv \e
'lbt <%L >%T' \e
'ltl3ba \-f %s >%N' \e
'ltl2tgba \-\-lbtt %f >%T'
.fi
.LP
If you use ltlcheck in an automated testsuite just to check for
potential problems, avoid the \fB\-\-csv\fR and \fB\-\-json\fR
options: ltlcheck is faster when it does not have to compute these
statistics.
[SEE ALSO]
randltl(1), genltl(1), ltlfilt(1), ltl2tgba(1)
......@@ -28,7 +41,7 @@ randltl(1), genltl(1), ltlfilt(1), ltl2tgba(1)
ltlcheck is a Spot-based reimplementation of a tool called LBTT. LBTT
was developed by Heikki Tauriainen at the Helsinki University of
Technology. The main motivation for the reimplementation was to
support PSL, and output more statistics about the translation.
support PSL, and output more statistics about the translations.
The sanity checks performed on the result of each translator (by
either LBTT or ltlcheck) are described in the following paper. Our
......
......@@ -45,7 +45,7 @@ ltl2tgba=../ltl2tgba
"$ltl2tgba -t -f -x -p -L -D %f > %T" \
"$ltl2tgba -t -taa -r4 %f > %T" \
"$ltl2tgba -t -taa -r4 -c %f > %T" \
"$ltl2tgba -t -taa -r4 -R3 -RDS %f > %T" > output.json
"$ltl2tgba -t -taa -r4 -R3 -RDS %f > %T"
# Disabled because too slow, and too big automata produced.
# "$ltl2tgba -t -lo -r4 %f > %T"
......
......@@ -23,7 +23,6 @@
ltl2tgba=../../bin/ltl2tgba
../../bin/randltl -P -n 100 p1 p2 p3 p4 p5 p6 --tree-size 5..15 |
../../bin/ltlfilt --remove-wm |
../../bin/ltlcheck \
"$ltl2tgba --lbtt --any --small %f > %T" \
"$ltl2tgba --lbtt --any --medium %f > %T" \
......@@ -33,7 +32,7 @@ ltl2tgba=../../bin/ltl2tgba
"$ltl2tgba --lbtt --deterministic --high %f > %T" \
"$ltl2tgba --lbtt --high --small %f > %T" \
"$ltl2tgba --lbtt --high --medium %f > %T" \
"$ltl2tgba --lbtt --high --high %f > %T" > output.json
"$ltl2tgba --lbtt --high --high %f > %T" \
--json=output.json --csv=output.csv