-rw-r--r--  CMakeLists.txt                     10
-rw-r--r--  cmake/modules/FindSQLite3.cmake    37
-rw-r--r--  lib/Core/CMakeLists.txt             2
-rw-r--r--  lib/Core/StatsTracker.cpp         256
-rw-r--r--  lib/Core/StatsTracker.h             8
-rwxr-xr-x  tools/klee-stats/klee-stats       185
6 files changed, 262 insertions, 236 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 30a7292c..ebcbe6f6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -416,6 +416,16 @@ else()
 endif()
 
 ################################################################################
+# Detect SQLite3
+################################################################################
+find_package(SQLite3)
+if (SQLITE3_FOUND)
+  include_directories(${SQLITE3_INCLUDE_DIRS})
+else()
+  message(FATAL_ERROR "SQLite3 not found, please install it.")
+endif()
+
+################################################################################
 # Detect libcap
 ################################################################################
 check_include_file("sys/capability.h" HAVE_SYS_CAPABILITY_H)
diff --git a/cmake/modules/FindSQLite3.cmake b/cmake/modules/FindSQLite3.cmake
new file mode 100644
index 00000000..9c99ae5c
--- /dev/null
+++ b/cmake/modules/FindSQLite3.cmake
@@ -0,0 +1,37 @@
+# Copyright (C) 2007-2009 LuaDist.
+# Created by Peter Kapec <kapecp@gmail.com>
+# Redistribution and use of this file is allowed according to the terms of the MIT license.
+# For details see the COPYRIGHT file distributed with LuaDist.
+#	Note:
+#		Searching headers and libraries is very simple and is NOT as powerful as scripts
+#		distributed with CMake, because LuaDist defines directories to search for.
+#		Everyone is encouraged to contact the author with improvements. Maybe this file
+#		becomes part of CMake distribution sometimes.
+
+# - Find sqlite3
+# Find the native SQLITE3 headers and libraries.
+#
+# SQLITE3_INCLUDE_DIRS	- where to find sqlite3.h, etc.
+# SQLITE3_LIBRARIES	- List of libraries when using sqlite.
+# SQLITE3_FOUND	- True if sqlite found.
+
+# Look for the header file.
+FIND_PATH(SQLITE3_INCLUDE_DIR NAMES sqlite3.h)
+
+# Look for the library.
+FIND_LIBRARY(SQLITE3_LIBRARY NAMES sqlite3)
+
+# Handle the QUIETLY and REQUIRED arguments and set SQLITE3_FOUND to TRUE if all listed variables are TRUE.
+INCLUDE(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(SQLITE3 DEFAULT_MSG SQLITE3_LIBRARY SQLITE3_INCLUDE_DIR)
+
+# Copy the results to the output variables.
+IF(SQLITE3_FOUND)
+	SET(SQLITE3_LIBRARIES ${SQLITE3_LIBRARY})
+	SET(SQLITE3_INCLUDE_DIRS ${SQLITE3_INCLUDE_DIR})
+ELSE(SQLITE3_FOUND)
+	SET(SQLITE3_LIBRARIES)
+	SET(SQLITE3_INCLUDE_DIRS)
+ENDIF(SQLITE3_FOUND)
+
+MARK_AS_ADVANCED(SQLITE3_INCLUDE_DIRS SQLITE3_LIBRARIES)
diff --git a/lib/Core/CMakeLists.txt b/lib/Core/CMakeLists.txt
index 4d0ec635..9e4a592d 100644
--- a/lib/Core/CMakeLists.txt
+++ b/lib/Core/CMakeLists.txt
@@ -40,7 +40,7 @@ set(LLVM_COMPONENTS
 )
 
 klee_get_llvm_libs(LLVM_LIBS ${LLVM_COMPONENTS})
-target_link_libraries(kleeCore PUBLIC ${LLVM_LIBS} sqlite3)
+target_link_libraries(kleeCore PUBLIC ${LLVM_LIBS} ${SQLITE3_LIBRARIES})
 target_link_libraries(kleeCore PRIVATE
   kleeBasic
   kleeModule
diff --git a/lib/Core/StatsTracker.cpp b/lib/Core/StatsTracker.cpp
index 4b557df2..e853bfae 100644
--- a/lib/Core/StatsTracker.cpp
+++ b/lib/Core/StatsTracker.cpp
@@ -80,12 +80,12 @@ cl::opt<unsigned> StatsWriteAfterInstructions(
         "Write statistics after each n instructions, 0 to disable (default=0)"),
     cl::cat(StatsCat));
 
-cl::opt<unsigned> CommitEvery(
-    "stats-commit-after", cl::init(0),
-     cl::desc("Commit the statistics every N writes. Setting to 0 commits every "
-         "write in interval mode (default) or every 1000 writes in write after "
-         "N instructions (default=0)"),
-     cl::cat(StatsCat));
+cl::opt<unsigned> CommitEvery(
+    "stats-commit-after", cl::init(0),
+    cl::desc("Commit the statistics every N writes. By default commit every "
+             "write with -stats-write-interval or every 1000 writes with "
+             "-stats-write-after-instructions. (default=0)"),
+    cl::cat(StatsCat));
 
 cl::opt<std::string> IStatsWriteInterval(
     "istats-write-interval", cl::init("10s"),
@@ -205,13 +205,11 @@ StatsTracker::StatsTracker(Executor &_executor, std::string _objectFilename,
 
   KModule *km = executor.kmodule.get();
   if(CommitEvery > 0) {
-      commitEvery = CommitEvery.getValue();
+      statsCommitEvery = CommitEvery;
   } else {
-      commitEvery = StatsWriteInterval > 0 ? 1 : 1000;
+      statsCommitEvery = StatsWriteInterval > 0 ? 1 : 1000;
   }
 
-
-
   if (!sys::path::is_absolute(objectFilename)) {
     SmallString<128> current(objectFilename);
     if(sys::fs::make_absolute(current)) {
@@ -248,11 +246,12 @@ StatsTracker::StatsTracker(Executor &_executor, std::string _objectFilename,
   }
 
   if (OutputStats) {
-    if(sqlite3_open(executor.interpreterHandler->getOutputFilename("run.stats").c_str(), &statsFile)){
-       std::stringstream error_s;
-       error_s << "Can't open database: " << sqlite3_errmsg(statsFile);
-       sqlite3_close(statsFile);
-       klee_error("%s", error_s.str().c_str());
+    auto db_filename = executor.interpreterHandler->getOutputFilename("run.stats");
+    if(sqlite3_open(db_filename.c_str(), &statsFile) != SQLITE_OK) {
+      std::ostringstream errorstream;
+      errorstream << "Can't open database: " << sqlite3_errmsg(statsFile);
+      sqlite3_close(statsFile);
+      klee_error("%s", errorstream.str().c_str());
     } else {
       writeStatsHeader();
       writeStatsLine();
@@ -280,8 +279,7 @@ StatsTracker::StatsTracker(Executor &_executor, std::string _objectFilename,
 }
 
 StatsTracker::~StatsTracker() {
-  char *zErrMsg;
-  sqlite3_exec(statsFile, "END TRANSACTION", NULL, NULL, &zErrMsg);
+  sqlite3_exec(statsFile, "END TRANSACTION", nullptr, nullptr, nullptr);
   sqlite3_finalize(insertStmt);
   sqlite3_close(statsFile);
 }
@@ -387,6 +385,12 @@ void StatsTracker::framePopped(ExecutionState &es) {
   // XXX remove me?
 }
 
+std::string sqlite3ErrToStringAndFree(const std::string &prefix, char *sqlite3ErrMsg) {
+  std::ostringstream sstream;
+  sstream << prefix << sqlite3ErrMsg;
+  sqlite3_free(sqlite3ErrMsg);
+  return sstream.str();
+}
 
 void StatsTracker::markBranchVisited(ExecutionState *visitedTrue, 
                                      ExecutionState *visitedFalse) {
@@ -413,92 +417,92 @@ void StatsTracker::markBranchVisited(ExecutionState *visitedTrue,
 }
 
 void StatsTracker::writeStatsHeader() {
-  std::stringstream create,  insert;
+  std::ostringstream create, insert;
   create << "CREATE TABLE stats ";
-  create     << "(Instructions int,"
-             << "FullBranches int,"
-             << "PartialBranches int,"
-             << "NumBranches int,"
-             << "UserTime int,"
-             << "NumStates int,"
-             << "MallocUsage int,"
-             << "NumQueries int,"
-             << "NumQueryConstructs int,"
-             << "NumObjects int,"
-             << "WallTime int,"
-             << "CoveredInstructions int,"
-             << "UncoveredInstructions int,"
-             << "QueryTime int,"
-             << "SolverTime int,"
-             << "CexCacheTime int,"
-             << "ForkTime int,"
-             << "ResolveTime int,"
-             << "QueryCexCacheMisses int,"
+  create     << "(Instructions INTEGER,"
+             << "FullBranches INTEGER,"
+             << "PartialBranches INTEGER,"
+             << "NumBranches INTEGER,"
+             << "UserTime REAL,"
+             << "NumStates INTEGER,"
+             << "MallocUsage INTEGER,"
+             << "NumQueries INTEGER,"
+             << "NumQueryConstructs INTEGER,"
+             << "NumObjects INTEGER,"
+             << "WallTime REAL,"
+             << "CoveredInstructions INTEGER,"
+             << "UncoveredInstructions INTEGER,"
+             << "QueryTime INTEGER,"
+             << "SolverTime INTEGER,"
+             << "CexCacheTime INTEGER,"
+             << "ForkTime INTEGER,"
+             << "ResolveTime INTEGER,"
+             << "QueryCexCacheMisses INTEGER,"
 #ifdef KLEE_ARRAY_DEBUG
-	     << "ArrayHashTime int,"
+             << "ArrayHashTime INTEGER,"
 #endif
-             << "QueryCexCacheHits int"
+             << "QueryCexCacheHits INTEGER"
              << ")";
-       char *zErrMsg = 0;
-       if(sqlite3_exec(statsFile, create.str().c_str(), NULL, NULL, &zErrMsg)) {
-         klee_error("ERROR creating table: %s", zErrMsg);
-         return;
-       }
-       insert << "INSERT INTO stats ( "
-              << "Instructions ,"
-              << "FullBranches ,"
-              << "PartialBranches ,"
-              << "NumBranches ,"
-              << "UserTime ,"
-              << "NumStates ,"
-              << "MallocUsage ,"
-              << "NumQueries ,"
-              << "NumQueryConstructs ,"
-              << "NumObjects ,"
-              << "WallTime ,"
-              << "CoveredInstructions ,"
-              << "UncoveredInstructions ,"
-              << "QueryTime ,"
-              << "SolverTime ,"
-              << "CexCacheTime ,"
-              << "ForkTime ,"
-              << "ResolveTime ,"
-              << "QueryCexCacheMisses ,"
+  char *zErrMsg = nullptr;
+  if(sqlite3_exec(statsFile, create.str().c_str(), nullptr, nullptr, &zErrMsg)) {
+    klee_error("%s", sqlite3ErrToStringAndFree("ERROR creating table: ", zErrMsg).c_str());
+    return;
+  }
+  insert     << "INSERT INTO stats ( "
+             << "Instructions ,"
+             << "FullBranches ,"
+             << "PartialBranches ,"
+             << "NumBranches ,"
+             << "UserTime ,"
+             << "NumStates ,"
+             << "MallocUsage ,"
+             << "NumQueries ,"
+             << "NumQueryConstructs ,"
+             << "NumObjects ,"
+             << "WallTime ,"
+             << "CoveredInstructions ,"
+             << "UncoveredInstructions ,"
+             << "QueryTime ,"
+             << "SolverTime ,"
+             << "CexCacheTime ,"
+             << "ForkTime ,"
+             << "ResolveTime ,"
+             << "QueryCexCacheMisses ,"
 #ifdef KLEE_ARRAY_DEBUG
-              << "ArrayHashTime,"
+             << "ArrayHashTime,"
 #endif
-              << "QueryCexCacheHits "
-              << ") VALUES ( "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
-              << "?, "
+             << "QueryCexCacheHits "
+             << ") VALUES ( "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
+             << "?, "
 #ifdef KLEE_ARRAY_DEBUG
-              << "?, "
+             << "?, "
 #endif
-              << "? "
-              << ")";
-    if(sqlite3_prepare_v2(statsFile, insert.str().c_str(), -1, &insertStmt, 0)) {
-        klee_error("Cannot create prepared statement! %s", sqlite3_errmsg(statsFile));
-        return;
-    }
-    sqlite3_exec(statsFile, "PRAGMA synchronous = OFF", NULL, NULL, &zErrMsg);
-    sqlite3_exec(statsFile, "BEGIN TRANSACTION", NULL, NULL, &zErrMsg);
+             << "? "
+             << ")";
+  if(sqlite3_prepare_v2(statsFile, insert.str().c_str(), -1, &insertStmt, nullptr) != SQLITE_OK) {
+      klee_error("Cannot create prepared statement! %s", sqlite3_errmsg(statsFile));
+      return;
+  }
+  sqlite3_exec(statsFile, "PRAGMA synchronous = OFF", nullptr, nullptr, nullptr);
+  sqlite3_exec(statsFile, "BEGIN TRANSACTION", nullptr, nullptr, nullptr);
 }
 
 time::Span StatsTracker::elapsed() {
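
Note: writeStatsHeader() and writeStatsLine() follow the standard SQLite C API lifecycle — prepare the INSERT once, bind/step/reset it per row, and batch rows between BEGIN/END TRANSACTION. A minimal sketch of the same pattern using Python's sqlite3 module, with a made-up two-column schema for illustration only:

    import sqlite3

    # Autocommit mode so BEGIN/END TRANSACTION can be issued manually,
    # mirroring the sqlite3_exec() calls above.
    conn = sqlite3.connect("example.stats", isolation_level=None)
    conn.execute("CREATE TABLE IF NOT EXISTS stats (Instructions INTEGER, WallTime REAL)")
    conn.execute("BEGIN TRANSACTION")
    for i in range(5000):
        # Python caches the compiled statement internally, which corresponds
        # to the single sqlite3_prepare_v2() plus per-row bind/step/reset.
        conn.execute("INSERT INTO stats (Instructions, WallTime) VALUES (?, ?)",
                     (i, i * 0.001))
        if (i + 1) % 1000 == 0:  # commit in batches, like -stats-commit-after
            conn.execute("END TRANSACTION")
            conn.execute("BEGIN TRANSACTION")
    conn.execute("END TRANSACTION")
    conn.close()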
@@ -506,37 +510,41 @@ time::Span StatsTracker::elapsed() {
 }
 
 void StatsTracker::writeStatsLine() {
-             sqlite3_bind_int64(insertStmt, 1, stats::instructions);
-             sqlite3_bind_int64(insertStmt, 2, fullBranches);
-             sqlite3_bind_int64(insertStmt, 3, partialBranches);
-             sqlite3_bind_int64(insertStmt, 4, numBranches);
-             sqlite3_bind_int64(insertStmt, 5, time::getUserTime().toMicroseconds());
-             sqlite3_bind_int64(insertStmt, 6, executor.states.size());
-             sqlite3_bind_int64(insertStmt, 7, util::GetTotalMallocUsage() + executor.memory->getUsedDeterministicSize());
-             sqlite3_bind_int64(insertStmt, 8, stats::queries);
-             sqlite3_bind_int64(insertStmt, 9, stats::queryConstructs);
-             sqlite3_bind_int64(insertStmt, 10, 0);  // was numObjects
-             sqlite3_bind_int64(insertStmt, 11, elapsed().toMicroseconds());
-             sqlite3_bind_int64(insertStmt, 12, stats::coveredInstructions);
-             sqlite3_bind_int64(insertStmt, 13, stats::uncoveredInstructions);
-             sqlite3_bind_int64(insertStmt, 14, stats::queryTime);
-             sqlite3_bind_int64(insertStmt, 15, stats::solverTime);
-             sqlite3_bind_int64(insertStmt, 16, stats::cexCacheTime);
-             sqlite3_bind_int64(insertStmt, 17, stats::forkTime);
-             sqlite3_bind_int64(insertStmt, 18, stats::resolveTime);
-             sqlite3_bind_int64(insertStmt, 19, stats::queryCexCacheMisses);
-             sqlite3_bind_int64(insertStmt, 20, stats::queryCexCacheHits);
+  sqlite3_bind_int64(insertStmt, 1, stats::instructions);
+  sqlite3_bind_int64(insertStmt, 2, fullBranches);
+  sqlite3_bind_int64(insertStmt, 3, partialBranches);
+  sqlite3_bind_int64(insertStmt, 4, numBranches);
+  sqlite3_bind_int64(insertStmt, 5, time::getUserTime().toMicroseconds());
+  sqlite3_bind_int64(insertStmt, 6, executor.states.size());
+  sqlite3_bind_int64(insertStmt, 7, util::GetTotalMallocUsage() + executor.memory->getUsedDeterministicSize());
+  sqlite3_bind_int64(insertStmt, 8, stats::queries);
+  sqlite3_bind_int64(insertStmt, 9, stats::queryConstructs);
+  sqlite3_bind_int64(insertStmt, 10, 0);  // was numObjects
+  sqlite3_bind_int64(insertStmt, 11, elapsed().toMicroseconds());
+  sqlite3_bind_int64(insertStmt, 12, stats::coveredInstructions);
+  sqlite3_bind_int64(insertStmt, 13, stats::uncoveredInstructions);
+  sqlite3_bind_int64(insertStmt, 14, stats::queryTime);
+  sqlite3_bind_int64(insertStmt, 15, stats::solverTime);
+  sqlite3_bind_int64(insertStmt, 16, stats::cexCacheTime);
+  sqlite3_bind_int64(insertStmt, 17, stats::forkTime);
+  sqlite3_bind_int64(insertStmt, 18, stats::resolveTime);
+  sqlite3_bind_int64(insertStmt, 19, stats::queryCexCacheMisses);
+  sqlite3_bind_int64(insertStmt, 20, stats::queryCexCacheHits);
 #ifdef KLEE_ARRAY_DEBUG
-             sqlite3_bind_int64(insertStmt, 21, stats::arrayHashTime);
+  sqlite3_bind_int64(insertStmt, 21, stats::arrayHashTime);
 #endif
-             sqlite3_step(insertStmt);
-             sqlite3_reset(insertStmt);
-             if(writeCount % commitEvery == 0 ) {
-               char *zErrMsg = 0;
-               sqlite3_exec(statsFile, "END TRANSACTION", NULL, NULL, &zErrMsg);
-               sqlite3_exec(statsFile, "BEGIN TRANSACTION", NULL, NULL, &zErrMsg);
-             }
-             writeCount++;
+  int errCode = sqlite3_step(insertStmt);
+  if(errCode != SQLITE_DONE) klee_error("Error %d in sqlite3_step function", errCode);
+
+  errCode = sqlite3_reset(insertStmt);
+  if(errCode != SQLITE_OK) klee_error("Error %d in sqlite3_reset function", errCode);
+
+  if(statsWriteCount == statsCommitEvery) {
+    sqlite3_exec(statsFile, "END TRANSACTION", nullptr, nullptr, nullptr);
+    sqlite3_exec(statsFile, "BEGIN TRANSACTION", nullptr, nullptr, nullptr);
+    statsWriteCount = 0;
+  }
+  statsWriteCount++;
 }
 
 void StatsTracker::updateStateStatistics(uint64_t addend) {
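
Note: with this change run.stats becomes an SQLite database rather than a text file, so a finished run can be inspected with any SQLite client. A minimal sketch (the klee-last path is an example; the column names come from the CREATE TABLE statement in writeStatsHeader()):

    import sqlite3

    # Example path; klee-last is the symlink KLEE leaves to its latest output dir.
    conn = sqlite3.connect("klee-last/run.stats")
    row = conn.execute(
        "SELECT Instructions, NumStates, WallTime FROM stats "
        "ORDER BY Instructions DESC LIMIT 1").fetchone()
    print(row)  # most recent sample; WallTime is stored in microseconds
    conn.close()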
diff --git a/lib/Core/StatsTracker.h b/lib/Core/StatsTracker.h
index f09f7638..f1dea77b 100644
--- a/lib/Core/StatsTracker.h
+++ b/lib/Core/StatsTracker.h
@@ -40,10 +40,10 @@ namespace klee {
     std::string objectFilename;
 
     std::unique_ptr<llvm::raw_fd_ostream> istatsFile;
-    sqlite3 *statsFile;
-    sqlite3_stmt *insertStmt;
-    unsigned writeCount;
-    unsigned commitEvery;
+    sqlite3 *statsFile = nullptr;
+    sqlite3_stmt *insertStmt = nullptr;
+    std::uint32_t statsCommitEvery;
+    std::uint32_t statsWriteCount = 0;
     time::Point startWallTime;
 
     unsigned numBranches;
diff --git a/tools/klee-stats/klee-stats b/tools/klee-stats/klee-stats
index 901cab55..38d8f750 100755
--- a/tools/klee-stats/klee-stats
+++ b/tools/klee-stats/klee-stats
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # -*- encoding: utf-8 -*-
 
 # ===-- klee-stats --------------------------------------------------------===##
@@ -12,24 +12,18 @@
 
 """Output statistics logged by Klee."""
 
-# use '/' to mean true division and '//' to mean floor division
-from __future__ import division
-from __future__ import print_function
-
 import os
-import re
 import sys
 import argparse
 import sqlite3
 
-from operator import itemgetter
 try:
-    from tabulate import TableFormat, Line, DataRow, tabulate
+    from tabulate import TableFormat, Line, DataRow, tabulate, _table_formats
 except:
     print('Error: Package "tabulate" required for table formatting. '
-          'Please install it using "pip" or your package manager.',
+          'Please install it using "pip" or your package manager. '
+          'You can still use -grafana and -to-csv without tabulate.',
           file=sys.stderr)
-    exit(1)
 
 Legend = [
     ('Instrs', 'number of executed instructions'),
@@ -63,35 +57,27 @@ def getLogFile(path):
     """Return the path to run.stats."""
     return os.path.join(path, 'run.stats')
 
-
 class LazyEvalList:
     """Store all the lines in run.stats and eval() when needed."""
     def __init__(self, fileName):
         # The first line in the records contains headers.
-      #  self.lines = lines[1:]
-      self.conn = sqlite3.connect(fileName);
+      self.conn = sqlite3.connect(fileName)
       self.c = self.conn.cursor()
       self.c.execute("SELECT * FROM stats ORDER BY Instructions DESC LIMIT 1")
-      self.lines = self.c.fetchone()
+      self.line = self.c.fetchone()
 
     def aggregateRecords(self):
-        memC = self.conn.cursor()
-        memC.execute("SELECT max(MallocUsage) / 1024 / 1024, avg(MallocUsage) / 1024 / 1024 from stats")
-        maxMem, avgMem = memC.fetchone()
-
-        stateC = self.conn.cursor()
-        stateC.execute("SELECT max(NumStates), avg(NumStates) from stats")
-        maxStates, avgStates = stateC.fetchone()
-        return (maxMem, avgMem, maxStates, avgStates)
-
-
-    def __getitem__(self, index):
-        return self.lines
-
-    def __len__(self):
-        return len(self.lines)
+      memC = self.conn.cursor()
+      memC.execute("SELECT max(MallocUsage) / 1024 / 1024, avg(MallocUsage) / 1024 / 1024 from stats")
+      maxMem, avgMem = memC.fetchone()
 
+      stateC = self.conn.cursor()
+      stateC.execute("SELECT max(NumStates), avg(NumStates) from stats")
+      maxStates, avgStates = stateC.fetchone()
+      return (maxMem, avgMem, maxStates, avgStates)
 
+    def getLastRecord(self):
+      return self.line
 
 def stripCommonPathPrefix(paths):
     paths = map(os.path.normpath, paths)
@@ -103,7 +89,6 @@ def stripCommonPathPrefix(paths):
             break
     return ['/'.join(p[i:]) for p in paths]
 
-
 def getKleeOutDirs(dirs):
     kleeOutDirs = []
     for dir in dirs:
@@ -117,7 +102,6 @@ def getKleeOutDirs(dirs):
                         kleeOutDirs.append(path)
     return kleeOutDirs
 
-
 def getLabels(pr):
     if pr == 'all':
         labels = ('Path', 'Instrs', 'Time(s)', 'ICov(%)', 'BCov(%)', 'ICount',
@@ -149,6 +133,7 @@ def getRow(record, stats, pr):
     if BTot == 0:
         BFull = BTot = 1
 
+    Ts, Tcex, Tf, Tr = [e / 1000000 for e in [Ts, Tcex, Tf, Tr]]  # convert from microseconds
     Mem = Mem / 1024 / 1024
     AvgQC = int(QCon / max(1, QTot))
 
@@ -177,69 +162,60 @@ def getRow(record, stats, pr):
 
 
 def grafana(dirs):
-  dr = getLogFile(dirs[0])
-  from flask import Flask, jsonify, request
-  from json import dumps
-  import datetime
-  import time
-  app = Flask(__name__)
-
-  def toepoch(date_text):
-    dt = datetime.datetime.strptime(date_text, "%Y-%m-%dT%H:%M:%S.%fZ")
-    epoch = (dt - datetime.datetime(1970, 1, 1)).total_seconds()
-    return epoch
-
-  @app.route('/')
-  def status():
-      return 'OK'
-
-  @app.route('/search', methods=['GET', 'POST'])
-  def search():
-      jsn = request.get_json()
-      conn = sqlite3.connect(dr);
-      cursor = conn.execute('SELECT * FROM stats')
-      names = [description[0] for description in cursor.description]
-      return jsonify(names)
-
-  @app.route('/query', methods=['POST'])
-  def query():
-      jsn = request.get_json()
-#      print(dumps(jsn))
-      interval = jsn["intervalMs"]
-      limit = jsn["maxDataPoints"]
-      frm = toepoch(jsn["range"]["from"])
-      to = toepoch(jsn["range"]["to"])
-      targets = [str(t["target"]) for t in jsn["targets"]]
-      startTime = os.path.getmtime(dr)
-      fromTime = frm - startTime if frm - startTime > 0 else 0
-      toTime =  to - startTime if to - startTime > fromTime else fromTime + 100
-#      print(startTime)
-      conn = sqlite3.connect(dr);
-      s = "SELECT WallTime + " + str(startTime) + ","  + ",".join(targets) + " FROM stats GROUP BY WallTime/3 LIMIT ?"
-      s = "SELECT WallTime + {startTime} , {fields} " \
-          + " FROM stats" \
-          + " WHERE WallTime >= {fromTime} AND WallTime <= {toTime}" \
-          + " GROUP BY WallTime/{intervalSeconds} LIMIT {limit}"
-      s = s.format(startTime=startTime,
-                   fields=",".join(["AVG( {0} )".format(t) for t in targets]),
-                   intervalSeconds=interval/1000,
-                   fromTime=fromTime,
-                   toTime=toTime,
-                   limit=limit)
-      cursor = conn.execute(s)
-      result = [ {"target": t, "datapoints": []} for t in targets ]
-      for line in cursor:
-          unixtimestamp = int(line[0] * 1000) #miliseconds
-          for field, datastream in zip(line[1:], result):
-              datastream["datapoints"].append([field, unixtimestamp])
-
-#      print(len(result[0]["datapoints"]))
-      ret = jsonify(result)
-#      print(result)
-      return ret
-
-  app.run()
-  return 0
+    dr = getLogFile(dirs[0])
+    from flask import Flask, jsonify, request
+    import datetime
+    app = Flask(__name__)
+
+    def toEpoch(date_text):
+        dt = datetime.datetime.strptime(date_text, "%Y-%m-%dT%H:%M:%S.%fZ")
+        epoch = (dt - datetime.datetime(1970, 1, 1)).total_seconds()
+        return epoch
+
+    @app.route('/')
+    def status():
+        return 'OK'
+
+    @app.route('/search', methods=['GET', 'POST'])
+    def search():
+        conn = sqlite3.connect(dr)
+        cursor = conn.execute('SELECT * FROM stats')
+        names = [description[0] for description in cursor.description]
+        return jsonify(names)
+
+    @app.route('/query', methods=['POST'])
+    def query():
+        jsn = request.get_json()
+        interval = jsn["intervalMs"]
+        limit = jsn["maxDataPoints"]
+        frm = toEpoch(jsn["range"]["from"])
+        to = toEpoch(jsn["range"]["to"])
+        targets = [str(t["target"]) for t in jsn["targets"]]
+        startTime = os.path.getmtime(dr)
+        fromTime = frm - startTime if frm - startTime > 0 else 0
+        toTime = to - startTime if to - startTime > fromTime else fromTime + 100
+        sqlTarget = ",".join(["AVG( {0} )".format(t) for t in targets if t.isalnum()])
+
+        conn = sqlite3.connect(dr)
+
+        s = "SELECT WallTime + ? , {fields} " \
+            + " FROM stats" \
+            + " WHERE WallTime >= ? AND WallTime <= ?" \
+            + " GROUP BY WallTime/? LIMIT ?"
+        s = s.format(fields=sqlTarget)  # column names can't be bound as prepared-statement parameters
+
+        cursor = conn.execute(s, (startTime, fromTime, toTime, interval / 1000, limit))
+        result = [{"target": t, "datapoints": []} for t in targets]
+        for line in cursor:
+            unixtimestamp = int(line[0]) * 1000  # milliseconds
+            for field, datastream in zip(line[1:], result):
+                datastream["datapoints"].append([field, unixtimestamp])
+
+        ret = jsonify(result)
+        return ret
+
+    app.run()
+    return 0
 
 def main():
     parser = argparse.ArgumentParser(
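
Note: the rewritten grafana() serves a Grafana JSON-datasource-style API: GET / as a health check, /search returning the column names, and POST /query returning time series. A hedged usage sketch, assuming the server is running on Flask's default port 5000 and that the requests package is available:

    import requests

    # Field names (intervalMs, maxDataPoints, range, targets) follow what the
    # /query handler above reads from the request body.
    resp = requests.post("http://127.0.0.1:5000/query", json={
        "intervalMs": 5000,
        "maxDataPoints": 100,
        "range": {"from": "2019-01-01T00:00:00.000Z",
                  "to": "2019-01-01T01:00:00.000Z"},
        "targets": [{"target": "NumStates"}],
    })
    print(resp.json())  # [{"target": "NumStates", "datapoints": [[value, ms], ...]}]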
@@ -250,16 +226,15 @@ def main():
     parser.add_argument('dir', nargs='+', help='klee output directory')
 
     parser.add_argument('--table-format',
-                        choices=['plain', 'simple', 'grid', 'pipe', 'orgtbl',
-                                 'rst', 'mediawiki', 'latex', 'klee'],
-                        dest='tableFormat', default='klee',
-                        help='Table format for the summary.')
+                          choices=['klee'] + list(_table_formats.keys()),
+                          dest='tableFormat', default='klee',
+                          help='Table format for the summary.')
     parser.add_argument('-to-csv',
                           action='store_true', dest='toCsv',
-                          help='Dump run.stats to STDOUT in CSV format')
+                          help='Output stats as comma-separated values (CSV)')
     parser.add_argument('-grafana',
                           action='store_true', dest='grafana',
-                          help='Start a graphan web server')
+                          help='Start a Grafana web server')
 
     # argument group for controlling output verboseness
     pControl = parser.add_mutually_exclusive_group(required=False)
@@ -314,18 +289,14 @@ def main():
         # write data                          
         for result in sql3_cursor:
           csv_out.writerow(result)
- 
         return
+
     if len(data) > 1:
         dirs = stripCommonPathPrefix(dirs)
     # attach the stripped path
     data = list(zip(dirs, data))
 
     labels = getLabels(pr)
-    # labels in the same order as in the run.stats file. used by --compare-by.
-    # current impl needs monotonic values, so only keep the ones making sense.
-    rawLabels = ('Instrs', '', '', '', '', '', '', 'Queries',
-                 '', '', 'Time', 'ICov', '', '', '', '', '', '')
 
     # build the main body of the table
     table = []
@@ -335,8 +306,8 @@ def main():
         row = [path]
         stats = records.aggregateRecords()
         totStats.append(stats)
-        row.extend(getRow(records[-1], stats, pr))
-        totRecords.append(records[-1])
+        row.extend(getRow(records.getLastRecord(), stats, pr))
+        totRecords.append(records.getLastRecord())
         table.append(row)
     # calculate the total
     totRecords = [sum(e) for e in zip(*totRecords)]