#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# ===-- klee-stats --------------------------------------------------------===##
#
#                     The KLEE Symbolic Virtual Machine
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
# ===----------------------------------------------------------------------===##
"""Output statistics logged by KLEE."""

import os
import sys
import argparse
import sqlite3
import collections
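
# Every KLEE output directory provides two files used by this tool: 'info'
# (plain text, e.g. KLEE's start time) and 'run.stats' (an SQLite database
# whose 'stats' table holds one row per statistics snapshot).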

# Mapping of: (column head, explanation, internal klee name)
# column head must start with a capital letter
Legend = [
    # core stats
    ('Instrs', 'number of executed instructions', "Instructions"),
    ('Time(s)', 'total wall time', "WallTime"),
    ('ICov(%)', 'instruction coverage in the LLVM bitcode', "ICov"),
    ('BCov(%)', 'conditional branch (br) coverage in the LLVM bitcode', "BCov"),
    ('ICount', 'total static instructions in the LLVM bitcode', "ICount"),
    ('TSolver(%)', 'relative time spent in the solver chain wrt wall time (incl. caches and constraint solver)', "RelSolverTime"),
    # extended stats
    # - code and coverage
    ('ICovered', 'total covered instructions in the LLVM bitcode', "CoveredInstructions"),
    ('IUncovered', 'total uncovered instructions in the LLVM bitcode', "UncoveredInstructions"),
    ('Branches', 'number of conditional branch (br) instructions in the LLVM bitcode', 'NumBranches'),
    ('FullBranches', 'number of fully-explored conditional branch (br) instructions in the LLVM bitcode', 'FullBranches'),
    ('PartialBranches', 'number of partially-explored conditional branch (br) instructions in the LLVM bitcode', 'PartialBranches'),
    ('ExternalCalls', 'number of external calls', 'ExternalCalls'),
    # - time
    ('TUser(s)', 'total user time', "UserTime"),
    ('TResolve(s)', 'time spent in object resolution', "ResolveTime"),
    ('TResolve(%)', 'relative time spent in object resolution wrt wall time', "RelResolveTime"),
    ('TCex(s)', 'time spent in the counterexample caching code (incl. constraint solver)', "CexCacheTime"),
    ('TCex(%)', 'relative time spent in the counterexample caching code wrt wall time (incl. constraint solver)', "RelCexCacheTime"),
    ('TQuery(s)', 'time spent in the constraint solver', "QueryTime"),
    ('TSolver(s)', 'time spent in the solver chain (incl. caches and constraint solver)', "SolverTime"),
    # - states
    ('States', 'number of created states', "States"),
    ('ActiveStates', 'number of currently active states (0 after successful termination)', "NumStates"),
    ('MaxActiveStates', 'maximum number of active states', "MaxStates"),
    ('AvgActiveStates', 'average number of active states', "AvgStates"),
    ('InhibitedForks', 'number of inhibited state forks due to e.g. memory pressure', "InhibitedForks"),
    # - constraint caching/solving
    ('Queries', 'number of queries issued to the solver chain', "Queries"),
    ('SolverQueries', 'number of queries issued to the constraint solver', "SolverQueries"),
    ('SolverQueryConstructs', 'number of query constructs for all queries sent to the constraint solver', "NumQueryConstructs"),
    ('AvgSolverQuerySize', 'average number of query constructs per query issued to the constraint solver', "AvgQC"),
    ('QCacheMisses', 'Query cache misses', "QueryCacheMisses"),
    ('QCacheHits', 'Query cache hits', "QueryCacheHits"),
    ('QCexCacheMisses', 'Counterexample cache misses', "QueryCexCacheMisses"),
    ('QCexCacheHits', 'Counterexample cache hits', "QueryCexCacheHits"),
    # - memory
    ('Allocations', 'number of allocated heap objects of the program under test', "Allocations"),
    ('Mem(MiB)', 'mebibytes of memory currently used', "MallocUsage"),
    ('MaxMem(MiB)', 'maximum memory usage', "MaxMem"),
    ('AvgMem(MiB)', 'average memory usage', "AvgMem"),
    # - branch types
    ('BrConditional', 'number of forks caused by symbolic branch conditions (br)', "BranchesConditional"),
    ('BrIndirect', 'number of forks caused by indirect branches (indirectbr) with symbolic address', "BranchesIndirect"),
    ('BrSwitch', 'number of forks caused by switch with symbolic value', "BranchesSwitch"),
    ('BrCall', 'number of forks caused by symbolic function pointers', "BranchesCall"),
    ('BrMemOp', 'number of forks caused by memory operation with symbolic address', "BranchesMemOp"),
    ('BrResolvePointer', 'number of forks caused by symbolic pointers', "BranchesResolvePointer"),
    ('BrAlloc', 'number of forks caused by symbolic allocation size', "BranchesAlloc"),
    ('BrRealloc', 'number of forks caused by symbolic reallocation size', "BranchesRealloc"),
    ('BrFree', 'number of forks caused by freeing a symbolic pointer', "BranchesFree"),
    ('BrGetVal', 'number of forks caused by user-invoked concretization while seeding', "BranchesGetVal"),
    # - termination classes
    ('TermExit', 'number of states that reached end of execution path', "TerminationExit"),
    ('TermEarly', 'number of early terminated states (e.g. due to memory pressure, state limit)', "TerminationEarly"),
    ('TermSolverErr', 'number of states terminated due to solver errors', "TerminationSolverError"),
    ('TermProgrErr', 'number of states terminated due to program errors (e.g. division by zero)', "TerminationProgramError"),
    ('TermUserErr', 'number of states terminated due to user errors (e.g. misuse of KLEE API)', "TerminationUserError"),
    ('TermExecErr', 'number of states terminated due to execution errors (e.g. unsupported intrinsics)', "TerminationExecutionError"),
    ('TermEarlyAlgo', 'number of state terminations required by algorithm (e.g. state merging or replaying)', "TerminationEarlyAlgorithm"),
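
# Legend drives the summary table: the internal KLEE statistic name (third
# tuple element) is renamed to the column head (first element), the entry
# order determines the column order of the table header, and the explanations
# are printed as the LEGEND section of --help.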
    ('TermEarlyUser', 'number of states terminated via klee_silent_exit()', "TerminationEarlyUser"),
    # - debugging
    ('TArrayHash(s)', 'time spent hashing arrays (if KLEE_ARRAY_DEBUG enabled, otherwise -1)', "ArrayHashTime"),
    ('TFork(s)', 'time spent forking states', "ForkTime"),
    ('TFork(%)', 'relative time spent forking states wrt wall time', "RelForkTime"),
    ('TUser(%)', 'relative user time wrt wall time', "RelUserTime"),
]


def getInfoFile(path):
    """Return the path to the 'info' file."""
    return os.path.join(path, 'info')


def getLogFile(path):
    """Return the path to 'run.stats'."""
    return os.path.join(path, 'run.stats')


class LazyEvalList:
    """Lazily query the run.stats SQLite database when values are needed."""
    def __init__(self, fileName):
        # Path to the run.stats SQLite database.
        self.filename = fileName

    def conn(self):
        return sqlite3.connect(self.filename)

    def aggregateRecords(self):
        try:
            memC = self.conn().execute("SELECT max(MallocUsage)*1.0 / 1024 / 1024, avg(MallocUsage) / 1024 / 1024 from stats")
            maxMem, avgMem = memC.fetchone()
        except sqlite3.OperationalError as e:
            maxMem, avgMem = None, None

        try:
            stateC = self.conn().execute("SELECT max(NumStates), avg(NumStates) from stats")
            maxStates, avgStates = stateC.fetchone()
        except sqlite3.OperationalError as e:
            maxStates, avgStates = None, None

        return {"MaxMem": maxMem, "AvgMem": avgMem, "MaxStates": maxStates, "AvgStates": avgStates}

    def getLastRecord(self):
        try:
            cursor = self.conn().execute("SELECT * FROM stats ORDER BY rowid DESC LIMIT 1")
            column_names = [description[0] for description in cursor.description]
            return dict(zip(column_names, cursor.fetchone()))
        except (sqlite3.OperationalError, TypeError) as e:
            return None


def stripCommonPathPrefix(paths):
    paths = map(os.path.normpath, paths)
    paths = [p.split('/') for p in paths]
    zipped = zip(*paths)
    i = 0
    for i, elts in enumerate(zipped):
        if len(set(elts)) > 1:
            break
    return ['/'.join(p[i:]) for p in paths]


def isValidKleeOutDir(dir):
    return os.path.exists(os.path.join(dir, 'info')) and os.path.exists(os.path.join(dir, 'run.stats'))


def getKleeOutDirs(dirs):
    kleeOutDirs = []
    for dir in dirs:
        if isValidKleeOutDir(dir):
            kleeOutDirs.append(dir)
        else:
            for root, subdirs, _ in os.walk(dir):
                for d in subdirs:
                    path = os.path.join(root, d)
                    if isValidKleeOutDir(path):
                        kleeOutDirs.append(path)
    return kleeOutDirs


def select_columns(record, pr):
    if pr == 'all':
        return record

    if pr == 'reltime':
        s_column = ['Path', 'WallTime', 'RelUserTime', 'RelSolverTime',
                    'RelCexCacheTime', 'RelForkTime', 'RelResolveTime']
    elif pr == 'abstime':
        s_column = ['Path', 'WallTime', 'UserTime', 'SolverTime',
                    'CexCacheTime', 'ForkTime', 'ResolveTime']
    elif pr == 'more':
        s_column = ['Path', 'Instructions', 'WallTime', 'ICov', 'BCov', 'ICount',
                    'RelSolverTime', 'NumStates', 'MaxStates', 'MallocUsage', 'MaxMem']
    else:
        s_column = ['Path', 'Instructions', 'WallTime', 'ICov', 'BCov', 'ICount', 'RelSolverTime']

    # filter record
    return {column: record[column] for column in s_column if column in record}
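
# add_artificial_columns() derives values that are not stored in run.stats
# directly. As a worked example for the branch-coverage formula used below,
#   BCov = 100 * (2*FullBranches + PartialBranches) / (2*NumBranches),
# FullBranches=3, PartialBranches=2 and NumBranches=10 give
# 100 * (2*3 + 2) / (2*10) = 40.0 (%).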
def add_artificial_columns(record):
    # Convert recorded times from microseconds to seconds
    for key in ["UserTime", "WallTime", "QueryTime", "SolverTime",
                "CexCacheTime", "ForkTime", "ResolveTime"]:
        if key not in record:
            continue
        record[key] /= 1000000

    # Convert memory from bytes to MiB
    if "MallocUsage" in record:
        record["MallocUsage"] /= 1024 * 1024

    # Calculate avg. query construct
    if "NumQueryConstructs" in record and "NumQueries" in record:
        record["AvgQC"] = int(record["NumQueryConstructs"] / max(1, record["NumQueries"]))

    # Calculate total number of instructions
    if "CoveredInstructions" in record and "UncoveredInstructions" in record:
        record["ICount"] = (record["CoveredInstructions"] + record["UncoveredInstructions"])

    # Calculate relative instruction coverage
    if "CoveredInstructions" in record and "ICount" in record:
        record["ICov"] = 100 * record["CoveredInstructions"] / record["ICount"]

    # Calculate branch coverage
    if "FullBranches" in record and "PartialBranches" in record and "NumBranches" in record:
        record["BCov"] = 100.0
        if record["NumBranches"] != 0:
            record["BCov"] *= (2 * record["FullBranches"] + record["PartialBranches"]) / (2 * record["NumBranches"])

    # Add relative times
    for key in ["SolverTime", "CexCacheTime", "ForkTime", "ResolveTime", "UserTime"]:
        if "WallTime" in record and key in record:
            record["Rel" + key] = 100 * record[key] / record["WallTime"]

    return record
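
# grafana() exposes the run.stats of the first output directory over HTTP so it
# can be plotted live: '/' answers a health check, '/search' returns the
# available column names, and '/query' returns [value, timestamp-in-ms]
# datapoints averaged over the requested interval. The endpoint layout is meant
# to match what Grafana's generic JSON datasource plugins expect (an
# assumption; consult the plugin documentation for the exact contract).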
def grafana(dirs, host_address, port):
    dr = getLogFile(dirs[0])
    from flask import Flask, jsonify, request
    import datetime
    app = Flask(__name__)

    import re
    from dateutil import parser

    def getKleeStartTime():
        with open(getInfoFile(dirs[0]), "r") as file:
            for line in file:
                m = re.match("Started: (.*)", line)
                if m:
                    dateString = m.group(1)
                    return parser.parse(dateString).timestamp()

        print("Error: Couldn't find KLEE's start time", file=sys.stderr)
        sys.exit()

    def toEpoch(date_text):
        dt = datetime.datetime.strptime(date_text, "%Y-%m-%dT%H:%M:%S.%fZ")
        epoch = (dt - datetime.datetime(1970, 1, 1)).total_seconds()
        return epoch

    @app.route('/')
    def status():
        return 'OK'

    @app.route('/search', methods=['GET', 'POST'])
    def search():
        conn = sqlite3.connect(dr)
        cursor = conn.execute('SELECT * FROM stats LIMIT 1')
        names = [description[0] for description in cursor.description]
        return jsonify(names)

    @app.route('/query', methods=['POST'])
    def query():
        jsn = request.get_json()
        interval = jsn["intervalMs"]
        limit = jsn["maxDataPoints"]
        frm = toEpoch(jsn["range"]["from"])
        to = toEpoch(jsn["range"]["to"])
        targets = [str(t["target"]) for t in jsn["targets"]]
        startTime = getKleeStartTime()
        fromTime = frm - startTime if frm - startTime > 0 else 0
        toTime = to - startTime if to - startTime > fromTime else fromTime + 100

        # convert to microseconds
        startTime, fromTime, toTime = startTime * 1000000, fromTime * 1000000, toTime * 1000000

        sqlTarget = ",".join(["AVG( {0} )".format(t) for t in targets if t.isalnum()])
        conn = sqlite3.connect(dr)
        s = "SELECT WallTime + ? , {fields} " \
            + " FROM stats" \
            + " WHERE WallTime >= ? AND WallTime <= ?" \
            + " GROUP BY WallTime/? LIMIT ?"
        s = s.format(fields=sqlTarget)  # can't use prepared statements for this one

        # All times need to be in microseconds, interval is in milliseconds
        cursor = conn.execute(s, (startTime, fromTime, toTime, interval * 1000, limit))
        result = [{"target": t, "datapoints": []} for t in targets]
        for line in cursor:
            unixtimestamp = int(line[0]) / 1000  # convert from microseconds to milliseconds
            for field, datastream in zip(line[1:], result):
                if "Time" in datastream["target"] and "Wall" not in datastream["target"] \
                        and "User" not in datastream["target"]:
                    val = (field / (line[0] - startTime)) * 100
                    datastream["datapoints"].append([val, unixtimestamp])
                else:
                    datastream["datapoints"].append([field, unixtimestamp])

        ret = jsonify(result)
        return ret

    app.run(host=host_address, port=port)
    return 0


def write_csv(data):
    import csv
    data = data[0]
    c = data.conn().cursor()
    sql3_cursor = c.execute("SELECT * FROM stats")
    csv_out = csv.writer(sys.stdout)
    # write header
    csv_out.writerow([d[0] for d in sql3_cursor.description])
    # write data
    for result in sql3_cursor:
        csv_out.writerow(result)


def rename_columns(row, name_mapping):
    """
    Rename the columns in a row based on the mapping.
    If a column name is not found in the mapping, the old name is kept.
    :param row:
    :param name_mapping:
    :return: updated row
    """
    keys = list(row.keys())
    for k in keys:
        new_key = name_mapping.get(k, k)
        if new_key == k:
            continue
        row[new_key] = row.pop(k)
    return row
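
# write_table() builds the summary: it takes the last row of each run.stats
# database, merges in the aggregated max/avg values, derives the artificial
# columns, filters and renames them according to Legend and the --print-* /
# --print-columns options, appends a 'Total' summary row when several runs are
# compared, and finally renders everything with tabulate.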
def write_table(args, data, dirs, pr):
    from tabulate import TableFormat, Line, DataRow, tabulate

    KleeTable = TableFormat(lineabove=Line("-", "-", "-", "-"),
                            linebelowheader=Line("-", "-", "-", "-"),
                            linebetweenrows=None,
                            linebelow=Line("-", "-", "-", "-"),
                            headerrow=DataRow("|", "|", "|"),
                            datarow=DataRow("|", "|", "|"),
                            padding=0,
                            with_header_hide=None)

    if len(data) > 1:
        dirs = stripCommonPathPrefix(dirs)
    # attach the stripped path
    data = list(zip(dirs, data))

    # build the main body of the table
    table = dict()
    for i, (path, records) in enumerate(data):
        stats = records.aggregateRecords()
        # Get raw row
        single_row = records.getLastRecord()
        if single_row is None:
            # empty or corrupt SQLite database
            single_row = {}
        single_row['Path'] = path
        single_row.update(stats)

        # Extend row with additional entries
        single_row = add_artificial_columns(single_row)
        single_row = select_columns(single_row, pr)

        for key in set.union(set(single_row.keys()), set(table.keys())):
            # Not all columns in row: add "None"
            if key not in single_row:
                table[key].append(None)
                continue

            # New column: extend if necessary for previous rows
            if key not in table:
                table[key] = [None] * i

            # add the value
            table[key].append(single_row[key])

    # Rename columns
    name_mapping = dict()
    for entry in Legend:
        name_mapping[entry[2]] = entry[0]
    table = rename_columns(table, name_mapping)

    # Apply column filter provided by user
    user_columns = []
    if args.columns is not None:
        user_columns = [c for c in map(lambda v: v.strip(), args.columns.split(',')) if c]
        column_names = list(table.keys())

        # error when user-provided column does not exist
        diff = set(user_columns) - set(column_names)
        if diff:
            print('Column(s) not found:', ', '.join(diff), file=sys.stderr)
            sys.exit(1)

        # error when no name given
        if not user_columns:
            print('No column name specified for --print-columns.', file=sys.stderr)
            sys.exit(1)

        # filter
        for k in column_names:
            if k not in user_columns:
                table.pop(k)

    # Add a summary row
    max_len = len(data)
    if max_len > 1 and args.tableFormat not in ['csv', 'readable-csv']:
        # calculate the total
        for k in table:
            if k == "Path":
                # Skip path
                continue

            # TODO: this is a bit hacky. In a nutshell, if the name of a column starts
            # or ends with a certain pattern, change the summary function.
            if k.startswith("Avg") or k.endswith("(%)"):
                total = sum([e for e in table[k] if e is not None]) / max_len
            elif k.startswith("Max"):
                total = max([e for e in table[k] if e is not None])
            else:
                total = sum([e for e in table[k] if e is not None])
            table[k].append(total)

        table['Path'].append('Total ({0})'.format(max_len))

    # Prepare the order of the header: order entries according to the order in
    # Legend and append the unknown entries at the end
    headers = user_columns
    if not headers:
        headers = ["Path"]
        available_headers = list(table.keys())
        for entry in Legend:
            l_name = entry[0]
            if l_name in available_headers:
                headers.append(l_name)
                available_headers.remove(l_name)
        available_headers.sort()
        headers += available_headers

    # Make sure we keep the correct order of the column entries
    final_table = collections.OrderedDict()
    for column in headers:
        final_table[column] = table[column]
    table = final_table

    # Output table
    # - KLEE standard format
    if args.tableFormat == 'klee':
        stream = tabulate(
            table, headers='keys',
            tablefmt=KleeTable,
            floatfmt='.{p}f'.format(p=2),
            numalign='right', stralign='center')
        # add a line separator before the total line
        if len(data) > 1:
            stream = stream.splitlines()
            stream.insert(-2, stream[-1])
            stream = '\n'.join(stream)
        print(stream)
    # - (readable) csv
    elif args.tableFormat in ['csv', 'readable-csv']:
        CsvTable = TableFormat(
            lineabove=None, linebelowheader=None,
            linebetweenrows=None, linebelow=None,
            headerrow=DataRow('', ',', ''),
            datarow=DataRow('', ',', ''),
            padding=0, with_header_hide=None)
        print(tabulate(
            table, headers='keys',
            tablefmt=CsvTable,
            floatfmt='.{p}f'.format(p=2),
            numalign='decimal' if args.tableFormat == 'readable-csv' else None,
            stralign='left' if args.tableFormat == 'readable-csv' else None))
    # - user-defined
    else:
        print(tabulate(
            table, headers='keys',
            tablefmt=args.tableFormat,
            floatfmt='.{p}f'.format(p=2),
            numalign='right', stralign='center'))
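
# Typical invocations (assuming KLEE's default output directory names such as
# klee-out-0; the flags below are the ones defined in main()):
#   klee-stats klee-out-0                    # core stats of a single run
#   klee-stats --print-all klee-out-*        # all columns for several runs
#   klee-stats --to-csv klee-out-0           # dump raw run.stats as CSV
#   klee-stats --grafana --grafana-port 5000 klee-out-0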
def main():
    tabulate_available = False
    epilog = ""

    try:
        from tabulate import tabulate, _table_formats
        epilog = 'LEGEND\n' + tabulate([(f[:2]) for f in Legend])
        tabulate_available = True
    except:
        pass

    parser = argparse.ArgumentParser(
        description='output statistics logged by klee',
        epilog=epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument('dir', nargs='+', help='KLEE output directory')

    if tabulate_available:
        tf_choices = ['klee', 'csv', 'readable-csv'] + list(_table_formats.keys())
        tf_help = 'Table format for the summary.'
        tf_default = 'klee'
    else:
        tf_choices = []
        tf_help = '(not available due to missing "tabulate" package)'
        tf_default = None
    parser.add_argument('--table-format',
                        choices=tf_choices, dest='tableFormat', default=tf_default,
                        help=tf_help)

    parser.add_argument('--to-csv',
                        action='store_true', dest='toCsv',
                        help='Output run.stats data as comma-separated values (CSV)')
    parser.add_argument('--grafana',
                        action='store_true', dest='grafana',
                        help='Start a Grafana web server')
    parser.add_argument('--grafana-host', dest='grafana_host',
                        help='IP address the Grafana web server should listen on',
                        default="127.0.0.1")
    parser.add_argument('--grafana-port', dest='grafana_port', type=int,
                        help='Port the Grafana web server should listen on',
                        default=5000)

    # argument group for controlling output verboseness
    pControl = parser.add_mutually_exclusive_group(required=False)
    pControl.add_argument('--print-all',
                          action='store_true', dest='pAll',
                          help='Print all available information.')
    pControl.add_argument('--print-rel-times',
                          action='store_true', dest='pRelTimes',
                          help='Print only values of measured times. '
                          'Values are relative to the measured system '
                          'execution time.')
    pControl.add_argument('--print-abs-times',
                          action='store_true', dest='pAbsTimes',
                          help='Print only values of measured times. '
                          'Absolute values (in seconds) are printed.')
    pControl.add_argument('--print-more',
                          action='store_true', dest='pMore',
                          help='Print extra information (needed when '
                          'monitoring an ongoing run).')
    pControl.add_argument('--print-columns',
                          type=str, dest='columns', default=None,
                          help='Comma-separated list of table columns, e.g. \'Path,Time(s),ICov(%%)\'.')

    args = parser.parse_args()

    if not tabulate_available and not (args.grafana or args.toCsv):
        print('Error: Package "tabulate" required for table formatting. '
              'Please install it using "pip" or your package manager. '
              'You can still use --grafana and --to-csv without tabulate.',
              file=sys.stderr)
        sys.exit(1)

    # get print controls
    pr = 'NONE'
    if args.pAll or args.columns:
        pr = 'all'
    elif args.pRelTimes:
        pr = 'reltime'
    elif args.pAbsTimes:
        pr = 'abstime'
    elif args.pMore:
        pr = 'more'

    dirs = getKleeOutDirs(args.dir)
    if len(dirs) == 0:
        print('No KLEE output directory found', file=sys.stderr)
        sys.exit(1)

    if args.grafana:
        return grafana(dirs, args.grafana_host, args.grafana_port)

    # Filter non-existing files, useful for star operations
    valid_log_files = [getLogFile(f) for f in dirs if os.path.isfile(getLogFile(f))]

    # read contents from every run.stats file into LazyEvalList
    data = [LazyEvalList(d) for d in valid_log_files]

    if args.toCsv:
        if len(valid_log_files) > 1:
            print('Error: --to-csv only supports a single input directory', file=sys.stderr)
            sys.exit(1)
        write_csv(data)
        return

    write_table(args, data, dirs, pr)


if __name__ == '__main__':
    main()