aboutsummaryrefslogtreecommitdiffhomepage
path: root/scripts
diff options
context:
space:
mode:
authorDaniel Dunbar <daniel@zuster.org>2009-05-21 04:36:41 +0000
committerDaniel Dunbar <daniel@zuster.org>2009-05-21 04:36:41 +0000
commit6f290d8f9e9d7faac295cb51fc96884a18f4ded4 (patch)
tree46e7d426abc0c9f06ac472ac6f7f9e661b5d78cb /scripts
parenta55960edd4dcd7535526de8d2277642522aa0209 (diff)
downloadklee-6f290d8f9e9d7faac295cb51fc96884a18f4ded4.tar.gz
Initial KLEE checkin.
- Lots more tweaks, documentation, and web page content is needed, but this should compile & work on OS X & Linux. git-svn-id: https://llvm.org/svn/llvm-project/klee/trunk@72205 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'scripts')
-rwxr-xr-xscripts/IStatsMerge.py175
-rwxr-xr-xscripts/IStatsSum.py129
-rwxr-xr-xscripts/PrintStats.py231
-rwxr-xr-xscripts/klee-control89
-rwxr-xr-xscripts/objdump41
5 files changed, 665 insertions, 0 deletions
diff --git a/scripts/IStatsMerge.py b/scripts/IStatsMerge.py
new file mode 100755
index 00000000..1bd61c3e
--- /dev/null
+++ b/scripts/IStatsMerge.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+
+from __future__ import division
+
+import sys, os
+
class MergeError(Exception):
    """Raised when the input runs are inconsistent and cannot be merged."""
+
def checkAssemblies(directories):
    """Return True iff every directory holds an identical assembly.ll.

    directories -- list of klee output directories; the first is the reference.
    Raises MergeError if any directory's assembly.ll cannot be read.
    """
    def read(d):
        try:
            # 'with' closes the handle even if .read() fails; the original
            # bare 'except:' also swallowed KeyboardInterrupt/SystemExit.
            with open(os.path.join(d, 'assembly.ll')) as f:
                return f.read()
        except (IOError, OSError):
            raise MergeError("unable to open assembly for: %r" % (d,))

    reference = read(directories[0])
    # Every remaining assembly must match the first byte-for-byte.
    for d in directories[1:]:
        if read(d) != reference:
            return False
    return True
+
def allEqual(l):
    """Return True iff all elements of l are equal (vacuously True if empty).

    The original built a throwaway list and raised IndexError on an empty
    input; 'not l or ...' short-circuits that case.
    """
    return not l or all(x == l[0] for x in l)
+
def merge(inputs, output, outputDir):
    """Merge several kcachegrind-style .istats line streams into one.

    inputs    -- iterables of lines, one per run; headers must agree
    output    -- file-like object the merged statistics are written to
    outputDir -- directory whose assembly.ll the output 'ob=' directive names
    Raises MergeError if the inputs are inconsistent with each other.

    Ported off py2-only constructs (i.next(), 'print >>', map-as-list); also
    fixes the misleading "cfn directive with calls" error message.
    """
    # Each input is tracked as [pushed-back line or None, line iterator].
    inputs = [[None, iter(i)] for i in inputs]

    def getLine(elt):
        # Return the pushed-back line if present, else the next stream line,
        # or None at end of stream.
        la, i = elt
        if la is None:
            try:
                return next(i)
            except StopIteration:
                return None
            except EnvironmentError:
                raise MergeError("unexpected IO error")
        else:
            elt[0] = None
            return la

    def getLines():
        # One line from every input, in input order.
        return [getLine(elt) for elt in inputs]

    def putback(ln, elt):
        assert elt[0] is None
        elt[0] = ln

    events = None

    # Read the header (everything up to the 'ob=' directive), checking that
    # all inputs agree on it.
    while 1:
        lns = getLines()
        ln = lns[0]
        if ln.startswith('ob='):
            if [l for l in lns if not l.startswith('ob=')]:
                raise MergeError("headers differ")
            # Point the object directive at the merged assembly we write out.
            output.write('ob=%s\n' % (os.path.join(outputDir, 'assembly.ll'),))
            break
        else:
            if ln.startswith('positions:'):
                if ln != 'positions: instr line\n':
                    raise MergeError("unexpected 'positions' directive")
            elif ln.startswith('events:'):
                events = ln[len('events: '):].strip().split(' ')
            if not allEqual(lns):
                raise MergeError("headers differ")
            output.write(ln)

    if events is None:
        raise MergeError('missing events directive')
    boolTypes = set(['Icov', 'Iuncov'])
    numEvents = len(events)

    def mergeStats(datas):
        # All inputs must describe the same (instr, line) pair with the same
        # number of event counters.
        if not allEqual([d[:2] for d in datas]):
            raise MergeError("instruction or line specifications differ")
        elif [d for d in datas if len(d) != numEvents + 2]:
            raise MergeError("statistics differ in event counts")

        result = [datas[0][0], datas[0][1]]
        for i, ev in enumerate(events):
            vals = [d[i + 2] for d in datas]
            if ev == 'Icov':
                result.append(max(vals))  # covered iff covered in any run
            elif ev == 'Iuncov':
                result.append(min(vals))  # uncovered iff uncovered in every run
            else:
                result.append(sum(vals))
        return result

    def readCalls():
        # Consume any 'cfl='/'cfn='/'calls=' call records following a
        # statistic line, merging records that target the same callee.
        results = {}
        for elt in inputs:
            while 1:
                ln = getLine(elt)
                if ln is not None and (ln.startswith('cfl=') or ln.startswith('cfn=')):
                    if ln.startswith('cfl='):
                        cfl = ln
                        cfn = getLine(elt)
                        if not cfn.startswith('cfn='):
                            raise MergeError("unexpected cfl directive without function")
                    else:
                        cfl = None
                        cfn = ln
                    target = getLine(elt)
                    if not target.startswith('calls='):
                        # (original message said "with calls" — inverted)
                        raise MergeError("unexpected cfn directive without calls")
                    stat = [int(f) for f in getLine(elt).strip().split(' ')]
                    existing = results.get(target)
                    if existing is None:
                        results[target] = [cfl, cfn, target, stat]
                    else:
                        if existing[0] != cfl or existing[1] != cfn:
                            raise MergeError("multiple call descriptions for a single target")
                        existing[3] = mergeStats([existing[3], stat])
                else:
                    putback(ln, elt)
                    break
        return results

    # Read the statistics, merging line by line.
    while 1:
        lns = getLines()
        ln = lns[0]
        if ln is None:
            if not allEqual(lns):
                raise MergeError("unexpected end of input")
            break
        elif ln.startswith('fn') or ln.startswith('fl'):
            if not allEqual(lns):
                raise MergeError("files differ")
            output.write(ln)
        else:
            # An actual statistic: columns are instr, line, then events.
            data = [[int(f) for f in l.strip().split(' ')] for l in lns]
            output.write(' '.join(map(str, mergeStats(data))) + '\n')

            # Emit any call records associated with this statistic.
            for cfl, cfn, calls, stat in readCalls().values():
                if cfl is not None:
                    output.write(cfl)
                output.write(cfn)
                output.write(calls)
                output.write(' '.join(map(str, stat)) + '\n')
+
def main(args):
    """Entry point: merge N run directories' run.istats into an output dir."""
    from optparse import OptionParser
    op = OptionParser("usage: %prog [options] directories+ output")
    opts, args = op.parse_args()

    # Need at least two input directories plus the output directory.
    # (The original popped first and raised IndexError on empty args.)
    if len(args) < 3:
        op.error("incorrect number of arguments")

    output = args.pop()
    directories = args

    print('Merging: ' + ', '.join(directories))
    print('Into: ' + output)

    if not checkAssemblies(directories):
        raise MergeError("executables differ")

    if not os.path.exists(output):
        os.mkdir(output)

    # Copy the (verified identical) assembly into the output directory.
    with open(os.path.join(directories[0], 'assembly.ll')) as f:
        assembly = f.read()
    with open(os.path.join(output, 'assembly.ll'), 'w') as f:
        f.write(assembly)

    inputs = [open(os.path.join(d, 'run.istats')) for d in directories]
    try:
        with open(os.path.join(output, 'run.istats'), 'w') as out:
            merge(inputs, out, output)
    finally:
        for f in inputs:
            f.close()

if __name__ == '__main__':
    main(sys.argv)
diff --git a/scripts/IStatsSum.py b/scripts/IStatsSum.py
new file mode 100755
index 00000000..ce680c7b
--- /dev/null
+++ b/scripts/IStatsSum.py
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+
+from __future__ import division
+
+import sys, os
+
def getSummary(input):
    """Parse one .istats file and return (events, summed).

    input  -- path of the .istats file
    events -- list of event names from the 'events:' directive
    summed -- per-event totals over every statistic line in the file
    Raises ValueError on malformed input.

    Fixes a latent NameError: the original called an undefined mergeStats()
    when a call target appeared twice; a plain element-wise sum is the
    correct aggregation for a summary.
    """
    inputs = [[None, iter(open(input))]]

    def getLine(elt):
        # Return the pushed-back line if present, else the next stream line,
        # or None at end of stream.
        la, i = elt
        if la is None:
            try:
                return next(i)
            except StopIteration:
                return None
            except EnvironmentError:
                raise ValueError("unexpected IO error")
        else:
            elt[0] = None
            return la

    def getLines():
        return [getLine(elt) for elt in inputs]

    def putback(ln, elt):
        assert elt[0] is None
        elt[0] = ln

    events = None

    # Read the header (up to the 'ob=' directive).
    while 1:
        lns = getLines()
        ln = lns[0]
        if ln.startswith('ob='):
            break
        else:
            if ln.startswith('positions:'):
                if ln != 'positions: instr line\n':
                    raise ValueError("unexpected 'positions' directive")
            elif ln.startswith('events:'):
                events = ln[len('events: '):].strip().split(' ')

    if events is None:
        raise ValueError('missing events directive')

    def sumStats(a, b):
        # Element-wise sum of two event-count vectors.
        return [x + y for x, y in zip(a, b)]

    def readCalls():
        # Consume 'cfl='/'cfn='/'calls=' records following a statistic line.
        results = {}
        for elt in inputs:
            while 1:
                ln = getLine(elt)
                if ln is not None and (ln.startswith('cfl=') or ln.startswith('cfn=')):
                    if ln.startswith('cfl='):
                        cfl = ln
                        cfn = getLine(elt)
                        if not cfn.startswith('cfn='):
                            raise ValueError("unexpected cfl directive without function")
                    else:
                        cfl = None
                        cfn = ln
                    target = getLine(elt)
                    if not target.startswith('calls='):
                        raise ValueError("unexpected cfn directive without calls")
                    stat = [int(f) for f in getLine(elt).strip().split(' ')]
                    existing = results.get(target)
                    if existing is None:
                        results[target] = [cfl, cfn, target, stat]
                    else:
                        if existing[0] != cfl or existing[1] != cfn:
                            raise ValueError("multiple call descriptions for a single target")
                        existing[3] = sumStats(existing[3], stat)
                else:
                    putback(ln, elt)
                    break
        return results

    summed = [0] * len(events)

    # Read the statistic lines, accumulating totals.
    while 1:
        lns = getLines()
        ln = lns[0]
        if ln is None:
            break
        elif ln.startswith('fn') or ln.startswith('fl'):
            pass
        elif ln.strip():
            # An actual statistic: columns are instr, line, then events.
            data = [int(f) for f in ln.strip().split(' ')]
            summed = [a + b for a, b in zip(data[2:], summed)]
            # Consume (and discard) any associated call records.
            readCalls()

    return events, summed
+
def main(args):
    """Entry point: print per-file event summaries and cross-file totals."""
    from optparse import OptionParser
    op = OptionParser("usage: %prog [options] file")
    opts, args = op.parse_args()

    # total maps event name -> [sum over files, number of files seen in]
    total = {}
    for i in args:
        events, summed = getSummary(i)
        for e, s in zip(events, summed):
            total[e] = total.get(e, [0, 0])
            total[e][0] += s
            total[e][1] += 1
        print('-- %s --' % (i,))
        for e, s in sorted(zip(events, summed)):
            print('%s: %s' % (e, s))

    print('-- totals --')
    table = []
    for e, (s, N) in total.items():
        # '//' keeps the original integer average (file imports true division)
        table.append((str(e), str(s), str(N), str(s // N)))
    # Size each column to its widest entry.
    widths = [max(len(x) for x in col) for col in zip(*table)]
    for (a, b, c, d) in table:
        print('%-*s: %*s (in %*s files, avg: %*s)' % (widths[0], a, widths[1], b, widths[2], c, widths[3], d))

if __name__ == '__main__':
    main(sys.argv)
diff --git a/scripts/PrintStats.py b/scripts/PrintStats.py
new file mode 100755
index 00000000..40994f87
--- /dev/null
+++ b/scripts/PrintStats.py
@@ -0,0 +1,231 @@
+#!/usr/bin/python
+
+from __future__ import division
+
+import sys
+import os
+
def getFile(dir):
    """Return the path of the run.stats file inside *dir*."""
    return os.path.join(dir, 'run.stats')
+
def getLastRecord(dir):
    """Return the last stats record (a tuple) from dir's run.stats file.

    Only the final 1KB of the file is scanned, so this stays cheap for
    long-running jobs. Raises IOError if no complete record is found.
    """
    with open(getFile(dir)) as f:
        try:
            # Seek near the end; files shorter than 1KB cannot seek back.
            f.seek(-1024, 2)
        except IOError:
            pass  # at beginning?
        tail = f.read(1024)
    for ln in tail.split('\n')[::-1]:
        ln = ln.strip()
        if ln.startswith('(') and ln.endswith(')'):
            if '(' in ln[1:]:
                sys.stderr.write('WARNING: corrupted line in file, out of disk space?\n')
                ln = ln[ln.index('(', 1):]
            # Records are tuple reprs written by klee itself; eval is used
            # only on this trusted local output.
            return eval(ln)
    raise IOError('no complete record found in %s' % (getFile(dir),))
+
+
class LazyEvalList:
    """A list of record strings that are parsed (via eval) only on access."""

    def __init__(self, lines):
        self.lines = lines

    def __getitem__(self, index):
        entry = self.lines[index]
        if isinstance(entry, str):
            # Cache the parsed value so each line is eval'd at most once.
            entry = self.lines[index] = eval(entry)
        return entry

    def __len__(self):
        return len(self.lines)
+
+
def getMatchedRecord(data, reference, key):
    """Binary-search *data* (whose index 0 is a header row) for the first
    record whose key exceeds key(reference); returns the last record when
    no key exceeds it."""
    target = key(reference)
    lo, hi = 1, len(data) - 1  # index 0 is the header row
    while lo < hi:
        mid = (lo + hi) // 2
        if key(data[mid]) <= target:
            lo = mid + 1
        else:
            hi = mid
    return data[lo]
+
+
def stripCommonPathPrefix(table, col):
    """Strip the longest common leading path from column *col* of every row
    of *table* (a list of tuples), rewriting the rows in place."""
    split_paths = [os.path.normpath(row[col]).split('/') for row in table]
    idx = 0
    # Locate the first path component at which the rows disagree.
    for idx, components in enumerate(zip(*split_paths)):
        if len(set(components)) > 1:
            break
    shortened = ['/'.join(elts[idx:]) for elts in split_paths]
    for i, row in enumerate(table):
        table[i] = row[:col] + (shortened[i],) + row[col + 1:]
+
+
def getKeyIndex(keyName, labels):
    """Return the index in *labels* matching *keyName*.

    Matching is case-insensitive and ignores '(#)', '(%)' and '(s)' unit
    suffixes. Raises ValueError if no label matches.
    (Original used the py2-only 'raise ValueError, msg' and backtick syntax.)
    """
    def normalizedKey(key):
        if key.endswith("(#)") or key.endswith("(%)") or key.endswith("(s)"):
            key = key[:-3]
        return key.lower()

    want = normalizedKey(keyName)
    for i, title in enumerate(labels):
        if normalizedKey(title) == want:
            return i
    raise ValueError('invalid keyName to sort by: %r' % (keyName,))
+
+
def sortTable(table, labels, keyName, ascending=False):
    """Sort *table* rows in place by the column named *keyName*.

    Descending by default. The ascending sort is reversed wholesale, so
    ties are reversed too — kept to preserve the tool's original behavior.
    (range() objects lost .sort()/.reverse() in py3; use an explicit list.)
    """
    keyIndex = getKeyIndex(keyName, labels)
    indices = list(range(len(table)))
    indices.sort(key=lambda n: table[n][keyIndex])
    if not ascending:
        indices.reverse()
    table[:] = [table[n] for n in indices]
+
+
def printTable(table):
    """Pretty-print *table* to stdout as an ASCII grid.

    Rows are tuples; a None row prints a horizontal separator. Short rows
    are padded with blank cells; the first column is left-justified, all
    others right-justified. (Ported off the py2 'print' statement and
    subscripted map() iterators; dropped the unused pprint import.)
    """
    def strOrNone(ob):
        if ob is None:
            return ''
        elif isinstance(ob, float):
            return '%.2f' % ob
        else:
            return str(ob)

    def printRow(row):
        if row is None:
            out.write(header + '\n')
        else:
            out.write('|')
            for j, elt in enumerate(row):
                if j:
                    out.write(' %*s |' % (widths[j], elt))
                else:
                    out.write(' %-*s |' % (widths[j], elt))
            out.write('\n')

    maxLen = max([len(r) for r in table if r])
    for i, row in enumerate(table):
        if row:
            table[i] = row + (None,) * (maxLen - len(row))
    table = [row and [strOrNone(x) for x in row] or None for row in table]
    tableLens = [[len(x) for x in row] for row in table if row]
    # Each column is as wide as its widest cell.
    widths = [max(col) for col in zip(*tableLens)]
    out = sys.stdout
    header = '-' * (sum(widths) + maxLen * 3 + 1)
    for row in table:
        printRow(row)
+
+
def main(args):
    """Entry point: collect klee run.stats records and print a summary table."""
    from optparse import OptionParser
    op = OptionParser(usage="usage: %prog [options] directories*",
                      epilog=
                      "LEGEND "
                      "------ "
                      "Instrs: Number of executed instructions "
                      "Time: Total wall time (s) "
                      "ICov: Instruction coverage in the LLVM bitcode (%) "
                      "BCov: Branch coverage in the LLVM bitcode (%) "
                      "ICount: Total static instructions in the LLVM bitcode "
                      "Solver: Time spent in the constraint solver (%) "
                      "States: Number of currently active states "
                      "Mem: Megabytes of memory currently used "
                      "Queries: Number of queries issued to STP "
                      "AvgQC: Average number of query constructs per query "
                      "Tcex: Time spent in the counterexample caching code (%) "
                      "Tfork: Time spent forking (%) ")

    op.add_option('', '--print-more', dest='printMore',
                  action='store_true', default=False,
                  help='Print extra information (needed when monitoring an ongoing run).')
    op.add_option('', '--print-all', dest='printAll',
                  action='store_true', default=False,
                  help='Print all available information.')
    op.add_option('', '--sort-by', dest='sortBy',
                  help='key value to sort by, e.g. --sort-by=Instrs')
    op.add_option('', '--ascending', dest='ascending',
                  action='store_true', default=False,
                  help='sort in ascending order (default is descending)')
    op.add_option('', '--compare-by', dest='compBy',
                  help="key value on which to compare runs to the reference one (which is the first one). E.g., --compare-by=Instrs shows how each run compares to the reference run after executing the same number of instructions as the reference run. If a run hasn't executed as many instructions as the reference one, we simply print the statistics at the end of that run.")
    opts, dirs = op.parse_args()

    # Accept either klee output directories directly, or parent directories
    # whose subtrees are searched for them (identified by an 'info' file).
    actualDirs = []
    for dir in dirs:
        if os.path.exists(os.path.join(dir, 'info')):
            actualDirs.append(dir)
        else:
            # renamed from 'dirs' to avoid shadowing the outer loop's list
            for root, subdirs, _ in os.walk(dir):
                for d in subdirs:
                    p = os.path.join(root, d)
                    if os.path.exists(os.path.join(p, 'info')):
                        actualDirs.append(p)
    dirs = actualDirs

    summary = []

    if opts.printAll:
        labels = ('Path', 'Instrs', 'Time(s)', 'ICov(%)', 'BCov(%)', 'ICount', 'Solver(%)', 'States', 'Mem(MB)', 'Queries', 'AvgQC', 'Tcex(%)', 'Tfork(%)')
    elif opts.printMore:
        labels = ('Path', 'Instrs', 'Time(s)', 'ICov(%)', 'BCov(%)', 'ICount', 'Solver(%)', 'States', 'Mem(MB)')
    else:
        labels = ('Path', 'Instrs', 'Time(s)', 'ICov(%)', 'BCov(%)', 'ICount', 'Solver(%)')

    def addRecord(Path, rec):
        # Unpack one raw stats record and append a formatted row to 'table'.
        (I, BFull, BPart, BTot, T, St, Mem, QTot, QCon,
         NObjs, Treal, SCov, SUnc, QT, Ts, Tcex, Tf) = rec
        Mem = Mem / 1024. / 1024.
        AvgQC = int(QCon / max(1, QTot))
        if opts.printAll:
            table.append((Path, I, Treal, 100. * SCov / (SCov + SUnc), 100. * (2 * BFull + BPart) / (2. * BTot),
                          SCov + SUnc, 100. * Ts / Treal, St, Mem, QTot, AvgQC, 100. * Tcex / Treal, 100. * Tf / Treal))
        elif opts.printMore:
            table.append((Path, I, Treal, 100. * SCov / (SCov + SUnc), 100. * (2 * BFull + BPart) / (2. * BTot),
                          SCov + SUnc, 100. * Ts / Treal, St, Mem))
        else:
            table.append((Path, I, Treal, 100. * SCov / (SCov + SUnc), 100. * (2 * BFull + BPart) / (2. * BTot),
                          SCov + SUnc, 100. * Ts / Treal))

    def addRow(Path, data):
        # Pad short records to 17 fields, emit the row, accumulate the total.
        data = tuple(data[:17]) + (None,) * (17 - len(data))
        addRecord(Path, data)
        if not summary:
            summary[:] = list(data)
        else:
            summary[:] = [(a + b) for a, b in zip(summary, data)]

    datas = [(dir, LazyEvalList(list(open(getFile(dir))))) for dir in dirs]
    reference = datas[0][1][-1]

    table = []

    for dir, data in datas:
        try:
            if opts.compBy:
                # Stats columns are offset by one relative to the label list
                # (the labels include a leading 'Path' column).
                addRow(dir, getMatchedRecord(data, reference, lambda f: f[getKeyIndex(opts.compBy, labels) - 1]))
            else:
                addRow(dir, data[len(data) - 1])  # getLastRecord(dir))
        except IOError:
            print('Unable to open: ', dir)
            continue

    stripCommonPathPrefix(table, 0)
    if opts.sortBy:
        sortTable(table, labels, opts.sortBy, opts.ascending)
    if not table:
        sys.exit(1)
    elif len(table) > 1:
        table.append(None)
        addRecord('Total (%d)' % (len(table) - 1,), summary)
    table[0:0] = [None, labels, None]
    table.append(None)
    printTable(table)


if __name__ == '__main__':
    main(sys.argv)
diff --git a/scripts/klee-control b/scripts/klee-control
new file mode 100755
index 00000000..50c1f82d
--- /dev/null
+++ b/scripts/klee-control
@@ -0,0 +1,89 @@
+#!/usr/bin/python
+
+import os, signal, popen2
+
def getPID(dir):
    """Return the PID recorded in dir/info, or None if no 'PID: ' line exists.

    (Original leaked the file handle and slurped all lines via readlines().)
    """
    with open(os.path.join(dir, 'info')) as f:
        for ln in f:
            if ln.startswith('PID: '):
                return int(ln[5:])
    return None
+
def execCmd(pid, gdbCmd, opts):
    """Attach gdb to *pid*, run one command, detach; return gdb's stdout.

    opts is accepted for signature compatibility but unused. Ported from
    the popen2 module (removed in Python 3) to subprocess.
    """
    import subprocess
    cmd = ("gdb " +
           "--batch " +
           "--pid=%d " +
           "--eval-command=\"%s\" " +
           "--eval-command=detach") % (pid, gdbCmd)
    # shell=True preserves the original popen2 semantics for this fixed,
    # internally-built command line.
    proc = subprocess.Popen(cmd, shell=True,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=True)
    proc.stdin.close()
    out = proc.stdout.read()
    proc.wait()
    return out
+
def main():
    """Entry point: inspect or control a running klee process.

    The single argument is either a PID or a klee output directory whose
    'info' file records the PID. Most actions attach gdb to the process;
    -i/-k send signals directly.
    """
    from optparse import OptionParser
    op = OptionParser("usage: %prog <PID | test directory>")
    op.add_option('', '--backtrace', dest='backtrace',
                  action='store_true', default=False)
    op.add_option('-s', '--stop-forking', dest='stopForking',
                  action='store_true', default=False)
    op.add_option('-H', '--halt-execution', dest='haltExecution',
                  action='store_true', default=False)
    op.add_option('-d', '--dump-states', dest='dumpStates',
                  action='store_true', default=False)
    op.add_option('-t', '--dump-tree', dest='dumpTree',
                  action='store_true', default=False)
    op.add_option('-i', '--int', dest='int',
                  action='store_true', default=False)
    op.add_option('-k', '--kill', dest='kill',
                  action='store_true', default=False)
    op.add_option('', '--print-pid', dest='printPid',
                  action='store_true', default=False)
    op.add_option('', '--print-ticks', dest='printTicks',
                  action='store_true', default=False)
    opts, args = op.parse_args()

    if len(args) != 1:
        op.error("invalid arguments")

    # First try the argument as a literal PID...
    try:
        pid = int(args[0])
    except ValueError:
        pid = None

    # ...then as a test directory containing an 'info' file.
    if pid is None:
        try:
            pid = getPID(args[0])
        except (IOError, OSError, ValueError):
            pid = None

    if pid is None:
        op.error("unable to determine PID (bad pid or test directory)")

    if opts.printPid:
        print(pid)
        return
    print('pid: %d' % pid)
    if opts.backtrace:
        execCmd(pid, 'bt', opts)
    if opts.dumpStates:
        execCmd(pid, "p dumpStates = 1", opts)
    if opts.dumpTree:
        execCmd(pid, "p dumpPTree = 1", opts)
    if opts.stopForking:
        execCmd(pid, 'p stop_forking()', opts)
    if opts.haltExecution:
        execCmd(pid, 'p halt_execution()', opts)
    if opts.printTicks:
        res = execCmd(pid, 'p timerTicks', opts)
        for ln in res.split('\n'):
            # gdb prints the value as e.g. '$1 = 42'
            if ln.startswith('$1') and '=' in ln:
                print(ln.split('=', 1)[1].strip())
    if opts.int:
        os.kill(pid, signal.SIGINT)
    if opts.kill:
        os.kill(pid, signal.SIGKILL)

if __name__ == '__main__':
    main()
+
diff --git a/scripts/objdump b/scripts/objdump
new file mode 100755
index 00000000..45fe6384
--- /dev/null
+++ b/scripts/objdump
@@ -0,0 +1,41 @@
+#!/usr/bin/python
+
+"""
+An objdump wrapper for use with klee & kcachegrind.
+
+This allows klee to output instruction level statistic information which
+kcachegrind can then display by intercepting kcachegrind's calls to objdump
+and returning the LLVM instructions in a format that it can parse.
+"""
+
+import os, sys
+
# Locate the real objdump to delegate to: prefer a saved copy named
# 'objdump.actual' next to this wrapper script; otherwise scan the system
# paths. Note the loop has no break, so when objdump exists in both
# locations the LAST candidate (/usr/local/bin) wins.
kActualObjdump = os.path.join(os.path.dirname(__file__),'objdump.actual')
if not os.path.exists(kActualObjdump):
    for path in ['/usr/bin','/usr/local/bin']:
        p = os.path.join(path,'objdump')
        if os.path.exists(p):
            kActualObjdump = p
+
def fakeObjdumpOutput(file, start, end):
    """Print lines [start, end) of *file* in an objdump-like 'addr: _ text'
    format, using 1-based line numbers (in hex) as instruction addresses.

    Each printed line keeps its trailing newline, so output lines are
    separated by a blank line — preserved from the original format.
    """
    print('Using fake objdump output:\n\n')

    with open(file) as f:
        lines = f.readlines()
    for i in range(max(0, start - 1), min(end - 1, len(lines))):
        print('%x: _ %s' % (i + 1, lines[i]))
+
def main(args):
    """Dispatch: emit fake disassembly for kcachegrind's exact objdump
    invocation, otherwise exec the real objdump with the same arguments."""
    # Exact pattern match of kcachegrind's calling sequence.
    if (len(args) >= 6 and
            args[1] == '-C' and
            args[2] == '-d' and
            args[3].startswith('--start-address=') and
            args[4].startswith('--stop-address=') and
            args[5].endswith('.ll')):
        # int(x, 0) parses 0x-hex or decimal address literals; the original
        # used eval(), which would execute arbitrary command-line input.
        start = int(args[3].split('=', 1)[1], 0)
        stop = int(args[4].split('=', 1)[1], 0)
        fakeObjdumpOutput(args[5], start, stop)
    else:
        os.execv(kActualObjdump, args)
        raise RuntimeError


if __name__ == '__main__':
    main(sys.argv)