Diffstat (limited to 'lib/Core')
-rw-r--r--   lib/Core/AddressSpace.cpp             334
-rw-r--r--   lib/Core/AddressSpace.h               131
-rw-r--r--   lib/Core/CallPathManager.cpp          103
-rw-r--r--   lib/Core/CallPathManager.h             83
-rw-r--r--   lib/Core/Common.cpp                   110
-rw-r--r--   lib/Core/Common.h                      56
-rw-r--r--   lib/Core/CoreStats.cpp                 29
-rw-r--r--   lib/Core/CoreStats.h                   53
-rw-r--r--   lib/Core/ExecutionState.cpp           417
-rw-r--r--   lib/Core/Executor.cpp                3260
-rw-r--r--   lib/Core/Executor.h                   445
-rw-r--r--   lib/Core/ExecutorTimers.cpp           220
-rw-r--r--   lib/Core/ExecutorUtil.cpp             144
-rw-r--r--   lib/Core/ExternalDispatcher.cpp       230
-rw-r--r--   lib/Core/ExternalDispatcher.h          50
-rw-r--r--   lib/Core/ImpliedValue.cpp             274
-rw-r--r--   lib/Core/ImpliedValue.h                38
-rwxr-xr-x   lib/Core/Makefile                      16
-rw-r--r--   lib/Core/Memory.cpp                   812
-rw-r--r--   lib/Core/Memory.h                     239
-rw-r--r--   lib/Core/MemoryManager.cpp             69
-rw-r--r--   lib/Core/MemoryManager.h               41
-rw-r--r--   lib/Core/ObjectHolder.h                33
-rw-r--r--   lib/Core/PTree.cpp                    103
-rw-r--r--   lib/Core/PTree.h                       53
-rw-r--r--   lib/Core/Searcher.cpp                 575
-rw-r--r--   lib/Core/Searcher.h                   279
-rw-r--r--   lib/Core/SeedInfo.cpp                 151
-rw-r--r--   lib/Core/SeedInfo.h                    48
-rw-r--r--   lib/Core/SpecialFunctionHandler.cpp   727
-rw-r--r--   lib/Core/SpecialFunctionHandler.h     106
-rw-r--r--   lib/Core/StatsTracker.cpp             814
-rw-r--r--   lib/Core/StatsTracker.h                93
-rw-r--r--   lib/Core/TimingSolver.cpp             147
-rw-r--r--   lib/Core/TimingSolver.h                70
-rw-r--r--   lib/Core/UserSearcher.cpp             175
-rw-r--r--   lib/Core/UserSearcher.h                25
37 files changed, 10553 insertions, 0 deletions
diff --git a/lib/Core/AddressSpace.cpp b/lib/Core/AddressSpace.cpp
new file mode 100644
index 00000000..fb032fd5
--- /dev/null
+++ b/lib/Core/AddressSpace.cpp
@@ -0,0 +1,334 @@
+//===-- AddressSpace.cpp --------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AddressSpace.h"
+#include "CoreStats.h"
+#include "Memory.h"
+#include "TimingSolver.h"
+
+#include "klee/Expr.h"
+#include "klee/TimerStatIncrementer.h"
+
+using namespace klee;
+
+///
+
+void AddressSpace::bindObject(const MemoryObject *mo, ObjectState *os) {
+ assert(os->copyOnWriteOwner==0 && "object already has owner");
+ os->copyOnWriteOwner = cowKey;
+ objects = objects.replace(std::make_pair(mo, os));
+}
+
+void AddressSpace::unbindObject(const MemoryObject *mo) {
+ objects = objects.remove(mo);
+}
+
+const ObjectState *AddressSpace::findObject(const MemoryObject *mo) const {
+ const MemoryMap::value_type *res = objects.lookup(mo);
+
+ return res ? res->second : 0;
+}
+
+ObjectState *AddressSpace::getWriteable(const MemoryObject *mo,
+ const ObjectState *os) {
+ assert(!os->readOnly);
+
+ if (cowKey==os->copyOnWriteOwner) {
+ return const_cast<ObjectState*>(os);
+ } else {
+ ObjectState *n = new ObjectState(*os);
+ n->copyOnWriteOwner = cowKey;
+ objects = objects.replace(std::make_pair(mo, n));
+ return n;
+ }
+}
+
+///
+
+bool AddressSpace::resolveOne(uint64_t addr64, ObjectPair &result) {
+ unsigned address = (unsigned) addr64;
+ MemoryObject hack(address);
+
+ if (const MemoryMap::value_type *res = objects.lookup_previous(&hack)) {
+ const MemoryObject *mo = res->first;
+ if ((mo->size==0 && address==mo->address) ||
+ (address - mo->address < mo->size)) {
+ result = *res;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool AddressSpace::resolveOne(ExecutionState &state,
+ TimingSolver *solver,
+ ref<Expr> address,
+ ObjectPair &result,
+ bool &success) {
+ if (address.isConstant()) {
+ success = resolveOne(address.getConstantValue(), result);
+ return true;
+ } else {
+ TimerStatIncrementer timer(stats::resolveTime);
+
+ // try cheap search, will succeed for any inbounds pointer
+
+ ref<Expr> cex(0);
+ if (!solver->getValue(state, address, cex))
+ return false;
+ unsigned example = (unsigned) cex.getConstantValue();
+ MemoryObject hack(example);
+ const MemoryMap::value_type *res = objects.lookup_previous(&hack);
+
+ if (res) {
+ const MemoryObject *mo = res->first;
+ if (example - mo->address < mo->size) {
+ result = *res;
+ success = true;
+ return true;
+ }
+ }
+
+ // didn't work, now we have to search
+
+ MemoryMap::iterator oi = objects.upper_bound(&hack);
+ MemoryMap::iterator begin = objects.begin();
+ MemoryMap::iterator end = objects.end();
+
+ MemoryMap::iterator start = oi;
+ while (oi!=begin) {
+ --oi;
+ const MemoryObject *mo = oi->first;
+
+ bool mayBeTrue;
+ if (!solver->mayBeTrue(state,
+ mo->getBoundsCheckPointer(address), mayBeTrue))
+ return false;
+ if (mayBeTrue) {
+ result = *oi;
+ success = true;
+ return true;
+ } else {
+ bool mustBeTrue;
+ if (!solver->mustBeTrue(state,
+ UgeExpr::create(address, mo->getBaseExpr()),
+ mustBeTrue))
+ return false;
+ if (mustBeTrue)
+ break;
+ }
+ }
+
+ // search forwards
+ for (oi=start; oi!=end; ++oi) {
+ const MemoryObject *mo = oi->first;
+
+ bool mustBeTrue;
+ if (!solver->mustBeTrue(state,
+ UltExpr::create(address, mo->getBaseExpr()),
+ mustBeTrue))
+ return false;
+ if (mustBeTrue) {
+ break;
+ } else {
+ bool mayBeTrue;
+
+ if (!solver->mayBeTrue(state,
+ mo->getBoundsCheckPointer(address),
+ mayBeTrue))
+ return false;
+ if (mayBeTrue) {
+ result = *oi;
+ success = true;
+ return true;
+ }
+ }
+ }
+
+ success = false;
+ return true;
+ }
+}
+
+bool AddressSpace::resolve(ExecutionState &state,
+ TimingSolver *solver,
+ ref<Expr> p,
+ ResolutionList &rl,
+ unsigned maxResolutions,
+ double timeout) {
+ if (p.isConstant()) {
+ ObjectPair res;
+ if (resolveOne(p.getConstantValue(), res))
+ rl.push_back(res);
+ return false;
+ } else {
+ TimerStatIncrementer timer(stats::resolveTime);
+ uint64_t timeout_us = (uint64_t) (timeout*1000000.);
+
+ // XXX in general this isn't exactly what we want... for
+ // a multiple resolution case (or for example, a \in {b,c,0})
+ // we want to find the first object, find a cex assuming
+ // not the first, find a cex assuming not the second...
+ // etc.
+
+ // XXX how do we smartly amortize the cost of checking to
+ // see if we need to keep searching up/down, in bad cases?
+ // maybe we don't care?
+
+ // XXX we really just need a smart place to start (although
+    // if it's a known solution then the code below is guaranteed
+ // to hit the fast path with exactly 2 queries). we could also
+ // just get this by inspection of the expr.
+
+ ref<Expr> cex(0);
+ if (!solver->getValue(state, p, cex))
+ return true;
+ unsigned example = (unsigned) cex.getConstantValue();
+ MemoryObject hack(example);
+
+ MemoryMap::iterator oi = objects.upper_bound(&hack);
+ MemoryMap::iterator begin = objects.begin();
+ MemoryMap::iterator end = objects.end();
+
+ MemoryMap::iterator start = oi;
+
+ // XXX in the common case we can save one query if we ask
+ // mustBeTrue before mayBeTrue for the first result. easy
+ // to add I just want to have a nice symbolic test case first.
+
+    // search backwards, starting one before the upper bound because that
+    // is the object that p *should* be within, which means we catch a
+    // write off the end with 4 queries (XXX can be better, no?)
+ while (oi!=begin) {
+ --oi;
+ const MemoryObject *mo = oi->first;
+ if (timeout_us && timeout_us < timer.check())
+ return true;
+
+      // XXX I think there is some query wastage here?
+ ref<Expr> inBounds = mo->getBoundsCheckPointer(p);
+ bool mayBeTrue;
+ if (!solver->mayBeTrue(state, inBounds, mayBeTrue))
+ return true;
+ if (mayBeTrue) {
+ rl.push_back(*oi);
+
+ // fast path check
+ unsigned size = rl.size();
+ if (size==1) {
+ bool mustBeTrue;
+ if (!solver->mustBeTrue(state, inBounds, mustBeTrue))
+ return true;
+ if (mustBeTrue)
+ return false;
+ } else if (size==maxResolutions) {
+ return true;
+ }
+ }
+
+ bool mustBeTrue;
+ if (!solver->mustBeTrue(state,
+ UgeExpr::create(p, mo->getBaseExpr()),
+ mustBeTrue))
+ return true;
+ if (mustBeTrue)
+ break;
+ }
+ // search forwards
+ for (oi=start; oi!=end; ++oi) {
+ const MemoryObject *mo = oi->first;
+ if (timeout_us && timeout_us < timer.check())
+ return true;
+
+ bool mustBeTrue;
+ if (!solver->mustBeTrue(state,
+ UltExpr::create(p, mo->getBaseExpr()),
+ mustBeTrue))
+ return true;
+ if (mustBeTrue)
+ break;
+
+      // XXX I think there is some query wastage here?
+ ref<Expr> inBounds = mo->getBoundsCheckPointer(p);
+ bool mayBeTrue;
+ if (!solver->mayBeTrue(state, inBounds, mayBeTrue))
+ return true;
+ if (mayBeTrue) {
+ rl.push_back(*oi);
+
+ // fast path check
+ unsigned size = rl.size();
+ if (size==1) {
+ bool mustBeTrue;
+ if (!solver->mustBeTrue(state, inBounds, mustBeTrue))
+ return true;
+ if (mustBeTrue)
+ return false;
+ } else if (size==maxResolutions) {
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+// These two functions are a pretty big hack so we can sort of pass memory
+// back and forth to externals. They work by abusing the concrete cache
+// store inside of the object states, which allows them to transparently
+// avoid screwing up symbolics (if a byte is symbolic then its concrete
+// cache byte isn't being used), but it is still just a hack.
+
+void AddressSpace::copyOutConcretes() {
+ for (MemoryMap::iterator it = objects.begin(), ie = objects.end();
+ it != ie; ++it) {
+ const MemoryObject *mo = it->first;
+
+ if (!mo->isUserSpecified) {
+ ObjectState *os = it->second;
+ uint8_t *address = (uint8_t*) (unsigned long) mo->address;
+
+ if (!os->readOnly)
+ memcpy(address, os->concreteStore, mo->size);
+ }
+ }
+}
+
+bool AddressSpace::copyInConcretes() {
+ for (MemoryMap::iterator it = objects.begin(), ie = objects.end();
+ it != ie; ++it) {
+ const MemoryObject *mo = it->first;
+
+ if (!mo->isUserSpecified) {
+ const ObjectState *os = it->second;
+ uint8_t *address = (uint8_t*) (unsigned long) mo->address;
+
+ if (memcmp(address, os->concreteStore, mo->size)!=0) {
+ if (os->readOnly) {
+ return false;
+ } else {
+ ObjectState *wos = getWriteable(mo, os);
+ memcpy(wos->concreteStore, address, mo->size);
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+/***/
+
+bool MemoryObjectLT::operator()(const MemoryObject *a, const MemoryObject *b) const {
+ return a->address < b->address;
+}
+
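
The ownership test in getWriteable() above is the heart of the copy-on-write scheme: a state may mutate an ObjectState it already owns (matching cowKey) and must clone it otherwise. A minimal standalone sketch of that check, not part of this diff and using made-up ToyObject/ToySpace names:

#include <map>
#include <memory>

struct ToyObject {
  unsigned copyOnWriteOwner = 0;  // epoch of the space that owns this copy
  int byte = 0;
};

struct ToySpace {
  unsigned cowKey = 1;  // epoch counter; copying a space bumps it
  std::map<int, std::shared_ptr<ToyObject>> objects;

  // getWriteable analogue: clone lazily unless this epoch already owns it.
  ToyObject *getWriteable(int id) {
    std::shared_ptr<ToyObject> &slot = objects[id];
    if (slot->copyOnWriteOwner != cowKey) {
      slot = std::make_shared<ToyObject>(*slot);  // private copy
      slot->copyOnWriteOwner = cowKey;            // claim ownership
    }
    return slot.get();
  }
};

int main() {
  ToySpace parent;
  parent.objects[7] = std::make_shared<ToyObject>();
  parent.objects[7]->copyOnWriteOwner = parent.cowKey;  // bindObject analogue
  ToySpace child = parent;          // shares the underlying object
  child.cowKey = 2;                 // child is a new epoch (sketch of the real bump)
  child.getWriteable(7)->byte = 1;  // forces a private copy in the child
  return parent.objects[7]->byte;   // parent still sees 0
}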
diff --git a/lib/Core/AddressSpace.h b/lib/Core/AddressSpace.h
new file mode 100644
index 00000000..a281714c
--- /dev/null
+++ b/lib/Core/AddressSpace.h
@@ -0,0 +1,131 @@
+//===-- AddressSpace.h ------------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef KLEE_ADDRESSSPACE_H
+#define KLEE_ADDRESSSPACE_H
+
+#include "ObjectHolder.h"
+
+#include "klee/Expr.h"
+#include "klee/Internal/ADT/ImmutableMap.h"
+
+namespace klee {
+ class ExecutionState;
+ class MemoryObject;
+ class ObjectState;
+ class TimingSolver;
+
+ template<class T> class ref;
+
+ typedef std::pair<const MemoryObject*, const ObjectState*> ObjectPair;
+ typedef std::vector<ObjectPair> ResolutionList;
+
+ /// Function object ordering MemoryObject's by address.
+ struct MemoryObjectLT {
+ bool operator()(const MemoryObject *a, const MemoryObject *b) const;
+ };
+
+ typedef ImmutableMap<const MemoryObject*, ObjectHolder, MemoryObjectLT> MemoryMap;
+
+ class AddressSpace {
+ private:
+ /// Epoch counter used to control ownership of objects.
+ mutable unsigned cowKey;
+
+ /// Unsupported, use copy constructor
+ AddressSpace &operator=(const AddressSpace&);
+
+ public:
+ /// The MemoryObject -> ObjectState map that constitutes the
+ /// address space.
+ ///
+ /// The set of objects where o->copyOnWriteOwner == cowKey are the
+ /// objects that we own.
+ ///
+ /// \invariant forall o in objects, o->copyOnWriteOwner <= cowKey
+ MemoryMap objects;
+
+ public:
+ AddressSpace() : cowKey(1) {}
+ AddressSpace(const AddressSpace &b) : cowKey(++b.cowKey), objects(b.objects) { }
+ ~AddressSpace() {}
+
+ /// Resolve address to an ObjectPair in result.
+ /// \return true iff an object was found.
+ bool resolveOne(uint64_t address,
+ ObjectPair &result);
+
+ /// Resolve address to an ObjectPair in result.
+ ///
+ /// \param state The state this address space is part of.
+ /// \param solver A solver used to determine possible
+ /// locations of the \a address.
+ /// \param address The address to search for.
+ /// \param[out] result An ObjectPair this address can resolve to
+ /// (when returning true).
+ /// \return true iff an object was found at \a address.
+ bool resolveOne(ExecutionState &state,
+ TimingSolver *solver,
+ ref<Expr> address,
+ ObjectPair &result,
+ bool &success);
+
+ /// Resolve address to a list of ObjectPairs it can point to. If
+ /// maxResolutions is non-zero then no more than that many pairs
+ /// will be returned.
+ ///
+ /// \return true iff the resolution is incomplete (maxResolutions
+ /// is non-zero and the search terminated early, or a query timed out).
+ bool resolve(ExecutionState &state,
+ TimingSolver *solver,
+ ref<Expr> address,
+ ResolutionList &rl,
+ unsigned maxResolutions=0,
+ double timeout=0.);
+
+ /***/
+
+ /// Add a binding to the address space.
+ void bindObject(const MemoryObject *mo, ObjectState *os);
+
+ /// Remove a binding from the address space.
+ void unbindObject(const MemoryObject *mo);
+
+ /// Lookup a binding from a MemoryObject.
+ const ObjectState *findObject(const MemoryObject *mo) const;
+
+ /// \brief Obtain an ObjectState suitable for writing.
+ ///
+ /// This returns a writeable object state, creating a new copy of
+ /// the given ObjectState if necessary. If the address space owns
+ /// the ObjectState then this routine effectively just strips the
+  /// const qualifier from it.
+ ///
+ /// \param mo The MemoryObject to get a writeable ObjectState for.
+ /// \param os The current binding of the MemoryObject.
+ /// \return A writeable ObjectState (\a os or a copy).
+ ObjectState *getWriteable(const MemoryObject *mo, const ObjectState *os);
+
+ /// Copy the concrete values of all managed ObjectStates into the
+ /// actual system memory location they were allocated at.
+ void copyOutConcretes();
+
+ /// Copy the concrete values of all managed ObjectStates back from
+ /// the actual system memory location they were allocated
+ /// at. ObjectStates will only be written to (and thus,
+ /// potentially copied) if the memory values are different from
+ /// the current concrete values.
+ ///
+ /// \retval true The copy succeeded.
+ /// \retval false The copy failed because a read-only object was modified.
+ bool copyInConcretes();
+ };
+} // End klee namespace
+
+#endif
diff --git a/lib/Core/CallPathManager.cpp b/lib/Core/CallPathManager.cpp
new file mode 100644
index 00000000..d0a61b31
--- /dev/null
+++ b/lib/Core/CallPathManager.cpp
@@ -0,0 +1,103 @@
+//===-- CallPathManager.cpp -----------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CallPathManager.h"
+
+#include "klee/Statistics.h"
+
+#include <map>
+#include <vector>
+#include "llvm/Function.h"
+#include "llvm/Support/Streams.h"
+
+using namespace llvm;
+using namespace klee;
+
+///
+
+CallPathNode::CallPathNode(CallPathNode *_parent,
+ Instruction *_callSite,
+ Function *_function)
+ : parent(_parent),
+ callSite(_callSite),
+ function(_function),
+ count(0) {
+}
+
+void CallPathNode::print() {
+ llvm::cerr << " (Function: " << this->function->getName() << ", "
+ << "Callsite: " << callSite << ", "
+ << "Count: " << this->count << ")";
+ if (parent && parent->callSite) {
+ llvm::cerr << ";\n";
+ parent->print();
+ }
+ else llvm::cerr << "\n";
+}
+
+///
+
+CallPathManager::CallPathManager() : root(0, 0, 0) {
+}
+
+CallPathManager::~CallPathManager() {
+ for (std::vector<CallPathNode*>::iterator it = paths.begin(),
+ ie = paths.end(); it != ie; ++it)
+ delete *it;
+}
+
+void CallPathManager::getSummaryStatistics(CallSiteSummaryTable &results) {
+ results.clear();
+
+ for (std::vector<CallPathNode*>::iterator it = paths.begin(),
+ ie = paths.end(); it != ie; ++it)
+ (*it)->summaryStatistics = (*it)->statistics;
+
+ // compute summary bottom up, while building result table
+ for (std::vector<CallPathNode*>::reverse_iterator it = paths.rbegin(),
+ ie = paths.rend(); it != ie; ++it) {
+ CallPathNode *cp = *it;
+ cp->parent->summaryStatistics += cp->summaryStatistics;
+
+ CallSiteInfo &csi = results[cp->callSite][cp->function];
+ csi.count += cp->count;
+ csi.statistics += cp->summaryStatistics;
+ }
+}
+
+
+CallPathNode *CallPathManager::computeCallPath(CallPathNode *parent,
+ Instruction *cs,
+ Function *f) {
+ for (CallPathNode *p=parent; p; p=p->parent)
+ if (cs==p->callSite && f==p->function)
+ return p;
+
+ CallPathNode *cp = new CallPathNode(parent, cs, f);
+ paths.push_back(cp);
+ return cp;
+}
+
+CallPathNode *CallPathManager::getCallPath(CallPathNode *parent,
+ Instruction *cs,
+ Function *f) {
+ std::pair<Instruction*,Function*> key(cs, f);
+ if (!parent)
+ parent = &root;
+
+ CallPathNode::children_ty::iterator it = parent->children.find(key);
+ if (it==parent->children.end()) {
+ CallPathNode *cp = computeCallPath(parent, cs, f);
+ parent->children.insert(std::make_pair(key, cp));
+ return cp;
+ } else {
+ return it->second;
+ }
+}
+
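
getCallPath() above keeps one CallPathNode per (call site, function) pair under each parent and collapses recursive calls onto the ancestor node that already represents that pair. A standalone sketch of the same lookup-then-create logic, not KLEE code; ToyNode and toyGetCallPath are invented names and ints stand in for llvm::Instruction*/llvm::Function*:

#include <map>
#include <memory>
#include <utility>
#include <vector>

struct ToyNode {
  ToyNode *parent = nullptr;
  int callSite = 0, function = 0;               // 0 acts as the root sentinel
  std::map<std::pair<int, int>, ToyNode *> children;
};

ToyNode *toyGetCallPath(ToyNode *parent, int callSite, int function,
                        std::vector<std::unique_ptr<ToyNode>> &pool) {
  std::pair<int, int> key(callSite, function);
  auto it = parent->children.find(key);
  if (it != parent->children.end())
    return it->second;                          // memoized child: reuse it
  for (ToyNode *p = parent; p; p = p->parent)   // recursion folds onto an ancestor
    if (p->callSite == callSite && p->function == function)
      return parent->children[key] = p;
  pool.push_back(std::make_unique<ToyNode>());  // otherwise create a new node
  ToyNode *n = pool.back().get();
  n->parent = parent; n->callSite = callSite; n->function = function;
  return parent->children[key] = n;
}

int main() {
  std::vector<std::unique_ptr<ToyNode>> pool;
  ToyNode root;
  ToyNode *a = toyGetCallPath(&root, 1, 10, pool);  // call site 1 -> function 10
  ToyNode *b = toyGetCallPath(&root, 1, 10, pool);  // same pair: same node
  return a == b ? 0 : 1;
}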
diff --git a/lib/Core/CallPathManager.h b/lib/Core/CallPathManager.h
new file mode 100644
index 00000000..2e16d72b
--- /dev/null
+++ b/lib/Core/CallPathManager.h
@@ -0,0 +1,83 @@
+//===-- CallPathManager.h ---------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __UTIL_CALLPATHMANAGER_H__
+#define __UTIL_CALLPATHMANAGER_H__
+
+#include "klee/Statistics.h"
+
+#include <map>
+#include <vector>
+
+namespace llvm {
+ class Instruction;
+ class Function;
+}
+
+namespace klee {
+ class StatisticRecord;
+
+ struct CallSiteInfo {
+ unsigned count;
+ StatisticRecord statistics;
+
+ public:
+ CallSiteInfo() : count(0) {}
+ };
+
+ typedef std::map<llvm::Instruction*,
+ std::map<llvm::Function*, CallSiteInfo> > CallSiteSummaryTable;
+
+ class CallPathNode {
+ friend class CallPathManager;
+
+ public:
+ typedef std::map<std::pair<llvm::Instruction*,
+ llvm::Function*>, CallPathNode*> children_ty;
+
+ // form list of (callSite,function) path
+ CallPathNode *parent;
+ llvm::Instruction *callSite;
+ llvm::Function *function;
+ children_ty children;
+
+ StatisticRecord statistics;
+ StatisticRecord summaryStatistics;
+ unsigned count;
+
+ public:
+ CallPathNode(CallPathNode *parent,
+ llvm::Instruction *callSite,
+ llvm::Function *function);
+
+ void print();
+ };
+
+ class CallPathManager {
+ CallPathNode root;
+ std::vector<CallPathNode*> paths;
+
+ private:
+ CallPathNode *computeCallPath(CallPathNode *parent,
+ llvm::Instruction *callSite,
+ llvm::Function *f);
+
+ public:
+ CallPathManager();
+ ~CallPathManager();
+
+ void getSummaryStatistics(CallSiteSummaryTable &result);
+
+ CallPathNode *getCallPath(CallPathNode *parent,
+ llvm::Instruction *callSite,
+ llvm::Function *f);
+ };
+}
+
+#endif
diff --git a/lib/Core/Common.cpp b/lib/Core/Common.cpp
new file mode 100644
index 00000000..479c4465
--- /dev/null
+++ b/lib/Core/Common.cpp
@@ -0,0 +1,110 @@
+//===-- Common.cpp --------------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Common.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <assert.h>
+#include <string.h>
+
+#include <set>
+
+using namespace klee;
+
+FILE* klee::klee_warning_file = NULL;
+FILE* klee::klee_message_file = NULL;
+
+
+/* Prints a message/warning.
+
+ If pfx is NULL, this is a regular message, and it's sent to
+ klee_message_file (messages.txt). Otherwise, it is sent to
+ klee_warning_file (warnings.txt).
+
+ Iff onlyToFile is false, the message is also printed on stderr.
+*/
+static void klee_vmessage(const char *pfx, bool onlyToFile, const char *msg, va_list ap) {
+ FILE *f = stderr;
+ if (!onlyToFile) {
+ fprintf(f, "KLEE: ");
+ if (pfx) fprintf(f, "%s: ", pfx);
+ vfprintf(f, msg, ap);
+ fprintf(f, "\n");
+ fflush(f);
+ }
+
+ if (pfx == NULL)
+ f = klee_message_file;
+ else f = klee_warning_file;
+
+ if (f) {
+ fprintf(f, "KLEE: ");
+ if (pfx) fprintf(f, "%s: ", pfx);
+ vfprintf(f, msg, ap);
+ fprintf(f, "\n");
+ fflush(f);
+ }
+}
+
+
+void klee::klee_message(const char *msg, ...) {
+ va_list ap;
+ va_start(ap, msg);
+ klee_vmessage(NULL, false, msg, ap);
+ va_end(ap);
+}
+
+/* Message to be written only to file */
+void klee::klee_message_to_file(const char *msg, ...) {
+ va_list ap;
+ va_start(ap, msg);
+ klee_vmessage(NULL, true, msg, ap);
+ va_end(ap);
+}
+
+void klee::klee_error(const char *msg, ...) {
+ va_list ap;
+ va_start(ap, msg);
+ klee_vmessage("ERROR", false, msg, ap);
+ va_end(ap);
+ exit(1);
+}
+
+void klee::klee_warning(const char *msg, ...) {
+ va_list ap;
+ va_start(ap, msg);
+ klee_vmessage("WARNING", false, msg, ap);
+ va_end(ap);
+}
+
+
+/* Prints a warning once per message. */
+void klee::klee_warning_once(const void *id, const char *msg, ...) {
+ static std::set< std::pair<const void*, const char*> > keys;
+ std::pair<const void*, const char*> key;
+
+
+ /* "calling external" messages contain the actual arguments with
+ which we called the external function, so we need to ignore them
+ when computing the key. */
+ if (strncmp(msg, "calling external", strlen("calling external")) != 0)
+ key = std::make_pair(id, msg);
+ else key = std::make_pair(id, "calling external");
+
+ if (!keys.count(key)) {
+ keys.insert(key);
+
+ va_list ap;
+ va_start(ap, msg);
+ klee_vmessage("WARNING", false, msg, ap);
+ va_end(ap);
+ }
+}
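
klee_warning_once() above deduplicates output by keying a set on the (id, format-string) pointer pair, with a special case so every "calling external" warning shares one key regardless of its arguments. A minimal sketch of the dedup idea, with a hypothetical firstTime() helper that is not part of this diff:

#include <cstdio>
#include <set>
#include <utility>

// Returns true only the first time a given (id, msg) pair is seen.
static bool firstTime(const void *id, const char *msg) {
  static std::set<std::pair<const void *, const char *>> seen;
  return seen.insert(std::make_pair(id, msg)).second;
}

int main() {
  const char *fmt = "unhandled case: %s";
  std::printf("%d\n", firstTime(nullptr, fmt));  // 1: would be printed
  std::printf("%d\n", firstTime(nullptr, fmt));  // 0: suppressed
}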
diff --git a/lib/Core/Common.h b/lib/Core/Common.h
new file mode 100644
index 00000000..ce05b536
--- /dev/null
+++ b/lib/Core/Common.h
@@ -0,0 +1,56 @@
+//===-- Common.h ------------------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __KLEE_COMMON_H__
+#define __KLEE_COMMON_H__
+
+#ifdef __CYGWIN__
+#ifndef WINDOWS
+#define WINDOWS
+#endif
+#endif
+
+#include <stdio.h>
+
+// XXX ugh
+namespace klee {
+ class Solver;
+
+ extern FILE* klee_warning_file;
+ extern FILE* klee_message_file;
+
+ /// Print "KLEE: ERROR" followed by the msg in printf format and a
+ /// newline on stderr and to warnings.txt, then exit with an error.
+ void klee_error(const char *msg, ...)
+ __attribute__ ((format (printf, 1, 2), noreturn));
+
+ /// Print "KLEE: " followed by the msg in printf format and a
+ /// newline on stderr and to messages.txt.
+ void klee_message(const char *msg, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+ /// Print "KLEE: " followed by the msg in printf format and a
+ /// newline to messages.txt.
+ void klee_message_to_file(const char *msg, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+ /// Print "KLEE: WARNING" followed by the msg in printf format and a
+ /// newline on stderr and to warnings.txt.
+ void klee_warning(const char *msg, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+ /// Print "KLEE: WARNING" followed by the msg in printf format and a
+ /// newline on stderr and to warnings.txt. However, the warning is only
+ /// printed once for each unique (id, msg) pair (as pointers).
+ void klee_warning_once(const void *id,
+ const char *msg, ...)
+ __attribute__ ((format (printf, 2, 3)));
+}
+
+#endif /* __KLEE_COMMON_H__ */
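
The __attribute__((format(printf, N, M))) annotations above tell GCC/Clang which parameter holds the format string and where the variadic arguments start, so mismatched format specifiers are flagged at compile time. A small self-contained illustration; my_log is a hypothetical function, not part of KLEE:

#include <cstdarg>
#include <cstdio>

void my_log(const char *msg, ...) __attribute__((format(printf, 1, 2)));

void my_log(const char *msg, ...) {
  va_list ap;
  va_start(ap, msg);
  std::vfprintf(stderr, msg, ap);    // forward to a v-style printf
  va_end(ap);
}

int main() {
  my_log("%s: %d\n", "states", 42);  // checked against the format string
  // my_log("%s\n", 42);             // would trigger a -Wformat warning
}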
diff --git a/lib/Core/CoreStats.cpp b/lib/Core/CoreStats.cpp
new file mode 100644
index 00000000..ca2ef1c9
--- /dev/null
+++ b/lib/Core/CoreStats.cpp
@@ -0,0 +1,29 @@
+//===-- CoreStats.cpp -----------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CoreStats.h"
+
+using namespace klee;
+
+Statistic stats::allocations("Allocations", "Alloc");
+Statistic stats::coveredInstructions("CoveredInstructions", "Icov");
+Statistic stats::falseBranches("FalseBranches", "Bf");
+Statistic stats::forkTime("ForkTime", "Ftime");
+Statistic stats::forks("Forks", "Forks");
+Statistic stats::instructionRealTime("InstructionRealTimes", "Ireal");
+Statistic stats::instructionTime("InstructionTimes", "Itime");
+Statistic stats::instructions("Instructions", "I");
+Statistic stats::minDistToReturn("MinDistToReturn", "Rdist");
+Statistic stats::minDistToUncovered("MinDistToUncovered", "UCdist");
+Statistic stats::reachableUncovered("ReachableUncovered", "IuncovReach");
+Statistic stats::resolveTime("ResolveTime", "Rtime");
+Statistic stats::solverTime("SolverTime", "Stime");
+Statistic stats::states("States", "States");
+Statistic stats::trueBranches("TrueBranches", "Bt");
+Statistic stats::uncoveredInstructions("UncoveredInstructions", "Iuncov");
diff --git a/lib/Core/CoreStats.h b/lib/Core/CoreStats.h
new file mode 100644
index 00000000..09845a89
--- /dev/null
+++ b/lib/Core/CoreStats.h
@@ -0,0 +1,53 @@
+//===-- CoreStats.h ---------------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef KLEE_CORESTATS_H
+#define KLEE_CORESTATS_H
+
+#include "klee/Statistic.h"
+
+namespace klee {
+namespace stats {
+
+ extern Statistic allocations;
+ extern Statistic resolveTime;
+ extern Statistic instructions;
+ extern Statistic instructionTime;
+ extern Statistic instructionRealTime;
+ extern Statistic coveredInstructions;
+ extern Statistic uncoveredInstructions;
+ extern Statistic trueBranches;
+ extern Statistic falseBranches;
+ extern Statistic forkTime;
+ extern Statistic solverTime;
+
+ /// The number of process forks.
+ extern Statistic forks;
+
+  /// Number of states; this is a "fake" statistic used by istats and
+  /// isn't normally up-to-date.
+ extern Statistic states;
+
+ /// Instruction level statistic for tracking number of reachable
+ /// uncovered instructions.
+ extern Statistic reachableUncovered;
+
+ /// Instruction level statistic tracking the minimum intraprocedural
+ /// distance to an uncovered instruction; this is only periodically
+ /// updated.
+ extern Statistic minDistToUncovered;
+
+ /// Instruction level statistic tracking the minimum intraprocedural
+ /// distance to a function return.
+ extern Statistic minDistToReturn;
+
+}
+}
+
+#endif
diff --git a/lib/Core/ExecutionState.cpp b/lib/Core/ExecutionState.cpp
new file mode 100644
index 00000000..dd6d4647
--- /dev/null
+++ b/lib/Core/ExecutionState.cpp
@@ -0,0 +1,417 @@
+//===-- ExecutionState.cpp ------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "klee/ExecutionState.h"
+
+#include "klee/Internal/Module/Cell.h"
+#include "klee/Internal/Module/InstructionInfoTable.h"
+#include "klee/Internal/Module/KInstruction.h"
+#include "klee/Internal/Module/KModule.h"
+
+#include "klee/Expr.h"
+
+#include "Memory.h"
+
+#include "llvm/Function.h"
+#include "llvm/Support/CommandLine.h"
+
+#include <iostream>
+#include <cassert>
+#include <map>
+#include <set>
+#include <stdarg.h>
+
+using namespace llvm;
+using namespace klee;
+
+namespace {
+ cl::opt<bool>
+ DebugLogStateMerge("debug-log-state-merge");
+}
+
+/***/
+
+StackFrame::StackFrame(KInstIterator _caller, KFunction *_kf)
+ : caller(_caller), kf(_kf), callPathNode(0),
+ minDistToUncoveredOnReturn(0), varargs(0) {
+ locals = new Cell[kf->numRegisters];
+}
+
+StackFrame::StackFrame(const StackFrame &s)
+ : caller(s.caller),
+ kf(s.kf),
+ callPathNode(s.callPathNode),
+ allocas(s.allocas),
+ minDistToUncoveredOnReturn(s.minDistToUncoveredOnReturn),
+ varargs(s.varargs) {
+ locals = new Cell[s.kf->numRegisters];
+ for (unsigned i=0; i<s.kf->numRegisters; i++)
+ locals[i] = s.locals[i];
+}
+
+StackFrame::~StackFrame() {
+ delete[] locals;
+}
+
+/***/
+
+ExecutionState::ExecutionState(KFunction *kf)
+ : fakeState(false),
+ underConstrained(false),
+ depth(0),
+ pc(kf->instructions),
+ prevPC(pc),
+ queryCost(0.),
+ weight(1),
+ instsSinceCovNew(0),
+ coveredNew(false),
+ forkDisabled(false),
+ ptreeNode(0) {
+ pushFrame(0, kf);
+}
+
+ExecutionState::ExecutionState(const std::vector<ref<Expr> > &assumptions)
+ : fakeState(true),
+ underConstrained(false),
+ constraints(assumptions),
+ queryCost(0.),
+ ptreeNode(0) {
+}
+
+ExecutionState::~ExecutionState() {
+ while (!stack.empty()) popFrame();
+}
+
+ExecutionState *ExecutionState::branch() {
+ depth++;
+
+ ExecutionState *falseState = new ExecutionState(*this);
+ falseState->coveredNew = false;
+ falseState->coveredLines.clear();
+
+ weight *= .5;
+ falseState->weight -= weight;
+
+ return falseState;
+}
+
+void ExecutionState::pushFrame(KInstIterator caller, KFunction *kf) {
+ stack.push_back(StackFrame(caller,kf));
+}
+
+void ExecutionState::popFrame() {
+ StackFrame &sf = stack.back();
+ for (std::vector<const MemoryObject*>::iterator it = sf.allocas.begin(),
+ ie = sf.allocas.end(); it != ie; ++it)
+ addressSpace.unbindObject(*it);
+ stack.pop_back();
+}
+
+///
+
+std::string ExecutionState::getFnAlias(std::string fn) {
+ std::map < std::string, std::string >::iterator it = fnAliases.find(fn);
+ if (it != fnAliases.end())
+ return it->second;
+ else return "";
+}
+
+void ExecutionState::addFnAlias(std::string old_fn, std::string new_fn) {
+ fnAliases[old_fn] = new_fn;
+}
+
+void ExecutionState::removeFnAlias(std::string fn) {
+ fnAliases.erase(fn);
+}
+
+/**/
+
+std::ostream &klee::operator<<(std::ostream &os, const MemoryMap &mm) {
+ os << "{";
+ MemoryMap::iterator it = mm.begin();
+ MemoryMap::iterator ie = mm.end();
+ if (it!=ie) {
+ os << "MO" << it->first->id << ":" << it->second;
+ for (++it; it!=ie; ++it)
+ os << ", MO" << it->first->id << ":" << it->second;
+ }
+ os << "}";
+ return os;
+}
+
+bool ExecutionState::merge(const ExecutionState &b) {
+ if (DebugLogStateMerge)
+ llvm::cerr << "-- attempting merge of A:"
+ << this << " with B:" << &b << "--\n";
+ if (pc != b.pc)
+ return false;
+
+ // XXX is it even possible for these to differ? does it matter? probably
+ // implies difference in object states?
+ if (symbolics!=b.symbolics)
+ return false;
+
+ {
+ std::vector<StackFrame>::const_iterator itA = stack.begin();
+ std::vector<StackFrame>::const_iterator itB = b.stack.begin();
+ while (itA!=stack.end() && itB!=b.stack.end()) {
+ // XXX vaargs?
+ if (itA->caller!=itB->caller || itA->kf!=itB->kf)
+ return false;
+ ++itA;
+ ++itB;
+ }
+ if (itA!=stack.end() || itB!=b.stack.end())
+ return false;
+ }
+
+ std::set< ref<Expr> > aConstraints(constraints.begin(), constraints.end());
+ std::set< ref<Expr> > bConstraints(b.constraints.begin(),
+ b.constraints.end());
+ std::set< ref<Expr> > commonConstraints, aSuffix, bSuffix;
+ std::set_intersection(aConstraints.begin(), aConstraints.end(),
+ bConstraints.begin(), bConstraints.end(),
+ std::inserter(commonConstraints, commonConstraints.begin()));
+ std::set_difference(aConstraints.begin(), aConstraints.end(),
+ commonConstraints.begin(), commonConstraints.end(),
+ std::inserter(aSuffix, aSuffix.end()));
+ std::set_difference(bConstraints.begin(), bConstraints.end(),
+ commonConstraints.begin(), commonConstraints.end(),
+ std::inserter(bSuffix, bSuffix.end()));
+ if (DebugLogStateMerge) {
+ llvm::cerr << "\tconstraint prefix: [";
+ for (std::set< ref<Expr> >::iterator it = commonConstraints.begin(),
+ ie = commonConstraints.end(); it != ie; ++it)
+ llvm::cerr << *it << ", ";
+ llvm::cerr << "]\n";
+ llvm::cerr << "\tA suffix: [";
+ for (std::set< ref<Expr> >::iterator it = aSuffix.begin(),
+ ie = aSuffix.end(); it != ie; ++it)
+ llvm::cerr << *it << ", ";
+ llvm::cerr << "]\n";
+ llvm::cerr << "\tB suffix: [";
+ for (std::set< ref<Expr> >::iterator it = bSuffix.begin(),
+ ie = bSuffix.end(); it != ie; ++it)
+ llvm::cerr << *it << ", ";
+ llvm::cerr << "]\n";
+ }
+
+ // We cannot merge if addresses would resolve differently in the
+ // states. This means:
+ //
+ // 1. Any objects created since the branch in either object must
+ // have been free'd.
+ //
+ // 2. We cannot have free'd any pre-existing object in one state
+ // and not the other
+
+ if (DebugLogStateMerge) {
+ llvm::cerr << "\tchecking object states\n";
+ llvm::cerr << "A: " << addressSpace.objects << "\n";
+ llvm::cerr << "B: " << b.addressSpace.objects << "\n";
+ }
+
+ std::set<const MemoryObject*> mutated;
+ MemoryMap::iterator ai = addressSpace.objects.begin();
+ MemoryMap::iterator bi = b.addressSpace.objects.begin();
+ MemoryMap::iterator ae = addressSpace.objects.end();
+ MemoryMap::iterator be = b.addressSpace.objects.end();
+ for (; ai!=ae && bi!=be; ++ai, ++bi) {
+ if (ai->first != bi->first) {
+ if (DebugLogStateMerge) {
+ if (ai->first < bi->first) {
+ llvm::cerr << "\t\tB misses binding for: " << ai->first->id << "\n";
+ } else {
+ llvm::cerr << "\t\tA misses binding for: " << bi->first->id << "\n";
+ }
+ }
+ return false;
+ }
+ if (ai->second != bi->second) {
+ if (DebugLogStateMerge)
+ llvm::cerr << "\t\tmutated: " << ai->first->id << "\n";
+ mutated.insert(ai->first);
+ }
+ }
+ if (ai!=ae || bi!=be) {
+ if (DebugLogStateMerge)
+ llvm::cerr << "\t\tmappings differ\n";
+ return false;
+ }
+
+ // merge stack
+
+ ref<Expr> inA(1, Expr::Bool), inB(1, Expr::Bool);
+ for (std::set< ref<Expr> >::iterator it = aSuffix.begin(),
+ ie = aSuffix.end(); it != ie; ++it)
+ inA = AndExpr::create(inA, *it);
+ for (std::set< ref<Expr> >::iterator it = bSuffix.begin(),
+ ie = bSuffix.end(); it != ie; ++it)
+ inB = AndExpr::create(inB, *it);
+
+ // XXX should we have a preference as to which predicate to use?
+ // it seems like it can make a difference, even though logically
+ // they must contradict each other and so inA => !inB
+
+ std::vector<StackFrame>::iterator itA = stack.begin();
+ std::vector<StackFrame>::const_iterator itB = b.stack.begin();
+ for (; itA!=stack.end(); ++itA, ++itB) {
+ StackFrame &af = *itA;
+ const StackFrame &bf = *itB;
+ for (unsigned i=0; i<af.kf->numRegisters; i++) {
+ ref<Expr> &av = af.locals[i].value;
+ const ref<Expr> &bv = bf.locals[i].value;
+ if (av.isNull() || bv.isNull()) {
+ // if one is null then by implication (we are at same pc)
+ // we cannot reuse this local, so just ignore
+ } else {
+ av = SelectExpr::create(inA, av, bv);
+ }
+ }
+ }
+
+ for (std::set<const MemoryObject*>::iterator it = mutated.begin(),
+ ie = mutated.end(); it != ie; ++it) {
+ const MemoryObject *mo = *it;
+ const ObjectState *os = addressSpace.findObject(mo);
+ const ObjectState *otherOS = b.addressSpace.findObject(mo);
+ assert(os && !os->readOnly &&
+ "objects mutated but not writable in merging state");
+ assert(otherOS);
+
+ ObjectState *wos = addressSpace.getWriteable(mo, os);
+ for (unsigned i=0; i<mo->size; i++) {
+ ref<Expr> av = wos->read8(i);
+ ref<Expr> bv = otherOS->read8(i);
+ wos->write(i, SelectExpr::create(inA, av, bv));
+ }
+ }
+
+ constraints = ConstraintManager();
+ for (std::set< ref<Expr> >::iterator it = commonConstraints.begin(),
+ ie = commonConstraints.end(); it != ie; ++it)
+ constraints.addConstraint(*it);
+ constraints.addConstraint(OrExpr::create(inA, inB));
+
+ return true;
+}
+
+/**/
+
+/*
+  Used for tainting: create a clone of os that we can revert to, with
+  the behavior that all constraints are preserved but writes are
+  discarded. When we revert, it will be at the same address.
+ */
+ObjectState *ExecutionState::cloneObject(ObjectState *os,
+ MemoryObject *mo) {
+ MemoryMap::iterator it = shadowObjects.find(mo);
+ if (it != shadowObjects.end())
+ assert(0 && "Cannot exist already!");
+
+ llvm::cerr << "DRE: Inserting a cloned object: " << mo << "\n";
+ shadowObjects = shadowObjects.replace(std::make_pair(mo, os));
+ os = new ObjectState(*os);
+ addressSpace.bindObject(mo, os);
+ return os;
+}
+
+/***/
+
+
+ExecutionTraceEvent::ExecutionTraceEvent(ExecutionState& state,
+ KInstruction* ki)
+ : consecutiveCount(1)
+{
+ file = ki->info->file;
+ line = ki->info->line;
+ funcName = state.stack.back().kf->function->getName();
+ stackDepth = state.stack.size();
+}
+
+bool ExecutionTraceEvent::ignoreMe() const {
+ // ignore all events occurring in certain pesky uclibc files:
+ if (file.find("libc/stdio/") != std::string::npos) {
+ return true;
+ }
+
+ return false;
+}
+
+void ExecutionTraceEvent::print(std::ostream &os) const {
+ os.width(stackDepth);
+ os << ' ';
+ printDetails(os);
+ os << ' ' << file << ':' << line << ':' << funcName;
+ if (consecutiveCount > 1)
+ os << " (" << consecutiveCount << "x)\n";
+ else
+ os << '\n';
+}
+
+
+bool ExecutionTraceEventEquals(ExecutionTraceEvent* e1, ExecutionTraceEvent* e2) {
+ // first see if their base class members are identical:
+ if (!((e1->file == e2->file) &&
+ (e1->line == e2->line) &&
+ (e1->funcName == e2->funcName)))
+ return false;
+
+ // fairly ugly, but i'm no OOP master, so this is the way i'm
+ // doing it for now ... lemme know if there's a cleaner way:
+ BranchTraceEvent* be1 = dynamic_cast<BranchTraceEvent*>(e1);
+ BranchTraceEvent* be2 = dynamic_cast<BranchTraceEvent*>(e2);
+ if (be1 && be2) {
+ return ((be1->trueTaken == be2->trueTaken) &&
+ (be1->canForkGoBothWays == be2->canForkGoBothWays));
+ }
+
+ // don't tolerate duplicates in anything else:
+ return false;
+}
+
+
+void BranchTraceEvent::printDetails(std::ostream &os) const {
+ os << "BRANCH " << (trueTaken ? "T" : "F") << ' ' <<
+ (canForkGoBothWays ? "2-way" : "1-way");
+}
+
+void ExecutionTraceManager::addEvent(ExecutionTraceEvent* evt) {
+ // don't trace anything before __user_main, except for global events
+ if (!hasSeenUserMain) {
+ if (evt->funcName == "__user_main") {
+ hasSeenUserMain = true;
+ }
+ else if (evt->funcName != "global_def") {
+ return;
+ }
+ }
+
+ // custom ignore events:
+ if (evt->ignoreMe())
+ return;
+
+ if (events.size() > 0) {
+ // compress consecutive duplicates:
+ ExecutionTraceEvent* last = events.back();
+ if (ExecutionTraceEventEquals(last, evt)) {
+ last->consecutiveCount++;
+ return;
+ }
+ }
+
+ events.push_back(evt);
+}
+
+void ExecutionTraceManager::printAllEvents(std::ostream &os) const {
+ for (unsigned i = 0; i != events.size(); ++i)
+ events[i]->print(os);
+}
+
+/***/
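
ExecutionState::merge() above splits the two states' constraints into a shared prefix plus per-state suffixes, keeps the prefix, adds the disjunction of the two suffix conjunctions, and rewrites differing locals and memory bytes as select(inA, a, b). A standalone sketch of just the constraint bookkeeping, illustrative only, with std::string standing in for ref<Expr>:

#include <algorithm>
#include <iostream>
#include <iterator>
#include <set>
#include <string>

int main() {
  std::set<std::string> a = {"x>0", "y==1", "z<5"};  // constraints of state A
  std::set<std::string> b = {"x>0", "y==2"};         // constraints of state B

  std::set<std::string> common, aSuffix, bSuffix;
  std::set_intersection(a.begin(), a.end(), b.begin(), b.end(),
                        std::inserter(common, common.begin()));
  std::set_difference(a.begin(), a.end(), common.begin(), common.end(),
                      std::inserter(aSuffix, aSuffix.begin()));
  std::set_difference(b.begin(), b.end(), common.begin(), common.end(),
                      std::inserter(bSuffix, bSuffix.begin()));

  // The merged state keeps `common` plus (AND(aSuffix) OR AND(bSuffix));
  // differing values become select(inA, valueA, valueB).
  std::cout << "common:";   for (const auto &c : common)  std::cout << ' ' << c;
  std::cout << "\nA-only:"; for (const auto &c : aSuffix) std::cout << ' ' << c;
  std::cout << "\nB-only:"; for (const auto &c : bSuffix) std::cout << ' ' << c;
  std::cout << '\n';
}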
diff --git a/lib/Core/Executor.cpp b/lib/Core/Executor.cpp
new file mode 100644
index 00000000..d3409908
--- /dev/null
+++ b/lib/Core/Executor.cpp
@@ -0,0 +1,3260 @@
+//===-- Executor.cpp ------------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Common.h"
+
+#include "Executor.h"
+
+#include "CoreStats.h"
+#include "ExternalDispatcher.h"
+#include "ImpliedValue.h"
+#include "Memory.h"
+#include "MemoryManager.h"
+#include "PTree.h"
+#include "Searcher.h"
+#include "SeedInfo.h"
+#include "SpecialFunctionHandler.h"
+#include "StatsTracker.h"
+#include "TimingSolver.h"
+#include "UserSearcher.h"
+#include "../Solver/SolverStats.h"
+
+#include "klee/ExecutionState.h"
+#include "klee/Expr.h"
+#include "klee/Interpreter.h"
+#include "klee/Machine.h"
+#include "klee/TimerStatIncrementer.h"
+#include "klee/util/Assignment.h"
+#include "klee/util/ExprPPrinter.h"
+#include "klee/util/ExprUtil.h"
+#include "klee/Config/config.h"
+#include "klee/Internal/ADT/BOut.h"
+#include "klee/Internal/ADT/RNG.h"
+#include "klee/Internal/Module/Cell.h"
+#include "klee/Internal/Module/InstructionInfoTable.h"
+#include "klee/Internal/Module/KInstruction.h"
+#include "klee/Internal/Module/KModule.h"
+#include "klee/Internal/Support/FloatEvaluation.h"
+#include "klee/Internal/System/Time.h"
+
+#include "llvm/Attributes.h"
+#include "llvm/BasicBlock.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/Module.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/System/Process.h"
+#include "llvm/Target/TargetData.h"
+
+#include <cassert>
+#include <algorithm>
+#include <iostream>
+#include <iomanip>
+#include <fstream>
+#include <sstream>
+#include <vector>
+#include <string>
+
+#include <sys/mman.h>
+
+#include <errno.h>
+#include <cxxabi.h>
+
+using namespace llvm;
+using namespace klee;
+
+// omg really hard to share cl opts across files ...
+bool WriteTraces = false;
+
+namespace {
+ cl::opt<bool>
+ DumpStatesOnHalt("dump-states-on-halt",
+ cl::init(true));
+
+ cl::opt<bool>
+ NoPreferCex("no-prefer-cex",
+ cl::init(false));
+
+ cl::opt<bool>
+ UseAsmAddresses("use-asm-addresses",
+ cl::init(false));
+
+ cl::opt<bool>
+ RandomizeFork("randomize-fork",
+ cl::init(false));
+
+ cl::opt<bool>
+ AllowExternalSymCalls("allow-external-sym-calls",
+ cl::init(false));
+
+ cl::opt<bool>
+ DebugPrintInstructions("debug-print-instructions",
+ cl::desc("Print instructions during execution."));
+
+ cl::opt<bool>
+ DebugCheckForImpliedValues("debug-check-for-implied-values");
+
+
+ cl::opt<bool>
+ SimplifySymIndices("simplify-sym-indices",
+ cl::init(false));
+
+ cl::opt<unsigned>
+ MaxSymArraySize("max-sym-array-size",
+ cl::init(0));
+
+ cl::opt<bool>
+ DebugValidateSolver("debug-validate-solver",
+ cl::init(false));
+
+ cl::opt<bool>
+ SuppressExternalWarnings("suppress-external-warnings");
+
+ cl::opt<bool>
+ AllExternalWarnings("all-external-warnings");
+
+ cl::opt<bool>
+ OnlyOutputStatesCoveringNew("only-output-states-covering-new",
+ cl::init(false));
+
+ cl::opt<bool>
+ AlwaysOutputSeeds("always-output-seeds",
+ cl::init(true));
+
+ cl::opt<bool>
+ UseFastCexSolver("use-fast-cex-solver",
+ cl::init(false));
+
+ cl::opt<bool>
+ UseIndependentSolver("use-independent-solver",
+ cl::init(true),
+ cl::desc("Use constraint independence"));
+
+ cl::opt<bool>
+ EmitAllErrors("emit-all-errors",
+ cl::init(false),
+ cl::desc("Generate tests cases for all errors "
+ "(default=one per (error,instruction) pair)"));
+
+ cl::opt<bool>
+ UseCexCache("use-cex-cache",
+ cl::init(true),
+ cl::desc("Use counterexample caching"));
+
+ cl::opt<bool>
+ UseQueryLog("use-query-log",
+ cl::init(false));
+
+ cl::opt<bool>
+ UseQueryPCLog("use-query-pc-log",
+ cl::init(false));
+
+ cl::opt<bool>
+ UseSTPQueryPCLog("use-stp-query-pc-log",
+ cl::init(false));
+
+ cl::opt<bool>
+ NoExternals("no-externals",
+ cl::desc("Do not allow external functin calls"));
+
+ cl::opt<bool>
+ UseCache("use-cache",
+ cl::init(true),
+ cl::desc("Use validity caching"));
+
+ cl::opt<bool>
+ OnlyReplaySeeds("only-replay-seeds",
+ cl::desc("Discard states that do not have a seed."));
+
+ cl::opt<bool>
+ OnlySeed("only-seed",
+ cl::desc("Stop execution after seeding is done without doing regular search."));
+
+ cl::opt<bool>
+ AllowSeedExtension("allow-seed-extension",
+ cl::desc("Allow extra (unbound) values to become symbolic during seeding."));
+
+ cl::opt<bool>
+ ZeroSeedExtension("zero-seed-extension");
+
+ cl::opt<bool>
+ AllowSeedTruncation("allow-seed-truncation",
+ cl::desc("Allow smaller buffers than in seeds."));
+
+ cl::opt<bool>
+ NamedSeedMatching("named-seed-matching",
+ cl::desc("Use names to match symbolic objects to inputs."));
+
+ cl::opt<double>
+ MaxStaticForkPct("max-static-fork-pct", cl::init(1.));
+ cl::opt<double>
+ MaxStaticSolvePct("max-static-solve-pct", cl::init(1.));
+ cl::opt<double>
+ MaxStaticCPForkPct("max-static-cpfork-pct", cl::init(1.));
+ cl::opt<double>
+ MaxStaticCPSolvePct("max-static-cpsolve-pct", cl::init(1.));
+
+ cl::opt<double>
+ MaxInstructionTime("max-instruction-time",
+ cl::desc("Only allow a single instruction to take this much time (default=0 (off))"),
+ cl::init(0));
+
+ cl::opt<double>
+ SeedTime("seed-time",
+ cl::desc("Amount of time to dedicate to seeds, before normal search (default=0 (off))"),
+ cl::init(0));
+
+ cl::opt<double>
+ MaxSTPTime("max-stp-time",
+ cl::desc("Maximum amount of time for a single query (default=120s)"),
+ cl::init(120.0));
+
+ cl::opt<unsigned int>
+ StopAfterNInstructions("stop-after-n-instructions",
+ cl::desc("Stop execution after specified number of instructions (0=off)"),
+ cl::init(0));
+
+ cl::opt<unsigned>
+ MaxForks("max-forks",
+ cl::desc("Only fork this many times (-1=off)"),
+ cl::init(~0u));
+
+ cl::opt<unsigned>
+ MaxDepth("max-depth",
+ cl::desc("Only allow this many symbolic branches (0=off)"),
+ cl::init(0));
+
+ cl::opt<unsigned>
+ MaxMemory("max-memory",
+ cl::desc("Refuse to fork when more above this about of memory (in MB, 0=off)"),
+ cl::init(0));
+
+ cl::opt<bool>
+ MaxMemoryInhibit("max-memory-inhibit",
+ cl::desc("Inhibit forking at memory cap (vs. random terminat)"),
+ cl::init(true));
+
+ // use 'external storage' because also needed by tools/klee/main.cpp
+ cl::opt<bool, true>
+ WriteTracesProxy("write-traces",
+ cl::desc("Write .trace file for each terminated state"),
+ cl::location(WriteTraces),
+ cl::init(false));
+
+ cl::opt<bool>
+ UseForkedSTP("use-forked-stp",
+ cl::desc("Run STP in forked process"));
+}
+
+
+static void *theMMap = 0;
+static unsigned theMMapSize = 0;
+
+namespace klee {
+ RNG theRNG;
+}
+
+Solver *constructSolverChain(STPSolver *stpSolver,
+ std::string queryLogPath,
+ std::string stpQueryLogPath,
+ std::string queryPCLogPath,
+ std::string stpQueryPCLogPath) {
+ Solver *solver = stpSolver;
+
+ if (UseSTPQueryPCLog)
+ solver = createPCLoggingSolver(solver,
+                                   stpQueryPCLogPath);
+
+ if (UseFastCexSolver)
+ solver = createFastCexSolver(solver);
+
+ if (UseCexCache)
+ solver = createCexCachingSolver(solver);
+
+ if (UseCache)
+ solver = createCachingSolver(solver);
+
+ if (UseIndependentSolver)
+ solver = createIndependentSolver(solver);
+
+ if (DebugValidateSolver)
+ solver = createValidatingSolver(solver, stpSolver);
+
+ if (UseQueryPCLog)
+ solver = createPCLoggingSolver(solver,
+ queryPCLogPath);
+
+ return solver;
+}
+
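
constructSolverChain() above builds the solver as a stack of optional layers, each one wrapping the solver constructed so far and delegating queries downward (caching, logging, independence, validation). A minimal standalone sketch of that decorator-style layering; ToySolver, BaseSolver, and CountingSolver are invented names, not KLEE's Solver API:

#include <iostream>
#include <memory>
#include <utility>

struct ToySolver {
  virtual ~ToySolver() = default;
  virtual bool mustBeTrue(int query) = 0;
};

struct BaseSolver : ToySolver {        // stands in for the base STP solver
  bool mustBeTrue(int query) override { return query == 0; }
};

struct CountingSolver : ToySolver {    // stands in for a caching/logging layer
  explicit CountingSolver(std::unique_ptr<ToySolver> inner)
      : inner(std::move(inner)) {}
  bool mustBeTrue(int query) override {
    ++queries;                         // layer-specific work
    return inner->mustBeTrue(query);   // then delegate to the wrapped solver
  }
  std::unique_ptr<ToySolver> inner;
  unsigned queries = 0;
};

int main() {
  std::unique_ptr<ToySolver> s = std::make_unique<BaseSolver>();
  s = std::make_unique<CountingSolver>(std::move(s));  // wrap, as each Use* option does
  std::cout << s->mustBeTrue(0) << '\n';
}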
+Executor::Executor(const InterpreterOptions &opts,
+ InterpreterHandler *ih)
+ : Interpreter(opts),
+ kmodule(0),
+ interpreterHandler(ih),
+ searcher(0),
+ externalDispatcher(new ExternalDispatcher()),
+ statsTracker(0),
+ pathWriter(0),
+ symPathWriter(0),
+ specialFunctionHandler(0),
+ processTree(0),
+ replayOut(0),
+ replayPath(0),
+ usingSeeds(0),
+ atMemoryLimit(false),
+ inhibitForking(false),
+ haltExecution(false),
+ ivcEnabled(false),
+ stpTimeout(std::min(MaxSTPTime,MaxInstructionTime)) {
+ STPSolver *stpSolver = new STPSolver(UseForkedSTP);
+ Solver *solver =
+ constructSolverChain(stpSolver,
+ interpreterHandler->getOutputFilename("queries.qlog"),
+ interpreterHandler->getOutputFilename("stp-queries.qlog"),
+ interpreterHandler->getOutputFilename("queries.pc"),
+ interpreterHandler->getOutputFilename("stp-queries.pc"));
+
+ this->solver = new TimingSolver(solver, stpSolver);
+
+ memory = new MemoryManager();
+}
+
+
+const Module *Executor::setModule(llvm::Module *module,
+ const ModuleOptions &opts) {
+ assert(!kmodule && module && "can only register one module"); // XXX gross
+
+ kmodule = new KModule(module);
+
+ specialFunctionHandler = new SpecialFunctionHandler(*this);
+
+ specialFunctionHandler->prepare();
+ kmodule->prepare(opts, interpreterHandler);
+ specialFunctionHandler->bind();
+
+ if (StatsTracker::useStatistics()) {
+ statsTracker =
+ new StatsTracker(*this,
+ interpreterHandler->getOutputFilename("assembly.ll"),
+ userSearcherRequiresMD2U());
+ }
+
+ return module;
+}
+
+Executor::~Executor() {
+ delete memory;
+ delete externalDispatcher;
+ if (processTree)
+ delete processTree;
+ if (specialFunctionHandler)
+ delete specialFunctionHandler;
+ if (statsTracker)
+ delete statsTracker;
+ delete solver;
+ delete kmodule;
+}
+
+/***/
+
+void Executor::initializeGlobalObject(ExecutionState &state, ObjectState *os,
+ Constant *c,
+ unsigned offset) {
+ TargetData *targetData = kmodule->targetData;
+ if (ConstantVector *cp = dyn_cast<ConstantVector>(c)) {
+ unsigned elementSize =
+ targetData->getTypeStoreSize(cp->getType()->getElementType());
+ for (unsigned i=0, e=cp->getNumOperands(); i != e; ++i)
+ initializeGlobalObject(state, os, cp->getOperand(i),
+ offset + i*elementSize);
+ } else if (isa<ConstantAggregateZero>(c)) {
+ unsigned i, size = targetData->getTypeStoreSize(c->getType());
+ for (i=0; i<size; i++)
+ os->write8(offset+i, (uint8_t) 0);
+ } else if (ConstantArray *ca = dyn_cast<ConstantArray>(c)) {
+ unsigned elementSize =
+ targetData->getTypeStoreSize(ca->getType()->getElementType());
+ for (unsigned i=0, e=ca->getNumOperands(); i != e; ++i)
+ initializeGlobalObject(state, os, ca->getOperand(i),
+ offset + i*elementSize);
+ } else if (ConstantStruct *cs = dyn_cast<ConstantStruct>(c)) {
+ const StructLayout *sl =
+ targetData->getStructLayout(cast<StructType>(cs->getType()));
+ for (unsigned i=0, e=cs->getNumOperands(); i != e; ++i)
+ initializeGlobalObject(state, os, cs->getOperand(i),
+ offset + sl->getElementOffset(i));
+ } else {
+ os->write(offset, evalConstant(c));
+ }
+}
+
+MemoryObject * Executor::addExternalObject(ExecutionState &state,
+ void *addr, unsigned size,
+ bool isReadOnly) {
+ MemoryObject *mo = memory->allocateFixed((uint64_t) (unsigned long) addr,
+ size, 0);
+ ObjectState *os = bindObjectInState(state, mo, false);
+ for(unsigned i = 0; i < size; i++)
+ os->write8(i, ((uint8_t*)addr)[i]);
+ if(isReadOnly)
+ os->setReadOnly(true);
+ return mo;
+}
+
+void Executor::initializeGlobals(ExecutionState &state) {
+ Module *m = kmodule->module;
+
+ if (m->getModuleInlineAsm() != "")
+ klee_warning("executable has module level assembly (ignoring)");
+
+ assert(m->lib_begin() == m->lib_end() &&
+ "XXX do not support dependent libraries");
+
+ // represent function globals using the address of the actual llvm function
+ // object. given that we use malloc to allocate memory in states this also
+ // ensures that we won't conflict. we don't need to allocate a memory object
+ // since reading/writing via a function pointer is unsupported anyway.
+ for (Module::iterator i = m->begin(), ie = m->end(); i != ie; ++i) {
+ Function *f = i;
+ ref<Expr> addr(0);
+
+ // If the symbol has external weak linkage then it is implicitly
+ // not defined in this module; if it isn't resolvable then it
+ // should be null.
+ if (f->hasExternalWeakLinkage() &&
+ !externalDispatcher->resolveSymbol(f->getName())) {
+ addr = Expr::createPointer(0);
+ } else {
+ addr = Expr::createPointer((unsigned long) (void*) f);
+ legalFunctions.insert(f);
+ }
+
+ globalAddresses.insert(std::make_pair(f, addr));
+ }
+
+ // Disabled, we don't want to promote use of live externals.
+#ifdef HAVE_CTYPE_EXTERNALS
+#ifndef WINDOWS
+#ifndef DARWIN
+ /* From /usr/include/errno.h: it [errno] is a per-thread variable. */
+ int *errno_addr = __errno_location();
+ addExternalObject(state, (void *)errno_addr, sizeof *errno_addr, false);
+
+ /* from /usr/include/ctype.h:
+ These point into arrays of 384, so they can be indexed by any `unsigned
+ char' value [0,255]; by EOF (-1); or by any `signed char' value
+ [-128,-1). ISO C requires that the ctype functions work for `unsigned */
+ const uint16_t **addr = __ctype_b_loc();
+ addExternalObject(state, (void *)(*addr-128),
+ 384 * sizeof **addr, true);
+ addExternalObject(state, addr, 4, true);
+
+ const int32_t **lower_addr = __ctype_tolower_loc();
+ addExternalObject(state, (void *)(*lower_addr-128),
+ 384 * sizeof **lower_addr, true);
+ addExternalObject(state, lower_addr, 4, true);
+
+ const int32_t **upper_addr = __ctype_toupper_loc();
+ addExternalObject(state, (void *)(*upper_addr-128),
+ 384 * sizeof **upper_addr, true);
+ addExternalObject(state, upper_addr, 4, true);
+#endif
+#endif
+#endif
+
+ // allocate and initialize globals, done in two passes since we may
+ // need address of a global in order to initialize some other one.
+
+ // allocate memory objects for all globals
+ for (Module::const_global_iterator i = m->global_begin(),
+ e = m->global_end();
+ i != e; ++i) {
+ if (i->isDeclaration()) {
+ // FIXME: We have no general way of handling unknown external
+ // symbols. If we really cared about making external stuff work
+ // better we could support user definition, or use the EXE style
+ // hack where we check the object file information.
+
+ const Type *ty = i->getType()->getElementType();
+ const std::string &name = i->getName();
+ uint64_t size = kmodule->targetData->getTypeStoreSize(ty);
+
+ // XXX - DWD - hardcode some things until we decide how to fix.
+#ifndef WINDOWS
+ if (name == "_ZTVN10__cxxabiv117__class_type_infoE") {
+ size = 0x2C;
+ } else if (name == "_ZTVN10__cxxabiv120__si_class_type_infoE") {
+ size = 0x2C;
+ } else if (name == "_ZTVN10__cxxabiv121__vmi_class_type_infoE") {
+ size = 0x2C;
+ }
+#endif
+
+ if (size == 0) {
+ llvm::cerr << "Unable to find size for global variable: " << i->getName()
+ << " (use will result in out of bounds access)\n";
+ }
+
+ MemoryObject *mo = memory->allocate(size, false, true, i);
+ ObjectState *os = bindObjectInState(state, mo, false);
+ globalObjects.insert(std::make_pair(i, mo));
+ globalAddresses.insert(std::make_pair(i, mo->getBaseExpr()));
+
+ // Program already running = object already initialized. Read
+ // concrete value and write it to our copy.
+ if (size) {
+ void *addr;
+ if (name=="__dso_handle") {
+ extern void *__dso_handle __attribute__ ((__weak__));
+ addr = &__dso_handle; // wtf ?
+ } else {
+ addr = externalDispatcher->resolveSymbol(name);
+ }
+ if (!addr)
+ klee_error("unable to load symbol(%s) while initializing globals.",
+ name.c_str());
+
+ for (unsigned offset=0; offset<mo->size; offset++)
+ os->write8(offset, ((unsigned char*)addr)[offset]);
+ }
+ } else {
+ const std::string &name = i->getName();
+ const Type *ty = i->getType()->getElementType();
+ uint64_t size = kmodule->targetData->getTypeStoreSize(ty);
+ MemoryObject *mo = 0;
+
+ if (UseAsmAddresses && name[0]=='\01') {
+ char *end;
+ uint64_t address = ::strtoll(name.c_str()+1, &end, 0);
+
+ if (end && *end == '\0') {
+ klee_message("NOTE: allocated global at asm specified address: %#08llx"
+ " (%llu bytes)",
+ address, size);
+ mo = memory->allocateFixed(address, size, &*i);
+ mo->isUserSpecified = true; // XXX hack;
+ }
+ }
+
+ if (!mo)
+ mo = memory->allocate(size, false, true, &*i);
+ assert(mo && "out of memory");
+ ObjectState *os = bindObjectInState(state, mo, false);
+ globalObjects.insert(std::make_pair(i, mo));
+ globalAddresses.insert(std::make_pair(i, mo->getBaseExpr()));
+
+ if (!i->hasInitializer())
+ os->initializeToRandom();
+ }
+ }
+
+ // link aliases to their definitions (if bound)
+ for (Module::alias_iterator i = m->alias_begin(), ie = m->alias_end();
+ i != ie; ++i) {
+ // Map the alias to its aliasee's address. This works because we have
+ // addresses for everything, even undefined functions.
+ globalAddresses.insert(std::make_pair(i, evalConstant(i->getAliasee())));
+ }
+
+ // once all objects are allocated, do the actual initialization
+ for (Module::const_global_iterator i = m->global_begin(),
+ e = m->global_end();
+ i != e; ++i) {
+ if (i->hasInitializer()) {
+ MemoryObject *mo = globalObjects.find(i)->second;
+ const ObjectState *os = state.addressSpace.findObject(mo);
+ assert(os);
+ ObjectState *wos = state.addressSpace.getWriteable(mo, os);
+
+ initializeGlobalObject(state, wos, i->getInitializer(), 0);
+ // if(i->isConstant()) os->setReadOnly(true);
+ }
+ }
+}
+
+void Executor::branch(ExecutionState &state,
+ const std::vector< ref<Expr> > &conditions,
+ std::vector<ExecutionState*> &result) {
+ TimerStatIncrementer timer(stats::forkTime);
+ unsigned N = conditions.size();
+ assert(N);
+
+ stats::forks += N-1;
+
+ // XXX do proper balance or keep random?
+ result.push_back(&state);
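+  // Branch each additional state off a randomly chosen state already in
+  // the result set, rather than always off the original state.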
+ for (unsigned i=1; i<N; ++i) {
+ ExecutionState *es = result[theRNG.getInt32() % i];
+ ExecutionState *ns = es->branch();
+ addedStates.insert(ns);
+ result.push_back(ns);
+ es->ptreeNode->data = 0;
+ std::pair<PTree::Node*,PTree::Node*> res =
+ processTree->split(es->ptreeNode, ns, es);
+ ns->ptreeNode = res.first;
+ es->ptreeNode = res.second;
+ }
+
+  // Redistribute seeds to match the conditions if needed, killing states
+  // that receive none when OnlyReplaySeeds is set (inefficient but
+  // simple).
+
+ std::map< ExecutionState*, std::vector<SeedInfo> >::iterator it =
+ seedMap.find(&state);
+ if (it != seedMap.end()) {
+ std::vector<SeedInfo> seeds = it->second;
+ seedMap.erase(it);
+
+    // Assume each seed only satisfies one condition (necessarily true
+    // when conditions are mutually exclusive and their disjunction is
+    // a tautology).
+ for (std::vector<SeedInfo>::iterator siit = seeds.begin(),
+ siie = seeds.end(); siit != siie; ++siit) {
+ unsigned i;
+ for (i=0; i<N; ++i) {
+ ref<Expr> res;
+ bool success =
+ solver->getValue(state, siit->assignment.evaluate(conditions[i]),
+ res);
+ assert(success && "FIXME: Unhandled solver failure");
+ if (res.getConstantValue())
+ break;
+ }
+
+ // If we didn't find a satisfying condition randomly pick one
+ // (the seed will be patched).
+ if (i==N)
+ i = theRNG.getInt32() % N;
+
+ seedMap[result[i]].push_back(*siit);
+ }
+
+ if (OnlyReplaySeeds) {
+ for (unsigned i=0; i<N; ++i) {
+ if (!seedMap.count(result[i])) {
+ terminateState(*result[i]);
+ result[i] = NULL;
+ }
+ }
+ }
+ }
+
+ for (unsigned i=0; i<N; ++i)
+ if (result[i])
+ addConstraint(*result[i], conditions[i]);
+}
+
+Executor::StatePair
+Executor::fork(ExecutionState &current, ref<Expr> condition, bool isInternal) {
+ Solver::Validity res;
+ std::map< ExecutionState*, std::vector<SeedInfo> >::iterator it =
+ seedMap.find(&current);
+ bool isSeeding = it != seedMap.end();
+
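+  // If this program point accounts for too large a share of all forks or
+  // solver time (the MaxStatic*Pct options), concretize the condition to a
+  // single feasible value so that no fork happens here.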
+ if (!isSeeding &&
+ !condition.isConstant() &&
+ (MaxStaticForkPct!=1. || MaxStaticSolvePct != 1. ||
+ MaxStaticCPForkPct!=1. || MaxStaticCPSolvePct != 1.) &&
+ statsTracker->elapsed() > 60.) {
+ StatisticManager &sm = *theStatisticManager;
+ CallPathNode *cpn = current.stack.back().callPathNode;
+ if ((MaxStaticForkPct<1. &&
+ sm.getIndexedValue(stats::forks, sm.getIndex()) >
+ stats::forks*MaxStaticForkPct) ||
+ (MaxStaticCPForkPct<1. &&
+ cpn && (cpn->statistics.getValue(stats::forks) >
+ stats::forks*MaxStaticCPForkPct)) ||
+ (MaxStaticSolvePct<1 &&
+ sm.getIndexedValue(stats::solverTime, sm.getIndex()) >
+ stats::solverTime*MaxStaticSolvePct) ||
+        (MaxStaticCPSolvePct<1. &&
+ cpn && (cpn->statistics.getValue(stats::solverTime) >
+ stats::solverTime*MaxStaticCPSolvePct))) {
+ ref<Expr> value;
+ bool success = solver->getValue(current, condition, value);
+ assert(success && "FIXME: Unhandled solver failure");
+ addConstraint(current, EqExpr::create(value, condition));
+ condition = value;
+ }
+ }
+
+ double timeout = stpTimeout;
+ if (isSeeding)
+ timeout *= it->second.size();
+ solver->setTimeout(timeout);
+ bool success = solver->evaluate(current, condition, res);
+ solver->setTimeout(0);
+ if (!success) {
+ current.pc = current.prevPC;
+ terminateStateEarly(current, "query timed out");
+ return StatePair(0, 0);
+ }
+
+ if (!isSeeding) {
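+    // When replaying a recorded branch path, force the recorded direction,
+    // adding the matching constraint if the condition is not already
+    // decided.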
+ if (replayPath && !isInternal) {
+ assert(replayPosition<replayPath->size() &&
+ "ran out of branches in replay path mode");
+ bool branch = (*replayPath)[replayPosition++];
+
+ if (res==Solver::True) {
+ assert(branch && "hit invalid branch in replay path mode");
+ } else if (res==Solver::False) {
+ assert(!branch && "hit invalid branch in replay path mode");
+ } else {
+ // add constraints
+ if(branch) {
+ res = Solver::True;
+ addConstraint(current, condition);
+ } else {
+ res = Solver::False;
+ addConstraint(current, Expr::createNot(condition));
+ }
+ }
+ } else if (res==Solver::Unknown) {
+ assert(!replayOut && "in replay mode, only one branch can be true.");
+
+ if ((MaxMemoryInhibit && atMemoryLimit) ||
+ current.forkDisabled ||
+ inhibitForking ||
+ (MaxForks!=~0u && stats::forks >= MaxForks)) {
+ TimerStatIncrementer timer(stats::forkTime);
+ if (theRNG.getBool()) {
+ addConstraint(current, condition);
+ res = Solver::True;
+ } else {
+ addConstraint(current, Expr::createNot(condition));
+ res = Solver::False;
+ }
+ }
+ }
+ }
+
+ // Fix branch in only-replay-seed mode, if we don't have both true
+ // and false seeds.
+ if (isSeeding &&
+ (current.forkDisabled || OnlyReplaySeeds) &&
+ res == Solver::Unknown) {
+ bool trueSeed=false, falseSeed=false;
+ // Is seed extension still ok here?
+ for (std::vector<SeedInfo>::iterator siit = it->second.begin(),
+ siie = it->second.end(); siit != siie; ++siit) {
+ ref<Expr> res;
+ bool success =
+ solver->getValue(current, siit->assignment.evaluate(condition), res);
+ assert(success && "FIXME: Unhandled solver failure");
+ if (res.isConstant()) {
+ if (res.getConstantValue()) {
+ trueSeed = true;
+ } else {
+ falseSeed = true;
+ }
+ if (trueSeed && falseSeed)
+ break;
+ }
+ }
+ if (!(trueSeed && falseSeed)) {
+ assert(trueSeed || falseSeed);
+
+ res = trueSeed ? Solver::True : Solver::False;
+ addConstraint(current, trueSeed ? condition : Expr::createNot(condition));
+ }
+ }
+
+
+ // XXX - even if the constraint is provable one way or the other we
+ // can probably benefit by adding this constraint and allowing it to
+ // reduce the other constraints. For example, if we do a binary
+ // search on a particular value, and then see a comparison against
+ // the value it has been fixed at, we should take this as a nice
+ // hint to just use the single constraint instead of all the binary
+ // search ones. If that makes sense.
+ if (res==Solver::True) {
+ if (!isInternal) {
+ if (pathWriter) {
+ current.pathOS << "1";
+ }
+ }
+
+ return StatePair(&current, 0);
+ } else if (res==Solver::False) {
+ if (!isInternal) {
+ if (pathWriter) {
+ current.pathOS << "0";
+ }
+ }
+
+ return StatePair(0, &current);
+ } else {
+ TimerStatIncrementer timer(stats::forkTime);
+ ExecutionState *falseState, *trueState = &current;
+
+ ++stats::forks;
+
+ falseState = trueState->branch();
+ addedStates.insert(falseState);
+
+ if (RandomizeFork && theRNG.getBool())
+ std::swap(trueState, falseState);
+
+ if (it != seedMap.end()) {
+ std::vector<SeedInfo> seeds = it->second;
+ it->second.clear();
+ std::vector<SeedInfo> &trueSeeds = seedMap[trueState];
+ std::vector<SeedInfo> &falseSeeds = seedMap[falseState];
+ for (std::vector<SeedInfo>::iterator siit = seeds.begin(),
+ siie = seeds.end(); siit != siie; ++siit) {
+ ref<Expr> res;
+ bool success =
+ solver->getValue(current, siit->assignment.evaluate(condition), res);
+ assert(success && "FIXME: Unhandled solver failure");
+ if (res.getConstantValue()) {
+ trueSeeds.push_back(*siit);
+ } else {
+ falseSeeds.push_back(*siit);
+ }
+ }
+
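+      // If the branch holding the original state received no seeds, swap
+      // the coverage bookkeeping so that it follows the seeded branch.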
+ bool swapInfo = false;
+ if (trueSeeds.empty()) {
+ if (&current == trueState) swapInfo = true;
+ seedMap.erase(trueState);
+ }
+ if (falseSeeds.empty()) {
+ if (&current == falseState) swapInfo = true;
+ seedMap.erase(falseState);
+ }
+ if (swapInfo) {
+ std::swap(trueState->coveredNew, falseState->coveredNew);
+ std::swap(trueState->coveredLines, falseState->coveredLines);
+ }
+ }
+
+ current.ptreeNode->data = 0;
+ std::pair<PTree::Node*, PTree::Node*> res =
+ processTree->split(current.ptreeNode, falseState, trueState);
+ falseState->ptreeNode = res.first;
+ trueState->ptreeNode = res.second;
+
+ if (!isInternal) {
+ if (pathWriter) {
+ falseState->pathOS = pathWriter->open(current.pathOS);
+ trueState->pathOS << "1";
+ falseState->pathOS << "0";
+ }
+ if (symPathWriter) {
+ falseState->symPathOS = symPathWriter->open(current.symPathOS);
+ trueState->symPathOS << "1";
+ falseState->symPathOS << "0";
+ }
+ }
+
+ addConstraint(*trueState, condition);
+ addConstraint(*falseState, Expr::createNot(condition));
+
+ // Kinda gross, do we even really still want this option?
+ if (MaxDepth && MaxDepth<=trueState->depth) {
+ terminateStateEarly(*trueState, "max-depth exceeded");
+ terminateStateEarly(*falseState, "max-depth exceeded");
+ return StatePair(0, 0);
+ }
+
+ return StatePair(trueState, falseState);
+ }
+}
+
+void Executor::addConstraint(ExecutionState &state, ref<Expr> condition) {
+ if (condition.isConstant()) {
+ assert(condition.getConstantValue() &&
+ "attempt to add invalid constraint");
+ return;
+ }
+
+ // Check to see if this constraint violates seeds.
+ std::map< ExecutionState*, std::vector<SeedInfo> >::iterator it =
+ seedMap.find(&state);
+ if (it != seedMap.end()) {
+ bool warn = false;
+ for (std::vector<SeedInfo>::iterator siit = it->second.begin(),
+ siie = it->second.end(); siit != siie; ++siit) {
+ bool res;
+ bool success =
+ solver->mustBeFalse(state, siit->assignment.evaluate(condition), res);
+ assert(success && "FIXME: Unhandled solver failure");
+ if (res) {
+ siit->patchSeed(state, condition, solver);
+ warn = true;
+ }
+ }
+ if (warn)
+ klee_warning("seeds patched for violating constraint");
+ }
+
+ state.addConstraint(condition);
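+  // With implied value concretization enabled, propagate any concrete
+  // values implied by asserting this condition to be true.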
+ if (ivcEnabled)
+ doImpliedValueConcretization(state, condition, ref<Expr>(1, Expr::Bool));
+}
+
+ref<Expr> Executor::evalConstant(Constant *c) {
+ if (llvm::ConstantExpr *ce = dyn_cast<llvm::ConstantExpr>(c)) {
+ return evalConstantExpr(ce);
+ } else {
+ if (const ConstantInt *ci = dyn_cast<ConstantInt>(c)) {
+ switch(ci->getBitWidth()) {
+ case 1: return ConstantExpr::create(ci->getZExtValue(), Expr::Bool);
+ case 8: return ConstantExpr::create(ci->getZExtValue(), Expr::Int8);
+ case 16: return ConstantExpr::create(ci->getZExtValue(), Expr::Int16);
+ case 32: return ConstantExpr::create(ci->getZExtValue(), Expr::Int32);
+ case 64: return ConstantExpr::create(ci->getZExtValue(), Expr::Int64);
+ default:
+ assert(0 && "XXX arbitrary bit width constants unhandled");
+ }
+ } else if (const ConstantFP *cf = dyn_cast<ConstantFP>(c)) {
+ switch(cf->getType()->getTypeID()) {
+ case Type::FloatTyID: {
+ float f = cf->getValueAPF().convertToFloat();
+ return ConstantExpr::create(floats::FloatAsUInt64(f), Expr::Int32);
+ }
+ case Type::DoubleTyID: {
+ double d = cf->getValueAPF().convertToDouble();
+ return ConstantExpr::create(floats::DoubleAsUInt64(d), Expr::Int64);
+ }
+ case Type::X86_FP80TyID: {
+ // FIXME: This is really broken, but for now we just convert
+ // to a double. This isn't going to work at all in general,
+ // but we need support for wide constants.
+ APFloat apf = cf->getValueAPF();
+ bool ignored;
+ APFloat::opStatus r = apf.convert(APFloat::IEEEdouble,
+ APFloat::rmNearestTiesToAway,
+ &ignored);
+ (void) r;
+ //assert(!(r & APFloat::opOverflow) && !(r & APFloat::opUnderflow) &&
+ // "Overflow/underflow while converting from FP80 (x87) to 64-bit double");
+ double d = apf.convertToDouble();
+ return ConstantExpr::create(floats::DoubleAsUInt64(d), Expr::Int64);
+ }
+ default:
+ llvm::cerr << "Constant of type " << cf->getType()->getDescription()
+ << " not supported\n";
+ llvm::cerr << "Constant used at ";
+ KConstant *kc = kmodule->getKConstant((Constant*) cf);
+ if (kc && kc->ki && kc->ki->info)
+ llvm::cerr << kc->ki->info->file << ":" << kc->ki->info->line << "\n";
+ else llvm::cerr << "<unknown>\n";
+
+ assert(0 && "Arbitrary bit width floating point constants unsupported");
+ }
+ } else if (const GlobalValue *gv = dyn_cast<GlobalValue>(c)) {
+ return globalAddresses.find(gv)->second;
+ } else if (isa<ConstantPointerNull>(c)) {
+ return Expr::createPointer(0);
+ } else if (isa<UndefValue>(c)) {
+ return ConstantExpr::create(0, Expr::getWidthForLLVMType(c->getType()));
+ } else {
+ // Constant{AggregateZero,Array,Struct,Vector}
+ assert(0 && "invalid argument to evalConstant()");
+ }
+ }
+}
+
+ref<Expr> Executor::eval(KInstruction *ki,
+ unsigned index,
+ ExecutionState &state) {
+ assert(index < ki->inst->getNumOperands());
+ int vnumber = ki->operands[index];
+
+ // Determine if this is a constant or not.
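+  // Negative operand numbers index the module constant table (recovered
+  // as -vnumber - 2); non-negative numbers index the current stack
+  // frame's locals.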
+ if (vnumber < 0) {
+ unsigned index = -vnumber - 2;
+ Cell &c = kmodule->constantTable[index];
+ return c.value;
+ } else {
+ unsigned index = vnumber;
+ StackFrame &sf = state.stack.back();
+ Cell &c = sf.locals[index];
+ return c.value;
+ }
+}
+
+void Executor::bindLocal(KInstruction *target, ExecutionState &state,
+ ref<Expr> value) {
+ StackFrame &sf = state.stack.back();
+ unsigned reg = target->dest;
+ Cell &c = sf.locals[reg];
+ c.value = value;
+}
+
+void Executor::bindArgument(KFunction *kf, unsigned index,
+ ExecutionState &state, ref<Expr> value) {
+ StackFrame &sf = state.stack.back();
+ unsigned reg = kf->getArgRegister(index);
+ Cell &c = sf.locals[reg];
+ c.value = value;
+}
+
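+/* Return a constant equal to 'e' when the current path constraints admit
+   exactly one value for it; otherwise return 'e' unchanged. */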
+ref<Expr> Executor::toUnique(const ExecutionState &state,
+ ref<Expr> &e) {
+ ref<Expr> result = e;
+
+ if (!e.isConstant()) {
+ ref<Expr> value(0);
+ bool isTrue = false;
+
+ solver->setTimeout(stpTimeout);
+ if (solver->getValue(state, e, value) &&
+ solver->mustBeTrue(state, EqExpr::create(e, value), isTrue) &&
+ isTrue)
+ result = value;
+ solver->setTimeout(0);
+ }
+
+ return result;
+}
+
+
+/* Concretize the given expression, and return a possible constant value.
+ 'reason' is just a documentation string stating the reason for concretization. */
+ref<Expr> Executor::toConstant(ExecutionState &state,
+ ref<Expr> e,
+ const char *reason) {
+ e = state.constraints.simplifyExpr(e);
+ if (!e.isConstant()) {
+ ref<Expr> value;
+ bool success = solver->getValue(state, e, value);
+ assert(success && "FIXME: Unhandled solver failure");
+
+ std::ostringstream os;
+ os << "silently concretizing (reason: " << reason << ") expression " << e
+ << " to value " << value
+ << " (" << (*(state.pc)).info->file << ":" << (*(state.pc)).info->line << ")";
+
+ if (AllExternalWarnings)
+      klee_warning("%s", os.str().c_str());
+ else
+ klee_warning_once(reason, "%s", os.str().c_str());
+
+ addConstraint(state, EqExpr::create(e, value));
+
+ return value;
+ } else {
+ return e;
+ }
+}
+
+void Executor::executeGetValue(ExecutionState &state,
+ ref<Expr> e,
+ KInstruction *target) {
+ e = state.constraints.simplifyExpr(e);
+ std::map< ExecutionState*, std::vector<SeedInfo> >::iterator it =
+ seedMap.find(&state);
+ if (it==seedMap.end() || e.isConstant()) {
+ ref<Expr> value;
+ bool success = solver->getValue(state, e, value);
+ assert(success && "FIXME: Unhandled solver failure");
+ bindLocal(target, state, value);
+ } else {
+ std::set< ref<Expr> > values;
+ for (std::vector<SeedInfo>::iterator siit = it->second.begin(),
+ siie = it->second.end(); siit != siie; ++siit) {
+ ref<Expr> value;
+ bool success =
+ solver->getValue(state, siit->assignment.evaluate(e), value);
+ assert(success && "FIXME: Unhandled solver failure");
+ values.insert(value);
+ }
+
+ std::vector< ref<Expr> > conditions;
+ for (std::set< ref<Expr> >::iterator vit = values.begin(),
+ vie = values.end(); vit != vie; ++vit)
+ conditions.push_back(EqExpr::create(e, *vit));
+
+ std::vector<ExecutionState*> branches;
+ branch(state, conditions, branches);
+
+ std::vector<ExecutionState*>::iterator bit = branches.begin();
+ for (std::set< ref<Expr> >::iterator vit = values.begin(),
+ vie = values.end(); vit != vie; ++vit) {
+ ExecutionState *es = *bit;
+ if (es)
+ bindLocal(target, *es, *vit);
+ ++bit;
+ }
+ }
+}
+
+void Executor::stepInstruction(ExecutionState &state) {
+ if (DebugPrintInstructions) {
+ printFileLine(state, state.pc);
+ llvm::cerr << std::setw(10) << stats::instructions << " " << *state.pc->inst;
+ }
+
+ if (statsTracker)
+ statsTracker->stepInstruction(state);
+
+ ++stats::instructions;
+ state.prevPC = state.pc;
+ ++state.pc;
+
+ if (stats::instructions==StopAfterNInstructions)
+ haltExecution = true;
+}
+
+void Executor::executeCall(ExecutionState &state,
+ KInstruction *ki,
+ Function *f,
+ std::vector< ref<Expr> > &arguments) {
+ if (WriteTraces) {
+ // don't print out special debug stop point 'function' calls
+ if (f->getIntrinsicID() != Intrinsic::dbg_stoppoint) {
+ const std::string& calleeFuncName = f->getName();
+ state.exeTraceMgr.addEvent(new FunctionCallTraceEvent(state, ki, calleeFuncName));
+ }
+ }
+
+ Instruction *i = ki->inst;
+ if (f && f->isDeclaration()) {
+ if (f!=kmodule->dbgStopPointFn) { // special case speed hack
+ switch(f->getIntrinsicID()) {
+ case Intrinsic::dbg_stoppoint:
+ case Intrinsic::dbg_region_start:
+ case Intrinsic::dbg_region_end:
+ case Intrinsic::dbg_func_start:
+ case Intrinsic::dbg_declare:
+ case Intrinsic::not_intrinsic:
+ // state may be destroyed by this call, cannot touch
+ callExternalFunction(state, ki, f, arguments);
+ break;
+
+ // vararg is handled by caller and intrinsic lowering,
+ // see comment for ExecutionState::varargs
+ case Intrinsic::vastart: {
+ StackFrame &sf = state.stack.back();
+ assert(sf.varargs &&
+ "vastart called in function with no vararg object");
+ executeMemoryOperation(state, true, arguments[0],
+ sf.varargs->getBaseExpr(), 0);
+ break;
+ }
+ case Intrinsic::vaend: // va_end is a noop for the interpreter
+ break;
+
+ case Intrinsic::vacopy: // should be lowered
+ default:
+ klee_error("unknown intrinsic: %s", f->getName().c_str());
+ }
+ }
+
+ if (InvokeInst *ii = dyn_cast<InvokeInst>(i)) {
+ transferToBasicBlock(ii->getNormalDest(), i->getParent(), state);
+ }
+ } else {
+    // XXX not really happy about this reliance on prevPC but it is OK
+    // I guess. This is just done to avoid having to pass KInstIterator
+    // everywhere instead of the actual instruction, since we can't
+    // make a KInstIterator from just an instruction (unlike LLVM).
+ KFunction *kf = kmodule->functionMap[f];
+ state.pushFrame(state.prevPC, kf);
+ state.pc = kf->instructions;
+
+ if (statsTracker)
+ statsTracker->framePushed(state, &state.stack[state.stack.size()-2]);
+
+ unsigned callingArgs = arguments.size();
+ unsigned funcArgs = f->arg_size();
+ if (!f->isVarArg()) {
+ if (callingArgs > funcArgs) {
+ klee_warning_once(f, "calling %s with extra arguments.",
+ f->getName().c_str());
+ } else if (callingArgs < funcArgs) {
+ terminateStateOnError(state, "calling function with too few arguments",
+ "user.err");
+ return;
+ }
+ } else {
+ if (callingArgs < funcArgs) {
+ terminateStateOnError(state, "calling function with too few arguments",
+ "user.err");
+ return;
+ }
+
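+      // Pack the extra (variadic) arguments contiguously into a fresh
+      // memory object; Intrinsic::vastart above hands out its base address.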
+ StackFrame &sf = state.stack.back();
+ unsigned size = 0;
+ for (unsigned i = funcArgs; i < callingArgs; i++)
+ size += Expr::getMinBytesForWidth(arguments[i].getWidth());
+
+ MemoryObject *mo = sf.varargs = memory->allocate(size, true, false,
+ state.prevPC->inst);
+ if (!mo) {
+ terminateStateOnExecError(state, "out of memory (varargs)");
+ return;
+ }
+ ObjectState *os = bindObjectInState(state, mo, true);
+ unsigned offset = 0;
+ for (unsigned i = funcArgs; i < callingArgs; i++) {
+        // XXX: DRE: I think we bind memory objects here?
+ os->write(offset, arguments[i]);
+ offset += Expr::getMinBytesForWidth(arguments[i].getWidth());
+ }
+ }
+
+ unsigned numFormals = f->arg_size();
+ for (unsigned i=0; i<numFormals; ++i)
+ bindArgument(kf, i, state, arguments[i]);
+ }
+}
+
+void Executor::transferToBasicBlock(BasicBlock *dst, BasicBlock *src,
+ ExecutionState &state) {
+  // Note that in general phi nodes can reuse phi values from the same
+  // block but the incoming value is the eval() result *before* the
+  // execution of any phi nodes. This is pathological and doesn't
+  // really seem to occur, but just in case we run the PhiCleanerPass,
+  // which makes sure this cannot happen, so it is safe to just
+  // eval things in order. The PhiCleanerPass also makes sure that all
+  // incoming blocks have the same order for each PHINode so we only
+  // have to compute the index once.
+ //
+ // With that done we simply set an index in the state so that PHI
+ // instructions know which argument to eval, set the pc, and continue.
+
+ // XXX this lookup has to go ?
+ KFunction *kf = state.stack.back().kf;
+ unsigned entry = kf->basicBlockEntry[dst];
+ state.pc = &kf->instructions[entry];
+ if (state.pc->inst->getOpcode() == Instruction::PHI) {
+ PHINode *first = static_cast<PHINode*>(state.pc->inst);
+ state.incomingBBIndex = first->getBasicBlockIndex(src);
+ }
+}
+
+void Executor::printFileLine(ExecutionState &state, KInstruction *ki) {
+ const InstructionInfo &ii = *ki->info;
+ if (ii.file != "")
+ llvm::cerr << " " << ii.file << ":" << ii.line << ":";
+ else
+ llvm::cerr << " [no debug info]:";
+}
+
+
+Function* Executor::getCalledFunction(CallSite &cs, ExecutionState &state) {
+ Function *f = cs.getCalledFunction();
+
+ if (f) {
+ std::string alias = state.getFnAlias(f->getName());
+ if (alias != "") {
+ //llvm::cerr << f->getName() << "() is aliased with " << alias << "()\n";
+ llvm::Module* currModule = kmodule->module;
+ Function* old_f = f;
+ f = currModule->getFunction(alias);
+ if (!f) {
+ llvm::cerr << "Function " << alias << "(), alias for " << old_f->getName() << " not found!\n";
+ assert(f && "function alias not found");
+ }
+ }
+ }
+
+ return f;
+}
+
+
+void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) {
+ Instruction *i = ki->inst;
+ switch (i->getOpcode()) {
+ // Control flow
+ case Instruction::Ret: {
+ ReturnInst *ri = cast<ReturnInst>(i);
+ KInstIterator kcaller = state.stack.back().caller;
+ Instruction *caller = kcaller ? kcaller->inst : 0;
+ bool isVoidReturn = (ri->getNumOperands() == 0);
+ ref<Expr> result(0,Expr::Bool);
+
+ if (WriteTraces) {
+ state.exeTraceMgr.addEvent(new FunctionReturnTraceEvent(state, ki));
+ }
+
+ if (!isVoidReturn) {
+ result = eval(ki, 0, state);
+ }
+
+ if (state.stack.size() <= 1) {
+ assert(!caller && "caller set on initial stack frame");
+ terminateStateOnExit(state);
+ } else {
+ state.popFrame();
+
+ if (statsTracker)
+ statsTracker->framePopped(state);
+
+ if (InvokeInst *ii = dyn_cast<InvokeInst>(caller)) {
+ transferToBasicBlock(ii->getNormalDest(), caller->getParent(), state);
+ } else {
+ state.pc = kcaller;
+ ++state.pc;
+ }
+
+ if (!isVoidReturn) {
+ const Type *t = caller->getType();
+ if (t != Type::VoidTy) {
+ // may need to do coercion due to bitcasts
+ Expr::Width from = result.getWidth();
+ Expr::Width to = Expr::getWidthForLLVMType(t);
+
+ if (from != to) {
+ CallSite cs = (isa<InvokeInst>(caller) ? CallSite(cast<InvokeInst>(caller)) :
+ CallSite(cast<CallInst>(caller)));
+
+ // XXX need to check other param attrs ?
+ if (cs.paramHasAttr(0, llvm::Attribute::SExt)) {
+ result = SExtExpr::create(result, to);
+ } else {
+ result = ZExtExpr::create(result, to);
+ }
+ }
+
+ bindLocal(kcaller, state, result);
+ }
+ } else {
+ // We check that the return value has no users instead of
+ // checking the type, since C defaults to returning int for
+ // undeclared functions.
+ if (!caller->use_empty()) {
+ terminateStateOnExecError(state, "return void when caller expected a result");
+ }
+ }
+ }
+ break;
+ }
+ case Instruction::Unwind: {
+ for (;;) {
+ KInstruction *kcaller = state.stack.back().caller;
+ state.popFrame();
+
+ if (statsTracker)
+ statsTracker->framePopped(state);
+
+ if (state.stack.empty()) {
+ terminateStateOnExecError(state, "unwind from initial stack frame");
+ break;
+ } else {
+ Instruction *caller = kcaller->inst;
+ if (InvokeInst *ii = dyn_cast<InvokeInst>(caller)) {
+ transferToBasicBlock(ii->getUnwindDest(), caller->getParent(), state);
+ break;
+ }
+ }
+ }
+ break;
+ }
+ case Instruction::Br: {
+ BranchInst *bi = cast<BranchInst>(i);
+ if (bi->isUnconditional()) {
+ transferToBasicBlock(bi->getSuccessor(0), bi->getParent(), state);
+ } else {
+ // FIXME: Find a way that we don't have this hidden dependency.
+ assert(bi->getCondition() == bi->getOperand(0) &&
+ "Wrong operand index!");
+ ref<Expr> cond = eval(ki, 0, state);
+ Executor::StatePair branches = fork(state, cond, false);
+
+ if (WriteTraces) {
+ bool isTwoWay = (branches.first && branches.second);
+
+ if (branches.first) {
+ branches.first->exeTraceMgr.addEvent(
+ new BranchTraceEvent(state, ki, true, isTwoWay));
+ }
+
+ if (branches.second) {
+ branches.second->exeTraceMgr.addEvent(
+ new BranchTraceEvent(state, ki, false, isTwoWay));
+ }
+ }
+
+ // NOTE: There is a hidden dependency here, markBranchVisited
+ // requires that we still be in the context of the branch
+ // instruction (it reuses its statistic id). Should be cleaned
+ // up with convenient instruction specific data.
+ if (statsTracker && state.stack.back().kf->trackCoverage)
+ statsTracker->markBranchVisited(branches.first, branches.second);
+
+ if (branches.first)
+ transferToBasicBlock(bi->getSuccessor(0), bi->getParent(), *branches.first);
+ if (branches.second)
+ transferToBasicBlock(bi->getSuccessor(1), bi->getParent(), *branches.second);
+ }
+ break;
+ }
+ case Instruction::Switch: {
+ SwitchInst *si = cast<SwitchInst>(i);
+ ref<Expr> cond = eval(ki, 0, state);
+ unsigned cases = si->getNumCases();
+ BasicBlock *bb = si->getParent();
+
+ cond = toUnique(state, cond);
+ if (cond.isConstant()) {
+ // Somewhat gross to create these all the time, but fine till we
+ // switch to an internal rep.
+ ConstantInt *ci = ConstantInt::get(si->getCondition()->getType(),
+ cond.getConstantValue());
+ unsigned index = si->findCaseValue(ci);
+ transferToBasicBlock(si->getSuccessor(index), si->getParent(), state);
+ } else {
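+      // Symbolic switch condition: for each feasible target collect the
+      // disjunction of the case conditions leading to it (the default
+      // target gets the negation of all cases), then branch once per
+      // target.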
+ std::map<BasicBlock*, ref<Expr> > targets;
+ ref<Expr> isDefault(1,Expr::Bool);
+ for (unsigned i=1; i<cases; ++i) {
+ ref<Expr> value = evalConstant(si->getCaseValue(i));
+ ref<Expr> match = EqExpr::create(cond, value);
+ isDefault = AndExpr::create(isDefault, Expr::createNot(match));
+ bool result;
+ bool success = solver->mayBeTrue(state, match, result);
+ assert(success && "FIXME: Unhandled solver failure");
+ if (result) {
+ std::map<BasicBlock*, ref<Expr> >::iterator it =
+ targets.insert(std::make_pair(si->getSuccessor(i),
+ ref<Expr>(0,Expr::Bool))).first;
+ it->second = OrExpr::create(match, it->second);
+ }
+ }
+ bool res;
+ bool success = solver->mayBeTrue(state, isDefault, res);
+ assert(success && "FIXME: Unhandled solver failure");
+ if (res)
+ targets.insert(std::make_pair(si->getSuccessor(0), isDefault));
+
+ std::vector< ref<Expr> > conditions;
+ for (std::map<BasicBlock*, ref<Expr> >::iterator it =
+ targets.begin(), ie = targets.end();
+ it != ie; ++it)
+ conditions.push_back(it->second);
+
+ std::vector<ExecutionState*> branches;
+ branch(state, conditions, branches);
+
+ std::vector<ExecutionState*>::iterator bit = branches.begin();
+ for (std::map<BasicBlock*, ref<Expr> >::iterator it =
+ targets.begin(), ie = targets.end();
+ it != ie; ++it) {
+ ExecutionState *es = *bit;
+ if (es)
+ transferToBasicBlock(it->first, bb, *es);
+ ++bit;
+ }
+ }
+ break;
+ }
+ case Instruction::Unreachable:
+    // Note that this is not necessarily an internal bug; LLVM will
+    // generate unreachable instructions in cases where it knows the
+    // program will crash. So it is effectively a SEGV or internal
+    // error.
+ terminateStateOnExecError(state, "reached \"unreachable\" instruction");
+ break;
+
+ case Instruction::Invoke:
+ case Instruction::Call: {
+ CallSite cs;
+ unsigned argStart;
+ if (i->getOpcode()==Instruction::Call) {
+ cs = CallSite(cast<CallInst>(i));
+ argStart = 1;
+ } else {
+ cs = CallSite(cast<InvokeInst>(i));
+ argStart = 3;
+ }
+
+ unsigned numArgs = cs.arg_size();
+ Function *f = getCalledFunction(cs, state);
+
+ // evaluate arguments
+ std::vector< ref<Expr> > arguments;
+ arguments.reserve(numArgs);
+
+ for (unsigned j=0; j<numArgs; ++j)
+ arguments.push_back(eval(ki, argStart+j, state));
+
+ if (!f) {
+      // special case: call through a bitcast of the function pointer
+ Value *fp = cs.getCalledValue();
+ llvm::ConstantExpr *ce = dyn_cast<llvm::ConstantExpr>(fp);
+
+ if (ce && ce->getOpcode()==Instruction::BitCast) {
+ f = dyn_cast<Function>(ce->getOperand(0));
+ assert(f && "XXX unrecognized constant expression in call");
+ const FunctionType *fType =
+ dyn_cast<FunctionType>(cast<PointerType>(f->getType())->getElementType());
+ const FunctionType *ceType =
+ dyn_cast<FunctionType>(cast<PointerType>(ce->getType())->getElementType());
+ assert(fType && ceType && "unable to get function type");
+
+ // XXX check result coercion
+
+ // XXX this really needs thought and validation
+ unsigned i=0;
+ for (std::vector< ref<Expr> >::iterator
+ ai = arguments.begin(), ie = arguments.end();
+ ai != ie; ++ai) {
+ Expr::Width to, from = (*ai).getWidth();
+
+ if (i<fType->getNumParams()) {
+ to = Expr::getWidthForLLVMType(fType->getParamType(i));
+
+ if (from != to) {
+ // XXX need to check other param attrs ?
+ if (cs.paramHasAttr(i+1, llvm::Attribute::SExt)) {
+ arguments[i] = SExtExpr::create(arguments[i], to);
+ } else {
+ arguments[i] = ZExtExpr::create(arguments[i], to);
+ }
+ }
+ }
+
+ i++;
+ }
+ } else if (isa<InlineAsm>(fp)) {
+ terminateStateOnExecError(state, "inline assembly is unsupported");
+ break;
+ }
+ }
+
+ if (f) {
+ executeCall(state, ki, f, arguments);
+ } else {
+ ref<Expr> v = eval(ki, 0, state);
+
+ ExecutionState *free = &state;
+ bool hasInvalid = false, first = true;
+
+ /* XXX This is wasteful, no need to do a full evaluate since we
+ have already got a value. But in the end the caches should
+ handle it for us, albeit with some overhead. */
+ do {
+ ref<Expr> value;
+ bool success = solver->getValue(*free, v, value);
+ assert(success && "FIXME: Unhandled solver failure");
+ StatePair res = fork(*free, EqExpr::create(v, value), true);
+ if (res.first) {
+ void *addr = (void*) (unsigned long) value.getConstantValue();
+ std::set<void*>::iterator it = legalFunctions.find(addr);
+ if (it != legalFunctions.end()) {
+ f = (Function*) addr;
+
+ // Don't give warning on unique resolution
+ if (res.second || !first)
+ klee_warning_once(addr,
+ "resolved symbolic function pointer to: %s",
+ f->getName().c_str());
+
+ executeCall(*res.first, ki, f, arguments);
+ } else {
+ if (!hasInvalid) {
+ terminateStateOnExecError(state, "invalid function pointer");
+ hasInvalid = true;
+ }
+ }
+ }
+
+ first = false;
+ free = res.second;
+ } while (free);
+ }
+ break;
+ }
+ case Instruction::PHI: {
+ ref<Expr> result = eval(ki, state.incomingBBIndex * 2, state);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ // Special instructions
+ case Instruction::Select: {
+ SelectInst *SI = cast<SelectInst>(ki->inst);
+ assert(SI->getCondition() == SI->getOperand(0) &&
+ "Wrong operand index!");
+ ref<Expr> cond = eval(ki, 0, state);
+ ref<Expr> tExpr = eval(ki, 1, state);
+ ref<Expr> fExpr = eval(ki, 2, state);
+ ref<Expr> result = SelectExpr::create(cond, tExpr, fExpr);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::VAArg:
+ terminateStateOnExecError(state, "unexpected VAArg instruction");
+ break;
+
+ // Arithmetic / logical
+#define FP_CONSTANT_BINOP(op, type, l, r, target, state) \
+ bindLocal(target, state, \
+ ref<Expr>(op(toConstant(state, l, "floating point").getConstantValue(), \
+ toConstant(state, r, "floating point").getConstantValue(), \
+ type), type))
+ case Instruction::Add: {
+ BinaryOperator *bi = cast<BinaryOperator>(i);
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+
+ if( bi->getType()->getTypeID() == llvm::Type::IntegerTyID ) {
+ bindLocal(ki, state, AddExpr::create(left, right));
+ } else {
+ Expr::Width type = Expr::getWidthForLLVMType(bi->getType());
+ FP_CONSTANT_BINOP(floats::add, type, left, right, ki, state);
+ }
+
+ break;
+ }
+
+ case Instruction::Sub: {
+ BinaryOperator *bi = cast<BinaryOperator>(i);
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+
+ if( bi->getType()->getTypeID() == llvm::Type::IntegerTyID ) {
+ bindLocal(ki, state, SubExpr::create(left, right));
+ } else {
+ Expr::Width type = Expr::getWidthForLLVMType(bi->getType());
+ FP_CONSTANT_BINOP(floats::sub, type, left, right, ki, state);
+ }
+
+ break;
+ }
+
+ case Instruction::Mul: {
+ BinaryOperator *bi = cast<BinaryOperator>(i);
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+
+ if( bi->getType()->getTypeID() == llvm::Type::IntegerTyID ) {
+ bindLocal(ki, state, MulExpr::create(left, right));
+ } else {
+ Expr::Width type = Expr::getWidthForLLVMType(bi->getType());
+ FP_CONSTANT_BINOP(floats::mul, type, left, right, ki, state);
+ }
+
+ break;
+ }
+
+ case Instruction::UDiv: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = UDivExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::SDiv: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = SDivExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::URem: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = URemExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::SRem: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = SRemExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::And: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = AndExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::Or: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = OrExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::Xor: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = XorExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::Shl: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = ShlExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::LShr: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = LShrExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::AShr: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = AShrExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ // Compare
+
+ case Instruction::ICmp: {
+ CmpInst *ci = cast<CmpInst>(i);
+ ICmpInst *ii = cast<ICmpInst>(ci);
+
+ switch(ii->getPredicate()) {
+ case ICmpInst::ICMP_EQ: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = EqExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case ICmpInst::ICMP_NE: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = NeExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case ICmpInst::ICMP_UGT: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = UgtExpr::create(left, right);
+ bindLocal(ki, state,result);
+ break;
+ }
+
+ case ICmpInst::ICMP_UGE: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = UgeExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case ICmpInst::ICMP_ULT: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = UltExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case ICmpInst::ICMP_ULE: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = UleExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case ICmpInst::ICMP_SGT: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = SgtExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case ICmpInst::ICMP_SGE: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = SgeExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case ICmpInst::ICMP_SLT: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = SltExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case ICmpInst::ICMP_SLE: {
+ ref<Expr> left = eval(ki, 0, state);
+ ref<Expr> right = eval(ki, 1, state);
+ ref<Expr> result = SleExpr::create(left, right);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ default:
+ terminateStateOnExecError(state, "invalid ICmp predicate");
+ }
+ break;
+ }
+
+ // Memory instructions...
+ case Instruction::Alloca:
+ case Instruction::Malloc: {
+ AllocationInst *ai = cast<AllocationInst>(i);
+ unsigned elementSize =
+ kmodule->targetData->getTypeStoreSize(ai->getAllocatedType());
+ ref<Expr> size = Expr::createPointer(elementSize);
+ if (ai->isArrayAllocation()) {
+ // XXX coerce?
+ ref<Expr> count = eval(ki, 0, state);
+ size = MulExpr::create(count, size);
+ }
+ bool isLocal = i->getOpcode()==Instruction::Alloca;
+ executeAlloc(state, size, isLocal, ki);
+ break;
+ }
+ case Instruction::Free: {
+ executeFree(state, eval(ki, 0, state));
+ break;
+ }
+
+ case Instruction::Load: {
+ ref<Expr> base = eval(ki, 0, state);
+ executeMemoryOperation(state, false, base, 0, ki);
+ break;
+ }
+ case Instruction::Store: {
+ ref<Expr> base = eval(ki, 1, state);
+ ref<Expr> value = eval(ki, 0, state);
+ executeMemoryOperation(state, true, base, value, 0);
+ break;
+ }
+
+ case Instruction::GetElementPtr: {
+ KGEPInstruction *kgepi = static_cast<KGEPInstruction*>(ki);
+ ref<Expr> base = eval(ki, 0, state);
+
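+    // bindInstructionConstants() folded all constant indices into
+    // kgepi->offset and recorded the remaining symbolic indices as
+    // (operand, element size) pairs.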
+ for (std::vector< std::pair<unsigned, unsigned> >::iterator
+ it = kgepi->indices.begin(), ie = kgepi->indices.end();
+ it != ie; ++it) {
+ unsigned elementSize = it->second;
+ ref<Expr> index = eval(ki, it->first, state);
+ base = AddExpr::create(base,
+ MulExpr::create(Expr::createCoerceToPointerType(index),
+ Expr::createPointer(elementSize)));
+ }
+ if (kgepi->offset)
+ base = AddExpr::create(base,
+ Expr::createPointer(kgepi->offset));
+ bindLocal(ki, state, base);
+ break;
+ }
+
+ // Conversion
+ case Instruction::Trunc: {
+ CastInst *ci = cast<CastInst>(i);
+ ref<Expr> result = ExtractExpr::createByteOff(eval(ki, 0, state),
+ 0,
+ Expr::getWidthForLLVMType(ci->getType()));
+ bindLocal(ki, state, result);
+ break;
+ }
+ case Instruction::ZExt: {
+ CastInst *ci = cast<CastInst>(i);
+ ref<Expr> result = ZExtExpr::create(eval(ki, 0, state),
+ Expr::getWidthForLLVMType(ci->getType()));
+ bindLocal(ki, state, result);
+ break;
+ }
+ case Instruction::SExt: {
+ CastInst *ci = cast<CastInst>(i);
+ ref<Expr> result = SExtExpr::create(eval(ki, 0, state),
+ Expr::getWidthForLLVMType(ci->getType()));
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::IntToPtr: {
+ CastInst *ci = cast<CastInst>(i);
+ Expr::Width pType = Expr::getWidthForLLVMType(ci->getType());
+ ref<Expr> arg = eval(ki, 0, state);
+ bindLocal(ki, state, ZExtExpr::create(arg, pType));
+ break;
+ }
+ case Instruction::PtrToInt: {
+ CastInst *ci = cast<CastInst>(i);
+ Expr::Width iType = Expr::getWidthForLLVMType(ci->getType());
+ ref<Expr> arg = eval(ki, 0, state);
+ bindLocal(ki, state, ZExtExpr::create(arg, iType));
+ break;
+ }
+
+ case Instruction::BitCast: {
+ ref<Expr> result = eval(ki, 0, state);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ // Floating Point specific instructions
+ case Instruction::FPTrunc: {
+ FPTruncInst *fi = cast<FPTruncInst>(i);
+ Expr::Width resultType = Expr::getWidthForLLVMType(fi->getType());
+ ref<Expr> arg = toConstant(state, eval(ki, 0, state),
+ "floating point");
+ uint64_t value = floats::trunc(arg.getConstantValue(),
+ resultType,
+ arg.getWidth());
+ ref<Expr> result(value, resultType);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::FPExt: {
+ FPExtInst *fi = cast<FPExtInst>(i);
+ Expr::Width resultType = Expr::getWidthForLLVMType(fi->getType());
+ ref<Expr> arg = toConstant(state, eval(ki, 0, state),
+ "floating point");
+ uint64_t value = floats::ext(arg.getConstantValue(),
+ resultType,
+ arg.getWidth());
+ ref<Expr> result(value, resultType);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::FPToUI: {
+ FPToUIInst *fi = cast<FPToUIInst>(i);
+ Expr::Width resultType = Expr::getWidthForLLVMType(fi->getType());
+ ref<Expr> arg = toConstant(state, eval(ki, 0, state),
+ "floating point");
+ uint64_t value = floats::toUnsignedInt(arg.getConstantValue(),
+ resultType,
+ arg.getWidth());
+ ref<Expr> result(value, resultType);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::FPToSI: {
+ FPToSIInst *fi = cast<FPToSIInst>(i);
+ Expr::Width resultType = Expr::getWidthForLLVMType(fi->getType());
+ ref<Expr> arg = toConstant(state, eval(ki, 0, state),
+ "floating point");
+ uint64_t value = floats::toSignedInt(arg.getConstantValue(),
+ resultType,
+ arg.getWidth());
+ ref<Expr> result(value, resultType);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::UIToFP: {
+ UIToFPInst *fi = cast<UIToFPInst>(i);
+ Expr::Width resultType = Expr::getWidthForLLVMType(fi->getType());
+ ref<Expr> arg = toConstant(state, eval(ki, 0, state),
+ "floating point");
+ uint64_t value = floats::UnsignedIntToFP(arg.getConstantValue(),
+ resultType);
+ ref<Expr> result(value, resultType);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::SIToFP: {
+ SIToFPInst *fi = cast<SIToFPInst>(i);
+ Expr::Width resultType = Expr::getWidthForLLVMType(fi->getType());
+ ref<Expr> arg = toConstant(state, eval(ki, 0, state),
+ "floating point");
+ uint64_t value = floats::SignedIntToFP(arg.getConstantValue(),
+ resultType,
+ arg.getWidth());
+ ref<Expr> result(value, resultType);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::FCmp: {
+ FCmpInst *fi = cast<FCmpInst>(i);
+ Expr::Width resultType = Expr::getWidthForLLVMType(fi->getType());
+ ref<Expr> left = toConstant(state, eval(ki, 0, state),
+ "floating point");
+ ref<Expr> right = toConstant(state, eval(ki, 1, state),
+ "floating point");
+ uint64_t leftVal = left.getConstantValue();
+ uint64_t rightVal = right.getConstantValue();
+
+ //determine whether the operands are NANs
+ unsigned inWidth = left.getWidth();
+ bool leftIsNaN = floats::isNaN( leftVal, inWidth );
+ bool rightIsNaN = floats::isNaN( rightVal, inWidth );
+
+ //handle NAN based on whether the predicate is "ordered" or "unordered"
+ uint64_t ret = (uint64_t)-1;
+ bool done = false;
+ switch( fi->getPredicate() ) {
+ //predicates which only care about whether or not the operands are NaNs
+ case FCmpInst::FCMP_ORD:
+ done = true;
+ ret = !leftIsNaN && !rightIsNaN;
+ break;
+
+ case FCmpInst::FCMP_UNO:
+ done = true;
+ ret = leftIsNaN || rightIsNaN;
+ break;
+
+ //ordered comparisons return false if either operand is NaN
+ case FCmpInst::FCMP_OEQ:
+ case FCmpInst::FCMP_OGT:
+ case FCmpInst::FCMP_OGE:
+ case FCmpInst::FCMP_OLT:
+ case FCmpInst::FCMP_OLE:
+ case FCmpInst::FCMP_ONE:
+ if( !leftIsNaN && !rightIsNaN) //only fall through and return false if there are NaN(s)
+ break;
+
+ case FCmpInst::FCMP_FALSE: { //always return false for this predicate
+ done = true;
+ ret = false;
+ break;
+ }
+
+ //unordered comparisons return true if either operand is NaN
+ case FCmpInst::FCMP_UEQ:
+ case FCmpInst::FCMP_UGT:
+ case FCmpInst::FCMP_UGE:
+ case FCmpInst::FCMP_ULT:
+ case FCmpInst::FCMP_ULE:
+ case FCmpInst::FCMP_UNE:
+ if( !leftIsNaN && !rightIsNaN) //only fall through and return true if there are NaN(s)
+ break;
+
+    case FCmpInst::FCMP_TRUE: //always return true for this predicate
+      done = true;
+      ret = true;
+      break;
+
+ default:
+ case FCmpInst::BAD_FCMP_PREDICATE: /* will fall through and trigger fatal in the next switch */
+ break;
+ }
+
+ //if not done, then we need to actually do a comparison to get the result
+ if( !done ) {
+ switch( fi->getPredicate() ) {
+ //ordered comparisons return false if either operand is NaN
+ case FCmpInst::FCMP_OEQ:
+ case FCmpInst::FCMP_UEQ:
+ ret = floats::eq( leftVal, rightVal, inWidth );
+ break;
+
+ case FCmpInst::FCMP_OGT:
+ case FCmpInst::FCMP_UGT:
+ ret = floats::gt( leftVal, rightVal, inWidth );
+ break;
+
+ case FCmpInst::FCMP_OGE:
+ case FCmpInst::FCMP_UGE:
+ ret = floats::ge( leftVal, rightVal, inWidth );
+ break;
+
+ case FCmpInst::FCMP_OLT:
+ case FCmpInst::FCMP_ULT:
+ ret = floats::lt( leftVal, rightVal, inWidth );
+ break;
+
+ case FCmpInst::FCMP_OLE:
+ case FCmpInst::FCMP_ULE:
+ ret = floats::le( leftVal, rightVal, inWidth );
+ break;
+
+ case FCmpInst::FCMP_ONE:
+ case FCmpInst::FCMP_UNE:
+ ret = floats::ne( leftVal, rightVal, inWidth );
+ break;
+
+ default:
+ terminateStateOnExecError(state, "invalid FCmp predicate");
+ }
+ }
+
+ ref<Expr> result(ret, resultType);
+ bindLocal(ki, state, result);
+ break;
+ }
+
+ case Instruction::FDiv: {
+ BinaryOperator *bi = cast<BinaryOperator>(i);
+
+ ref<Expr> dividend = eval(ki, 0, state);
+ ref<Expr> divisor = eval(ki, 1, state);
+ Expr::Width type = Expr::getWidthForLLVMType(bi->getType());
+ FP_CONSTANT_BINOP(floats::div, type, dividend, divisor, ki, state);
+ break;
+ }
+
+ case Instruction::FRem: {
+ BinaryOperator *bi = cast<BinaryOperator>(i);
+
+ ref<Expr> dividend = eval(ki, 0, state);
+ ref<Expr> divisor = eval(ki, 1, state);
+ Expr::Width type = Expr::getWidthForLLVMType(bi->getType());
+ FP_CONSTANT_BINOP(floats::mod, type, dividend, divisor, ki, state);
+ break;
+ }
+
+
+ // Other instructions...
+ // Unhandled
+ case Instruction::ExtractElement:
+ case Instruction::InsertElement:
+ case Instruction::ShuffleVector:
+ terminateStateOnError(state, "XXX vector instructions unhandled",
+ "xxx.err");
+ break;
+
+ default:
+ terminateStateOnExecError(state, "invalid instruction");
+ break;
+ }
+}
+
+void Executor::updateStates(ExecutionState *current) {
+ if (searcher) {
+ searcher->update(current, addedStates, removedStates);
+ }
+
+ states.insert(addedStates.begin(), addedStates.end());
+ addedStates.clear();
+
+ for (std::set<ExecutionState*>::iterator
+ it = removedStates.begin(), ie = removedStates.end();
+ it != ie; ++it) {
+ ExecutionState *es = *it;
+ std::set<ExecutionState*>::iterator it2 = states.find(es);
+ assert(it2!=states.end());
+ states.erase(it2);
+ std::map<ExecutionState*, std::vector<SeedInfo> >::iterator it3 =
+ seedMap.find(es);
+ if (it3 != seedMap.end())
+ seedMap.erase(it3);
+ processTree->remove(es->ptreeNode);
+ delete es;
+ }
+ removedStates.clear();
+}
+
+void Executor::bindInstructionConstants(KInstruction *KI) {
+ GetElementPtrInst *gepi = dyn_cast<GetElementPtrInst>(KI->inst);
+ if (!gepi)
+ return;
+
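+  // Walk the GEP indices: constant indices are folded into a single byte
+  // offset, while symbolic indices are recorded as (operand index, element
+  // size) pairs for executeInstruction to add at run time.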
+ KGEPInstruction *kgepi = static_cast<KGEPInstruction*>(KI);
+ ref<Expr> constantOffset = Expr::createPointer(0);
+ unsigned index = 1;
+ for (gep_type_iterator ii = gep_type_begin(gepi), ie = gep_type_end(gepi);
+ ii != ie; ++ii) {
+ if (const StructType *st = dyn_cast<StructType>(*ii)) {
+ const StructLayout *sl =
+ kmodule->targetData->getStructLayout(st);
+ const ConstantInt *ci = cast<ConstantInt>(ii.getOperand());
+ ref<Expr> addend = Expr::createPointer(sl->getElementOffset((unsigned)
+ ci->getZExtValue()));
+ constantOffset = AddExpr::create(constantOffset, addend);
+ } else {
+ const SequentialType *st = cast<SequentialType>(*ii);
+ unsigned elementSize =
+ kmodule->targetData->getTypeStoreSize(st->getElementType());
+ Value *operand = ii.getOperand();
+ if (Constant *c = dyn_cast<Constant>(operand)) {
+ ref<Expr> index = evalConstant(c);
+ ref<Expr> addend = MulExpr::create(Expr::createCoerceToPointerType(index),
+ Expr::createPointer(elementSize));
+ constantOffset = AddExpr::create(constantOffset, addend);
+ } else {
+ kgepi->indices.push_back(std::make_pair(index, elementSize));
+ }
+ }
+ index++;
+ }
+ assert(constantOffset.isConstant());
+ kgepi->offset = constantOffset.getConstantValue();
+}
+
+void Executor::bindModuleConstants() {
+ for (std::vector<KFunction*>::iterator it = kmodule->functions.begin(),
+ ie = kmodule->functions.end(); it != ie; ++it) {
+ KFunction *kf = *it;
+ for (unsigned i=0; i<kf->numInstructions; ++i)
+ bindInstructionConstants(kf->instructions[i]);
+ }
+
+ kmodule->constantTable = new Cell[kmodule->constants.size()];
+ for (unsigned i=0; i<kmodule->constants.size(); ++i) {
+ Cell &c = kmodule->constantTable[i];
+ c.value = evalConstant(kmodule->constants[i]);
+ }
+}
+
+void Executor::run(ExecutionState &initialState) {
+ bindModuleConstants();
+
+ // Delay init till now so that ticks don't accrue during
+ // optimization and such.
+ initTimers();
+
+ states.insert(&initialState);
+
+ if (usingSeeds) {
+ std::vector<SeedInfo> &v = seedMap[&initialState];
+
+ for (std::vector<BOut*>::const_iterator it = usingSeeds->begin(),
+ ie = usingSeeds->end(); it != ie; ++it)
+ v.push_back(SeedInfo(*it));
+
+ int lastNumSeeds = usingSeeds->size()+10;
+ double lastTime, startTime = lastTime = util::getWallTime();
+ ExecutionState *lastState = 0;
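+    // Round-robin over the states that still have seeds attached, using
+    // upper_bound(lastState) to pick the next one each iteration.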
+ while (!seedMap.empty()) {
+ if (haltExecution) goto dump;
+
+ std::map<ExecutionState*, std::vector<SeedInfo> >::iterator it =
+ seedMap.upper_bound(lastState);
+ if (it == seedMap.end())
+ it = seedMap.begin();
+ lastState = it->first;
+ unsigned numSeeds = it->second.size();
+ ExecutionState &state = *lastState;
+ KInstruction *ki = state.pc;
+ stepInstruction(state);
+
+ executeInstruction(state, ki);
+ processTimers(&state, MaxInstructionTime * numSeeds);
+ updateStates(&state);
+
+ if ((stats::instructions % 1000) == 0) {
+ int numSeeds = 0, numStates = 0;
+ for (std::map<ExecutionState*, std::vector<SeedInfo> >::iterator
+ it = seedMap.begin(), ie = seedMap.end();
+ it != ie; ++it) {
+ numSeeds += it->second.size();
+ numStates++;
+ }
+ double time = util::getWallTime();
+ if (SeedTime>0. && time > startTime + SeedTime) {
+ klee_warning("seed time expired, %d seeds remain over %d states",
+ numSeeds, numStates);
+ break;
+ } else if (numSeeds<=lastNumSeeds-10 ||
+ time >= lastTime+10) {
+ lastTime = time;
+ lastNumSeeds = numSeeds;
+ klee_message("%d seeds remaining over: %d states",
+ numSeeds, numStates);
+ }
+ }
+ }
+
+ klee_message("seeding done (%d states remain)", (int) states.size());
+
+    // XXX total hack, just because I like non-uniform better but want
+    // seed results to be equally weighted.
+ for (std::set<ExecutionState*>::iterator
+ it = states.begin(), ie = states.end();
+ it != ie; ++it) {
+ (*it)->weight = 1.;
+ }
+
+ if (OnlySeed)
+ goto dump;
+ }
+
+ searcher = constructUserSearcher(*this);
+
+ searcher->update(0, states, std::set<ExecutionState*>());
+
+ while (!states.empty() && !haltExecution) {
+ ExecutionState &state = searcher->selectState();
+ KInstruction *ki = state.pc;
+ stepInstruction(state);
+
+ executeInstruction(state, ki);
+ processTimers(&state, MaxInstructionTime);
+
+ if (MaxMemory) {
+ if ((stats::instructions & 0xFFFF) == 0) {
+ // We need to avoid calling GetMallocUsage() often because it
+ // is O(elts on freelist). This is really bad since we start
+ // to pummel the freelist once we hit the memory cap.
+ unsigned mbs = sys::Process::GetTotalMemoryUsage() >> 20;
+
+ if (mbs > MaxMemory) {
+ if (mbs > MaxMemory + 100) {
+ // just guess at how many to kill
+ unsigned numStates = states.size();
+ unsigned toKill = std::max(1U, numStates - numStates*MaxMemory/mbs);
+
+ if (MaxMemoryInhibit)
+ klee_warning("killing %d states (over memory cap)",
+ toKill);
+
+ std::vector<ExecutionState*> arr(states.begin(), states.end());
+ for (unsigned i=0,N=arr.size(); N && i<toKill; ++i,--N) {
+ unsigned idx = rand() % N;
+
+ // Make two pulls to try and not hit a state that
+ // covered new code.
+ if (arr[idx]->coveredNew)
+ idx = rand() % N;
+
+ std::swap(arr[idx], arr[N-1]);
+ terminateStateEarly(*arr[N-1], "memory limit");
+ }
+ }
+ atMemoryLimit = true;
+ } else {
+ atMemoryLimit = false;
+ }
+ }
+ }
+
+ updateStates(&state);
+ }
+
+ delete searcher;
+ searcher = 0;
+
+ dump:
+ if (DumpStatesOnHalt && !states.empty()) {
+ llvm::cerr << "KLEE: halting execution, dumping remaining states\n";
+ for (std::set<ExecutionState*>::iterator
+ it = states.begin(), ie = states.end();
+ it != ie; ++it) {
+ ExecutionState &state = **it;
+ stepInstruction(state); // keep stats rolling
+ terminateStateEarly(state, "execution halting");
+ }
+ updateStates(0);
+ }
+}
+
+std::string Executor::getAddressInfo(ExecutionState &state,
+ ref<Expr> address) const{
+ std::ostringstream info;
+ info << "\taddress: " << address << "\n";
+ uint64_t example;
+ if (address.isConstant()) {
+ example = address.getConstantValue();
+ } else {
+ ref<Expr> value;
+ bool success = solver->getValue(state, address, value);
+ assert(success && "FIXME: Unhandled solver failure");
+ example = value.getConstantValue();
+ info << "\texample: " << example << "\n";
+ std::pair< ref<Expr>, ref<Expr> > res = solver->getRange(state, address);
+ info << "\trange: [" << res.first << ", " << res.second <<"]\n";
+ }
+
+ MemoryObject hack((unsigned) example);
+ MemoryMap::iterator lower = state.addressSpace.objects.upper_bound(&hack);
+ info << "\tnext: ";
+ if (lower==state.addressSpace.objects.end()) {
+ info << "none\n";
+ } else {
+ const MemoryObject *mo = lower->first;
+ info << "object at " << mo->address
+ << " of size " << mo->size << "\n";
+ }
+ if (lower!=state.addressSpace.objects.begin()) {
+ --lower;
+ info << "\tprev: ";
+ if (lower==state.addressSpace.objects.end()) {
+ info << "none\n";
+ } else {
+ const MemoryObject *mo = lower->first;
+ info << "object at " << mo->address
+ << " of size " << mo->size << "\n";
+ }
+ }
+
+ return info.str();
+}
+
+void Executor::terminateState(ExecutionState &state) {
+ if (replayOut && replayPosition!=replayOut->numObjects) {
+ klee_warning_once(replayOut, "replay did not consume all objects in .bout input.");
+ }
+
+ interpreterHandler->incPathsExplored();
+
+ std::set<ExecutionState*>::iterator it = addedStates.find(&state);
+ if (it==addedStates.end()) {
+ state.pc = state.prevPC;
+
+ removedStates.insert(&state);
+ } else {
+ // never reached searcher, just delete immediately
+ std::map< ExecutionState*, std::vector<SeedInfo> >::iterator it3 =
+ seedMap.find(&state);
+ if (it3 != seedMap.end())
+ seedMap.erase(it3);
+ addedStates.erase(it);
+ processTree->remove(state.ptreeNode);
+ delete &state;
+ }
+}
+
+void Executor::terminateStateEarly(ExecutionState &state, std::string message) {
+ if (!OnlyOutputStatesCoveringNew || state.coveredNew ||
+ (AlwaysOutputSeeds && seedMap.count(&state)))
+ interpreterHandler->processTestCase(state, (message + "\n").c_str(), "early");
+ terminateState(state);
+}
+
+void Executor::terminateStateOnExit(ExecutionState &state) {
+ if (!OnlyOutputStatesCoveringNew || state.coveredNew ||
+ (AlwaysOutputSeeds && seedMap.count(&state)))
+ interpreterHandler->processTestCase(state, 0, 0);
+ terminateState(state);
+}
+
+void Executor::terminateStateOnError(ExecutionState &state,
+ const std::string &message,
+ const std::string &suffix,
+ const std::string &info) {
+ static std::set< std::pair<Instruction*, std::string> > emittedErrors;
+ const InstructionInfo &ii = *state.prevPC->info;
+
+ if (EmitAllErrors ||
+ emittedErrors.insert(std::make_pair(state.prevPC->inst,message)).second) {
+ if (ii.file != "") {
+ klee_message("ERROR: %s:%d: %s", ii.file.c_str(), ii.line, message.c_str());
+ } else {
+ klee_message("ERROR: %s", message.c_str());
+ }
+ if (!EmitAllErrors)
+ klee_message("NOTE: now ignoring this error at this location");
+
+ std::ostringstream msg;
+ msg << "Error: " << message << "\n";
+ if (ii.file != "") {
+ msg << "File: " << ii.file << "\n";
+ msg << "Line: " << ii.line << "\n";
+ }
+ msg << "Stack: \n";
+ unsigned idx = 0;
+ const KInstruction *target = state.prevPC;
+ for (ExecutionState::stack_ty::reverse_iterator
+ it = state.stack.rbegin(), ie = state.stack.rend();
+ it != ie; ++it) {
+ StackFrame &sf = *it;
+ Function *f = sf.kf->function;
+ const InstructionInfo &ii = *target->info;
+ msg << "\t#" << idx++
+ << " " << std::setw(8) << std::setfill('0') << ii.assemblyLine
+ << " in " << f->getName() << " (";
+ // Yawn, we could go up and print varargs if we wanted to.
+ unsigned index = 0;
+ for (Function::arg_iterator ai = f->arg_begin(), ae = f->arg_end();
+ ai != ae; ++ai) {
+ if (ai!=f->arg_begin()) msg << ", ";
+
+ msg << ai->getName();
+ // XXX should go through function
+ ref<Expr> value = sf.locals[sf.kf->getArgRegister(index++)].value;
+ if (value.isConstant())
+ msg << "=" << value;
+ }
+ msg << ")";
+ if (ii.file != "")
+ msg << " at " << ii.file << ":" << ii.line;
+ msg << "\n";
+ target = sf.caller;
+ }
+
+ if (info != "")
+ msg << "Info: \n" << info;
+ interpreterHandler->processTestCase(state, msg.str().c_str(), suffix.c_str());
+ }
+
+ terminateState(state);
+}
+
+// XXX shoot me
+static const char *okExternalsList[] = { "printf",
+ "fprintf",
+ "puts",
+ "getpid" };
+static std::set<std::string> okExternals(okExternalsList,
+ okExternalsList +
+ (sizeof(okExternalsList)/sizeof(okExternalsList[0])));
+
+void Executor::callExternalFunction(ExecutionState &state,
+ KInstruction *target,
+ Function *function,
+ std::vector< ref<Expr> > &arguments) {
+ // check if specialFunctionHandler wants it
+ if (specialFunctionHandler->handle(state, function, target, arguments))
+ return;
+
+ if (NoExternals && !okExternals.count(function->getName())) {
+ llvm::cerr << "KLEE:ERROR: Calling not-OK external function : " << function->getName() << "\n";
+ terminateStateOnError(state, "externals disallowed", "user.err");
+ return;
+ }
+
+ // normal external function handling path
+ uint64_t *args = (uint64_t*) alloca(sizeof(*args) * (arguments.size() + 1));
+ memset(args, 0, sizeof(*args) * (arguments.size() + 1));
+
+ unsigned i = 1;
+ for (std::vector<ref<Expr> >::iterator ai = arguments.begin(), ae = arguments.end();
+ ai!=ae; ++ai, ++i) {
+ if (AllowExternalSymCalls) { // don't bother checking uniqueness
+ ref<Expr> ce;
+ bool success = solver->getValue(state, *ai, ce);
+ assert(success && "FIXME: Unhandled solver failure");
+ static_cast<ConstantExpr*>(ce.get())->toMemory((void*) &args[i]);
+ } else {
+ ref<Expr> arg = toUnique(state, *ai);
+ if (arg.isConstant()) {
+ // XXX kick toMemory functions from here
+ static_cast<ConstantExpr*>(arg.get())->toMemory((void*) &args[i]);
+ } else {
+ std::string msg = "external call with symbolic argument: " + function->getName();
+ terminateStateOnExecError(state, msg);
+ return;
+ }
+ }
+ }
+
+ state.addressSpace.copyOutConcretes();
+
+ if (!SuppressExternalWarnings) {
+ std::ostringstream os;
+ os << "calling external: " << function->getName().c_str() << "(";
+ for (unsigned i=0; i<arguments.size(); i++) {
+ os << arguments[i];
+ if (i != arguments.size()-1)
+ os << ", ";
+ }
+ os << ")";
+
+ if (AllExternalWarnings)
+ klee_warning("%s", os.str().c_str());
+ else
+ klee_warning_once(function, "%s", os.str().c_str());
+ }
+
+ bool success = externalDispatcher->executeCall(function, target->inst, args);
+ if (!success) {
+ terminateStateOnError(state, "failed external call: " + function->getName(), "external.err");
+ return;
+ }
+
+ if (!state.addressSpace.copyInConcretes()) {
+ terminateStateOnError(state, "external modified read-only object", "external.err");
+ return;
+ }
+
+ const Type *resultType = target->inst->getType();
+ if (resultType != Type::VoidTy) {
+ ref<Expr> e = ConstantExpr::fromMemory((void*) args,
+ Expr::getWidthForLLVMType(resultType));
+ bindLocal(target, state, e);
+ }
+}
+
+/***/
+
+ref<Expr> Executor::replaceReadWithSymbolic(ExecutionState &state,
+ ref<Expr> e) {
+ unsigned n = interpreterOpts.MakeConcreteSymbolic;
+ if (!n || replayOut || replayPath)
+ return e;
+
+ // right now, we don't replace symbolics (is there any reason to?)
+ if (!e.isConstant())
+ return e;
+
+ if (n != 1 && random() % n)
+ return e;
+
+ // create a fresh location, assert that it is equal to the concrete value
+ // in e, and return it.
+
+ const MemoryObject *mo = memory->allocate(Expr::getMinBytesForWidth(e.getWidth()),
+ false, false,
+ state.prevPC->inst);
+ assert(mo && "out of memory");
+ ref<Expr> res = Expr::createTempRead(mo->array, e.getWidth());
+ ref<Expr> eq = NotOptimizedExpr::create(EqExpr::create(e, res));
+ llvm::cerr << "Making symbolic: " << eq << "\n";
+ state.addConstraint(eq);
+ return res;
+}
+
+ObjectState *Executor::bindObjectInState(ExecutionState &state, const MemoryObject *mo,
+ bool isLocal) {
+ ObjectState *os = new ObjectState(mo, mo->size);
+ state.addressSpace.bindObject(mo, os);
+
+ // It's possible that multiple bindings of the same mo in the state
+ // will put multiple copies on this list, but it doesn't really
+ // matter because all we use this list for is to unbind the object
+ // on function return.
+ if (isLocal)
+ state.stack.back().allocas.push_back(mo);
+
+ return os;
+}
+
+void Executor::executeAllocN(ExecutionState &state,
+ uint64_t nelems,
+ uint64_t size,
+ uint64_t alignment,
+ bool isLocal,
+ KInstruction *target) {
+#if 0
+ // over-allocate so that we can properly align the whole buffer
+ uint64_t address = (uint64_t) (unsigned) malloc(nelems * size + alignment - 1);
+ address += (alignment - address % alignment);
+#else
+ theMMap =
+ mmap((void*) 0x90000000,
+ nelems*size, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE
+#ifdef MAP_ANONYMOUS
+ |MAP_ANONYMOUS
+#endif
+ , 0, 0);
+ uint64_t address = (uintptr_t) theMMap;
+ theMMapSize = nelems*size;
+#endif
+
+ for (unsigned i = 0; i < nelems; i++) {
+ MemoryObject *mo = memory->allocateFixed(address + i*size, size, state.prevPC->inst);
+ ObjectState *os = bindObjectInState(state, mo, isLocal);
+ os->initializeToRandom();
+
+ // bind the local to the first memory object in the whole array
+ if (i == 0)
+ bindLocal(target, state, mo->getBaseExpr());
+ }
+
+ llvm::cerr << "KLEE: allocN at: " << address << "\n";
+}
+
+void Executor::executeAlloc(ExecutionState &state,
+ ref<Expr> size,
+ bool isLocal,
+ KInstruction *target,
+ bool zeroMemory,
+ const ObjectState *reallocFrom) {
+ size = toUnique(state, size);
+ if (size.isConstant()) {
+ MemoryObject *mo = memory->allocate(size.getConstantValue(), isLocal, false,
+ state.prevPC->inst);
+ if (!mo) {
+ bindLocal(target, state, ref<Expr>(0, kMachinePointerType));
+ } else {
+ ObjectState *os = bindObjectInState(state, mo, isLocal);
+ if (zeroMemory) {
+ os->initializeToZero();
+ } else {
+ os->initializeToRandom();
+ }
+ bindLocal(target, state, mo->getBaseExpr());
+
+ if (reallocFrom) {
+ unsigned count = std::min(reallocFrom->size, os->size);
+ for (unsigned i=0; i<count; i++)
+ os->write(i, reallocFrom->read8(i));
+ state.addressSpace.unbindObject(reallocFrom->getObject());
+ }
+ }
+ } else {
+ // XXX For now we just pick a size. Ideally we would support
+ // symbolic sizes fully, but even if we don't, it would be better to
+ // pick a value "smartly": for example, we could fork and pick the
+ // min and max values and perhaps some intermediate (reasonable)
+ // value.
+ //
+ // It would also be nice to recognize the case when size has
+ // exactly two values and just fork (but we need to get rid of the
+ // return argument first). This shows up in pcre when llvm
+ // collapses the size expression with a select.
+
+ ref<Expr> example;
+ bool success = solver->getValue(state, size, example);
+ assert(success && "FIXME: Unhandled solver failure");
+
+ // Try and start with a small example
+ while (example.getConstantValue()>128) {
+ ref<Expr> tmp = ref<Expr>(example.getConstantValue() >> 1,
+ example.getWidth());
+ bool res;
+ bool success = solver->mayBeTrue(state, EqExpr::create(tmp, size), res);
+ assert(success && "FIXME: Unhandled solver failure");
+ if (!res)
+ break;
+ example = tmp;
+ }
+
+ StatePair fixedSize = fork(state, EqExpr::create(example, size), true);
+
+ if (fixedSize.second) {
+ // Check for exactly two values
+ ref<Expr> tmp;
+ bool success = solver->getValue(*fixedSize.second, size, tmp);
+ assert(success && "FIXME: Unhandled solver failure");
+ bool res;
+ success = solver->mustBeTrue(*fixedSize.second,
+ EqExpr::create(tmp, size),
+ res);
+ assert(success && "FIXME: Unhandled solver failure");
+ if (res) {
+ executeAlloc(*fixedSize.second, tmp, isLocal,
+ target, zeroMemory, reallocFrom);
+ } else {
+ // See if a *really* big value is possible. If so, assume
+ // malloc will fail for it, so let's fork and return 0.
+ StatePair hugeSize = fork(*fixedSize.second,
+ UltExpr::create(ref<Expr>(1<<31, Expr::Int32), size),
+ true);
+ if (hugeSize.first) {
+ klee_message("NOTE: found huge malloc, returing 0");
+ bindLocal(target, *hugeSize.first, ref<Expr>(0,kMachinePointerType));
+ }
+
+ if (hugeSize.second) {
+ std::ostringstream info;
+ ExprPPrinter::printOne(info, " size expr", size);
+ info << " concretization : " << example << "\n";
+ info << " unbound example: " << tmp << "\n";
+ terminateStateOnError(*hugeSize.second,
+ "concretized symbolic size",
+ "model.err",
+ info.str());
+ }
+ }
+ }
+
+ if (fixedSize.first) // can be zero when fork fails
+ executeAlloc(*fixedSize.first, example, isLocal,
+ target, zeroMemory, reallocFrom);
+ }
+}
+
+void Executor::executeFree(ExecutionState &state,
+ ref<Expr> address,
+ KInstruction *target) {
+ StatePair zeroPointer = fork(state, Expr::createIsZero(address), true);
+ if (zeroPointer.first) {
+ if (target)
+ bindLocal(target, *zeroPointer.first, Expr::createPointer(0));
+ }
+ if (zeroPointer.second) { // address != 0
+ ExactResolutionList rl;
+ resolveExact(*zeroPointer.second, address, rl, "free");
+
+ for (Executor::ExactResolutionList::iterator it = rl.begin(),
+ ie = rl.end(); it != ie; ++it) {
+ const MemoryObject *mo = it->first.first;
+ if (mo->isLocal) {
+ terminateStateOnError(*it->second,
+ "free of alloca",
+ "free.err",
+ getAddressInfo(*it->second, address));
+ } else if (mo->isGlobal) {
+ terminateStateOnError(*it->second,
+ "free of global",
+ "free.err",
+ getAddressInfo(*it->second, address));
+ } else {
+ it->second->addressSpace.unbindObject(mo);
+ if (target)
+ bindLocal(target, *it->second, Expr::createPointer(0));
+ }
+ }
+ }
+}
+
+void Executor::resolveExact(ExecutionState &state,
+ ref<Expr> p,
+ ExactResolutionList &results,
+ const std::string &name) {
+ // XXX we may want to be capping this?
+ ResolutionList rl;
+ state.addressSpace.resolve(state, solver, p, rl);
+
+ ExecutionState *unbound = &state;
+ for (ResolutionList::iterator it = rl.begin(), ie = rl.end();
+ it != ie; ++it) {
+ ref<Expr> inBounds = EqExpr::create(p, it->first->getBaseExpr());
+
+ StatePair branches = fork(*unbound, inBounds, true);
+
+ if (branches.first)
+ results.push_back(std::make_pair(*it, branches.first));
+
+ unbound = branches.second;
+ if (!unbound) // Fork failure
+ break;
+ }
+
+ if (unbound) {
+ terminateStateOnError(*unbound,
+ "memory error: invalid pointer: " + name,
+ "ptr.err",
+ getAddressInfo(*unbound, p));
+ }
+}
+
+void Executor::executeMemoryOperation(ExecutionState &state,
+ bool isWrite,
+ ref<Expr> address,
+ ref<Expr> value /* undef if read */,
+ KInstruction *target /* undef if write */) {
+ Expr::Width type = (isWrite ? value.getWidth() :
+ Expr::getWidthForLLVMType(target->inst->getType()));
+ unsigned bytes = Expr::getMinBytesForWidth(type);
+
+ if (SimplifySymIndices) {
+ if (!address.isConstant())
+ address = state.constraints.simplifyExpr(address);
+ if (isWrite && !value.isConstant())
+ value = state.constraints.simplifyExpr(value);
+ }
+
+ // fast path: single in-bounds resolution
+ ObjectPair op;
+ bool success;
+ solver->setTimeout(stpTimeout);
+ if (!state.addressSpace.resolveOne(state, solver, address, op, success)) {
+ address = toConstant(state, address, "resolveOne failure");
+ success = state.addressSpace.resolveOne(address.getConstantValue(), op);
+ }
+ solver->setTimeout(0);
+
+ if (success) {
+ const MemoryObject *mo = op.first;
+
+ if (MaxSymArraySize && mo->size>=MaxSymArraySize) {
+ address = toConstant(state, address, "max-sym-array-size");
+ }
+
+ ref<Expr> offset = mo->getOffsetExpr(address);
+
+ bool inBounds;
+ solver->setTimeout(stpTimeout);
+ bool success = solver->mustBeTrue(state,
+ mo->getBoundsCheckOffset(offset, bytes),
+ inBounds);
+ solver->setTimeout(0);
+ if (!success) {
+ state.pc = state.prevPC;
+ terminateStateEarly(state, "query timed out");
+ return;
+ }
+
+ if (inBounds) {
+ const ObjectState *os = op.second;
+ if (isWrite) {
+ if (os->readOnly) {
+ terminateStateOnError(state,
+ "memory error: object read only",
+ "readonly.err");
+ } else {
+ ObjectState *wos = state.addressSpace.getWriteable(mo, os);
+ wos->write(offset, value);
+ }
+ } else {
+ ref<Expr> result = os->read(offset, type);
+
+ if (interpreterOpts.MakeConcreteSymbolic)
+ result = replaceReadWithSymbolic(state, result);
+
+ bindLocal(target, state, result);
+ }
+
+ return;
+ }
+ }
+
+ // we are on an error path (no resolution, multiple resolution, one
+ // resolution with out of bounds)
+
+ ResolutionList rl;
+ solver->setTimeout(stpTimeout);
+ bool incomplete = state.addressSpace.resolve(state, solver, address, rl,
+ 0, stpTimeout);
+ solver->setTimeout(0);
+
+ // XXX there is some query wastage here. who cares?
+ ExecutionState *unbound = &state;
+
+ for (ResolutionList::iterator i = rl.begin(), ie = rl.end(); i != ie; ++i) {
+ const MemoryObject *mo = i->first;
+ const ObjectState *os = i->second;
+ ref<Expr> inBounds = mo->getBoundsCheckPointer(address, bytes);
+
+ StatePair branches = fork(*unbound, inBounds, true);
+ ExecutionState *bound = branches.first;
+
+ // bound can be 0 on failure or overlapped
+ if (bound) {
+ if (isWrite) {
+ if (os->readOnly) {
+ terminateStateOnError(*bound,
+ "memory error: object read only",
+ "readonly.err");
+ } else {
+ ObjectState *wos = bound->addressSpace.getWriteable(mo, os);
+ wos->write(mo->getOffsetExpr(address), value);
+ }
+ } else {
+ ref<Expr> result = os->read(mo->getOffsetExpr(address), type);
+ bindLocal(target, *bound, result);
+ }
+ }
+
+ unbound = branches.second;
+ if (!unbound)
+ break;
+ }
+
+ // XXX should we distinguish out of bounds and overlapped cases?
+ if (unbound) {
+ if (incomplete) {
+ terminateStateEarly(*unbound, "query timed out (resolve)");
+ } else {
+ terminateStateOnError(*unbound,
+ "memory error: out of bound pointer",
+ "ptr.err",
+ getAddressInfo(*unbound, address));
+ }
+ }
+}
+
+void Executor::executeMakeSymbolic(ExecutionState &state,
+ const MemoryObject *mo) {
+ // make a new object state and rebind it; we use bind here because we
+ // want to create a completely fresh state, not a copy, although I'm
+ // not really sure it matters.
+ ObjectState *os = bindObjectInState(state, mo, false);
+ if (!replayOut) {
+ os->makeSymbolic();
+ state.addSymbolic(mo);
+
+ std::map< ExecutionState*, std::vector<SeedInfo> >::iterator it =
+ seedMap.find(&state);
+ if (it!=seedMap.end()) { // In seed mode we need to add this as a
+ // binding.
+ for (std::vector<SeedInfo>::iterator siit = it->second.begin(),
+ siie = it->second.end(); siit != siie; ++siit) {
+ SeedInfo &si = *siit;
+ BOutObject *obj = si.getNextInput(mo,
+ NamedSeedMatching);
+
+ if (!obj) {
+ if (ZeroSeedExtension) {
+ std::vector<unsigned char> &values =
+ si.assignment.bindings[mo->array];
+ values = std::vector<unsigned char>(mo->size, '\0');
+ } else if (!AllowSeedExtension) {
+ terminateStateOnError(state,
+ "ran out of inputs during seeding",
+ "user.err");
+ break;
+ }
+ } else {
+ if (obj->numBytes != mo->size &&
+ ((!(AllowSeedExtension || ZeroSeedExtension)
+ && obj->numBytes < mo->size) ||
+ (!AllowSeedTruncation && obj->numBytes > mo->size))) {
+ std::stringstream msg;
+ msg << "replace size mismatch: "
+ << mo->name << "[" << mo->size << "]"
+ << " vs " << obj->name << "[" << obj->numBytes << "]"
+ << " in bout\n";
+
+ terminateStateOnError(state,
+ msg.str(),
+ "user.err");
+ break;
+ } else {
+ std::vector<unsigned char> &values =
+ si.assignment.bindings[mo->array];
+ values.insert(values.begin(), obj->bytes,
+ obj->bytes + std::min(obj->numBytes, mo->size));
+ if (ZeroSeedExtension) {
+ for (unsigned i=obj->numBytes; i<mo->size; ++i)
+ values.push_back('\0');
+ }
+ }
+ }
+ }
+ }
+ } else {
+ if (replayPosition >= replayOut->numObjects) {
+ terminateStateOnError(state, "replay count mismatch", "user.err");
+ } else {
+ BOutObject *obj = &replayOut->objects[replayPosition++];
+ if (obj->numBytes != mo->size) {
+ terminateStateOnError(state, "replay size mismatch", "user.err");
+ } else {
+ for (unsigned i=0; i<mo->size; i++)
+ os->write8(i, obj->bytes[i]);
+ }
+ }
+ }
+}
+
+/***/
+
+void Executor::runFunctionAsMain(Function *f,
+ int argc,
+ char **argv,
+ char **envp) {
+ std::vector<ref<Expr> > arguments;
+
+ // force deterministic initialization of memory objects
+ srand(1);
+ srandom(1);
+
+ MemoryObject *argvMO = 0;
+
+ // In order to make uclibc happy and be closer to what the system is
+ // doing, we lay out the environment at the end of the argv array
+ // (both are terminated by a null). There is also a final terminating
+ // null that uclibc seems to expect, possibly for the ELF header?
+
+ int envc;
+ for (envc=0; envp[envc]; ++envc) ;
+
+ KFunction *kf = kmodule->functionMap[f];
+ assert(kf);
+ Function::arg_iterator ai = f->arg_begin(), ae = f->arg_end();
+ if (ai!=ae) {
+ arguments.push_back(ref<Expr>(argc, Expr::Int32));
+
+ if (++ai!=ae) {
+ argvMO = memory->allocate((argc+1+envc+1+1) * kMachinePointerSize, false, true,
+ f->begin()->begin());
+
+ arguments.push_back(argvMO->getBaseExpr());
+
+ if (++ai!=ae) {
+ uint64_t envp_start = argvMO->address + (argc+1)*kMachinePointerSize;
+ arguments.push_back(Expr::createPointer(envp_start));
+
+ if (++ai!=ae)
+ klee_error("invalid main function (expect 0-3 arguments)");
+ }
+ }
+ }
+
+ ExecutionState *state = new ExecutionState(kmodule->functionMap[f]);
+
+ if (pathWriter)
+ state->pathOS = pathWriter->open();
+ if (symPathWriter)
+ state->symPathOS = symPathWriter->open();
+
+
+ if (statsTracker)
+ statsTracker->framePushed(*state, 0);
+
+ assert(arguments.size() == f->arg_size() && "wrong number of arguments");
+ for (unsigned i = 0, e = f->arg_size(); i != e; ++i)
+ bindArgument(kf, i, *state, arguments[i]);
+
+ if (argvMO) {
+ ObjectState *argvOS = bindObjectInState(*state, argvMO, false);
+
+ for (int i=0; i<argc+1+envc+1+1; i++) {
+ MemoryObject *arg;
+
+ if (i==argc || i>=argc+1+envc) {
+ arg = 0;
+ } else {
+ char *s = i<argc ? argv[i] : envp[i-(argc+1)];
+ int j, len = strlen(s);
+
+ arg = memory->allocate(len+1, false, true, state->pc->inst);
+ ObjectState *os = bindObjectInState(*state, arg, false);
+ for (j=0; j<len+1; j++)
+ os->write8(j, s[j]);
+ }
+
+ if (arg) {
+ argvOS->write(i * kMachinePointerSize, arg->getBaseExpr());
+ } else {
+ argvOS->write(i * kMachinePointerSize, Expr::createPointer(0));
+ }
+ }
+ }
+
+ initializeGlobals(*state);
+
+ processTree = new PTree(state);
+ state->ptreeNode = processTree->root;
+ run(*state);
+ delete processTree;
+ processTree = 0;
+
+ // hack to clear memory objects
+ delete memory;
+ memory = new MemoryManager();
+
+ globalObjects.clear();
+ globalAddresses.clear();
+
+ if (statsTracker)
+ statsTracker->done();
+
+ if (theMMap) {
+ munmap(theMMap, theMMapSize);
+ theMMap = 0;
+ }
+}
+
+unsigned Executor::getPathStreamID(const ExecutionState &state) {
+ assert(pathWriter);
+ return state.pathOS.getID();
+}
+
+unsigned Executor::getSymbolicPathStreamID(const ExecutionState &state) {
+ assert(symPathWriter);
+ return state.symPathOS.getID();
+}
+
+void Executor::getConstraintLog(const ExecutionState &state,
+ std::string &res,
+ bool asCVC) {
+ if (asCVC) {
+ Query query(state.constraints, ref<Expr>(0, Expr::Bool));
+ char *log = solver->stpSolver->getConstraintLog(query);
+ res = std::string(log);
+ free(log);
+ } else {
+ std::ostringstream info;
+ ExprPPrinter::printConstraints(info, state.constraints);
+ res = info.str();
+ }
+}
+
+bool Executor::getSymbolicSolution(const ExecutionState &state,
+ std::vector<
+ std::pair<std::string,
+ std::vector<unsigned char> > >
+ &res) {
+ solver->setTimeout(stpTimeout);
+
+ ExecutionState tmp(state);
+ if (!NoPreferCex) {
+ for (std::vector<const MemoryObject*>::const_iterator
+ it = state.symbolics.begin(), ie = state.symbolics.end();
+ it != ie; ++it) {
+ const MemoryObject *mo = *it;
+ std::vector< ref<Expr> >::const_iterator pi =
+ mo->cexPreferences.begin(), pie = mo->cexPreferences.end();
+ for (; pi != pie; ++pi) {
+ bool mustBeTrue;
+ bool success = solver->mustBeTrue(tmp, Expr::createNot(*pi),
+ mustBeTrue);
+ if (!success) break;
+ if (!mustBeTrue) tmp.addConstraint(*pi);
+ }
+ if (pi!=pie) break;
+ }
+ }
+
+ std::vector< std::vector<unsigned char> > values;
+ std::vector<const Array*> objects;
+ for (unsigned i = 0; i != state.symbolics.size(); ++i)
+ objects.push_back(state.symbolics[i]->array);
+ bool success = solver->getInitialValues(tmp, objects, values);
+ solver->setTimeout(0);
+ if (!success) {
+ klee_warning("unable to compute initial values (invalid constraints?)!");
+ ExprPPrinter::printQuery(std::cerr,
+ state.constraints,
+ ref<Expr>(0,Expr::Bool));
+ return false;
+ }
+
+ unsigned i = 0;
+ for (std::vector<const MemoryObject*>::const_iterator
+ it = state.symbolics.begin(), ie = state.symbolics.end();
+ it != ie; ++it) {
+ res.push_back(std::make_pair((*it)->name, values[i]));
+ ++i;
+ }
+ return true;
+}
+
+void Executor::getCoveredLines(const ExecutionState &state,
+ std::map<const std::string*, std::set<unsigned> > &res) {
+ res = state.coveredLines;
+}
+
+void Executor::doImpliedValueConcretization(ExecutionState &state,
+ ref<Expr> e,
+ ref<Expr> value) {
+ assert(value.isConstant() && "non-constant passed in place of constant");
+
+ if (DebugCheckForImpliedValues)
+ ImpliedValue::checkForImpliedValues(solver->solver, e, value);
+
+ ImpliedValueList results;
+ ImpliedValue::getImpliedValues(e, value, results);
+ for (ImpliedValueList::iterator it = results.begin(), ie = results.end();
+ it != ie; ++it) {
+ ReadExpr *re = it->first.get();
+
+ if (re->index.isConstant()) {
+ // FIXME: This is the sole remaining usage of the Array object
+ // variable. Kill me.
+ const MemoryObject *mo = re->updates.root->object;
+ const ObjectState *os = state.addressSpace.findObject(mo);
+
+ if (!os) {
+ // object has been freed, no need to concretize (although as
+ // in other cases we would like to concretize the outstanding
+ // reads, but we have no facility for that yet)
+ } else {
+ assert(!os->readOnly && "not possible? read only object with static read?");
+ ObjectState *wos = state.addressSpace.getWriteable(mo, os);
+ wos->write(re->index.getConstantValue(), it->second);
+ }
+ }
+ }
+}
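A small worked illustration of the concretization loop above (the values are hypothetical; the Add rule referred to is implemented in ImpliedValue.cpp further down in this diff):

    // Suppose a branch has just established that  Add(3, Read(arr, 5)) == 10,
    // where the read index 5 is constant.
    //   getImpliedValues   =>  yields the pair (Read(arr, 5), 7)
    //   the loop above     =>  writes the concrete byte 7 at offset 5 of arr's
    //                          writable ObjectState, so later reads of that
    //                          byte fold to a constant.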
+
+///
+
+Interpreter *Interpreter::create(const InterpreterOptions &opts,
+ InterpreterHandler *ih) {
+ return new Executor(opts, ih);
+}
diff --git a/lib/Core/Executor.h b/lib/Core/Executor.h
new file mode 100644
index 00000000..76868291
--- /dev/null
+++ b/lib/Core/Executor.h
@@ -0,0 +1,445 @@
+//===-- Executor.h ----------------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Class to perform actual execution, hides implementation details from external
+// interpreter.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef KLEE_EXECUTOR_H
+#define KLEE_EXECUTOR_H
+
+#include "klee/Interpreter.h"
+#include "llvm/Support/CallSite.h"
+#include <vector>
+#include <string>
+#include <map>
+#include <set>
+
+struct BOut;
+
+namespace llvm {
+ class BasicBlock;
+ class BranchInst;
+ class CallInst;
+ class Constant;
+ class ConstantExpr;
+ class Function;
+ class GlobalValue;
+ class Instruction;
+ class TargetData;
+ class Value;
+}
+
+namespace klee {
+ class ExecutionState;
+ class ExternalDispatcher;
+ class Expr;
+ class InstructionInfoTable;
+ class KFunction;
+ class KInstruction;
+ class KInstIterator;
+ class KModule;
+ class MemoryManager;
+ class MemoryObject;
+ class ObjectState;
+ class PTree;
+ class Searcher;
+ class SeedInfo;
+ class SpecialFunctionHandler;
+ class StackFrame;
+ class StatsTracker;
+ class TimingSolver;
+ class TreeStreamWriter;
+ template<class T> class ref;
+
+ /// \todo Add a context object to keep track of data only live
+ /// during an instruction step. Should contain addedStates,
+ /// removedStates, and haltExecution, among others.
+
+class Executor : public Interpreter {
+ friend class BumpMergingSearcher;
+ friend class MergingSearcher;
+ friend class RandomPathSearcher;
+ friend class OwningSearcher;
+ friend class WeightedRandomSearcher;
+ friend class SpecialFunctionHandler;
+ friend class StatsTracker;
+
+public:
+ class Timer {
+ public:
+ Timer();
+ virtual ~Timer();
+
+ /// The event callback.
+ virtual void run() = 0;
+ };
+
+ typedef std::pair<ExecutionState*,ExecutionState*> StatePair;
+
+private:
+ class TimerInfo;
+
+ KModule *kmodule;
+ InterpreterHandler *interpreterHandler;
+ Searcher *searcher;
+
+ ExternalDispatcher *externalDispatcher;
+ TimingSolver *solver;
+ MemoryManager *memory;
+ std::set<ExecutionState*> states;
+ StatsTracker *statsTracker;
+ TreeStreamWriter *pathWriter, *symPathWriter;
+ SpecialFunctionHandler *specialFunctionHandler;
+ std::vector<TimerInfo*> timers;
+ PTree *processTree;
+
+ /// Used to track states that have been added during the current
+ /// instruction step.
+ /// \invariant \ref addedStates is a subset of \ref states.
+ /// \invariant \ref addedStates and \ref removedStates are disjoint.
+ std::set<ExecutionState*> addedStates;
+ /// Used to track states that have been removed during the current
+ /// instruction step.
+ /// \invariant \ref removedStates is a subset of \ref states.
+ /// \invariant \ref addedStates and \ref removedStates are disjoint.
+ std::set<ExecutionState*> removedStates;
+
+ /// When non-empty the Executor is running in "seed" mode. The
+ /// states in this map will be executed in an arbitrary order
+ /// (outside the normal search interface) until they terminate. When
+ /// a state reaches a symbolic branch, either direction that
+ /// satisfies one or more seeds will be added to this map. What
+ /// happens with other states (that don't satisfy the seeds) depends
+ /// on as-yet-to-be-determined flags.
+ std::map<ExecutionState*, std::vector<SeedInfo> > seedMap;
+
+ /// Map of globals to their representative memory object.
+ std::map<const llvm::GlobalValue*, MemoryObject*> globalObjects;
+
+ /// Map of globals to their bound address. This also includes
+ /// globals that have no representative object (i.e. functions).
+ std::map<const llvm::GlobalValue*, ref<Expr> > globalAddresses;
+
+ /// The set of legal function addresses, used to validate function
+ /// pointers.
+ std::set<void*> legalFunctions;
+
+ /// When non-null, the bindings that will be used for calls to
+ /// klee_make_symbolic during replay.
+ const struct BOut *replayOut;
+ /// When non-null a list of branch decisions to be used for replay.
+ const std::vector<bool> *replayPath;
+ /// The index into the current \ref replayOut or \ref replayPath
+ /// object.
+ unsigned replayPosition;
+
+ /// When non-null a list of "seed" inputs which will be used to
+ /// drive execution.
+ const std::vector<struct BOut *> *usingSeeds;
+
+ /// Disables forking; instead a random path is chosen. Enabled as
+ /// needed to control memory usage. \see fork()
+ bool atMemoryLimit;
+
+ /// Disables forking, set by client. \see setInhibitForking()
+ bool inhibitForking;
+
+ /// Signals the executor to halt execution at the next instruction
+ /// step.
+ bool haltExecution;
+
+ /// Whether implied-value concretization is enabled. Currently
+ /// false; it is buggy (it needs to validate its writes).
+ bool ivcEnabled;
+
+ /// The maximum time to allow for a single stp query.
+ double stpTimeout;
+
+ llvm::Function* getCalledFunction(llvm::CallSite &cs, ExecutionState &state);
+
+ void executeInstruction(ExecutionState &state, KInstruction *ki);
+
+ void printFileLine(ExecutionState &state, KInstruction *ki);
+
+ void run(ExecutionState &initialState);
+
+ // Given a concrete object in our [klee's] address space, add it to
+ // objects checked code can reference.
+ MemoryObject *addExternalObject(ExecutionState &state, void *addr,
+ unsigned size, bool isReadOnly);
+
+ void initializeGlobalObject(ExecutionState &state, ObjectState *os,
+ llvm::Constant *c,
+ unsigned offset);
+ void initializeGlobals(ExecutionState &state);
+
+ void stepInstruction(ExecutionState &state);
+ void updateStates(ExecutionState *current);
+ void transferToBasicBlock(llvm::BasicBlock *dst,
+ llvm::BasicBlock *src,
+ ExecutionState &state);
+
+ void callExternalFunction(ExecutionState &state,
+ KInstruction *target,
+ llvm::Function *function,
+ std::vector< ref<Expr> > &arguments);
+
+ ObjectState *bindObjectInState(ExecutionState &state, const MemoryObject *mo,
+ bool isLocal);
+
+ /// Resolve a pointer to the memory objects it could point to the
+ /// start of, forking execution when necessary and generating errors
+ /// for pointers to invalid locations (either out of bounds or
+ /// an address in the middle of an object).
+ ///
+ /// \param results[out] A list of ((MemoryObject,ObjectState),
+ /// state) pairs for each object the given address can point to the
+ /// beginning of.
+ typedef std::vector< std::pair<std::pair<const MemoryObject*, const ObjectState*>,
+ ExecutionState*> > ExactResolutionList;
+ void resolveExact(ExecutionState &state,
+ ref<Expr> p,
+ ExactResolutionList &results,
+ const std::string &name);
+
+ /// Allocate and bind a new object in a particular state. NOTE: This
+ /// function may fork.
+ ///
+ /// \param isLocal Flag to indicate if the object should be
+ /// automatically deallocated on function return (this also makes it
+ /// illegal to free directly).
+ ///
+ /// \param target Value at which to bind the base address of the new
+ /// object.
+ ///
+ /// \param reallocFrom If non-zero and the allocation succeeds,
+ /// initialize the new object from the given one and unbind it when
+ /// done (realloc semantics). The initialized bytes will be the
+ /// minimum of the size of the old and new objects, with remaining
+ /// bytes initialized as specified by zeroMemory.
+ void executeAlloc(ExecutionState &state,
+ ref<Expr> size,
+ bool isLocal,
+ KInstruction *target,
+ bool zeroMemory=false,
+ const ObjectState *reallocFrom=0);
+
+ /// XXX not for public use (this is for histar; it allocates a
+ /// contiguous set of objects, while guaranteeing page alignment)
+ void executeAllocN(ExecutionState &state,
+ uint64_t nelems,
+ uint64_t size,
+ uint64_t alignment,
+ bool isLocal,
+ KInstruction *target);
+
+ /// Free the given address with checking for errors. If target is
+ /// given it will be bound to 0 in the resulting states (this is a
+ /// convenience for realloc). Note that this function can cause the
+ /// state to fork and that \ref state cannot be safely accessed
+ /// afterwards.
+ void executeFree(ExecutionState &state,
+ ref<Expr> address,
+ KInstruction *target = 0);
+
+ void executeCall(ExecutionState &state,
+ KInstruction *ki,
+ llvm::Function *f,
+ std::vector< ref<Expr> > &arguments);
+
+ // do address resolution / object binding / out of bounds checking
+ // and perform the operation
+ void executeMemoryOperation(ExecutionState &state,
+ bool isWrite,
+ ref<Expr> address,
+ ref<Expr> value /* undef if read */,
+ KInstruction *target /* undef if write */);
+
+ void executeMakeSymbolic(ExecutionState &state, const MemoryObject *mo);
+
+ /// Create a new state where each input condition has been added as
+ /// a constraint and return the results. The input state is included
+ /// as one of the results. Note that the output vector may include
+ /// NULL pointers for states which could not be created.
+ void branch(ExecutionState &state,
+ const std::vector< ref<Expr> > &conditions,
+ std::vector<ExecutionState*> &result);
+
+ // Fork current and return states in which condition holds / does
+ // not hold, respectively. One of the states is necessarily the
+ // current state, and one of the states may be null.
+ StatePair fork(ExecutionState &current, ref<Expr> condition, bool isInternal);
+
+ /// Add the given (boolean) condition as a constraint on state. This
+ /// function is a wrapper around the state's addConstraint function
+ /// which also manages propagation of implied values,
+ /// validity checks, and seed patching.
+ void addConstraint(ExecutionState &state, ref<Expr> condition);
+
+ // Called on [for now] concrete reads; replaces the constant with a
+ // symbolic value. Used for testing.
+ ref<Expr> replaceReadWithSymbolic(ExecutionState &state, ref<Expr> e);
+
+ ref<Expr> eval(KInstruction *ki,
+ unsigned index,
+ ExecutionState &state);
+
+ void bindLocal(KInstruction *target,
+ ExecutionState &state,
+ ref<Expr> value);
+ void bindArgument(KFunction *kf,
+ unsigned index,
+ ExecutionState &state,
+ ref<Expr> value);
+
+ ref<Expr> evalConstantExpr(llvm::ConstantExpr *ce);
+
+ /// Return a unique constant value for the given expression in the
+ /// given state, if it has one (i.e. it provably only has a single
+ /// value). Otherwise return the original expression.
+ ref<Expr> toUnique(const ExecutionState &state, ref<Expr> &e);
+
+ /// Return a constant value for the given expression, forcing it to
+ /// be constant in the given state by adding a constraint if
+ /// necessary. Note that this function breaks completeness and
+ /// should generally be avoided.
+ ///
+ /// \param purpose An identifying string to be printed in case of concretization.
+ ref<Expr> toConstant(ExecutionState &state, ref<Expr> e, const char *purpose);
+
+ /// Bind a constant value for e to the given target. NOTE: This
+ /// function may fork state if the state has multiple seeds.
+ void executeGetValue(ExecutionState &state, ref<Expr> e, KInstruction *target);
+
+ /// Get textual information regarding a memory address.
+ std::string getAddressInfo(ExecutionState &state, ref<Expr> address) const;
+
+ // remove state from queue and delete
+ void terminateState(ExecutionState &state);
+ // call exit handler and terminate state early
+ void terminateStateEarly(ExecutionState &state, std::string message);
+ // call exit handler and terminate state
+ void terminateStateOnExit(ExecutionState &state);
+ // call error handler and terminate state
+ void terminateStateOnError(ExecutionState &state,
+ const std::string &message,
+ const std::string &suffix,
+ const std::string &longMessage="");
+
+ // call error handler and terminate state, for execution errors
+ // (things that should not be possible, like illegal instruction or
+ // unlowered intrinsic, or are unsupported, like inline assembly)
+ void terminateStateOnExecError(ExecutionState &state,
+ const std::string &message,
+ const std::string &info="") {
+ terminateStateOnError(state, message, "exec.err", info);
+ }
+
+ /// bindModuleConstants - Initialize the module constant table.
+ void bindModuleConstants();
+
+ /// bindInstructionConstants - Initialize any necessary per instruction
+ /// constant values.
+ void bindInstructionConstants(KInstruction *KI);
+
+ void handlePointsToObj(ExecutionState &state,
+ KInstruction *target,
+ const std::vector<ref<Expr> > &arguments);
+
+ void doImpliedValueConcretization(ExecutionState &state,
+ ref<Expr> e,
+ ref<Expr> value);
+
+ /// Add a timer to be executed periodically.
+ ///
+ /// \param timer The timer object to run on firings.
+ /// \param rate The approximate delay (in seconds) between firings.
+ void addTimer(Timer *timer, double rate);
+
+ void initTimers();
+ void processTimers(ExecutionState *current,
+ double maxInstTime);
+
+public:
+ Executor(const InterpreterOptions &opts, InterpreterHandler *ie);
+ virtual ~Executor();
+
+ const InterpreterHandler& getHandler() {
+ return *interpreterHandler;
+ }
+
+ // XXX should just be moved out to utility module
+ ref<Expr> evalConstant(llvm::Constant *c);
+
+ virtual void setPathWriter(TreeStreamWriter *tsw) {
+ pathWriter = tsw;
+ }
+ virtual void setSymbolicPathWriter(TreeStreamWriter *tsw) {
+ symPathWriter = tsw;
+ }
+
+ virtual void setReplayOut(const struct BOut *out) {
+ assert(!replayPath && "cannot replay both buffer and path");
+ replayOut = out;
+ replayPosition = 0;
+ }
+
+ virtual void setReplayPath(const std::vector<bool> *path) {
+ assert(!replayOut && "cannot replay both buffer and path");
+ replayPath = path;
+ replayPosition = 0;
+ }
+
+ virtual const llvm::Module *
+ setModule(llvm::Module *module, const ModuleOptions &opts);
+
+ virtual void useSeeds(const std::vector<struct BOut *> *seeds) {
+ usingSeeds = seeds;
+ }
+
+ virtual void runFunctionAsMain(llvm::Function *f,
+ int argc,
+ char **argv,
+ char **envp);
+
+ /*** Runtime options ***/
+
+ virtual void setHaltExecution(bool value) {
+ haltExecution = value;
+ }
+
+ virtual void setInhibitForking(bool value) {
+ inhibitForking = value;
+ }
+
+ /*** State accessor methods ***/
+
+ virtual unsigned getPathStreamID(const ExecutionState &state);
+
+ virtual unsigned getSymbolicPathStreamID(const ExecutionState &state);
+
+ virtual void getConstraintLog(const ExecutionState &state,
+ std::string &res,
+ bool asCVC = false);
+
+ virtual bool getSymbolicSolution(const ExecutionState &state,
+ std::vector<
+ std::pair<std::string,
+ std::vector<unsigned char> > >
+ &res);
+
+ virtual void getCoveredLines(const ExecutionState &state,
+ std::map<const std::string*, std::set<unsigned> > &res);
+};
+
+} // End klee namespace
+
+#endif
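For orientation, here is a minimal sketch of how a client could drive the public interface declared above. It is illustrative only: MyHandler stands for a hypothetical InterpreterHandler implementation and the construction of the options objects is elided; only Interpreter::create, setModule, runFunctionAsMain and setHaltExecution appear in this diff.

    // Hypothetical driver code (a sketch, not part of the tree).
    MyHandler handler;                         // assumed InterpreterHandler subclass
    klee::Interpreter *interp =
        klee::Interpreter::create(interpreterOpts, &handler);

    interp->setModule(bitcodeModule, moduleOpts);      // register the LLVM module
    llvm::Function *entry = bitcodeModule->getFunction("main");

    // Symbolically execute main(argc, argv, envp); generated test cases are
    // reported back through the handler's processTestCase callback.
    interp->runFunctionAsMain(entry, argc, argv, envp);
    delete interp;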
diff --git a/lib/Core/ExecutorTimers.cpp b/lib/Core/ExecutorTimers.cpp
new file mode 100644
index 00000000..51792e0d
--- /dev/null
+++ b/lib/Core/ExecutorTimers.cpp
@@ -0,0 +1,220 @@
+//===-- ExecutorTimers.cpp ------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Common.h"
+
+#include "CoreStats.h"
+#include "Executor.h"
+#include "PTree.h"
+#include "StatsTracker.h"
+
+#include "klee/ExecutionState.h"
+#include "klee/Internal/Module/InstructionInfoTable.h"
+#include "klee/Internal/Module/KInstruction.h"
+#include "klee/Internal/Module/KModule.h"
+#include "klee/Internal/System/Time.h"
+
+#include "llvm/Function.h"
+#include "llvm/Support/CommandLine.h"
+
+#include <unistd.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <math.h>
+
+
+using namespace llvm;
+using namespace klee;
+
+cl::opt<double>
+MaxTime("max-time",
+ cl::desc("Halt execution after the specified number of seconds (0=off)"),
+ cl::init(0));
+
+///
+
+class HaltTimer : public Executor::Timer {
+ Executor *executor;
+
+public:
+ HaltTimer(Executor *_executor) : executor(_executor) {}
+ ~HaltTimer() {}
+
+ void run() {
+ llvm::cerr << "KLEE: HaltTimer invoked\n";
+ executor->setHaltExecution(true);
+ }
+};
+
+///
+
+static const double kSecondsPerTick = .1;
+static volatile unsigned timerTicks = 0;
+
+// XXX hack
+extern "C" unsigned dumpStates, dumpPTree;
+unsigned dumpStates = 0, dumpPTree = 0;
+
+static void onAlarm(int) {
+ ++timerTicks;
+}
+
+// oooogalay
+static void setupHandler() {
+ struct itimerval t;
+ struct timeval tv;
+
+ tv.tv_sec = (long) kSecondsPerTick;
+ tv.tv_usec = (long) (fmod(kSecondsPerTick, 1.)*1000000);
+
+ t.it_interval = t.it_value = tv;
+
+ ::setitimer(ITIMER_REAL, &t, 0);
+ ::signal(SIGALRM, onAlarm);
+}
+
+void Executor::initTimers() {
+ static bool first = true;
+
+ if (first) {
+ first = false;
+ setupHandler();
+ }
+
+ if (MaxTime) {
+ addTimer(new HaltTimer(this), MaxTime);
+ }
+}
+
+///
+
+Executor::Timer::Timer() {}
+
+Executor::Timer::~Timer() {}
+
+class Executor::TimerInfo {
+public:
+ Timer *timer;
+
+ /// Approximate delay per timer firing.
+ double rate;
+ /// Wall time for next firing.
+ double nextFireTime;
+
+public:
+ TimerInfo(Timer *_timer, double _rate)
+ : timer(_timer),
+ rate(_rate),
+ nextFireTime(util::getWallTime() + rate) {}
+ ~TimerInfo() { delete timer; }
+};
+
+void Executor::addTimer(Timer *timer, double rate) {
+ timers.push_back(new TimerInfo(timer, rate));
+}
+
+void Executor::processTimers(ExecutionState *current,
+ double maxInstTime) {
+ static unsigned callsWithoutCheck = 0;
+ unsigned ticks = timerTicks;
+
+ if (!ticks && ++callsWithoutCheck > 1000) {
+ setupHandler();
+ ticks = 1;
+ }
+
+ if (ticks || dumpPTree || dumpStates) {
+ if (dumpPTree) {
+ char name[32];
+ sprintf(name, "ptree%08d.dot", (int) stats::instructions);
+ std::ostream *os = interpreterHandler->openOutputFile(name);
+ if (os) {
+ processTree->dump(*os);
+ delete os;
+ }
+
+ dumpPTree = 0;
+ }
+
+ if (dumpStates) {
+ std::ostream *os = interpreterHandler->openOutputFile("states.txt");
+
+ if (os) {
+ for (std::set<ExecutionState*>::const_iterator it = states.begin(),
+ ie = states.end(); it != ie; ++it) {
+ ExecutionState *es = *it;
+ *os << "(" << es << ",";
+ *os << "[";
+ ExecutionState::stack_ty::iterator next = es->stack.begin();
+ ++next;
+ for (ExecutionState::stack_ty::iterator sfIt = es->stack.begin(),
+ sf_ie = es->stack.end(); sfIt != sf_ie; ++sfIt) {
+ *os << "('" << sfIt->kf->function->getName() << "',";
+ if (next == es->stack.end()) {
+ *os << es->prevPC->info->line << "), ";
+ } else {
+ *os << next->caller->info->line << "), ";
+ ++next;
+ }
+ }
+ *os << "], ";
+
+ StackFrame &sf = es->stack.back();
+ uint64_t md2u = computeMinDistToUncovered(es->pc,
+ sf.minDistToUncoveredOnReturn);
+ uint64_t icnt = theStatisticManager->getIndexedValue(stats::instructions,
+ es->pc->info->id);
+ uint64_t cpicnt = sf.callPathNode->statistics.getValue(stats::instructions);
+
+ *os << "{";
+ *os << "'depth' : " << es->depth << ", ";
+ *os << "'weight' : " << es->weight << ", ";
+ *os << "'queryCost' : " << es->queryCost << ", ";
+ *os << "'coveredNew' : " << es->coveredNew << ", ";
+ *os << "'instsSinceCovNew' : " << es->instsSinceCovNew << ", ";
+ *os << "'md2u' : " << md2u << ", ";
+ *os << "'icnt' : " << icnt << ", ";
+ *os << "'CPicnt' : " << cpicnt << ", ";
+ *os << "}";
+ *os << ")\n";
+ }
+
+ delete os;
+ }
+
+ dumpStates = 0;
+ }
+
+ if (maxInstTime>0 && current && !removedStates.count(current)) {
+ if (timerTicks*kSecondsPerTick > maxInstTime) {
+ klee_warning("max-instruction-time exceeded: %.2fs",
+ timerTicks*kSecondsPerTick);
+ terminateStateEarly(*current, "max-instruction-time exceeded");
+ }
+ }
+
+ if (!timers.empty()) {
+ double time = util::getWallTime();
+
+ for (std::vector<TimerInfo*>::iterator it = timers.begin(),
+ ie = timers.end(); it != ie; ++it) {
+ TimerInfo *ti = *it;
+
+ if (time >= ti->nextFireTime) {
+ ti->timer->run();
+ ti->nextFireTime = time + ti->rate;
+ }
+ }
+ }
+
+ timerTicks = 0;
+ callsWithoutCheck = 0;
+ }
+}
+
diff --git a/lib/Core/ExecutorUtil.cpp b/lib/Core/ExecutorUtil.cpp
new file mode 100644
index 00000000..3b11dd42
--- /dev/null
+++ b/lib/Core/ExecutorUtil.cpp
@@ -0,0 +1,144 @@
+//===-- ExecutorUtil.cpp --------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Executor.h"
+
+#include "klee/Expr.h"
+#include "klee/Interpreter.h"
+#include "klee/Machine.h"
+#include "klee/Solver.h"
+
+#include "klee/Internal/Module/KModule.h"
+
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/Module.h"
+#include "llvm/ModuleProvider.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Target/TargetData.h"
+#include <iostream>
+#include <cassert>
+
+using namespace klee;
+using namespace llvm;
+
+namespace klee {
+
+ref<Expr>
+Executor::evalConstantExpr(llvm::ConstantExpr *ce) {
+ const llvm::Type *type = ce->getType();
+
+ ref<Expr> op1(0,Expr::Bool), op2(0,Expr::Bool), op3(0,Expr::Bool);
+ int numOperands = ce->getNumOperands();
+
+ if (numOperands > 0) op1 = evalConstant(ce->getOperand(0));
+ if (numOperands > 1) op2 = evalConstant(ce->getOperand(1));
+ if (numOperands > 2) op3 = evalConstant(ce->getOperand(2));
+
+ switch (ce->getOpcode()) {
+ case Instruction::Trunc: return ExtractExpr::createByteOff(op1,
+ 0,
+ Expr::getWidthForLLVMType(type));
+ case Instruction::ZExt: return ZExtExpr::create(op1,
+ Expr::getWidthForLLVMType(type));
+ case Instruction::SExt: return SExtExpr::create(op1,
+ Expr::getWidthForLLVMType(type));
+ case Instruction::Add: return AddExpr::create(op1, op2);
+ case Instruction::Sub: return SubExpr::create(op1, op2);
+ case Instruction::Mul: return MulExpr::create(op1, op2);
+ case Instruction::SDiv: return SDivExpr::create(op1, op2);
+ case Instruction::UDiv: return UDivExpr::create(op1, op2);
+ case Instruction::SRem: return SRemExpr::create(op1, op2);
+ case Instruction::URem: return URemExpr::create(op1, op2);
+ case Instruction::And: return AndExpr::create(op1, op2);
+ case Instruction::Or: return OrExpr::create(op1, op2);
+ case Instruction::Xor: return XorExpr::create(op1, op2);
+ case Instruction::Shl: return ShlExpr::create(op1, op2);
+ case Instruction::LShr: return LShrExpr::create(op1, op2);
+ case Instruction::AShr: return AShrExpr::create(op1, op2);
+ case Instruction::BitCast: return op1;
+
+ case Instruction::IntToPtr: {
+ return ZExtExpr::create(op1, Expr::getWidthForLLVMType(type));
+ }
+
+ case Instruction::PtrToInt: {
+ return ZExtExpr::create(op1, Expr::getWidthForLLVMType(type));
+ }
+
+ case Instruction::GetElementPtr: {
+ ref<Expr> base = op1;
+
+ for (gep_type_iterator ii = gep_type_begin(ce), ie = gep_type_end(ce);
+ ii != ie; ++ii) {
+ ref<Expr> addend(0, kMachinePointerType);
+
+ if (const StructType *st = dyn_cast<StructType>(*ii)) {
+ const StructLayout *sl = kmodule->targetData->getStructLayout(st);
+ const ConstantInt *ci = cast<ConstantInt>(ii.getOperand());
+
+ addend = Expr::createPointer(sl->getElementOffset((unsigned)
+ ci->getZExtValue()));
+ } else {
+ const SequentialType *st = cast<SequentialType>(*ii);
+ ref<Expr> index = evalConstant(cast<Constant>(ii.getOperand()));
+ unsigned elementSize = kmodule->targetData->getTypeStoreSize(st->getElementType());
+
+ index = Expr::createCoerceToPointerType(index);
+ addend = MulExpr::create(index,
+ Expr::createPointer(elementSize));
+ }
+
+ base = AddExpr::create(base, addend);
+ }
+
+ return base;
+ }
+
+ case Instruction::ICmp: {
+ switch(ce->getPredicate()) {
+ case ICmpInst::ICMP_EQ: return EqExpr::create(op1, op2);
+ case ICmpInst::ICMP_NE: return NeExpr::create(op1, op2);
+ case ICmpInst::ICMP_UGT: return UgtExpr::create(op1, op2);
+ case ICmpInst::ICMP_UGE: return UgeExpr::create(op1, op2);
+ case ICmpInst::ICMP_ULT: return UltExpr::create(op1, op2);
+ case ICmpInst::ICMP_ULE: return UleExpr::create(op1, op2);
+ case ICmpInst::ICMP_SGT: return SgtExpr::create(op1, op2);
+ case ICmpInst::ICMP_SGE: return SgeExpr::create(op1, op2);
+ case ICmpInst::ICMP_SLT: return SltExpr::create(op1, op2);
+ case ICmpInst::ICMP_SLE: return SleExpr::create(op1, op2);
+ default:
+ assert(0 && "unhandled ICmp predicate");
+ }
+ }
+
+ case Instruction::Select: {
+ return SelectExpr::create(op1, op2, op3);
+ }
+
+ case Instruction::FDiv:
+ case Instruction::FRem:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::FCmp:
+ assert(0 && "floating point ConstantExprs unsupported");
+
+ default :
+ assert(0 && "unknown ConstantExpr type");
+ }
+}
+
+}
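As a concrete illustration of the GetElementPtr folding above, assuming a typical 64-bit data layout (the struct and indices are made up for the example; the leading zero index of the GEP contributes nothing):

    // struct S { int32_t a; int64_t b; };   S g[4];   // sizeof(S) == 16, offsetof(S, b) == 8
    // The constant expression  &g[2].b  folds into:
    //   base   = address of g
    //   addend = 2 * 16        (SequentialType branch: index * element store size)
    //          +     8         (StructType branch: StructLayout element offset)
    //   i.e. the pointer expression  g + 40.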
diff --git a/lib/Core/ExternalDispatcher.cpp b/lib/Core/ExternalDispatcher.cpp
new file mode 100644
index 00000000..9e3b0a49
--- /dev/null
+++ b/lib/Core/ExternalDispatcher.cpp
@@ -0,0 +1,230 @@
+//===-- ExternalDispatcher.cpp --------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ExternalDispatcher.h"
+
+#include "llvm/Module.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/ModuleProvider.h"
+#include "llvm/ExecutionEngine/JIT.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/System/DynamicLibrary.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Support/raw_ostream.h"
+#include <setjmp.h>
+#include <signal.h>
+
+using namespace llvm;
+using namespace klee;
+
+/***/
+
+static jmp_buf escapeCallJmpBuf;
+
+extern "C" {
+
+static void sigsegv_handler(int signal, siginfo_t *info, void *context) {
+ longjmp(escapeCallJmpBuf, 1);
+}
+
+}
+
+void *ExternalDispatcher::resolveSymbol(const std::string &name) {
+ assert(executionEngine);
+
+ const char *str = name.c_str();
+
+ // We use this to validate that function names can be resolved so we
+ // need to match how the JIT does it. Unfortunately we can't
+ // directly access the JIT resolution function
+ // JIT::getPointerToNamedFunction so we emulate the important points.
+
+ if (str[0] == 1) // asm specifier, skipped
+ ++str;
+
+ void *addr = dl_symbols.SearchForAddressOfSymbol(str);
+ if (addr)
+ return addr;
+
+ // If it has an asm specifier and starts with an underscore we retry
+ // without the underscore. I (DWD) don't know why.
+ if (name[0] == 1 && str[0]=='_') {
+ ++str;
+ addr = dl_symbols.SearchForAddressOfSymbol(str);
+ }
+
+ return addr;
+}
+
+ExternalDispatcher::ExternalDispatcher() {
+ dispatchModule = new Module("ExternalDispatcher");
+ ExistingModuleProvider* MP = new ExistingModuleProvider(dispatchModule);
+
+ std::string error;
+ executionEngine = ExecutionEngine::createJIT(MP, &error);
+ if (!executionEngine) {
+ llvm::cerr << "unable to make jit: " << error << "\n";
+ abort();
+ }
+
+ // from ExecutionEngine::create
+ if (executionEngine) {
+ // Make sure we can resolve symbols in the program as well. The zero arg
+ // to the function tells DynamicLibrary to load the program, not a library.
+ try {
+ dl_symbols.LoadLibraryPermanently(0);
+ } catch (...) {
+ assert(0 && "Exception in LoadLibraryPermantently.\n");
+ }
+ }
+
+#ifdef WINDOWS
+ preboundFunctions["getpid"] = (void*) (long) getpid;
+ preboundFunctions["putchar"] = (void*) (long) putchar;
+ preboundFunctions["printf"] = (void*) (long) printf;
+ preboundFunctions["fprintf"] = (void*) (long) fprintf;
+ preboundFunctions["sprintf"] = (void*) (long) sprintf;
+#endif
+}
+
+ExternalDispatcher::~ExternalDispatcher() {
+ delete executionEngine;
+}
+
+bool ExternalDispatcher::executeCall(Function *f, Instruction *i, uint64_t *args) {
+ dispatchers_ty::iterator it = dispatchers.find(i);
+ Function *dispatcher;
+
+ if (it == dispatchers.end()) {
+#ifdef WINDOWS
+ std::map<std::string, void*>::iterator it2 =
+ preboundFunctions.find(f->getName());
+
+ if (it2 != preboundFunctions.end()) {
+ // only bind once
+ if (it2->second) {
+ executionEngine->addGlobalMapping(f, it2->second);
+ it2->second = 0;
+ }
+ }
+#endif
+
+ dispatcher = createDispatcher(f,i);
+
+ dispatchers.insert(std::make_pair(i, dispatcher));
+
+ if (dispatcher) {
+ // force the JIT execution engine to go ahead and build the
+ // function. this ensures that any errors or assertions in the
+ // compilation process will trigger crashes instead of being
+ // caught as aborts in the external function.
+ executionEngine->recompileAndRelinkFunction(dispatcher);
+ }
+ } else {
+ dispatcher = it->second;
+ }
+
+ return runProtectedCall(dispatcher, args);
+}
+
+// XXX not reentrant
+static uint64_t *gTheArgsP;
+
+bool ExternalDispatcher::runProtectedCall(Function *f, uint64_t *args) {
+ struct sigaction segvAction, segvActionOld;
+ bool res;
+
+ if (!f)
+ return false;
+
+ std::vector<GenericValue> gvArgs;
+ gTheArgsP = args;
+
+ segvAction.sa_handler = 0;
+ memset(&segvAction.sa_mask, 0, sizeof(segvAction.sa_mask));
+ segvAction.sa_flags = SA_SIGINFO;
+ segvAction.sa_sigaction = ::sigsegv_handler;
+ sigaction(SIGSEGV, &segvAction, &segvActionOld);
+
+ if (setjmp(escapeCallJmpBuf)) {
+ res = false;
+ } else {
+ executionEngine->runFunction(f, gvArgs);
+ res = true;
+ }
+
+ sigaction(SIGSEGV, &segvActionOld, 0);
+ return res;
+}
+
+// for performance purposes we construct the stub in such a way that
+// the arguments pointer is passed through the static global variable
+// gTheArgsP in this file. This is done so that the stub function
+// prototype trivially matches the special cases that the JIT knows
+// how to directly call. If this is not done, then the jit will end up
+// generating a nullary stub just to call our stub, for every single
+// function call.
+Function *ExternalDispatcher::createDispatcher(Function *target, Instruction *inst) {
+ if (!resolveSymbol(target->getName()))
+ return 0;
+
+ CallSite cs;
+ if (inst->getOpcode()==Instruction::Call) {
+ cs = CallSite(cast<CallInst>(inst));
+ } else {
+ cs = CallSite(cast<InvokeInst>(inst));
+ }
+
+ Value **args = new Value*[cs.arg_size()];
+
+ std::vector<const Type*> nullary;
+
+ Function *dispatcher = Function::Create(FunctionType::get(Type::VoidTy,
+ nullary, false),
+ GlobalVariable::ExternalLinkage,
+ "",
+ dispatchModule);
+
+
+ BasicBlock *dBB = BasicBlock::Create("entry", dispatcher);
+
+ Instruction *argI64sp = new IntToPtrInst(ConstantInt::get(Type::Int64Ty, (long) (void*) &gTheArgsP),
+ PointerType::getUnqual(PointerType::getUnqual(Type::Int64Ty)),
+ "argsp",
+ dBB);
+ Instruction *argI64s = new LoadInst(argI64sp, "args", dBB);
+
+ unsigned i = 0;
+ for (CallSite::arg_iterator ai = cs.arg_begin(), ae = cs.arg_end();
+ ai!=ae; ++ai, ++i) {
+ Value *index = ConstantInt::get(Type::Int32Ty, i+1);
+
+ Instruction *argI64p = GetElementPtrInst::Create(argI64s, index, "", dBB);
+ Instruction *argp = new BitCastInst(argI64p,
+ PointerType::getUnqual((*ai)->getType()), "", dBB);
+ args[i] = new LoadInst(argp, "", dBB);
+ }
+
+ Instruction *result = CallInst::Create(target, args, args+i, "", dBB);
+
+ if (result->getType() != Type::VoidTy) {
+ Instruction *resp = new BitCastInst(argI64s,
+ PointerType::getUnqual(result->getType()), "", dBB);
+ new StoreInst(result, resp, dBB);
+ }
+
+ ReturnInst::Create(dBB);
+
+ delete[] args;
+
+ return dispatcher;
+}
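To make the arguments-through-gTheArgsP convention concrete: for a call site such as int f(int, char *), the nullary stub emitted above behaves roughly like the following C++ (a sketch of what the generated IR does, not code that exists in the tree):

    // Equivalent behaviour of the generated dispatcher for  int f(int, char *).
    static void dispatcher_stub() {
      uint64_t *args = *gTheArgsP;                  // argument block installed by runProtectedCall
      int r = f((int) args[1], (char *) args[2]);   // argument i is reloaded from slot i+1
      *(int *) args = r;                            // a non-void result is stored back into args[0]
    }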
diff --git a/lib/Core/ExternalDispatcher.h b/lib/Core/ExternalDispatcher.h
new file mode 100644
index 00000000..fc8f80f4
--- /dev/null
+++ b/lib/Core/ExternalDispatcher.h
@@ -0,0 +1,50 @@
+//===-- ExternalDispatcher.h ------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef KLEE_EXTERNALDISPATCHER_H
+#define KLEE_EXTERNALDISPATCHER_H
+
+#include <map>
+#include "llvm/System/DynamicLibrary.h"
+
+namespace llvm {
+ class ExecutionEngine;
+ class Instruction;
+ class Function;
+ class FunctionType;
+ class Module;
+}
+
+namespace klee {
+ class ExternalDispatcher {
+ private:
+ typedef std::map<const llvm::Instruction*,llvm::Function*> dispatchers_ty;
+ dispatchers_ty dispatchers;
+ llvm::Module *dispatchModule;
+ llvm::ExecutionEngine *executionEngine;
+ llvm::sys::DynamicLibrary dl_symbols;
+ std::map<std::string, void*> preboundFunctions;
+
+ llvm::Function *createDispatcher(llvm::Function *f, llvm::Instruction *i);
+ bool runProtectedCall(llvm::Function *f, uint64_t *args);
+
+ public:
+ ExternalDispatcher();
+ ~ExternalDispatcher();
+
+ /* Call the given function using the parameter passing convention of
+ * ci with arguments in args[1], args[2], ... and writing the result
+ * into args[0].
+ */
+ bool executeCall(llvm::Function *function, llvm::Instruction *i, uint64_t *args);
+ void *resolveSymbol(const std::string &name);
+ };
+}
+
+#endif
diff --git a/lib/Core/ImpliedValue.cpp b/lib/Core/ImpliedValue.cpp
new file mode 100644
index 00000000..386c8d80
--- /dev/null
+++ b/lib/Core/ImpliedValue.cpp
@@ -0,0 +1,274 @@
+//===-- ImpliedValue.cpp --------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ImpliedValue.h"
+
+#include "klee/Constraints.h"
+#include "klee/Expr.h"
+#include "klee/Solver.h"
+// FIXME: Use APInt.
+#include "klee/Internal/Support/IntEvaluation.h"
+
+#include "klee/util/ExprUtil.h"
+
+#include <iostream>
+#include <map>
+#include <set>
+
+using namespace klee;
+
+// XXX we really want to do some sort of canonicalization of exprs
+// globally so that cases below become simpler
+static void _getImpliedValue(ref<Expr> e,
+ uint64_t value,
+ ImpliedValueList &results) {
+ switch (e.getKind()) {
+
+ case Expr::Constant: {
+ assert(value == e.getConstantValue() && "error in implied value calculation");
+ break;
+ }
+
+ // Special
+
+ case Expr::NotOptimized: break;
+
+ case Expr::Read: {
+ // XXX in theory it is possible to descend into a symbolic index
+ // under certain circumstances (all values known, known value
+ // unique, or range known, max / min hit). Seems unlikely this
+ // would work often enough to be worth the effort.
+ ReadExpr *re = static_ref_cast<ReadExpr>(e);
+ results.push_back(std::make_pair(re,
+ ConstantExpr::create(value, e.getWidth())));
+ break;
+ }
+
+ case Expr::Select: {
+ // not much to do, could improve with range analysis
+ SelectExpr *se = static_ref_cast<SelectExpr>(e);
+
+ if (se->trueExpr.isConstant()) {
+ if (se->falseExpr.isConstant()) {
+ if (se->trueExpr.getConstantValue() != se->falseExpr.getConstantValue()) {
+ if (value == se->trueExpr.getConstantValue()) {
+ _getImpliedValue(se->cond, 1, results);
+ } else {
+ assert(value == se->falseExpr.getConstantValue() &&
+ "err in implied value calculation");
+ _getImpliedValue(se->cond, 0, results);
+ }
+ }
+ }
+ }
+ break;
+ }
+
+ case Expr::Concat: {
+ ConcatExpr *ce = static_ref_cast<ConcatExpr>(e);
+ _getImpliedValue(ce->getKid(0), (value >> ce->getKid(1).getWidth()) & ((1 << ce->getKid(0).getWidth()) - 1), results);
+ _getImpliedValue(ce->getKid(1), value & ((1 << ce->getKid(1).getWidth()) - 1), results);
+ break;
+ }
+
+ case Expr::Extract: {
+ // XXX, could do more here with "some bits" mask
+ break;
+ }
+
+ // Casting
+
+ case Expr::ZExt:
+ case Expr::SExt: {
+ CastExpr *ce = static_ref_cast<CastExpr>(e);
+ _getImpliedValue(ce->src,
+ bits64::truncateToNBits(value,
+ ce->src.getWidth()),
+ results);
+ break;
+ }
+
+ // Arithmetic
+
+ case Expr::Add: { // constants on left
+ BinaryExpr *be = static_ref_cast<BinaryExpr>(e);
+ if (be->left.isConstant()) {
+ uint64_t nvalue = ints::sub(value,
+ be->left.getConstantValue(),
+ be->left.getWidth());
+ _getImpliedValue(be->right, nvalue, results);
+ }
+ break;
+ }
+ case Expr::Sub: { // constants on left
+ BinaryExpr *be = static_ref_cast<BinaryExpr>(e);
+ if (be->left.isConstant()) {
+ uint64_t nvalue = ints::sub(be->left.getConstantValue(),
+ value,
+ be->left.getWidth());
+ _getImpliedValue(be->right, nvalue, results);
+ }
+ break;
+ }
+ case Expr::Mul: {
+ // XXX can do stuff here, but need valid mask and other things
+ // because of bits that might be lost
+ break;
+ }
+
+ case Expr::UDiv:
+ case Expr::SDiv:
+ case Expr::URem:
+ case Expr::SRem:
+ // no, no, no
+ break;
+
+ // Binary
+
+ case Expr::And: {
+ BinaryExpr *be = static_ref_cast<BinaryExpr>(e);
+ if (be->getWidth() == Expr::Bool) {
+ if (value) {
+ _getImpliedValue(be->left, value, results);
+ _getImpliedValue(be->right, value, results);
+ }
+ } else {
+      // XXX, we can basically propagate a mask here
+ // where we know "some bits". may or may not be
+ // useful.
+ }
+ break;
+ }
+ case Expr::Or: {
+ BinaryExpr *be = static_ref_cast<BinaryExpr>(e);
+ if (!value) {
+ _getImpliedValue(be->left, 0, results);
+ _getImpliedValue(be->right, 0, results);
+ } else {
+ // XXX, can do more?
+ }
+ break;
+ }
+ case Expr::Xor: { // constants on left
+ BinaryExpr *be = static_ref_cast<BinaryExpr>(e);
+ if (be->left.isConstant()) {
+ _getImpliedValue(be->right, value ^ be->left.getConstantValue(), results);
+ }
+ break;
+ }
+
+ // Comparison
+ case Expr::Ne:
+ value = !value;
+ /* fallthru */
+ case Expr::Eq: {
+ EqExpr *ee = static_ref_cast<EqExpr>(e);
+ if (value) {
+ if (ee->left.isConstant())
+ _getImpliedValue(ee->right, ee->left.getConstantValue(), results);
+ } else {
+ // look for limited value range, woohoo
+ //
+ // in general anytime one side was restricted to two values we
+ // can apply this trick. the only obvious case where this
+ // occurs, aside from booleans, is as the result of a select
+ // expression where the true and false branches are single
+ // valued and distinct.
+
+ if (ee->left.isConstant()) {
+ if (ee->left.getWidth() == Expr::Bool) {
+ _getImpliedValue(ee->right, !ee->left.getConstantValue(), results);
+ }
+ }
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+}
+
+void ImpliedValue::getImpliedValues(ref<Expr> e,
+ ref<Expr> value,
+ ImpliedValueList &results) {
+ assert(value.isConstant() && "non-constant in place of constant");
+ _getImpliedValue(e, value.getConstantValue(), results);
+}
+
+void ImpliedValue::checkForImpliedValues(Solver *S, ref<Expr> e,
+ ref<Expr> value) {
+ assert(value.isConstant() && "non-constant in place of constant");
+
+ std::vector<ref<ReadExpr> > reads;
+ std::map<ref<ReadExpr>, ref<Expr> > found;
+ ImpliedValueList results;
+
+ getImpliedValues(e, value, results);
+
+ for (ImpliedValueList::iterator i = results.begin(), ie = results.end();
+ i != ie; ++i) {
+ std::map<ref<ReadExpr>, ref<Expr> >::iterator it = found.find(i->first);
+ if (it != found.end()) {
+ assert(it->second.getConstantValue() == i->second.getConstantValue() &&
+ "I don't think so Scott");
+ } else {
+ found.insert(std::make_pair(i->first, i->second));
+ }
+ }
+
+ findReads(e, false, reads);
+ std::set< ref<ReadExpr> > readsSet(reads.begin(), reads.end());
+ reads = std::vector< ref<ReadExpr> >(readsSet.begin(), readsSet.end());
+
+ std::vector<ref<Expr> > assumption;
+ assumption.push_back(EqExpr::create(e, value));
+
+ // obscure... we need to make sure that all the read indices are
+ // bounds checked. if we don't do this we can end up constructing
+ // invalid counterexamples because STP will happily make out of
+ // bounds indices which will not get picked up. this is of utmost
+ // importance if we are being backed by the CexCachingSolver.
+
+ for (std::vector< ref<ReadExpr> >::iterator i = reads.begin(),
+ ie = reads.end(); i != ie; ++i) {
+ ReadExpr *re = i->get();
+ ref<Expr> size = ref<Expr>(re->updates.root->size, kMachinePointerType);
+ assumption.push_back(UltExpr::create(re->index, size));
+ }
+
+ ConstraintManager assume(assumption);
+ for (std::vector< ref<ReadExpr> >::iterator i = reads.begin(),
+ ie = reads.end(); i != ie; ++i) {
+ ref<ReadExpr> var = *i;
+ ref<Expr> possible;
+ bool success = S->getValue(Query(assume, var), possible);
+ assert(success && "FIXME: Unhandled solver failure");
+ std::map<ref<ReadExpr>, ref<Expr> >::iterator it = found.find(var);
+ bool res;
+ success = S->mustBeTrue(Query(assume, EqExpr::create(var, possible)), res);
+ assert(success && "FIXME: Unhandled solver failure");
+ if (res) {
+ if (it != found.end()) {
+ assert(possible.getConstantValue() == it->second.getConstantValue());
+ found.erase(it);
+ }
+ } else {
+ if (it!=found.end()) {
+ ref<Expr> binding = it->second;
+ llvm::cerr << "checkForImpliedValues: " << e << " = " << value << "\n"
+ << "\t\t implies " << var << " == " << binding
+ << " (error)\n";
+ assert(0);
+ }
+ }
+ }
+
+ assert(found.empty());
+}
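Each case above strips one operator and recomputes the value the remaining child is forced to take. A small worked trace, assuming the constants-on-left normalization the cases rely on:

// Suppose the engine learns that
//     e = Eq( 7, Add(3, Read(arr, i)) )   must equal   1   (e is true).
//
// Expr::Eq, value==1, constant left child: recurse on the right child with 7
//     -> Add(3, Read(arr, i)) must equal 7
// Expr::Add, constant left child:          recurse with 7 - 3
//     -> Read(arr, i) must equal 4
// Expr::Read terminates the walk and records the binding
//     -> results = { (Read(arr, i), 4) }
//
// checkForImpliedValues() then asks the solver whether each recorded read
// really is forced to that value (after adding the bounds assumptions on the
// read indices described above), and asserts if the two disagree.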
diff --git a/lib/Core/ImpliedValue.h b/lib/Core/ImpliedValue.h
new file mode 100644
index 00000000..51ec6e9b
--- /dev/null
+++ b/lib/Core/ImpliedValue.h
@@ -0,0 +1,38 @@
+//===-- ImpliedValue.h ------------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef KLEE_IMPLIEDVALUE_H
+#define KLEE_IMPLIEDVALUE_H
+
+#include "klee/Expr.h"
+
+#include <vector>
+
+// The idea of implied values is that often we know the result of some
+// expression e is a concrete value C. In many cases this directly
+// implies that some variable x embedded in e is also a concrete value
+// (derived from C). This module is used for finding such variables
+// and their computed values.
+
+namespace klee {
+ class ConstantExpr;
+ class Expr;
+ class ReadExpr;
+ class Solver;
+
+ typedef std::vector< std::pair<ref<ReadExpr>, ref<Expr> > > ImpliedValueList;
+
+ namespace ImpliedValue {
+ void getImpliedValues(ref<Expr> e, ref<Expr> cvalue, ImpliedValueList &result);
+ void checkForImpliedValues(Solver *S, ref<Expr> e, ref<Expr> cvalue);
+ }
+
+}
+
+#endif
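A minimal usage sketch of the interface above, assuming e has just been constrained to the constant c elsewhere in the engine (the helper function is illustrative):

#include "ImpliedValue.h"

// Hypothetical helper: count how many ReadExprs are forced to a concrete
// value once "e == c" is known; c must be a constant expression.
unsigned countForcedReads(klee::ref<klee::Expr> e, klee::ref<klee::Expr> c) {
  klee::ImpliedValueList results;
  klee::ImpliedValue::getImpliedValues(e, c, results);
  return (unsigned) results.size();  // each entry is a (ref<ReadExpr>, constant) pair
}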
diff --git a/lib/Core/Makefile b/lib/Core/Makefile
new file mode 100755
index 00000000..4da3c7ea
--- /dev/null
+++ b/lib/Core/Makefile
@@ -0,0 +1,16 @@
+#===-- lib/Core/Makefile -----------------------------------*- Makefile -*--===#
+#
+# The KLEE Symbolic Virtual Machine
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+#===------------------------------------------------------------------------===#
+
+LEVEL=../..
+
+LIBRARYNAME=kleeCore
+DONT_BUILD_RELINKED=1
+BUILD_ARCHIVE=1
+
+include $(LEVEL)/Makefile.common
diff --git a/lib/Core/Memory.cpp b/lib/Core/Memory.cpp
new file mode 100644
index 00000000..cd563551
--- /dev/null
+++ b/lib/Core/Memory.cpp
@@ -0,0 +1,812 @@
+//===-- Memory.cpp --------------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Common.h"
+
+#include "Memory.h"
+
+#include "klee/Expr.h"
+#include "klee/Machine.h"
+#include "klee/Solver.h"
+#include "klee/util/BitArray.h"
+
+#include "ObjectHolder.h"
+
+#include <llvm/Function.h>
+#include <llvm/Instruction.h>
+#include <llvm/Value.h>
+
+#include <iostream>
+#include <cassert>
+#include <sstream>
+
+using namespace llvm;
+using namespace klee;
+
+/***/
+
+ObjectHolder::ObjectHolder(const ObjectHolder &b) : os(b.os) {
+ if (os) ++os->refCount;
+}
+
+ObjectHolder::ObjectHolder(ObjectState *_os) : os(_os) {
+ if (os) ++os->refCount;
+}
+
+ObjectHolder::~ObjectHolder() {
+ if (os && --os->refCount==0) delete os;
+}
+
+ObjectHolder &ObjectHolder::operator=(const ObjectHolder &b) {
+ if (b.os) ++b.os->refCount;
+ if (os && --os->refCount==0) delete os;
+ os = b.os;
+ return *this;
+}
+
+/***/
+
+int MemoryObject::counter = 0;
+
+extern "C" void vc_DeleteExpr(void*);
+
+MemoryObject::~MemoryObject() {
+  // FIXME: This shouldn't be necessary. Arrays should be ref-counted
+  // just like everything else, and the interaction with the STP array
+  // should be hidden inside the Expr/Solver layers.
+ if (array) {
+ if (array->stpInitialArray) {
+ ::vc_DeleteExpr(array->stpInitialArray);
+ array->stpInitialArray = 0;
+ }
+ delete array;
+ }
+}
+
+void MemoryObject::getAllocInfo(std::string &result) const {
+ std::ostringstream info;
+
+ info << "MO" << id << "[" << size << "]";
+
+ if (allocSite) {
+ info << " allocated at ";
+ if (const Instruction *i = dyn_cast<Instruction>(allocSite)) {
+ info << i->getParent()->getParent()->getName() << "():";
+ info << *i;
+ } else if (const GlobalValue *gv = dyn_cast<GlobalValue>(allocSite)) {
+ info << "global:" << gv->getName();
+ } else {
+ info << "value:" << *allocSite;
+ }
+ } else {
+ info << " (no allocation info)";
+ }
+
+ result = info.str();
+}
+
+/***/
+
+ObjectState::ObjectState(const MemoryObject *mo, unsigned _size)
+ : copyOnWriteOwner(0),
+ refCount(0),
+ object(mo),
+ concreteStore(new uint8_t[_size]),
+ concreteMask(0),
+ flushMask(0),
+ knownSymbolics(0),
+ size(_size),
+ updates(mo->array, false, 0),
+ readOnly(false) {
+}
+
+ObjectState::ObjectState(const ObjectState &os)
+ : copyOnWriteOwner(0),
+ refCount(0),
+ object(os.object),
+ concreteStore(new uint8_t[os.size]),
+ concreteMask(os.concreteMask ? new BitArray(*os.concreteMask, os.size) : 0),
+ flushMask(os.flushMask ? new BitArray(*os.flushMask, os.size) : 0),
+ knownSymbolics(0),
+ size(os.size),
+ updates(os.updates),
+ readOnly(false) {
+ assert(!os.readOnly && "no need to copy read only object?");
+
+ if (os.knownSymbolics) {
+ knownSymbolics = new ref<Expr>[size];
+ for (unsigned i=0; i<size; i++)
+ knownSymbolics[i] = os.knownSymbolics[i];
+ }
+
+ memcpy(concreteStore, os.concreteStore, size*sizeof(*concreteStore));
+}
+
+ObjectState::~ObjectState() {
+ if (concreteMask) delete concreteMask;
+ if (flushMask) delete flushMask;
+ if (knownSymbolics) delete[] knownSymbolics;
+ delete[] concreteStore;
+}
+
+/***/
+
+void ObjectState::makeConcrete() {
+ if (concreteMask) delete concreteMask;
+ if (flushMask) delete flushMask;
+ if (knownSymbolics) delete[] knownSymbolics;
+ concreteMask = 0;
+ flushMask = 0;
+ knownSymbolics = 0;
+}
+
+void ObjectState::makeSymbolic() {
+ assert(!updates.head &&
+ "XXX makeSymbolic of objects with symbolic values is unsupported");
+ updates.isRooted = true;
+
+ // XXX simplify this, can just delete various arrays I guess
+ for (unsigned i=0; i<size; i++) {
+ markByteSymbolic(i);
+ setKnownSymbolic(i, 0);
+ markByteFlushed(i);
+ }
+}
+
+void ObjectState::initializeToZero() {
+ makeConcrete();
+ memset(concreteStore, 0, size);
+}
+
+void ObjectState::initializeToRandom() {
+ makeConcrete();
+ for (unsigned i=0; i<size; i++) {
+ // randomly selected by 256 sided die
+ concreteStore[i] = 0xAB;
+ }
+}
+
+/*
+Cache Invariants
+--
+isByteKnownSymbolic(i) => !isByteConcrete(i)
+isByteConcrete(i) => !isByteKnownSymbolic(i)
+!isByteFlushed(i) => (isByteConcrete(i) || isByteKnownSymbolic(i))
+ */
+
+void ObjectState::fastRangeCheckOffset(ref<Expr> offset,
+ unsigned *base_r,
+ unsigned *size_r) const {
+ *base_r = 0;
+ *size_r = size;
+}
+
+void ObjectState::flushRangeForRead(unsigned rangeBase,
+ unsigned rangeSize) const {
+ if (!flushMask) flushMask = new BitArray(size, true);
+
+ for (unsigned offset=rangeBase; offset<rangeBase+rangeSize; offset++) {
+ if (!isByteFlushed(offset)) {
+ if (isByteConcrete(offset)) {
+ updates.extend(ConstantExpr::create(offset, kMachinePointerType),
+ ConstantExpr::create(concreteStore[offset], Expr::Int8));
+ } else {
+ assert(isByteKnownSymbolic(offset) && "invalid bit set in flushMask");
+ updates.extend(ConstantExpr::create(offset, kMachinePointerType),
+ knownSymbolics[offset]);
+ }
+
+ flushMask->unset(offset);
+ }
+ }
+}
+
+void ObjectState::flushRangeForWrite(unsigned rangeBase,
+ unsigned rangeSize) {
+ if (!flushMask) flushMask = new BitArray(size, true);
+
+ for (unsigned offset=rangeBase; offset<rangeBase+rangeSize; offset++) {
+ if (!isByteFlushed(offset)) {
+ if (isByteConcrete(offset)) {
+ updates.extend(ConstantExpr::create(offset, kMachinePointerType),
+ ConstantExpr::create(concreteStore[offset], Expr::Int8));
+ markByteSymbolic(offset);
+ } else {
+ assert(isByteKnownSymbolic(offset) && "invalid bit set in flushMask");
+ updates.extend(ConstantExpr::create(offset, kMachinePointerType),
+ knownSymbolics[offset]);
+ setKnownSymbolic(offset, 0);
+ }
+
+ flushMask->unset(offset);
+ } else {
+ // flushed bytes that are written over still need
+ // to be marked out
+ if (isByteConcrete(offset)) {
+ markByteSymbolic(offset);
+ } else if (isByteKnownSymbolic(offset)) {
+ setKnownSymbolic(offset, 0);
+ }
+ }
+ }
+}
+
+bool ObjectState::isByteConcrete(unsigned offset) const {
+ return !concreteMask || concreteMask->get(offset);
+}
+
+bool ObjectState::isByteFlushed(unsigned offset) const {
+ return flushMask && !flushMask->get(offset);
+}
+
+bool ObjectState::isByteKnownSymbolic(unsigned offset) const {
+ return knownSymbolics && knownSymbolics[offset].get();
+}
+
+void ObjectState::markByteConcrete(unsigned offset) {
+ if (concreteMask)
+ concreteMask->set(offset);
+}
+
+void ObjectState::markByteSymbolic(unsigned offset) {
+ if (!concreteMask)
+ concreteMask = new BitArray(size, true);
+ concreteMask->unset(offset);
+}
+
+void ObjectState::markByteUnflushed(unsigned offset) {
+ if (flushMask)
+ flushMask->set(offset);
+}
+
+void ObjectState::markByteFlushed(unsigned offset) {
+ if (!flushMask) {
+ flushMask = new BitArray(size, false);
+ } else {
+ flushMask->unset(offset);
+ }
+}
+
+void ObjectState::setKnownSymbolic(unsigned offset,
+ Expr *value /* can be null */) {
+ if (knownSymbolics) {
+ knownSymbolics[offset] = value;
+ } else {
+ if (value) {
+ knownSymbolics = new ref<Expr>[size];
+ knownSymbolics[offset] = value;
+ }
+ }
+}
+
+/***/
+
+ref<Expr> ObjectState::read8(unsigned offset) const {
+ if (isByteConcrete(offset)) {
+ return ConstantExpr::create(concreteStore[offset], Expr::Int8);
+ } else if (isByteKnownSymbolic(offset)) {
+ return knownSymbolics[offset];
+ } else {
+ assert(isByteFlushed(offset) && "unflushed byte without cache value");
+
+ return ReadExpr::create(updates,
+ ConstantExpr::create(offset, kMachinePointerType));
+ }
+}
+
+ref<Expr> ObjectState::read8(ref<Expr> offset) const {
+ assert(!offset.isConstant() && "constant offset passed to symbolic read8");
+ unsigned base, size;
+ fastRangeCheckOffset(offset, &base, &size);
+ flushRangeForRead(base, size);
+
+ if (size>4096) {
+ std::string allocInfo;
+ object->getAllocInfo(allocInfo);
+ klee_warning_once(0, "flushing %d bytes on read, may be slow and/or crash: %s",
+ size,
+ allocInfo.c_str());
+ }
+
+ return ReadExpr::create(updates, offset);
+}
+
+void ObjectState::write8(unsigned offset, uint8_t value) {
+ //assert(read_only == false && "writing to read-only object!");
+ concreteStore[offset] = value;
+ setKnownSymbolic(offset, 0);
+
+ markByteConcrete(offset);
+ markByteUnflushed(offset);
+}
+
+void ObjectState::write8(unsigned offset, ref<Expr> value) {
+ // can happen when ExtractExpr special cases
+ if (value.isConstant()) {
+ write8(offset, (uint8_t) value.getConstantValue());
+ } else {
+ setKnownSymbolic(offset, value.get());
+
+ markByteSymbolic(offset);
+ markByteUnflushed(offset);
+ }
+}
+
+void ObjectState::write8(ref<Expr> offset, ref<Expr> value) {
+ assert(!offset.isConstant() && "constant offset passed to symbolic write8");
+ unsigned base, size;
+ fastRangeCheckOffset(offset, &base, &size);
+ flushRangeForWrite(base, size);
+
+ if (size>4096) {
+ std::string allocInfo;
+ object->getAllocInfo(allocInfo);
+    klee_warning_once(0, "flushing %d bytes on write, may be slow and/or crash: %s",
+ size,
+ allocInfo.c_str());
+ }
+
+ updates.extend(offset, value);
+}
+
+/***/
+
+ref<Expr> ObjectState::read(ref<Expr> offset, Expr::Width width) const {
+ if (offset.isConstant()) {
+ return read((unsigned) offset.getConstantValue(), width);
+ } else {
+ switch (width) {
+ case Expr::Bool: return read1(offset);
+ case Expr::Int8: return read8(offset);
+ case Expr::Int16: return read16(offset);
+ case Expr::Int32: return read32(offset);
+ case Expr::Int64: return read64(offset);
+ default: assert(0 && "invalid type");
+ }
+ }
+}
+
+ref<Expr> ObjectState::read(unsigned offset, Expr::Width width) const {
+ switch (width) {
+ case Expr::Bool: return read1(offset);
+ case Expr::Int8: return read8(offset);
+ case Expr::Int16: return read16(offset);
+ case Expr::Int32: return read32(offset);
+ case Expr::Int64: return read64(offset);
+ default: assert(0 && "invalid type");
+ }
+}
+
+ref<Expr> ObjectState::read1(unsigned offset) const {
+ return ExtractExpr::createByteOff(read8(offset), 0, Expr::Bool);
+}
+
+ref<Expr> ObjectState::read1(ref<Expr> offset) const {
+ return ExtractExpr::createByteOff(read8(offset), 0, Expr::Bool);
+}
+
+ref<Expr> ObjectState::read16(unsigned offset) const {
+ if (kMachineByteOrder == machine::MSB) {
+ return ConcatExpr::create(read8(offset+0),
+ read8(offset+1));
+ } else {
+ return ConcatExpr::create(read8(offset+1),
+ read8(offset+0));
+ }
+}
+
+ref<Expr> ObjectState::read16(ref<Expr> offset) const {
+ if (kMachineByteOrder == machine::MSB) {
+ return ConcatExpr::create
+ (read8(AddExpr::create(offset,
+ ConstantExpr::create(0,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(1,
+ kMachinePointerType))));
+ } else {
+ return ConcatExpr::create
+ (read8(AddExpr::create(offset,
+ ConstantExpr::create(1,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(0,
+ kMachinePointerType))));
+ }
+}
+
+ref<Expr> ObjectState::read32(unsigned offset) const {
+ if (kMachineByteOrder == machine::MSB) {
+ return ConcatExpr::create4(read8(offset+0),
+ read8(offset+1),
+ read8(offset+2),
+ read8(offset+3));
+ } else {
+ return ConcatExpr::create4(read8(offset+3),
+ read8(offset+2),
+ read8(offset+1),
+ read8(offset+0));
+ }
+}
+
+ref<Expr> ObjectState::read32(ref<Expr> offset) const {
+ if (kMachineByteOrder == machine::MSB) {
+ return ConcatExpr::create4
+ (read8(AddExpr::create(offset,
+ ConstantExpr::create(0,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(1,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(2,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(3,
+ kMachinePointerType))));
+ } else {
+ return ConcatExpr::create4
+ (read8(AddExpr::create(offset,
+ ConstantExpr::create(3,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(2,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(1,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(0,
+ kMachinePointerType))));
+ }
+}
+
+ref<Expr> ObjectState::read64(unsigned offset) const {
+ if (kMachineByteOrder == machine::MSB) {
+ return ConcatExpr::create8(read8(offset+0),
+ read8(offset+1),
+ read8(offset+2),
+ read8(offset+3),
+ read8(offset+4),
+ read8(offset+5),
+ read8(offset+6),
+ read8(offset+7));
+ } else {
+ return ConcatExpr::create8(read8(offset+7),
+ read8(offset+6),
+ read8(offset+5),
+ read8(offset+4),
+ read8(offset+3),
+ read8(offset+2),
+ read8(offset+1),
+ read8(offset+0));
+ }
+}
+
+ref<Expr> ObjectState::read64(ref<Expr> offset) const {
+ if (kMachineByteOrder == machine::MSB) {
+ return ConcatExpr::create8
+ (read8(AddExpr::create(offset,
+ ConstantExpr::create(0,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(1,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(2,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(3,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(4,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(5,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(6,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(7,
+ kMachinePointerType))));
+ } else {
+ return ConcatExpr::create8
+ (read8(AddExpr::create(offset,
+ ConstantExpr::create(7,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(6,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(5,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(4,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(3,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(2,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(1,
+ kMachinePointerType))),
+ read8(AddExpr::create(offset,
+ ConstantExpr::create(0,
+ kMachinePointerType))));
+ }
+}
+
+void ObjectState::write(ref<Expr> offset, ref<Expr> value) {
+ Expr::Width w = value.getWidth();
+ if (offset.isConstant()) {
+ write(offset.getConstantValue(), value);
+ } else {
+ switch(w) {
+ case Expr::Bool: write1(offset, value); break;
+ case Expr::Int8: write8(offset, value); break;
+ case Expr::Int16: write16(offset, value); break;
+ case Expr::Int32: write32(offset, value); break;
+ case Expr::Int64: write64(offset, value); break;
+ default: assert(0 && "invalid number of bytes in write");
+ }
+ }
+}
+
+void ObjectState::write(unsigned offset, ref<Expr> value) {
+ Expr::Width w = value.getWidth();
+ if (value.isConstant()) {
+ uint64_t val = value.getConstantValue();
+ switch(w) {
+ case Expr::Bool:
+ case Expr::Int8: write8(offset, val); break;
+ case Expr::Int16: write16(offset, val); break;
+ case Expr::Int32: write32(offset, val); break;
+ case Expr::Int64: write64(offset, val); break;
+ default: assert(0 && "invalid number of bytes in write");
+ }
+ } else {
+ switch(w) {
+ case Expr::Bool: write1(offset, value); break;
+ case Expr::Int8: write8(offset, value); break;
+ case Expr::Int16: write16(offset, value); break;
+ case Expr::Int32: write32(offset, value); break;
+ case Expr::Int64: write64(offset, value); break;
+ default: assert(0 && "invalid number of bytes in write");
+ }
+ }
+}
+
+void ObjectState::write1(unsigned offset, ref<Expr> value) {
+ write8(offset, ZExtExpr::create(value, Expr::Int8));
+}
+
+void ObjectState::write1(ref<Expr> offset, ref<Expr> value) {
+ write8(offset, ZExtExpr::create(value, Expr::Int8));
+}
+
+void ObjectState::write16(unsigned offset, uint16_t value) {
+ if (kMachineByteOrder == machine::MSB) {
+ write8(offset+0, (uint8_t) (value >> 8));
+ write8(offset+1, (uint8_t) (value >> 0));
+ } else {
+ write8(offset+1, (uint8_t) (value >> 8));
+ write8(offset+0, (uint8_t) (value >> 0));
+ }
+}
+
+void ObjectState::write16(unsigned offset, ref<Expr> value) {
+ if (kMachineByteOrder == machine::MSB) {
+ write8(offset+0, ExtractExpr::createByteOff(value, 1));
+ write8(offset+1, ExtractExpr::createByteOff(value, 0));
+ } else {
+ write8(offset+1, ExtractExpr::createByteOff(value, 1));
+ write8(offset+0, ExtractExpr::createByteOff(value, 0));
+ }
+}
+
+
+void ObjectState::write16(ref<Expr> offset, ref<Expr> value) {
+ if (kMachineByteOrder == machine::MSB) {
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(0, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,1));
+ write8(AddExpr::create(offset,
+                           ConstantExpr::create(1, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,0));
+ } else {
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(1, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,1));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(0, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,0));
+ }
+}
+
+void ObjectState::write32(unsigned offset, uint32_t value) {
+ if (kMachineByteOrder == machine::MSB) {
+ write8(offset+0, (uint8_t) (value >> 24));
+ write8(offset+1, (uint8_t) (value >> 16));
+ write8(offset+2, (uint8_t) (value >> 8));
+ write8(offset+3, (uint8_t) (value >> 0));
+ } else {
+ write8(offset+3, (uint8_t) (value >> 24));
+ write8(offset+2, (uint8_t) (value >> 16));
+ write8(offset+1, (uint8_t) (value >> 8));
+ write8(offset+0, (uint8_t) (value >> 0));
+ }
+}
+
+void ObjectState::write32(unsigned offset, ref<Expr> value) {
+ if (kMachineByteOrder == machine::MSB) {
+ write8(offset+0, ExtractExpr::createByteOff(value, 3));
+ write8(offset+1, ExtractExpr::createByteOff(value, 2));
+ write8(offset+2, ExtractExpr::createByteOff(value, 1));
+ write8(offset+3, ExtractExpr::createByteOff(value, 0));
+ } else {
+ write8(offset+3, ExtractExpr::createByteOff(value, 3));
+ write8(offset+2, ExtractExpr::createByteOff(value, 2));
+ write8(offset+1, ExtractExpr::createByteOff(value, 1));
+ write8(offset+0, ExtractExpr::createByteOff(value, 0));
+ }
+}
+
+void ObjectState::write32(ref<Expr> offset, ref<Expr> value) {
+ if (kMachineByteOrder == machine::MSB) {
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(0, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,3));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(1, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,2));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(2, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,1));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(3, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,0));
+ } else {
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(3, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,3));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(2, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,2));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(1, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,1));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(0, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,0));
+ }
+}
+
+void ObjectState::write64(unsigned offset, uint64_t value) {
+ if (kMachineByteOrder == machine::MSB) {
+ write8(offset+0, (uint8_t) (value >> 56));
+ write8(offset+1, (uint8_t) (value >> 48));
+ write8(offset+2, (uint8_t) (value >> 40));
+ write8(offset+3, (uint8_t) (value >> 32));
+ write8(offset+4, (uint8_t) (value >> 24));
+ write8(offset+5, (uint8_t) (value >> 16));
+ write8(offset+6, (uint8_t) (value >> 8));
+ write8(offset+7, (uint8_t) (value >> 0));
+ } else {
+ write8(offset+7, (uint8_t) (value >> 56));
+ write8(offset+6, (uint8_t) (value >> 48));
+ write8(offset+5, (uint8_t) (value >> 40));
+ write8(offset+4, (uint8_t) (value >> 32));
+ write8(offset+3, (uint8_t) (value >> 24));
+ write8(offset+2, (uint8_t) (value >> 16));
+ write8(offset+1, (uint8_t) (value >> 8));
+ write8(offset+0, (uint8_t) (value >> 0));
+ }
+}
+
+void ObjectState::write64(unsigned offset, ref<Expr> value) {
+ if (kMachineByteOrder == machine::MSB) {
+ write8(offset+0, ExtractExpr::createByteOff(value, 7));
+ write8(offset+1, ExtractExpr::createByteOff(value, 6));
+ write8(offset+2, ExtractExpr::createByteOff(value, 5));
+ write8(offset+3, ExtractExpr::createByteOff(value, 4));
+ write8(offset+4, ExtractExpr::createByteOff(value, 3));
+ write8(offset+5, ExtractExpr::createByteOff(value, 2));
+ write8(offset+6, ExtractExpr::createByteOff(value, 1));
+ write8(offset+7, ExtractExpr::createByteOff(value, 0));
+ } else {
+ write8(offset+7, ExtractExpr::createByteOff(value, 7));
+ write8(offset+6, ExtractExpr::createByteOff(value, 6));
+ write8(offset+5, ExtractExpr::createByteOff(value, 5));
+ write8(offset+4, ExtractExpr::createByteOff(value, 4));
+ write8(offset+3, ExtractExpr::createByteOff(value, 3));
+ write8(offset+2, ExtractExpr::createByteOff(value, 2));
+ write8(offset+1, ExtractExpr::createByteOff(value, 1));
+ write8(offset+0, ExtractExpr::createByteOff(value, 0));
+ }
+}
+
+void ObjectState::write64(ref<Expr> offset, ref<Expr> value) {
+ if (kMachineByteOrder == machine::MSB) {
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(0, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,7));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(1, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,6));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(2, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,5));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(3, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,4));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(4, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,3));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(5, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,2));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(6, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,1));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(7, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,0));
+ } else {
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(7, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,7));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(6, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,6));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(5, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,5));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(4, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,4));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(3, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,3));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(2, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,2));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(1, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,1));
+ write8(AddExpr::create(offset,
+ ConstantExpr::create(0, kMachinePointerType)),
+ ExtractExpr::createByteOff(value,0));
+ }
+}
+
+void ObjectState::print() {
+ llvm::cerr << "-- ObjectState --\n";
+ llvm::cerr << "\tMemoryObject ID: " << object->id << "\n";
+ llvm::cerr << "\tRoot Object: " << updates.root << "\n";
+ llvm::cerr << "\tIs Rooted? " << updates.isRooted << "\n";
+ llvm::cerr << "\tSize: " << size << "\n";
+
+ llvm::cerr << "\tBytes:\n";
+ for (unsigned i=0; i<size; i++) {
+ llvm::cerr << "\t\t["<<i<<"]"
+ << " concrete? " << isByteConcrete(i)
+ << " known-sym? " << isByteKnownSymbolic(i)
+ << " flushed? " << isByteFlushed(i) << " = ";
+ ref<Expr> e = read8(i);
+ llvm::cerr << e << "\n";
+ }
+
+ llvm::cerr << "\tUpdates:\n";
+ for (const UpdateNode *un=updates.head; un; un=un->next) {
+ llvm::cerr << "\t\t[" << un->index << "] = " << un->value << "\n";
+ }
+}
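The wide read/write helpers above decompose every access into byte operations ordered by kMachineByteOrder. A small sketch of the little-endian case, assuming the target being modelled has kMachineByteOrder == machine::LSB:

#include "Memory.h"

// A concrete 32-bit write is split into byte stores low byte first
// (concreteStore[0..3] = 44 33 22 11 for the value below), and read32()
// reassembles the same bytes, so the round trip recovers the original value.
klee::ref<klee::Expr> roundTrip32(klee::ObjectState &os) {
  os.write32(0, 0x11223344u);
  return os.read32(0);
}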
diff --git a/lib/Core/Memory.h b/lib/Core/Memory.h
new file mode 100644
index 00000000..0f09b162
--- /dev/null
+++ b/lib/Core/Memory.h
@@ -0,0 +1,239 @@
+//===-- Memory.h ------------------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef KLEE_MEMORY_H
+#define KLEE_MEMORY_H
+
+#include "klee/Expr.h"
+
+#include <vector>
+#include <string>
+
+namespace llvm {
+ class Value;
+}
+
+namespace klee {
+
+class BitArray;
+class MemoryManager;
+class Solver;
+
+class MemoryObject {
+ friend class STPBuilder;
+
+private:
+ static int counter;
+
+public:
+ unsigned id;
+ uint64_t address;
+ Array *array;
+
+ /// size in bytes
+ unsigned size;
+ std::string name;
+
+ bool isLocal;
+ bool isGlobal;
+ bool isFixed;
+
+ /// true if created by us.
+ bool fake_object;
+ bool isUserSpecified;
+
+ /// "Location" for which this memory object was allocated. This
+ /// should be either the allocating instruction or the global object
+ /// it was allocated for (or whatever else makes sense).
+ const llvm::Value *allocSite;
+
+ /// A list of boolean expressions the user has requested be true of
+ /// a counterexample. Mutable since we play a little fast and loose
+ /// with allowing it to be added to during execution (although
+ /// should sensibly be only at creation time).
+ mutable std::vector< ref<Expr> > cexPreferences;
+
+ // DO NOT IMPLEMENT
+ MemoryObject(const MemoryObject &b);
+ MemoryObject &operator=(const MemoryObject &b);
+
+public:
+ // XXX this is just a temp hack, should be removed
+ explicit
+ MemoryObject(uint64_t _address)
+ : id(counter++),
+ address(_address),
+ array(new Array(this, 0, id)),
+ size(0),
+ isFixed(true),
+ allocSite(0) {
+ }
+
+ MemoryObject(uint64_t _address, unsigned _size,
+ bool _isLocal, bool _isGlobal, bool _isFixed,
+ const llvm::Value *_allocSite)
+ : id(counter++),
+ address(_address),
+ array(new Array(this, id, _size)),
+ size(_size),
+ name("unnamed"),
+ isLocal(_isLocal),
+ isGlobal(_isGlobal),
+ isFixed(_isFixed),
+ fake_object(false),
+ isUserSpecified(false),
+ allocSite(_allocSite) {
+ }
+
+ ~MemoryObject();
+
+ /// Get an identifying string for this allocation.
+ void getAllocInfo(std::string &result) const;
+
+ void setName(std::string name) {
+ this->name = name;
+ }
+
+ ref<Expr> getBaseExpr() const {
+ return ConstantExpr::create(address, kMachinePointerType);
+ }
+ ref<Expr> getSizeExpr() const {
+ return ConstantExpr::create(size, kMachinePointerType);
+ }
+ ref<Expr> getOffsetExpr(ref<Expr> pointer) const {
+ return SubExpr::create(pointer, getBaseExpr());
+ }
+ ref<Expr> getBoundsCheckPointer(ref<Expr> pointer) const {
+ return getBoundsCheckOffset(getOffsetExpr(pointer));
+ }
+ ref<Expr> getBoundsCheckPointer(ref<Expr> pointer, unsigned bytes) const {
+ return getBoundsCheckOffset(getOffsetExpr(pointer), bytes);
+ }
+
+ ref<Expr> getBoundsCheckOffset(ref<Expr> offset) const {
+ if (size==0) {
+ return EqExpr::create(offset, ref<Expr>(0, kMachinePointerType));
+ } else {
+ return UltExpr::create(offset, getSizeExpr());
+ }
+ }
+ ref<Expr> getBoundsCheckOffset(ref<Expr> offset, unsigned bytes) const {
+ if (bytes<=size) {
+ return UltExpr::create(offset,
+ ref<Expr>(size - bytes + 1, kMachinePointerType));
+ } else {
+ return ref<Expr>(0, Expr::Bool);
+ }
+ }
+};
+
+class ObjectState {
+private:
+ friend class AddressSpace;
+ unsigned copyOnWriteOwner; // exclusively for AddressSpace
+
+ friend class ObjectHolder;
+ unsigned refCount;
+
+ const MemoryObject *object;
+
+ uint8_t *concreteStore;
+  // XXX cleanup name of flushMask (it's backwards or something)
+ BitArray *concreteMask;
+
+  // mutable because it may need to be flushed during a read of a const object
+ mutable BitArray *flushMask;
+
+ ref<Expr> *knownSymbolics;
+
+public:
+ unsigned size;
+
+  // mutable because we may need to flush during a read of a const object
+ mutable UpdateList updates;
+
+ bool readOnly;
+
+public:
+  // initial contents are undefined but concrete; it is the creator's
+  // responsibility to initialize the object contents appropriately
+ ObjectState(const MemoryObject *mo, unsigned size);
+ ObjectState(const ObjectState &os);
+ ~ObjectState();
+
+ const MemoryObject *getObject() const { return object; }
+
+ void setReadOnly(bool ro) { readOnly = ro; }
+
+  // make all bytes concrete with undefined values
+ void makeConcrete();
+
+ void makeSymbolic();
+
+ // make contents all concrete and zero
+ void initializeToZero();
+ // make contents all concrete and random
+ void initializeToRandom();
+
+ ref<Expr> read(ref<Expr> offset, Expr::Width width) const;
+ ref<Expr> read(unsigned offset, Expr::Width width) const;
+ ref<Expr> read1(unsigned offset) const;
+ ref<Expr> read8(unsigned offset) const;
+ ref<Expr> read16(unsigned offset) const;
+ ref<Expr> read32(unsigned offset) const;
+ ref<Expr> read64(unsigned offset) const;
+
+  // write a value of the given width at the given offset.
+ void write(unsigned offset, ref<Expr> value);
+ void write(ref<Expr> offset, ref<Expr> value);
+
+ void write8(unsigned offset, uint8_t value);
+ void write16(unsigned offset, uint16_t value);
+ void write32(unsigned offset, uint32_t value);
+ void write64(unsigned offset, uint64_t value);
+
+private:
+ ref<Expr> read1(ref<Expr> offset) const;
+ ref<Expr> read8(ref<Expr> offset) const;
+ ref<Expr> read16(ref<Expr> offset) const;
+ ref<Expr> read32(ref<Expr> offset) const;
+ ref<Expr> read64(ref<Expr> offset) const;
+
+ void write1(unsigned offset, ref<Expr> value);
+ void write1(ref<Expr> offset, ref<Expr> value);
+ void write8(unsigned offset, ref<Expr> value);
+ void write8(ref<Expr> offset, ref<Expr> value);
+ void write16(unsigned offset, ref<Expr> value);
+ void write16(ref<Expr> offset, ref<Expr> value);
+ void write32(unsigned offset, ref<Expr> value);
+ void write32(ref<Expr> offset, ref<Expr> value);
+ void write64(unsigned offset, ref<Expr> value);
+ void write64(ref<Expr> offset, ref<Expr> value);
+
+
+ void fastRangeCheckOffset(ref<Expr> offset, unsigned *base_r, unsigned *size_r) const;
+ void flushRangeForRead(unsigned rangeBase, unsigned rangeSize) const;
+ void flushRangeForWrite(unsigned rangeBase, unsigned rangeSize);
+
+ bool isByteConcrete(unsigned offset) const;
+ bool isByteFlushed(unsigned offset) const;
+ bool isByteKnownSymbolic(unsigned offset) const;
+
+ void markByteConcrete(unsigned offset);
+ void markByteSymbolic(unsigned offset);
+ void markByteFlushed(unsigned offset);
+ void markByteUnflushed(unsigned offset);
+ void setKnownSymbolic(unsigned offset, Expr *value);
+
+ void print();
+};
+
+} // End klee namespace
+
+#endif
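getBoundsCheckOffset() above is what turns an access into a single comparison the solver can branch on. A short sketch of the expression it produces for a multi-byte access (the helper is illustrative):

#include "Memory.h"

// For a 16-byte object and a 4-byte access this builds Ult(ptr - base, 13),
// i.e. offsets 0..12 are in bounds; a zero-sized object only admits offset 0,
// and an access wider than the object folds to the constant false.
klee::ref<klee::Expr> inBounds4(const klee::MemoryObject &mo,
                                klee::ref<klee::Expr> ptr) {
  return mo.getBoundsCheckPointer(ptr, 4);
}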
diff --git a/lib/Core/MemoryManager.cpp b/lib/Core/MemoryManager.cpp
new file mode 100644
index 00000000..cec7b7d1
--- /dev/null
+++ b/lib/Core/MemoryManager.cpp
@@ -0,0 +1,69 @@
+//===-- MemoryManager.cpp -------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Common.h"
+
+#include "CoreStats.h"
+#include "Memory.h"
+#include "MemoryManager.h"
+
+#include "klee/ExecutionState.h"
+#include "klee/Expr.h"
+#include "klee/Solver.h"
+
+#include "llvm/Support/CommandLine.h"
+
+using namespace klee;
+
+/***/
+
+MemoryManager::~MemoryManager() {
+ while (!objects.empty()) {
+ MemoryObject *mo = objects.back();
+ objects.pop_back();
+ delete mo;
+ }
+}
+
+MemoryObject *MemoryManager::allocate(uint64_t size, bool isLocal, bool isGlobal,
+ const llvm::Value *allocSite) {
+ if (size>10*1024*1024) {
+ klee_warning_once(0, "failing large alloc: %u bytes", (unsigned) size);
+ return 0;
+ }
+ uint64_t address = (uint64_t) (unsigned long) malloc((unsigned) size);
+ if (!address)
+ return 0;
+
+ ++stats::allocations;
+ MemoryObject *res = new MemoryObject(address, size, isLocal, isGlobal, false,
+ allocSite);
+ objects.push_back(res);
+ return res;
+}
+
+MemoryObject *MemoryManager::allocateFixed(uint64_t address, uint64_t size,
+ const llvm::Value *allocSite) {
+ for (objects_ty::iterator it = objects.begin(), ie = objects.end();
+ it != ie; ++it) {
+ MemoryObject *mo = *it;
+ assert(!(address+size > mo->address && address < mo->address+mo->size) &&
+ "allocated an overlapping object");
+ }
+
+ ++stats::allocations;
+ MemoryObject *res = new MemoryObject(address, size, false, true, true,
+ allocSite);
+ objects.push_back(res);
+ return res;
+}
+
+void MemoryManager::deallocate(const MemoryObject *mo) {
+ assert(0);
+}
diff --git a/lib/Core/MemoryManager.h b/lib/Core/MemoryManager.h
new file mode 100644
index 00000000..adb2ba22
--- /dev/null
+++ b/lib/Core/MemoryManager.h
@@ -0,0 +1,41 @@
+//===-- MemoryManager.h -----------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef KLEE_MEMORYMANAGER_H
+#define KLEE_MEMORYMANAGER_H
+
+#include <vector>
+#include <stdint.h>
+
+namespace llvm {
+ class Value;
+}
+
+namespace klee {
+ class MemoryObject;
+
+ class MemoryManager {
+ private:
+ typedef std::vector<MemoryObject*> objects_ty;
+ objects_ty objects;
+
+ public:
+ MemoryManager() {}
+ ~MemoryManager();
+
+ MemoryObject *allocate(uint64_t size, bool isLocal, bool isGlobal,
+ const llvm::Value *allocSite);
+ MemoryObject *allocateFixed(uint64_t address, uint64_t size,
+ const llvm::Value *allocSite);
+ void deallocate(const MemoryObject *mo);
+ };
+
+} // End klee namespace
+
+#endif
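The manager above hands out MemoryObjects whose addresses are real host allocations, so distinct objects never alias. A minimal sketch of the expected calling pattern; the helper and its argument choices are assumptions, not part of this interface:

#include "Memory.h"
#include "MemoryManager.h"

// Allocate a global buffer of n bytes; allocate() returns 0 both for very
// large requests (it refuses anything over 10MB) and for host malloc failure,
// so callers must check before binding the object into an address space.
klee::MemoryObject *makeGlobalBuffer(klee::MemoryManager &mm, uint64_t n) {
  klee::MemoryObject *mo =
    mm.allocate(n, /*isLocal=*/false, /*isGlobal=*/true, /*allocSite=*/0);
  return mo;   // mo->address is the concrete host pointer when non-null
}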
diff --git a/lib/Core/ObjectHolder.h b/lib/Core/ObjectHolder.h
new file mode 100644
index 00000000..abf2c6f0
--- /dev/null
+++ b/lib/Core/ObjectHolder.h
@@ -0,0 +1,33 @@
+//===-- ObjectHolder.h ------------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef KLEE_OBJECTHOLDER_H
+#define KLEE_OBJECTHOLDER_H
+
+namespace klee {
+ class ObjectState;
+
+ class ObjectHolder {
+ ObjectState *os;
+
+ public:
+ ObjectHolder() : os(0) {}
+ ObjectHolder(ObjectState *_os);
+ ObjectHolder(const ObjectHolder &b);
+ ~ObjectHolder();
+
+ ObjectHolder &operator=(const ObjectHolder &b);
+
+ operator class ObjectState *() { return os; }
+ operator class ObjectState *() const { return (ObjectState*) os; }
+ };
+}
+
+#endif
+
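ObjectHolder is a minimal intrusive smart pointer over ObjectState::refCount: copies share the object, and the last holder destroyed deletes it. A small sketch, assuming os was allocated with new and is not yet held elsewhere:

#include "Memory.h"
#include "ObjectHolder.h"

void holdExample(klee::ObjectState *os) {
  klee::ObjectHolder a(os);    // refCount becomes 1
  {
    klee::ObjectHolder b(a);   // copy shares os, refCount becomes 2
  }                            // b destroyed, refCount back to 1
}                              // a destroyed, refCount hits 0, os is deleted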
diff --git a/lib/Core/PTree.cpp b/lib/Core/PTree.cpp
new file mode 100644
index 00000000..349761cd
--- /dev/null
+++ b/lib/Core/PTree.cpp
@@ -0,0 +1,103 @@
+//===-- PTree.cpp ---------------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PTree.h"
+
+#include <klee/Expr.h>
+#include <klee/util/ExprPPrinter.h>
+
+#include <vector>
+#include <iostream>
+
+using namespace klee;
+
+ /* *** */
+
+PTree::PTree(const data_type &_root) : root(new Node(0,_root)) {
+}
+
+PTree::~PTree() {}
+
+std::pair<PTreeNode*, PTreeNode*>
+PTree::split(Node *n,
+ const data_type &leftData,
+ const data_type &rightData) {
+ assert(n && !n->left && !n->right);
+ n->left = new Node(n, leftData);
+ n->right = new Node(n, rightData);
+ return std::make_pair(n->left, n->right);
+}
+
+void PTree::remove(Node *n) {
+ assert(!n->left && !n->right);
+  do {
+    Node *p = n->parent;
+    if (p) {
+      if (n == p->left) {
+        p->left = 0;
+      } else {
+        assert(n == p->right);
+        p->right = 0;
+      }
+    }
+    delete n;
+    n = p;
+ } while (n && !n->left && !n->right);
+}
+
+void PTree::dump(std::ostream &os) {
+ ExprPPrinter *pp = ExprPPrinter::create(os);
+ pp->setNewline("\\l");
+ os << "digraph G {\n";
+ os << "\tsize=\"10,7.5\";\n";
+ os << "\tratio=fill;\n";
+ os << "\trotate=90;\n";
+ os << "\tcenter = \"true\";\n";
+ os << "\tnode [style=\"filled\",width=.1,height=.1,fontname=\"Terminus\"]\n";
+ os << "\tedge [arrowsize=.3]\n";
+ std::vector<PTree::Node*> stack;
+ stack.push_back(root);
+ while (!stack.empty()) {
+ PTree::Node *n = stack.back();
+ stack.pop_back();
+ if (n->condition.isNull()) {
+ os << "\tn" << n << " [label=\"\"";
+ } else {
+ os << "\tn" << n << " [label=\"";
+ pp->print(n->condition);
+ os << "\",shape=diamond";
+ }
+ if (n->data)
+ os << ",fillcolor=green";
+ os << "];\n";
+ if (n->left) {
+ os << "\tn" << n << " -> n" << n->left << ";\n";
+ stack.push_back(n->left);
+ }
+ if (n->right) {
+ os << "\tn" << n << " -> n" << n->right << ";\n";
+ stack.push_back(n->right);
+ }
+ }
+ os << "}\n";
+ delete pp;
+}
+
+PTreeNode::PTreeNode(PTreeNode *_parent,
+ ExecutionState *_data)
+ : parent(_parent),
+ left(0),
+ right(0),
+ data(_data),
+ condition(0) {
+}
+
+PTreeNode::~PTreeNode() {
+}
+
diff --git a/lib/Core/PTree.h b/lib/Core/PTree.h
new file mode 100644
index 00000000..6accc8e2
--- /dev/null
+++ b/lib/Core/PTree.h
@@ -0,0 +1,53 @@
+//===-- PTree.h -------------------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __UTIL_PTREE_H__
+#define __UTIL_PTREE_H__
+
+#include <klee/Expr.h>
+
+#include <utility>
+#include <cassert>
+#include <iostream>
+
+namespace klee {
+ class ExecutionState;
+
+ class PTree {
+ typedef ExecutionState* data_type;
+
+ public:
+ typedef class PTreeNode Node;
+ Node *root;
+
+ PTree(const data_type &_root);
+ ~PTree();
+
+ std::pair<Node*,Node*> split(Node *n,
+ const data_type &leftData,
+ const data_type &rightData);
+ void remove(Node *n);
+
+ void dump(std::ostream &os);
+ };
+
+ class PTreeNode {
+ friend class PTree;
+ public:
+ PTreeNode *parent, *left, *right;
+ ExecutionState *data;
+ ref<Expr> condition;
+
+ private:
+ PTreeNode(PTreeNode *_parent, ExecutionState *_data);
+ ~PTreeNode();
+ };
+}
+
+#endif
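The process tree records one leaf per live ExecutionState: the engine splits the current leaf when a state forks, prunes leaves when paths die, and RandomPathSearcher (below) picks states by a random walk from the root. A sketch of the fork-side usage, with all inputs assumed to come from the engine:

#include "PTree.h"

// Turn the leaf for the forking state into an interior node with one child
// per outcome; PTree only records the shape and does not own the states.
void recordFork(klee::PTree &tree, klee::PTreeNode *leaf,
                klee::ExecutionState *trueState,
                klee::ExecutionState *falseState) {
  std::pair<klee::PTreeNode*, klee::PTreeNode*> kids =
    tree.split(leaf, trueState, falseState);
  leaf->data = 0;     // interior nodes carry no state
  (void) kids;        // kids.first / kids.second are the new leaves; a finished
                      // path is pruned later via tree.remove(itsLeaf)
}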
diff --git a/lib/Core/Searcher.cpp b/lib/Core/Searcher.cpp
new file mode 100644
index 00000000..4c94c59b
--- /dev/null
+++ b/lib/Core/Searcher.cpp
@@ -0,0 +1,575 @@
+//===-- Searcher.cpp ------------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Common.h"
+
+#include "Searcher.h"
+
+#include "CoreStats.h"
+#include "Executor.h"
+#include "PTree.h"
+#include "StatsTracker.h"
+
+#include "klee/ExecutionState.h"
+#include "klee/Statistics.h"
+#include "klee/Internal/Module/InstructionInfoTable.h"
+#include "klee/Internal/Module/KInstruction.h"
+#include "klee/Internal/Module/KModule.h"
+#include "klee/Internal/ADT/DiscretePDF.h"
+#include "klee/Internal/ADT/RNG.h"
+#include "klee/Internal/Support/ModuleUtil.h"
+#include "klee/Internal/System/Time.h"
+
+#include "llvm/Constants.h"
+#include "llvm/Instructions.h"
+#include "llvm/Module.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Support/CommandLine.h"
+
+#include <cassert>
+#include <fstream>
+#include <climits>
+
+using namespace klee;
+using namespace llvm;
+
+namespace {
+ cl::opt<bool>
+ DebugLogMerge("debug-log-merge");
+}
+
+namespace klee {
+ extern RNG theRNG;
+}
+
+Searcher::~Searcher() {
+}
+
+///
+
+ExecutionState &DFSSearcher::selectState() {
+ return *states.back();
+}
+
+void DFSSearcher::update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates) {
+ states.insert(states.end(),
+ addedStates.begin(),
+ addedStates.end());
+ for (std::set<ExecutionState*>::const_iterator it = removedStates.begin(),
+ ie = removedStates.end(); it != ie; ++it) {
+ ExecutionState *es = *it;
+ if (es == states.back()) {
+ states.pop_back();
+ } else {
+ bool ok = false;
+
+ for (std::vector<ExecutionState*>::iterator it = states.begin(),
+ ie = states.end(); it != ie; ++it) {
+ if (es==*it) {
+ states.erase(it);
+ ok = true;
+ break;
+ }
+ }
+
+ assert(ok && "invalid state removed");
+ }
+ }
+}
+
+///
+
+ExecutionState &RandomSearcher::selectState() {
+ return *states[theRNG.getInt32()%states.size()];
+}
+
+void RandomSearcher::update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates) {
+ states.insert(states.end(),
+ addedStates.begin(),
+ addedStates.end());
+ for (std::set<ExecutionState*>::const_iterator it = removedStates.begin(),
+ ie = removedStates.end(); it != ie; ++it) {
+ ExecutionState *es = *it;
+ bool ok = false;
+
+ for (std::vector<ExecutionState*>::iterator it = states.begin(),
+ ie = states.end(); it != ie; ++it) {
+ if (es==*it) {
+ states.erase(it);
+ ok = true;
+ break;
+ }
+ }
+
+ assert(ok && "invalid state removed");
+ }
+}
+
+///
+
+WeightedRandomSearcher::WeightedRandomSearcher(Executor &_executor,
+ WeightType _type)
+ : executor(_executor),
+ states(new DiscretePDF<ExecutionState*>()),
+ type(_type) {
+ switch(type) {
+ case Depth:
+ updateWeights = false;
+ break;
+ case InstCount:
+ case CPInstCount:
+ case QueryCost:
+ case MinDistToUncovered:
+ case CoveringNew:
+ updateWeights = true;
+ break;
+ default:
+ assert(0 && "invalid weight type");
+ }
+}
+
+WeightedRandomSearcher::~WeightedRandomSearcher() {
+ delete states;
+}
+
+ExecutionState &WeightedRandomSearcher::selectState() {
+ return *states->choose(theRNG.getDoubleL());
+}
+
+double WeightedRandomSearcher::getWeight(ExecutionState *es) {
+ switch(type) {
+ default:
+ case Depth:
+ return es->weight;
+ case InstCount: {
+ uint64_t count = theStatisticManager->getIndexedValue(stats::instructions,
+ es->pc->info->id);
+ double inv = 1. / std::max((uint64_t) 1, count);
+ return inv * inv;
+ }
+ case CPInstCount: {
+ StackFrame &sf = es->stack.back();
+ uint64_t count = sf.callPathNode->statistics.getValue(stats::instructions);
+ double inv = 1. / std::max((uint64_t) 1, count);
+ return inv;
+ }
+ case QueryCost:
+ return (es->queryCost < .1) ? 1. : 1./es->queryCost;
+ case CoveringNew:
+ case MinDistToUncovered: {
+ uint64_t md2u = computeMinDistToUncovered(es->pc,
+ es->stack.back().minDistToUncoveredOnReturn);
+
+ double invMD2U = 1. / (md2u ? md2u : 10000);
+ if (type==CoveringNew) {
+ double invCovNew = 0.;
+ if (es->instsSinceCovNew)
+ invCovNew = 1. / std::max(1, (int) es->instsSinceCovNew - 1000);
+ return (invCovNew * invCovNew + invMD2U * invMD2U);
+ } else {
+ return invMD2U * invMD2U;
+ }
+ }
+ }
+}
+
+void WeightedRandomSearcher::update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates) {
+ if (current && updateWeights && !removedStates.count(current))
+ states->update(current, getWeight(current));
+
+ for (std::set<ExecutionState*>::const_iterator it = addedStates.begin(),
+ ie = addedStates.end(); it != ie; ++it) {
+ ExecutionState *es = *it;
+ states->insert(es, getWeight(es));
+ }
+
+ for (std::set<ExecutionState*>::const_iterator it = removedStates.begin(),
+ ie = removedStates.end(); it != ie; ++it) {
+ states->remove(*it);
+ }
+}
+
+bool WeightedRandomSearcher::empty() {
+ return states->empty();
+}
+
+///
+
+RandomPathSearcher::RandomPathSearcher(Executor &_executor)
+ : executor(_executor) {
+}
+
+RandomPathSearcher::~RandomPathSearcher() {
+}
+
+ExecutionState &RandomPathSearcher::selectState() {
+ unsigned flips=0, bits=0;
+ PTree::Node *n = executor.processTree->root;
+
+ while (!n->data) {
+ if (!n->left) {
+ n = n->right;
+ } else if (!n->right) {
+ n = n->left;
+ } else {
+ if (bits==0) {
+ flips = theRNG.getInt32();
+ bits = 32;
+ }
+ --bits;
+ n = (flips&(1<<bits)) ? n->left : n->right;
+ }
+ }
+
+ return *n->data;
+}
+
+void RandomPathSearcher::update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates) {
+}
+
+bool RandomPathSearcher::empty() {
+ return executor.states.empty();
+}
+
+///
+
+BumpMergingSearcher::BumpMergingSearcher(Executor &_executor, Searcher *_baseSearcher)
+ : executor(_executor),
+ baseSearcher(_baseSearcher),
+ mergeFunction(executor.kmodule->kleeMergeFn) {
+}
+
+BumpMergingSearcher::~BumpMergingSearcher() {
+ delete baseSearcher;
+}
+
+///
+
+Instruction *BumpMergingSearcher::getMergePoint(ExecutionState &es) {
+ if (mergeFunction) {
+ Instruction *i = es.pc->inst;
+
+ if (i->getOpcode()==Instruction::Call) {
+ CallSite cs(cast<CallInst>(i));
+ if (mergeFunction==cs.getCalledFunction())
+ return i;
+ }
+ }
+
+ return 0;
+}
+
+ExecutionState &BumpMergingSearcher::selectState() {
+entry:
+ // out of base states, pick one to pop
+ if (baseSearcher->empty()) {
+ std::map<llvm::Instruction*, ExecutionState*>::iterator it =
+ statesAtMerge.begin();
+ ExecutionState *es = it->second;
+ statesAtMerge.erase(it);
+ ++es->pc;
+
+ baseSearcher->addState(es);
+ }
+
+ ExecutionState &es = baseSearcher->selectState();
+
+ if (Instruction *mp = getMergePoint(es)) {
+ std::map<llvm::Instruction*, ExecutionState*>::iterator it =
+ statesAtMerge.find(mp);
+
+ baseSearcher->removeState(&es);
+
+ if (it==statesAtMerge.end()) {
+ statesAtMerge.insert(std::make_pair(mp, &es));
+ } else {
+ ExecutionState *mergeWith = it->second;
+ if (mergeWith->merge(es)) {
+ // hack, because we are terminating the state we need to let
+ // the baseSearcher know about it again
+ baseSearcher->addState(&es);
+ executor.terminateState(es);
+ } else {
+ it->second = &es; // the bump
+ ++mergeWith->pc;
+
+ baseSearcher->addState(mergeWith);
+ }
+ }
+
+ goto entry;
+ } else {
+ return es;
+ }
+}
+
+void BumpMergingSearcher::update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates) {
+ baseSearcher->update(current, addedStates, removedStates);
+}
+
+///
+
+MergingSearcher::MergingSearcher(Executor &_executor, Searcher *_baseSearcher)
+ : executor(_executor),
+ baseSearcher(_baseSearcher),
+ mergeFunction(executor.kmodule->kleeMergeFn) {
+}
+
+MergingSearcher::~MergingSearcher() {
+ delete baseSearcher;
+}
+
+///
+
+Instruction *MergingSearcher::getMergePoint(ExecutionState &es) {
+ if (mergeFunction) {
+ Instruction *i = es.pc->inst;
+
+ if (i->getOpcode()==Instruction::Call) {
+ CallSite cs(cast<CallInst>(i));
+ if (mergeFunction==cs.getCalledFunction())
+ return i;
+ }
+ }
+
+ return 0;
+}
+
+ExecutionState &MergingSearcher::selectState() {
+ while (!baseSearcher->empty()) {
+ ExecutionState &es = baseSearcher->selectState();
+ if (getMergePoint(es)) {
+ baseSearcher->removeState(&es, &es);
+ statesAtMerge.insert(&es);
+ } else {
+ return es;
+ }
+ }
+
+ // build map of merge point -> state list
+ std::map<Instruction*, std::vector<ExecutionState*> > merges;
+ for (std::set<ExecutionState*>::const_iterator it = statesAtMerge.begin(),
+ ie = statesAtMerge.end(); it != ie; ++it) {
+ ExecutionState &state = **it;
+ Instruction *mp = getMergePoint(state);
+
+ merges[mp].push_back(&state);
+ }
+
+ if (DebugLogMerge)
+ llvm::cerr << "-- all at merge --\n";
+ for (std::map<Instruction*, std::vector<ExecutionState*> >::iterator
+ it = merges.begin(), ie = merges.end(); it != ie; ++it) {
+ if (DebugLogMerge) {
+ llvm::cerr << "\tmerge: " << it->first << " [";
+ for (std::vector<ExecutionState*>::iterator it2 = it->second.begin(),
+ ie2 = it->second.end(); it2 != ie2; ++it2) {
+ ExecutionState *state = *it2;
+ llvm::cerr << state << ", ";
+ }
+ llvm::cerr << "]\n";
+ }
+
+ // merge states
+ std::set<ExecutionState*> toMerge(it->second.begin(), it->second.end());
+ while (!toMerge.empty()) {
+ ExecutionState *base = *toMerge.begin();
+ toMerge.erase(toMerge.begin());
+
+ std::set<ExecutionState*> toErase;
+ for (std::set<ExecutionState*>::iterator it = toMerge.begin(),
+ ie = toMerge.end(); it != ie; ++it) {
+ ExecutionState *mergeWith = *it;
+
+ if (base->merge(*mergeWith)) {
+ toErase.insert(mergeWith);
+ }
+ }
+ if (DebugLogMerge && !toErase.empty()) {
+ llvm::cerr << "\t\tmerged: " << base << " with [";
+ for (std::set<ExecutionState*>::iterator it = toErase.begin(),
+ ie = toErase.end(); it != ie; ++it) {
+ if (it!=toErase.begin()) llvm::cerr << ", ";
+ llvm::cerr << *it;
+ }
+ llvm::cerr << "]\n";
+ }
+ for (std::set<ExecutionState*>::iterator it = toErase.begin(),
+ ie = toErase.end(); it != ie; ++it) {
+ std::set<ExecutionState*>::iterator it2 = toMerge.find(*it);
+ assert(it2!=toMerge.end());
+ executor.terminateState(**it);
+ toMerge.erase(it2);
+ }
+
+ // step past merge and toss base back in pool
+ statesAtMerge.erase(statesAtMerge.find(base));
+ ++base->pc;
+ baseSearcher->addState(base);
+ }
+ }
+
+ if (DebugLogMerge)
+ llvm::cerr << "-- merge complete, continuing --\n";
+
+ return selectState();
+}
+
+void MergingSearcher::update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates) {
+ if (!removedStates.empty()) {
+ std::set<ExecutionState *> alt = removedStates;
+ for (std::set<ExecutionState*>::const_iterator it = removedStates.begin(),
+ ie = removedStates.end(); it != ie; ++it) {
+ ExecutionState *es = *it;
+ std::set<ExecutionState*>::const_iterator it = statesAtMerge.find(es);
+ if (it!=statesAtMerge.end()) {
+ statesAtMerge.erase(it);
+ alt.erase(alt.find(es));
+ }
+ }
+ baseSearcher->update(current, addedStates, alt);
+ } else {
+ baseSearcher->update(current, addedStates, removedStates);
+ }
+}
+
+///
+
+BatchingSearcher::BatchingSearcher(Searcher *_baseSearcher,
+ double _timeBudget,
+ unsigned _instructionBudget)
+ : baseSearcher(_baseSearcher),
+ timeBudget(_timeBudget),
+ instructionBudget(_instructionBudget),
+ lastState(0) {
+
+}
+
+BatchingSearcher::~BatchingSearcher() {
+ delete baseSearcher;
+}
+
+ExecutionState &BatchingSearcher::selectState() {
+ if (!lastState ||
+ (util::getWallTime()-lastStartTime)>timeBudget ||
+ (stats::instructions-lastStartInstructions)>instructionBudget) {
+ if (lastState) {
+ double delta = util::getWallTime()-lastStartTime;
+ if (delta>timeBudget*1.1) {
+ llvm::cerr << "KLEE: increased time budget from " << timeBudget << " to " << delta << "\n";
+ timeBudget = delta;
+ }
+ }
+ lastState = &baseSearcher->selectState();
+ lastStartTime = util::getWallTime();
+ lastStartInstructions = stats::instructions;
+ return *lastState;
+ } else {
+ return *lastState;
+ }
+}
+
+void BatchingSearcher::update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates) {
+ if (removedStates.count(lastState))
+ lastState = 0;
+ baseSearcher->update(current, addedStates, removedStates);
+}
+
+/***/
+
+IterativeDeepeningTimeSearcher::IterativeDeepeningTimeSearcher(Searcher *_baseSearcher)
+ : baseSearcher(_baseSearcher),
+ time(1.) {
+}
+
+IterativeDeepeningTimeSearcher::~IterativeDeepeningTimeSearcher() {
+ delete baseSearcher;
+}
+
+ExecutionState &IterativeDeepeningTimeSearcher::selectState() {
+ ExecutionState &res = baseSearcher->selectState();
+ startTime = util::getWallTime();
+ return res;
+}
+
+void IterativeDeepeningTimeSearcher::update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates) {
+ double elapsed = util::getWallTime() - startTime;
+
+ if (!removedStates.empty()) {
+ std::set<ExecutionState *> alt = removedStates;
+ for (std::set<ExecutionState*>::const_iterator it = removedStates.begin(),
+ ie = removedStates.end(); it != ie; ++it) {
+ ExecutionState *es = *it;
+ std::set<ExecutionState*>::const_iterator it = pausedStates.find(es);
+ if (it!=pausedStates.end()) {
+ pausedStates.erase(it);
+ alt.erase(alt.find(es));
+ }
+ }
+ baseSearcher->update(current, addedStates, alt);
+ } else {
+ baseSearcher->update(current, addedStates, removedStates);
+ }
+
+ if (current && !removedStates.count(current) && elapsed>time) {
+ pausedStates.insert(current);
+ baseSearcher->removeState(current);
+ }
+
+ if (baseSearcher->empty()) {
+ time *= 2;
+ llvm::cerr << "KLEE: increasing time budget to: " << time << "\n";
+ baseSearcher->update(0, pausedStates, std::set<ExecutionState*>());
+ pausedStates.clear();
+ }
+}
+
+/***/
+
+InterleavedSearcher::InterleavedSearcher(const std::vector<Searcher*> &_searchers)
+ : searchers(_searchers),
+ index(1) {
+}
+
+InterleavedSearcher::~InterleavedSearcher() {
+ for (std::vector<Searcher*>::const_iterator it = searchers.begin(),
+ ie = searchers.end(); it != ie; ++it)
+ delete *it;
+}
+
+ExecutionState &InterleavedSearcher::selectState() {
+ Searcher *s = searchers[--index];
+ if (index==0) index = searchers.size();
+ return s->selectState();
+}
+
+void InterleavedSearcher::update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates) {
+ for (std::vector<Searcher*>::const_iterator it = searchers.begin(),
+ ie = searchers.end(); it != ie; ++it)
+ (*it)->update(current, addedStates, removedStates);
+}
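
[Editorial sketch, not part of this commit: the two merging searchers above pause states at calls to the klee_merge() intrinsic (kmodule->kleeMergeFn) and try to fold them together with ExecutionState::merge(). A minimal client-side view of such a merge point is shown below; the extern declaration is an assumption about KLEE's intrinsic library.]

    extern "C" void klee_merge();

    int clamp_to_zero(int x) {
      int r;
      if (x < 0)       // the executor forks one state per branch
        r = 0;
      else
        r = x;
      klee_merge();    // both states stop here; the searcher merges them
                       // (or, for BumpMergingSearcher, bumps one past the
                       // call if the merge fails)
      return r;
    }
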
diff --git a/lib/Core/Searcher.h b/lib/Core/Searcher.h
new file mode 100644
index 00000000..455a7679
--- /dev/null
+++ b/lib/Core/Searcher.h
@@ -0,0 +1,279 @@
+//===-- Searcher.h ----------------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef KLEE_SEARCHER_H
+#define KLEE_SEARCHER_H
+
+#include <vector>
+#include <set>
+#include <map>
+#include <queue>
+
+// FIXME: Move out of header, use llvm streams.
+#include <ostream>
+
+namespace llvm {
+ class BasicBlock;
+ class Function;
+ class Instruction;
+}
+
+namespace klee {
+ template<class T> class DiscretePDF;
+ class ExecutionState;
+ class Executor;
+
+ class Searcher {
+ public:
+ virtual ~Searcher();
+
+ virtual ExecutionState &selectState() = 0;
+
+ virtual void update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates) = 0;
+
+ virtual bool empty() = 0;
+
+ // prints name of searcher as a klee_message()
+ // TODO: could probably make prettier or more flexible
+ virtual void printName(std::ostream &os) {
+ os << "<unnamed searcher>\n";
+ }
+
+    // pgbovine - to be called when a searcher gets activated and
+    // deactivated, say, by a higher-level searcher; most searchers
+    // don't need this functionality, so they don't have to override these.
+ virtual void activate() {};
+ virtual void deactivate() {};
+
+ // utility functions
+
+ void addState(ExecutionState *es, ExecutionState *current = 0) {
+ std::set<ExecutionState*> tmp;
+ tmp.insert(es);
+ update(current, tmp, std::set<ExecutionState*>());
+ }
+
+ void removeState(ExecutionState *es, ExecutionState *current = 0) {
+ std::set<ExecutionState*> tmp;
+ tmp.insert(es);
+ update(current, std::set<ExecutionState*>(), tmp);
+ }
+ };
+
+ class DFSSearcher : public Searcher {
+ std::vector<ExecutionState*> states;
+
+ public:
+ ExecutionState &selectState();
+ void update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates);
+ bool empty() { return states.empty(); }
+ void printName(std::ostream &os) {
+ os << "DFSSearcher\n";
+ }
+ };
+
+ class RandomSearcher : public Searcher {
+ std::vector<ExecutionState*> states;
+
+ public:
+ ExecutionState &selectState();
+ void update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates);
+ bool empty() { return states.empty(); }
+ void printName(std::ostream &os) {
+ os << "RandomSearcher\n";
+ }
+ };
+
+ class WeightedRandomSearcher : public Searcher {
+ public:
+ enum WeightType {
+ Depth,
+ QueryCost,
+ InstCount,
+ CPInstCount,
+ MinDistToUncovered,
+ CoveringNew
+ };
+
+ private:
+ Executor &executor;
+ DiscretePDF<ExecutionState*> *states;
+ WeightType type;
+ bool updateWeights;
+
+ double getWeight(ExecutionState*);
+
+ public:
+ WeightedRandomSearcher(Executor &executor, WeightType type);
+ ~WeightedRandomSearcher();
+
+ ExecutionState &selectState();
+ void update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates);
+ bool empty();
+ void printName(std::ostream &os) {
+ os << "WeightedRandomSearcher::";
+ switch(type) {
+ case Depth : os << "Depth\n"; return;
+ case QueryCost : os << "QueryCost\n"; return;
+ case InstCount : os << "InstCount\n"; return;
+ case CPInstCount : os << "CPInstCount\n"; return;
+ case MinDistToUncovered : os << "MinDistToUncovered\n"; return;
+ case CoveringNew : os << "CoveringNew\n"; return;
+ default : os << "<unknown type>\n"; return;
+ }
+ }
+ };
+
+ class RandomPathSearcher : public Searcher {
+ Executor &executor;
+
+ public:
+ RandomPathSearcher(Executor &_executor);
+ ~RandomPathSearcher();
+
+ ExecutionState &selectState();
+ void update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates);
+ bool empty();
+ void printName(std::ostream &os) {
+ os << "RandomPathSearcher\n";
+ }
+ };
+
+ class MergingSearcher : public Searcher {
+ Executor &executor;
+ std::set<ExecutionState*> statesAtMerge;
+ Searcher *baseSearcher;
+ llvm::Function *mergeFunction;
+
+ private:
+ llvm::Instruction *getMergePoint(ExecutionState &es);
+
+ public:
+ MergingSearcher(Executor &executor, Searcher *baseSearcher);
+ ~MergingSearcher();
+
+ ExecutionState &selectState();
+ void update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates);
+ bool empty() { return baseSearcher->empty() && statesAtMerge.empty(); }
+ void printName(std::ostream &os) {
+ os << "MergingSearcher\n";
+ }
+ };
+
+ class BumpMergingSearcher : public Searcher {
+ Executor &executor;
+ std::map<llvm::Instruction*, ExecutionState*> statesAtMerge;
+ Searcher *baseSearcher;
+ llvm::Function *mergeFunction;
+
+ private:
+ llvm::Instruction *getMergePoint(ExecutionState &es);
+
+ public:
+ BumpMergingSearcher(Executor &executor, Searcher *baseSearcher);
+ ~BumpMergingSearcher();
+
+ ExecutionState &selectState();
+ void update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates);
+ bool empty() { return baseSearcher->empty() && statesAtMerge.empty(); }
+ void printName(std::ostream &os) {
+ os << "BumpMergingSearcher\n";
+ }
+ };
+
+ class BatchingSearcher : public Searcher {
+ Searcher *baseSearcher;
+ double timeBudget;
+ unsigned instructionBudget;
+
+ ExecutionState *lastState;
+ double lastStartTime;
+ unsigned lastStartInstructions;
+
+ public:
+ BatchingSearcher(Searcher *baseSearcher,
+ double _timeBudget,
+ unsigned _instructionBudget);
+ ~BatchingSearcher();
+
+ ExecutionState &selectState();
+ void update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates);
+ bool empty() { return baseSearcher->empty(); }
+ void printName(std::ostream &os) {
+ os << "<BatchingSearcher> timeBudget: " << timeBudget
+ << ", instructionBudget: " << instructionBudget
+ << ", baseSearcher:\n";
+ baseSearcher->printName(os);
+ os << "</BatchingSearcher>\n";
+ }
+ };
+
+ class IterativeDeepeningTimeSearcher : public Searcher {
+ Searcher *baseSearcher;
+ double time, startTime;
+ std::set<ExecutionState*> pausedStates;
+
+ public:
+ IterativeDeepeningTimeSearcher(Searcher *baseSearcher);
+ ~IterativeDeepeningTimeSearcher();
+
+ ExecutionState &selectState();
+ void update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates);
+ bool empty() { return baseSearcher->empty() && pausedStates.empty(); }
+ void printName(std::ostream &os) {
+ os << "IterativeDeepeningTimeSearcher\n";
+ }
+ };
+
+ class InterleavedSearcher : public Searcher {
+ typedef std::vector<Searcher*> searchers_ty;
+
+ searchers_ty searchers;
+ unsigned index;
+
+ public:
+ explicit InterleavedSearcher(const searchers_ty &_searchers);
+ ~InterleavedSearcher();
+
+ ExecutionState &selectState();
+ void update(ExecutionState *current,
+ const std::set<ExecutionState*> &addedStates,
+ const std::set<ExecutionState*> &removedStates);
+ bool empty() { return searchers[0]->empty(); }
+ void printName(std::ostream &os) {
+ os << "<InterleavedSearcher> containing "
+ << searchers.size() << " searchers:\n";
+ for (searchers_ty::iterator it = searchers.begin(), ie = searchers.end();
+ it != ie; ++it)
+ (*it)->printName(os);
+ os << "</InterleavedSearcher>\n";
+ }
+ };
+
+}
+
+#endif
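
[Editorial sketch, not part of this commit: the searcher classes declared above are meant to be composed, with each wrapper owning and deleting the searchers it wraps. A hypothetical composition using only the constructors declared in this header might look like the following; the budget values are illustrative only, and the real option-driven wiring lives elsewhere.]

    #include "Searcher.h"

    using namespace klee;

    // Round-robin between DFS and random selection, then batch each choice
    // so the same state keeps running for a while before the underlying
    // searcher is consulted again.
    Searcher *buildExampleSearcher() {
      std::vector<Searcher*> underlying;
      underlying.push_back(new DFSSearcher());
      underlying.push_back(new RandomSearcher());

      // InterleavedSearcher takes ownership of the searchers in the vector,
      // and BatchingSearcher takes ownership of the searcher it wraps.
      Searcher *interleaved = new InterleavedSearcher(underlying);
      return new BatchingSearcher(interleaved, /*timeBudget=*/5.,
                                  /*instructionBudget=*/10000);
    }
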
diff --git a/lib/Core/SeedInfo.cpp b/lib/Core/SeedInfo.cpp
new file mode 100644
index 00000000..d76d75dc
--- /dev/null
+++ b/lib/Core/SeedInfo.cpp
@@ -0,0 +1,151 @@
+//===-- SeedInfo.cpp ------------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Common.h"
+
+#include "Memory.h"
+#include "SeedInfo.h"
+#include "TimingSolver.h"
+
+#include "klee/ExecutionState.h"
+#include "klee/Expr.h"
+#include "klee/util/ExprUtil.h"
+#include "klee/Internal/ADT/BOut.h"
+
+using namespace klee;
+
+BOutObject *SeedInfo::getNextInput(const MemoryObject *mo,
+ bool byName) {
+ if (byName) {
+ unsigned i;
+
+ for (i=0; i<input->numObjects; ++i) {
+ BOutObject *obj = &input->objects[i];
+ if (std::string(obj->name) == mo->name)
+ if (used.insert(obj).second)
+ return obj;
+ }
+
+    // If the first unused input matches in size then accept it as
+    // well.
+ for (i=0; i<input->numObjects; ++i)
+ if (!used.count(&input->objects[i]))
+ break;
+ if (i<input->numObjects) {
+ BOutObject *obj = &input->objects[i];
+ if (obj->numBytes == mo->size) {
+ used.insert(obj);
+ klee_warning_once(mo, "using seed input %s[%d] for: %s (no name match)",
+ obj->name, obj->numBytes, mo->name.c_str());
+ return obj;
+ }
+ }
+
+ klee_warning_once(mo, "no seed input for: %s", mo->name.c_str());
+ return 0;
+ } else {
+ if (inputPosition >= input->numObjects) {
+ return 0;
+ } else {
+ return &input->objects[inputPosition++];
+ }
+ }
+}
+
+void SeedInfo::patchSeed(const ExecutionState &state,
+ ref<Expr> condition,
+ TimingSolver *solver) {
+ std::vector< ref<Expr> > required(state.constraints.begin(),
+ state.constraints.end());
+ ExecutionState tmp(required);
+ tmp.addConstraint(condition);
+
+  // Try to patch direct reads first; this is likely to resolve the
+  // problem quickly and avoids a long traversal of all seed
+  // values. There are other smart ways to do this; the nicest would be
+  // to get a minimal counterexample from STP, in which case we would
+  // just inject those values back into the seed.
+ std::set< std::pair<const Array*, unsigned> > directReads;
+ std::vector< ref<ReadExpr> > reads;
+ findReads(condition, false, reads);
+ for (std::vector< ref<ReadExpr> >::iterator it = reads.begin(),
+ ie = reads.end(); it != ie; ++it) {
+ ReadExpr *re = it->get();
+ if (re->index.isConstant()) {
+ unsigned index = (unsigned) re->index.getConstantValue();
+ directReads.insert(std::make_pair(re->updates.root, index));
+ }
+ }
+
+ for (std::set< std::pair<const Array*, unsigned> >::iterator
+ it = directReads.begin(), ie = directReads.end(); it != ie; ++it) {
+ const Array *array = it->first;
+ unsigned i = it->second;
+ ref<Expr> read = ReadExpr::create(UpdateList(array, true, 0),
+ ref<Expr>(i, Expr::Int32));
+
+ // If not in bindings then this can't be a violation?
+ Assignment::bindings_ty::iterator it2 = assignment.bindings.find(array);
+ if (it2 != assignment.bindings.end()) {
+ ref<Expr> isSeed = EqExpr::create(read, ref<Expr>(it2->second[i], Expr::Int8));
+ bool res;
+ bool success = solver->mustBeFalse(tmp, isSeed, res);
+ assert(success && "FIXME: Unhandled solver failure");
+ if (res) {
+ ref<Expr> value;
+ bool success = solver->getValue(tmp, read, value);
+ assert(success && "FIXME: Unhandled solver failure");
+ it2->second[i] = value.getConstantValue();
+ tmp.addConstraint(EqExpr::create(read, ref<Expr>(it2->second[i], Expr::Int8)));
+ } else {
+ tmp.addConstraint(isSeed);
+ }
+ }
+ }
+
+ bool res;
+ bool success = solver->mayBeTrue(state, assignment.evaluate(condition), res);
+ assert(success && "FIXME: Unhandled solver failure");
+ if (res)
+ return;
+
+ // We could still do a lot better than this, for example by looking at
+ // independence. But really, this shouldn't be happening often.
+ for (Assignment::bindings_ty::iterator it = assignment.bindings.begin(),
+ ie = assignment.bindings.end(); it != ie; ++it) {
+ const Array *array = it->first;
+ for (unsigned i=0; i<array->size; ++i) {
+ ref<Expr> read = ReadExpr::create(UpdateList(array, true, 0),
+ ref<Expr>(i, Expr::Int32));
+ ref<Expr> isSeed = EqExpr::create(read, ref<Expr>(it->second[i], Expr::Int8));
+ bool res;
+ bool success = solver->mustBeFalse(tmp, isSeed, res);
+ assert(success && "FIXME: Unhandled solver failure");
+ if (res) {
+ ref<Expr> value;
+ bool success = solver->getValue(tmp, read, value);
+ assert(success && "FIXME: Unhandled solver failure");
+ it->second[i] = value.getConstantValue();
+ tmp.addConstraint(EqExpr::create(read, ref<Expr>(it->second[i], Expr::Int8)));
+ } else {
+ tmp.addConstraint(isSeed);
+ }
+ }
+ }
+
+#ifndef NDEBUG
+ {
+ bool res;
+ bool success =
+ solver->mayBeTrue(state, assignment.evaluate(condition), res);
+ assert(success && "FIXME: Unhandled solver failure");
+ assert(res && "seed patching failed");
+ }
+#endif
+}
diff --git a/lib/Core/SeedInfo.h b/lib/Core/SeedInfo.h
new file mode 100644
index 00000000..dd151ed0
--- /dev/null
+++ b/lib/Core/SeedInfo.h
@@ -0,0 +1,48 @@
+//===-- SeedInfo.h ----------------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef KLEE_SEEDINFO_H
+#define KLEE_SEEDINFO_H
+
+#include "klee/util/Assignment.h"
+
+extern "C" {
+ struct BOut;
+ struct BOutObject;
+}
+
+namespace klee {
+ class ExecutionState;
+ class TimingSolver;
+
+ class SeedInfo {
+ public:
+ Assignment assignment;
+ BOut *input;
+ unsigned inputPosition;
+ std::set<struct BOutObject*> used;
+
+ public:
+ explicit
+ SeedInfo(BOut *_input) : assignment(true),
+ input(_input),
+ inputPosition(0) {}
+
+ BOutObject *getNextInput(const MemoryObject *mo,
+ bool byName);
+
+ /// Patch the seed so that condition is satisfied while retaining as
+ /// many of the seed values as possible.
+ void patchSeed(const ExecutionState &state,
+ ref<Expr> condition,
+ TimingSolver *solver);
+ };
+}
+
+#endif
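
[Editorial sketch, not part of this commit: based on the comments above, patchSeed() is meant to be driven roughly as follows when seeded execution reaches a branch the current assignment cannot satisfy. The helper name is hypothetical; only evaluate(), mayBeTrue(), and patchSeed() are taken from the sources in this diff.]

    #include <cassert>

    #include "SeedInfo.h"
    #include "TimingSolver.h"

    #include "klee/ExecutionState.h"
    #include "klee/Expr.h"

    using namespace klee;

    // Hypothetical helper: keep following 'condition' under 'seed', adjusting
    // the seed's assignment only when it provably cannot satisfy the condition.
    void followBranchWithSeed(SeedInfo &seed, const ExecutionState &state,
                              ref<Expr> condition, TimingSolver *solver) {
      bool mayHold;
      bool ok = solver->mayBeTrue(state, seed.assignment.evaluate(condition),
                                  mayHold);
      assert(ok && "FIXME: Unhandled solver failure");

      if (!mayHold)
        seed.patchSeed(state, condition, solver);
    }
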
diff --git a/lib/Core/SpecialFunctionHandler.cpp b/lib/Core/SpecialFunctionHandler.cpp
new file mode 100644
index 00000000..da2a4a49
--- /dev/null
+++ b/lib/Core/SpecialFunctionHandler.cpp
@@ -0,0 +1,727 @@
+//===-- SpecialFunctionHandler.cpp ----------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Common.h"
+
+#include "Memory.h"
+#include "SpecialFunctionHandler.h"
+#include "TimingSolver.h"
+
+#include "klee/ExecutionState.h"
+
+#include "klee/Internal/Module/KInstruction.h"
+#include "klee/Internal/Module/KModule.h"
+
+#include "Executor.h"
+#include "MemoryManager.h"
+
+#include "llvm/Module.h"
+
+#include <errno.h>
+
+using namespace llvm;
+using namespace klee;
+
+/// \todo Almost all of the demands in this file should be replaced
+/// with terminateState calls.
+
+///
+
+struct HandlerInfo {
+ const char *name;
+ SpecialFunctionHandler::Handler handler;
+ bool doesNotReturn; /// Intrinsic terminates the process
+ bool hasReturnValue; /// Intrinsic has a return value
+ bool doNotOverride; /// Intrinsic should not be used if already defined
+};
+
+// FIXME: We are more or less committed to requiring an intrinsic
+// library these days. We can move some of this stuff there,
+// especially things like realloc which have complicated semantics
+// w.r.t. forking. Among other things this makes delayed query
+// dispatch easier to implement.
+HandlerInfo handlerInfo[] = {
+#define add(name, handler, ret) { name, \
+ &SpecialFunctionHandler::handler, \
+ false, ret, false }
+#define addDNR(name, handler) { name, \
+ &SpecialFunctionHandler::handler, \
+ true, false, false }
+ addDNR("__assert_rtn", handleAssertFail),
+ addDNR("__assert_fail", handleAssertFail),
+ addDNR("_assert", handleAssert),
+ addDNR("abort", handleAbort),
+ addDNR("_exit", handleExit),
+ { "exit", &SpecialFunctionHandler::handleExit, true, false, true },
+ addDNR("klee_abort", handleAbort),
+ addDNR("klee_silent_exit", handleSilentExit),
+ addDNR("klee_report_error", handleReportError),
+
+ add("calloc", handleCalloc, true),
+ add("free", handleFree, false),
+ add("klee_assume", handleAssume, false),
+ add("klee_check_memory_access", handleCheckMemoryAccess, false),
+ add("klee_get_value", handleGetValue, true),
+ add("klee_define_fixed_object", handleDefineFixedObject, false),
+ add("klee_get_obj_size", handleGetObjSize, true),
+ add("klee_get_errno", handleGetErrno, true),
+ add("klee_is_symbolic", handleIsSymbolic, true),
+ add("klee_make_symbolic_name", handleMakeSymbolic, false),
+ add("klee_mark_global", handleMarkGlobal, false),
+ add("klee_malloc_n", handleMallocN, true),
+ add("klee_merge", handleMerge, false),
+ add("klee_prefer_cex", handlePreferCex, false),
+ add("klee_print_expr", handlePrintExpr, false),
+ add("klee_print_range", handlePrintRange, false),
+ add("klee_set_forking", handleSetForking, false),
+ add("klee_warning", handleWarning, false),
+ add("klee_warning_once", handleWarningOnce, false),
+ add("klee_under_constrained", handleUnderConstrained, false),
+ add("klee_alias_function", handleAliasFunction, false),
+ add("malloc", handleMalloc, true),
+ add("realloc", handleRealloc, true),
+
+ // operator delete[](void*)
+ add("_ZdaPv", handleDeleteArray, false),
+ // operator delete(void*)
+ add("_ZdlPv", handleDelete, false),
+
+ // operator new[](unsigned int)
+ add("_Znaj", handleNewArray, true),
+ // operator new(unsigned int)
+ add("_Znwj", handleNew, true),
+
+ // FIXME-64: This is wrong for 64-bit long...
+
+ // operator new[](unsigned long)
+ add("_Znam", handleNewArray, true),
+ // operator new(unsigned long)
+ add("_Znwm", handleNew, true),
+
+#undef addDNR
+#undef add
+};
+
+SpecialFunctionHandler::SpecialFunctionHandler(Executor &_executor)
+ : executor(_executor) {}
+
+
+void SpecialFunctionHandler::prepare() {
+ unsigned N = sizeof(handlerInfo)/sizeof(handlerInfo[0]);
+
+ for (unsigned i=0; i<N; ++i) {
+ HandlerInfo &hi = handlerInfo[i];
+ Function *f = executor.kmodule->module->getFunction(hi.name);
+
+ // No need to create if the function doesn't exist, since it cannot
+ // be called in that case.
+
+ if (f && (!hi.doNotOverride || f->isDeclaration())) {
+ // Make sure NoReturn attribute is set, for optimization and
+ // coverage counting.
+ if (hi.doesNotReturn)
+ f->addFnAttr(Attribute::NoReturn);
+
+ // Change to a declaration since we handle internally (simplifies
+ // module and allows deleting dead code).
+ if (!f->isDeclaration())
+ f->deleteBody();
+ }
+ }
+}
+
+void SpecialFunctionHandler::bind() {
+ unsigned N = sizeof(handlerInfo)/sizeof(handlerInfo[0]);
+
+ for (unsigned i=0; i<N; ++i) {
+ HandlerInfo &hi = handlerInfo[i];
+ Function *f = executor.kmodule->module->getFunction(hi.name);
+
+ if (f && (!hi.doNotOverride || f->isDeclaration()))
+ handlers[f] = std::make_pair(hi.handler, hi.hasReturnValue);
+ }
+}
+
+
+bool SpecialFunctionHandler::handle(ExecutionState &state,
+ Function *f,
+ KInstruction *target,
+ std::vector< ref<Expr> > &arguments) {
+ handlers_ty::iterator it = handlers.find(f);
+ if (it != handlers.end()) {
+ Handler h = it->second.first;
+ bool hasReturnValue = it->second.second;
+ // FIXME: Check this... add test?
+ if (!hasReturnValue && !target->inst->use_empty()) {
+ executor.terminateStateOnExecError(state,
+ "expected return value from void special function");
+ } else {
+ (this->*h)(state, target, arguments);
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/****/
+
+// reads a concrete string from memory
+std::string SpecialFunctionHandler::readStringAtAddress(ExecutionState &state,
+ ref<Expr> address) {
+ ObjectPair op;
+ address = executor.toUnique(state, address);
+ assert(address.isConstant() && "symbolic string arg to intrinsic");
+ if (!state.addressSpace.resolveOne(address.getConstantValue(), op))
+ assert(0 && "XXX out of bounds / multiple resolution unhandled");
+ bool res;
+ assert(executor.solver->mustBeTrue(state,
+ EqExpr::create(address,
+ op.first->getBaseExpr()),
+ res) &&
+ res &&
+ "XXX interior pointer unhandled");
+ const MemoryObject *mo = op.first;
+ const ObjectState *os = op.second;
+
+ char *buf = new char[mo->size];
+
+ unsigned i;
+ for (i = 0; i < mo->size - 1; i++) {
+ ref<Expr> cur = os->read8(i);
+ cur = executor.toUnique(state, cur);
+ assert(cur.isConstant() &&
+ "hit symbolic char while reading concrete string");
+ buf[i] = cur.getConstantValue();
+ }
+ buf[i] = 0;
+
+ std::string result(buf);
+ delete[] buf;
+ return result;
+}
+
+/****/
+
+void SpecialFunctionHandler::handleAbort(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==0 && "invalid number of arguments to abort");
+
+ //XXX:DRE:TAINT
+ if(state.underConstrained) {
+ llvm::cerr << "TAINT: skipping abort fail\n";
+ executor.terminateState(state);
+ } else {
+ executor.terminateStateOnError(state, "abort failure", "abort.err");
+ }
+}
+
+void SpecialFunctionHandler::handleExit(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==1 && "invalid number of arguments to exit");
+ executor.terminateStateOnExit(state);
+}
+
+void SpecialFunctionHandler::handleSilentExit(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+  assert(arguments.size()==1 && "invalid number of arguments to klee_silent_exit");
+ executor.terminateState(state);
+}
+
+void SpecialFunctionHandler::handleAliasFunction(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==2 &&
+ "invalid number of arguments to klee_alias_function");
+ std::string old_fn = readStringAtAddress(state, arguments[0]);
+ std::string new_fn = readStringAtAddress(state, arguments[1]);
+ //llvm::cerr << "Replacing " << old_fn << "() with " << new_fn << "()\n";
+ if (old_fn == new_fn)
+ state.removeFnAlias(old_fn);
+ else state.addFnAlias(old_fn, new_fn);
+}
+
+void SpecialFunctionHandler::handleAssert(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==3 && "invalid number of arguments to _assert");
+
+ //XXX:DRE:TAINT
+ if(state.underConstrained) {
+ llvm::cerr << "TAINT: skipping assertion:"
+ << readStringAtAddress(state, arguments[0]) << "\n";
+ executor.terminateState(state);
+ } else
+ executor.terminateStateOnError(state,
+ "ASSERTION FAIL: " + readStringAtAddress(state, arguments[0]),
+ "assert.err");
+}
+
+void SpecialFunctionHandler::handleAssertFail(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==4 && "invalid number of arguments to __assert_fail");
+
+ //XXX:DRE:TAINT
+ if(state.underConstrained) {
+ llvm::cerr << "TAINT: skipping assertion:"
+ << readStringAtAddress(state, arguments[0]) << "\n";
+ executor.terminateState(state);
+ } else
+ executor.terminateStateOnError(state,
+ "ASSERTION FAIL: " + readStringAtAddress(state, arguments[0]),
+ "assert.err");
+}
+
+void SpecialFunctionHandler::handleReportError(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==4 && "invalid number of arguments to klee_report_error");
+
+ // arguments[0], arguments[1] are file, line
+
+ //XXX:DRE:TAINT
+ if(state.underConstrained) {
+ llvm::cerr << "TAINT: skipping klee_report_error:"
+ << readStringAtAddress(state, arguments[2]) << ":"
+ << readStringAtAddress(state, arguments[3]) << "\n";
+ executor.terminateState(state);
+ } else
+ executor.terminateStateOnError(state,
+ readStringAtAddress(state, arguments[2]),
+ readStringAtAddress(state, arguments[3]));
+}
+
+void SpecialFunctionHandler::handleMerge(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ // nop
+}
+
+void SpecialFunctionHandler::handleNew(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ // XXX should type check args
+ assert(arguments.size()==1 && "invalid number of arguments to new");
+
+ executor.executeAlloc(state, arguments[0], false, target);
+}
+
+void SpecialFunctionHandler::handleDelete(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ // XXX should type check args
+ assert(arguments.size()==1 && "invalid number of arguments to delete");
+ executor.executeFree(state, arguments[0]);
+}
+
+void SpecialFunctionHandler::handleNewArray(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ // XXX should type check args
+ assert(arguments.size()==1 && "invalid number of arguments to new[]");
+ executor.executeAlloc(state, arguments[0], false, target);
+}
+
+void SpecialFunctionHandler::handleDeleteArray(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ // XXX should type check args
+ assert(arguments.size()==1 && "invalid number of arguments to delete[]");
+ executor.executeFree(state, arguments[0]);
+}
+
+void SpecialFunctionHandler::handleMalloc(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ // XXX should type check args
+ assert(arguments.size()==1 && "invalid number of arguments to malloc");
+ executor.executeAlloc(state, arguments[0], false, target);
+}
+
+void SpecialFunctionHandler::handleMallocN(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+
+ // XXX should type check args
+  assert(arguments.size() == 3 && "invalid number of arguments to klee_malloc_n");
+
+ // mallocn(number, size, alignment)
+ ref<Expr> numElems = executor.toUnique(state, arguments[0]);
+ ref<Expr> elemSize = executor.toUnique(state, arguments[1]);
+ ref<Expr> elemAlignment = executor.toUnique(state, arguments[2]);
+
+ assert(numElems.isConstant() &&
+ elemSize.isConstant() &&
+ elemAlignment.isConstant() &&
+ "symbolic arguments passed to klee_mallocn");
+
+ executor.executeAllocN(state,
+ numElems.getConstantValue(),
+ elemSize.getConstantValue(),
+ elemAlignment.getConstantValue(),
+ false,
+ target);
+}
+
+void SpecialFunctionHandler::handleAssume(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==1 && "invalid number of arguments to klee_assume");
+
+ ref<Expr> e = arguments[0];
+
+ if(e.getWidth() != Expr::Bool)
+ e = NeExpr::create(e, ConstantExpr::create(0, e.getWidth()));
+
+ bool res;
+ bool success = executor.solver->mustBeFalse(state, e, res);
+ assert(success && "FIXME: Unhandled solver failure");
+ if (res) {
+ executor.terminateStateOnError(state,
+ "invalid klee_assume call (provably false)",
+ "user.err");
+ } else {
+ executor.addConstraint(state, e);
+ }
+}
+
+void SpecialFunctionHandler::handleIsSymbolic(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==1 && "invalid number of arguments to klee_is_symbolic");
+
+ executor.bindLocal(target, state,
+ ConstantExpr::create(!arguments[0].isConstant(), Expr::Int32));
+}
+
+void SpecialFunctionHandler::handlePreferCex(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==2 &&
+         "invalid number of arguments to klee_prefer_cex");
+
+ ref<Expr> cond = arguments[1];
+ if (cond.getWidth() != Expr::Bool)
+ cond = NeExpr::create(cond, ref<Expr>(0, cond.getWidth()));
+
+ Executor::ExactResolutionList rl;
+ executor.resolveExact(state, arguments[0], rl, "prefex_cex");
+
+ assert(rl.size() == 1 &&
+ "prefer_cex target must resolve to precisely one object");
+
+ rl[0].first.first->cexPreferences.push_back(cond);
+}
+
+void SpecialFunctionHandler::handlePrintExpr(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==2 &&
+ "invalid number of arguments to klee_print_expr");
+
+ std::string msg_str = readStringAtAddress(state, arguments[0]);
+ llvm::cerr << msg_str << ":" << arguments[1] << "\n";
+}
+
+
+void SpecialFunctionHandler::handleUnderConstrained(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ // XXX should type check args
+ assert(arguments.size()==1 &&
+ "invalid number of arguments to klee_under_constrained().");
+ assert(arguments[0].isConstant() &&
+ "symbolic argument given to klee_under_constrained!");
+
+ unsigned v = arguments[0].getConstantValue();
+ llvm::cerr << "argument = " << v << " under=" << state.underConstrained << "\n";
+ if(v) {
+ assert(state.underConstrained == false &&
+ "Bogus call to klee_under_constrained().");
+ state.underConstrained = v;
+ llvm::cerr << "turning on under!\n";
+ } else {
+ assert(state.underConstrained != 0 && "Bogus call to klee_taint_end()");
+ state.underConstrained = 0;
+ llvm::cerr << "turning off under!\n";
+ }
+}
+
+void SpecialFunctionHandler::handleSetForking(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==1 &&
+ "invalid number of arguments to klee_set_forking");
+ ref<Expr> value = executor.toUnique(state, arguments[0]);
+
+ if (!value.isConstant()) {
+ executor.terminateStateOnError(state,
+ "klee_set_forking requires a constant arg",
+ "user.err");
+ } else {
+ state.forkDisabled = !value.getConstantValue();
+ }
+}
+
+void SpecialFunctionHandler::handleWarning(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==1 && "invalid number of arguments to klee_warning");
+
+ std::string msg_str = readStringAtAddress(state, arguments[0]);
+ klee_warning("%s: %s", state.stack.back().kf->function->getName().c_str(),
+ msg_str.c_str());
+}
+
+void SpecialFunctionHandler::handleWarningOnce(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==1 &&
+ "invalid number of arguments to klee_warning_once");
+
+ std::string msg_str = readStringAtAddress(state, arguments[0]);
+ klee_warning_once(0, "%s: %s", state.stack.back().kf->function->getName().c_str(),
+ msg_str.c_str());
+}
+
+void SpecialFunctionHandler::handlePrintRange(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==2 &&
+ "invalid number of arguments to klee_print_range");
+
+ std::string msg_str = readStringAtAddress(state, arguments[0]);
+ llvm::cerr << msg_str << ":" << arguments[1];
+ if (!arguments[1].isConstant()) {
+ // FIXME: Pull into a unique value method?
+ ref<Expr> value;
+ bool success = executor.solver->getValue(state, arguments[1], value);
+ assert(success && "FIXME: Unhandled solver failure");
+ bool res;
+ success = executor.solver->mustBeTrue(state,
+ EqExpr::create(arguments[1], value),
+ res);
+ assert(success && "FIXME: Unhandled solver failure");
+ if (res) {
+ llvm::cerr << " == " << value;
+ } else {
+ llvm::cerr << " ~= " << value;
+ std::pair< ref<Expr>, ref<Expr> > res =
+ executor.solver->getRange(state, arguments[1]);
+ llvm::cerr << " (in [" << res.first << ", " << res.second <<"])";
+ }
+ }
+ llvm::cerr << "\n";
+}
+
+void SpecialFunctionHandler::handleGetObjSize(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ // XXX should type check args
+ assert(arguments.size()==1 &&
+ "invalid number of arguments to klee_get_obj_size");
+ Executor::ExactResolutionList rl;
+ executor.resolveExact(state, arguments[0], rl, "klee_get_obj_size");
+ for (Executor::ExactResolutionList::iterator it = rl.begin(),
+ ie = rl.end(); it != ie; ++it) {
+ executor.bindLocal(target, *it->second,
+ ConstantExpr::create(it->first.first->size, Expr::Int32));
+ }
+}
+
+void SpecialFunctionHandler::handleGetErrno(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ // XXX should type check args
+ assert(arguments.size()==0 &&
+         "invalid number of arguments to klee_get_errno");
+ executor.bindLocal(target, state,
+ ConstantExpr::create(errno, Expr::Int32));
+}
+
+void SpecialFunctionHandler::handleCalloc(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ // XXX should type check args
+ assert(arguments.size()==2 &&
+ "invalid number of arguments to calloc");
+
+ ref<Expr> size = MulExpr::create(arguments[0],
+ arguments[1]);
+ executor.executeAlloc(state, size, false, target, true);
+}
+
+void SpecialFunctionHandler::handleRealloc(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ // XXX should type check args
+ assert(arguments.size()==2 &&
+ "invalid number of arguments to realloc");
+ ref<Expr> address = arguments[0];
+ ref<Expr> size = arguments[1];
+
+ Executor::StatePair zeroSize = executor.fork(state,
+ Expr::createIsZero(size),
+ true);
+
+ if (zeroSize.first) { // size == 0
+ executor.executeFree(*zeroSize.first, address, target);
+ }
+ if (zeroSize.second) { // size != 0
+ Executor::StatePair zeroPointer = executor.fork(*zeroSize.second,
+ Expr::createIsZero(address),
+ true);
+
+ if (zeroPointer.first) { // address == 0
+ executor.executeAlloc(*zeroPointer.first, size, false, target);
+ }
+ if (zeroPointer.second) { // address != 0
+ Executor::ExactResolutionList rl;
+ executor.resolveExact(*zeroPointer.second, address, rl, "realloc");
+
+ for (Executor::ExactResolutionList::iterator it = rl.begin(),
+ ie = rl.end(); it != ie; ++it) {
+ executor.executeAlloc(*it->second, size, false, target, false,
+ it->first.second);
+ }
+ }
+ }
+}
+
+void SpecialFunctionHandler::handleFree(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ // XXX should type check args
+ assert(arguments.size()==1 &&
+ "invalid number of arguments to free");
+ executor.executeFree(state, arguments[0]);
+}
+
+void SpecialFunctionHandler::handleCheckMemoryAccess(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==2 &&
+ "invalid number of arguments to klee_check_memory_access");
+
+ ref<Expr> address = executor.toUnique(state, arguments[0]);
+ ref<Expr> size = executor.toUnique(state, arguments[1]);
+ if (!address.isConstant() || !size.isConstant()) {
+ executor.terminateStateOnError(state,
+ "check_memory_access requires constant args",
+ "user.err");
+ } else {
+ ObjectPair op;
+
+ if (!state.addressSpace.resolveOne(address.getConstantValue(), op)) {
+ executor.terminateStateOnError(state,
+ "check_memory_access: memory error",
+ "ptr.err",
+ executor.getAddressInfo(state, address));
+ } else {
+ ref<Expr> chk = op.first->getBoundsCheckPointer(address,
+ size.getConstantValue());
+ assert(chk.isConstant());
+ if (!chk.getConstantValue()) {
+ executor.terminateStateOnError(state,
+ "check_memory_access: memory error",
+ "ptr.err",
+ executor.getAddressInfo(state, address));
+ }
+ }
+ }
+}
+
+void SpecialFunctionHandler::handleGetValue(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==1 &&
+ "invalid number of arguments to klee_get_value");
+
+ executor.executeGetValue(state, arguments[0], target);
+}
+
+void SpecialFunctionHandler::handleDefineFixedObject(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==2 &&
+ "invalid number of arguments to klee_define_fixed_object");
+ assert(arguments[0].isConstant() &&
+ "expect constant address argument to klee_define_fixed_object");
+ assert(arguments[1].isConstant() &&
+ "expect constant size argument to klee_define_fixed_object");
+
+ uint64_t address = arguments[0].getConstantValue();
+ uint64_t size = arguments[1].getConstantValue();
+ MemoryObject *mo = executor.memory->allocateFixed(address, size, state.prevPC->inst);
+ executor.bindObjectInState(state, mo, false);
+ mo->isUserSpecified = true; // XXX hack;
+}
+
+void SpecialFunctionHandler::handleMakeSymbolic(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==3 &&
+ "invalid number of arguments to klee_make_symbolic[_name]");
+
+ Executor::ExactResolutionList rl;
+ executor.resolveExact(state, arguments[0], rl, "make_symbolic");
+
+ for (Executor::ExactResolutionList::iterator it = rl.begin(),
+ ie = rl.end(); it != ie; ++it) {
+ MemoryObject *mo = (MemoryObject*) it->first.first;
+ std::string name = readStringAtAddress(state, arguments[2]);
+ mo->setName(name);
+
+ const ObjectState *old = it->first.second;
+ ExecutionState *s = it->second;
+
+ if (old->readOnly) {
+ executor.terminateStateOnError(*s,
+ "cannot make readonly object symbolic",
+ "user.err");
+ return;
+ }
+
+ bool res;
+ bool success =
+ executor.solver->mustBeTrue(*s, EqExpr::create(arguments[1],
+ mo->getSizeExpr()),
+ res);
+ assert(success && "FIXME: Unhandled solver failure");
+
+ if (res) {
+ executor.executeMakeSymbolic(*s, mo);
+ } else {
+ executor.terminateStateOnError(*s,
+ "wrong size given to klee_make_symbolic[_name]",
+ "user.err");
+ }
+ }
+}
+
+void SpecialFunctionHandler::handleMarkGlobal(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> > &arguments) {
+ assert(arguments.size()==1 &&
+ "invalid number of arguments to klee_mark_global");
+
+ Executor::ExactResolutionList rl;
+ executor.resolveExact(state, arguments[0], rl, "mark_global");
+
+ for (Executor::ExactResolutionList::iterator it = rl.begin(),
+ ie = rl.end(); it != ie; ++it) {
+ MemoryObject *mo = (MemoryObject*) it->first.first;
+ assert(!mo->isLocal);
+ mo->isGlobal = true;
+ }
+}
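
[Editorial sketch, not part of this commit: a hedged client-side view of a few of the intrinsics handled above. The extern declarations are assumptions about KLEE's intrinsic headers; only the argument counts are taken from the asserts in this file.]

    extern "C" void klee_make_symbolic_name(void *addr, unsigned nbytes,
                                            const char *name);  // see handleMakeSymbolic
    extern "C" void klee_assume(unsigned condition);            // see handleAssume
    extern "C" unsigned klee_get_obj_size(void *ptr);           // see handleGetObjSize

    int example(void) {
      int x;
      klee_make_symbolic_name(&x, sizeof x, "x");
      klee_assume(x >= 0);            // a provably false assumption terminates
                                      // the state with a user.err report
      return klee_get_obj_size(&x);   // resolves &x and returns its object size
    }
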
diff --git a/lib/Core/SpecialFunctionHandler.h b/lib/Core/SpecialFunctionHandler.h
new file mode 100644
index 00000000..d5d1af93
--- /dev/null
+++ b/lib/Core/SpecialFunctionHandler.h
@@ -0,0 +1,106 @@
+//===-- SpecialFunctionHandler.h --------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef KLEE_SPECIALFUNCTIONHANDLER_H
+#define KLEE_SPECIALFUNCTIONHANDLER_H
+
+#include <map>
+#include <vector>
+#include <string>
+
+namespace llvm {
+ class Function;
+}
+
+namespace klee {
+ class Executor;
+ class Expr;
+ class ExecutionState;
+ class KInstruction;
+ template<typename T> class ref;
+
+ class SpecialFunctionHandler {
+ public:
+ typedef void (SpecialFunctionHandler::*Handler)(ExecutionState &state,
+ KInstruction *target,
+ std::vector<ref<Expr> >
+ &arguments);
+ typedef std::map<const llvm::Function*,
+ std::pair<Handler,bool> > handlers_ty;
+
+ handlers_ty handlers;
+ class Executor &executor;
+
+ public:
+ SpecialFunctionHandler(Executor &_executor);
+
+ /// Perform any modifications on the LLVM module before it is
+ /// prepared for execution. At the moment this involves deleting
+ /// unused function bodies and marking intrinsics with appropriate
+ /// flags for use in optimizations.
+ void prepare();
+
+ /// Initialize the internal handler map after the module has been
+ /// prepared for execution.
+ void bind();
+
+ bool handle(ExecutionState &state,
+ llvm::Function *f,
+ KInstruction *target,
+ std::vector< ref<Expr> > &arguments);
+
+ /* Convenience routines */
+
+ std::string readStringAtAddress(ExecutionState &state, ref<Expr> address);
+
+ /* Handlers */
+
+#define HANDLER(name) void name(ExecutionState &state, \
+ KInstruction *target, \
+ std::vector< ref<Expr> > &arguments)
+ HANDLER(handleAbort);
+ HANDLER(handleAssert);
+ HANDLER(handleAssertFail);
+ HANDLER(handleAssume);
+ HANDLER(handleCalloc);
+ HANDLER(handleCheckMemoryAccess);
+ HANDLER(handleDefineFixedObject);
+ HANDLER(handleDelete);
+ HANDLER(handleDeleteArray);
+ HANDLER(handleExit);
+ HANDLER(handleAliasFunction);
+ HANDLER(handleFree);
+ HANDLER(handleGetErrno);
+ HANDLER(handleGetObjSize);
+ HANDLER(handleGetValue);
+ HANDLER(handleIsSymbolic);
+ HANDLER(handleMakeSymbolic);
+ HANDLER(handleMalloc);
+ HANDLER(handleMallocN);
+ HANDLER(handleMarkGlobal);
+ HANDLER(handleMerge);
+ HANDLER(handleNew);
+ HANDLER(handleNewArray);
+ HANDLER(handlePreferCex);
+ HANDLER(handlePrintExpr);
+ HANDLER(handlePrintRange);
+ HANDLER(handleRange);
+ HANDLER(handleRealloc);
+ HANDLER(handleReportError);
+ HANDLER(handleRevirtObjects);
+ HANDLER(handleSetForking);
+ HANDLER(handleSilentExit);
+ HANDLER(handleUnderConstrained);
+ HANDLER(handleWarning);
+ HANDLER(handleWarningOnce);
+#undef HANDLER
+ };
+} // End klee namespace
+
+#endif
diff --git a/lib/Core/StatsTracker.cpp b/lib/Core/StatsTracker.cpp
new file mode 100644
index 00000000..35c073a3
--- /dev/null
+++ b/lib/Core/StatsTracker.cpp
@@ -0,0 +1,814 @@
+//===-- StatsTracker.cpp --------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Common.h"
+
+#include "StatsTracker.h"
+
+#include "klee/ExecutionState.h"
+#include "klee/Statistics.h"
+#include "klee/Internal/Module/InstructionInfoTable.h"
+#include "klee/Internal/Module/KModule.h"
+#include "klee/Internal/Module/KInstruction.h"
+#include "klee/Internal/Support/ModuleUtil.h"
+#include "klee/Internal/System/Time.h"
+
+#include "CallPathManager.h"
+#include "CoreStats.h"
+#include "Executor.h"
+#include "MemoryManager.h"
+#include "UserSearcher.h"
+#include "../Solver/SolverStats.h"
+
+#include "llvm/BasicBlock.h"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/Module.h"
+#include "llvm/Type.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/System/Process.h"
+#include "llvm/System/Path.h"
+
+#include <iostream>
+#include <fstream>
+
+using namespace klee;
+using namespace llvm;
+
+///
+
+namespace {
+ cl::opt<bool>
+ TrackInstructionTime("track-instruction-time",
+ cl::desc("Enable tracking of time for individual instructions"),
+ cl::init(false));
+
+ cl::opt<bool>
+ OutputStats("output-stats",
+ cl::desc("Write running stats trace file"),
+ cl::init(true));
+
+ cl::opt<bool>
+ OutputIStats("output-istats",
+ cl::desc("Write instruction level statistics (in callgrind format)"),
+ cl::init(true));
+
+ cl::opt<double>
+ StatsWriteInterval("stats-write-interval",
+ cl::desc("Approximate number of seconds between stats writes (default: 1.0)"),
+ cl::init(1.));
+
+ cl::opt<double>
+ IStatsWriteInterval("istats-write-interval",
+ cl::desc("Approximate number of seconds between istats writes (default: 10.0)"),
+ cl::init(10.));
+
+ /*
+ cl::opt<double>
+ BranchCovCountsWriteInterval("branch-cov-counts-write-interval",
+ cl::desc("Approximate number of seconds between run.branches writes (default: 5.0)"),
+ cl::init(5.));
+ */
+
+ // XXX I really would like to have dynamic rate control for something like this.
+ cl::opt<double>
+ UncoveredUpdateInterval("uncovered-update-interval",
+ cl::init(30.));
+
+ cl::opt<bool>
+ UseCallPaths("use-call-paths",
+ cl::desc("Enable calltree tracking for instruction level statistics"),
+ cl::init(true));
+
+}
+
+///
+
+bool StatsTracker::useStatistics() {
+ return OutputStats || OutputIStats;
+}
+
+namespace klee {
+ class WriteIStatsTimer : public Executor::Timer {
+ StatsTracker *statsTracker;
+
+ public:
+ WriteIStatsTimer(StatsTracker *_statsTracker) : statsTracker(_statsTracker) {}
+ ~WriteIStatsTimer() {}
+
+ void run() { statsTracker->writeIStats(); }
+ };
+
+ class WriteStatsTimer : public Executor::Timer {
+ StatsTracker *statsTracker;
+
+ public:
+ WriteStatsTimer(StatsTracker *_statsTracker) : statsTracker(_statsTracker) {}
+ ~WriteStatsTimer() {}
+
+ void run() { statsTracker->writeStatsLine(); }
+ };
+
+ class UpdateReachableTimer : public Executor::Timer {
+ StatsTracker *statsTracker;
+
+ public:
+ UpdateReachableTimer(StatsTracker *_statsTracker) : statsTracker(_statsTracker) {}
+
+ void run() { statsTracker->computeReachableUncovered(); }
+ };
+
+}
+
+//
+
+/// Check for special cases where we statically know an instruction is
+/// uncoverable. Currently the only case is an unreachable instruction
+/// following a noreturn call; the instruction is really only there to
+/// satisfy LLVM's termination requirement.
+static bool instructionIsCoverable(Instruction *i) {
+ if (i->getOpcode() == Instruction::Unreachable) {
+ BasicBlock *bb = i->getParent();
+ BasicBlock::iterator it(i);
+ if (it==bb->begin()) {
+ return true;
+ } else {
+ Instruction *prev = --it;
+ if (isa<CallInst>(prev) || isa<InvokeInst>(prev)) {
+ Function *target = getDirectCallTarget(prev);
+ if (target && target->doesNotReturn())
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+StatsTracker::StatsTracker(Executor &_executor, std::string _objectFilename,
+ bool _updateMinDistToUncovered)
+ : executor(_executor),
+ objectFilename(_objectFilename),
+ statsFile(0),
+ istatsFile(0),
+ startWallTime(util::getWallTime()),
+ numBranches(0),
+ fullBranches(0),
+ partialBranches(0),
+ updateMinDistToUncovered(_updateMinDistToUncovered) {
+ KModule *km = executor.kmodule;
+
+ sys::Path module(objectFilename);
+ if (!sys::Path(objectFilename).isAbsolute()) {
+ sys::Path current = sys::Path::GetCurrentDirectory();
+ current.appendComponent(objectFilename);
+ if (current.exists())
+ objectFilename = current.c_str();
+ }
+
+ if (OutputIStats)
+ theStatisticManager->useIndexedStats(km->infos->getMaxID());
+
+ for (std::vector<KFunction*>::iterator it = km->functions.begin(),
+ ie = km->functions.end(); it != ie; ++it) {
+ KFunction *kf = *it;
+ kf->trackCoverage = 1;
+
+ for (unsigned i=0; i<kf->numInstructions; ++i) {
+ KInstruction *ki = kf->instructions[i];
+
+ if (OutputIStats) {
+ unsigned id = ki->info->id;
+ theStatisticManager->setIndex(id);
+ if (kf->trackCoverage && instructionIsCoverable(ki->inst))
+ ++stats::uncoveredInstructions;
+ }
+
+ if (kf->trackCoverage) {
+ if (BranchInst *bi = dyn_cast<BranchInst>(ki->inst))
+ if (!bi->isUnconditional())
+ numBranches++;
+ }
+ }
+ }
+
+ if (OutputStats) {
+ statsFile = executor.interpreterHandler->openOutputFile("run.stats");
+ assert(statsFile && "unable to open statistics trace file");
+ writeStatsHeader();
+ writeStatsLine();
+
+ executor.addTimer(new WriteStatsTimer(this), StatsWriteInterval);
+
+ if (updateMinDistToUncovered)
+ executor.addTimer(new UpdateReachableTimer(this), UncoveredUpdateInterval);
+ }
+
+ if (OutputIStats) {
+ istatsFile = executor.interpreterHandler->openOutputFile("run.istats");
+ assert(istatsFile && "unable to open istats file");
+
+ executor.addTimer(new WriteIStatsTimer(this), IStatsWriteInterval);
+ }
+}
+
+StatsTracker::~StatsTracker() {
+ if (statsFile)
+ delete statsFile;
+ if (istatsFile)
+ delete istatsFile;
+}
+
+void StatsTracker::done() {
+ if (statsFile)
+ writeStatsLine();
+ if (OutputIStats)
+ writeIStats();
+}
+
+void StatsTracker::stepInstruction(ExecutionState &es) {
+ if (OutputIStats) {
+ if (TrackInstructionTime) {
+ static sys::TimeValue lastNowTime(0,0),lastUserTime(0,0);
+
+ if (lastUserTime.seconds()==0 && lastUserTime.nanoseconds()==0) {
+ sys::TimeValue sys(0,0);
+ sys::Process::GetTimeUsage(lastNowTime,lastUserTime,sys);
+ } else {
+ sys::TimeValue now(0,0),user(0,0),sys(0,0);
+ sys::Process::GetTimeUsage(now,user,sys);
+ sys::TimeValue delta = user - lastUserTime;
+ sys::TimeValue deltaNow = now - lastNowTime;
+ stats::instructionTime += delta.usec();
+ stats::instructionRealTime += deltaNow.usec();
+ lastUserTime = user;
+ lastNowTime = now;
+ }
+ }
+
+ Instruction *inst = es.pc->inst;
+ const InstructionInfo &ii = *es.pc->info;
+ StackFrame &sf = es.stack.back();
+ theStatisticManager->setIndex(ii.id);
+ if (UseCallPaths)
+ theStatisticManager->setContext(&sf.callPathNode->statistics);
+
+ if (es.instsSinceCovNew)
+ ++es.instsSinceCovNew;
+
+ if (sf.kf->trackCoverage && instructionIsCoverable(inst)) {
+ if (!theStatisticManager->getIndexedValue(stats::coveredInstructions, ii.id)) {
+ // Checking for actual stoppoints avoids inconsistencies due
+          // to line number propagation.
+ if (isa<DbgStopPointInst>(inst))
+ es.coveredLines[&ii.file].insert(ii.line);
+ es.coveredNew = true;
+ es.instsSinceCovNew = 1;
+ ++stats::coveredInstructions;
+ stats::uncoveredInstructions += (uint64_t)-1;
+ }
+ }
+ }
+}
+
+///
+
+/* Should be called _after_ the es->pushFrame() */
+void StatsTracker::framePushed(ExecutionState &es, StackFrame *parentFrame) {
+ if (OutputIStats) {
+ StackFrame &sf = es.stack.back();
+
+ if (UseCallPaths) {
+ CallPathNode *parent = parentFrame ? parentFrame->callPathNode : 0;
+ CallPathNode *cp = callPathManager.getCallPath(parent,
+ sf.caller ? sf.caller->inst : 0,
+ sf.kf->function);
+ sf.callPathNode = cp;
+ cp->count++;
+ }
+
+ if (updateMinDistToUncovered) {
+ uint64_t minDistAtRA = 0;
+ if (parentFrame)
+ minDistAtRA = parentFrame->minDistToUncoveredOnReturn;
+
+ sf.minDistToUncoveredOnReturn = sf.caller ?
+ computeMinDistToUncovered(sf.caller, minDistAtRA) : 0;
+ }
+ }
+}
+
+/* Should be called _after_ the es->popFrame() */
+void StatsTracker::framePopped(ExecutionState &es) {
+ // XXX remove me?
+}
+
+
+void StatsTracker::markBranchVisited(ExecutionState *visitedTrue,
+ ExecutionState *visitedFalse) {
+ if (OutputIStats) {
+ unsigned id = theStatisticManager->getIndex();
+ uint64_t hasTrue = theStatisticManager->getIndexedValue(stats::trueBranches, id);
+ uint64_t hasFalse = theStatisticManager->getIndexedValue(stats::falseBranches, id);
+ if (visitedTrue && !hasTrue) {
+ visitedTrue->coveredNew = true;
+ visitedTrue->instsSinceCovNew = 1;
+ ++stats::trueBranches;
+ if (hasFalse) { ++fullBranches; --partialBranches; }
+ else ++partialBranches;
+ hasTrue = 1;
+ }
+ if (visitedFalse && !hasFalse) {
+ visitedFalse->coveredNew = true;
+ visitedFalse->instsSinceCovNew = 1;
+ ++stats::falseBranches;
+ if (hasTrue) { ++fullBranches; --partialBranches; }
+ else ++partialBranches;
+ }
+ }
+}
+
+void StatsTracker::writeStatsHeader() {
+ *statsFile << "('Instructions',"
+ << "'FullBranches',"
+ << "'PartialBranches',"
+ << "'NumBranches',"
+ << "'UserTime',"
+ << "'NumStates',"
+ << "'MallocUsage',"
+ << "'NumQueries',"
+ << "'NumQueryConstructs',"
+ << "'NumObjects',"
+ << "'WallTime',"
+ << "'CoveredInstructions',"
+ << "'UncoveredInstructions',"
+ << "'QueryTime',"
+ << "'SolverTime',"
+ << "'CexCacheTime',"
+ << "'ForkTime',"
+ << "'ResolveTime',"
+ << ")\n";
+ statsFile->flush();
+}
+
+double StatsTracker::elapsed() {
+ return util::getWallTime() - startWallTime;
+}
+
+void StatsTracker::writeStatsLine() {
+ *statsFile << "(" << stats::instructions
+ << "," << fullBranches
+ << "," << partialBranches
+ << "," << numBranches
+ << "," << util::getUserTime()
+ << "," << executor.states.size()
+ << "," << sys::Process::GetTotalMemoryUsage()
+ << "," << stats::queries
+ << "," << stats::queryConstructs
+ << "," << 0 // was numObjects
+ << "," << elapsed()
+ << "," << stats::coveredInstructions
+ << "," << stats::uncoveredInstructions
+ << "," << stats::queryTime / 1000000.
+ << "," << stats::solverTime / 1000000.
+ << "," << stats::cexCacheTime / 1000000.
+ << "," << stats::forkTime / 1000000.
+ << "," << stats::resolveTime / 1000000.
+ << ")\n";
+ statsFile->flush();
+}
+
+void StatsTracker::updateStateStatistics(uint64_t addend) {
+ for (std::set<ExecutionState*>::iterator it = executor.states.begin(),
+ ie = executor.states.end(); it != ie; ++it) {
+ ExecutionState &state = **it;
+ const InstructionInfo &ii = *state.pc->info;
+ theStatisticManager->incrementIndexedValue(stats::states, ii.id, addend);
+ if (UseCallPaths)
+ state.stack.back().callPathNode->statistics.incrementValue(stats::states, addend);
+ }
+}
+
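+// Write the istats file. The layout follows the callgrind/kcachegrind profile
+// format ("version:", "positions:", "events:", "fl="/"fn=" records and
+// "calls=" lines for call sites), with one event column per selected
+// statistic.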
+void StatsTracker::writeIStats() {
+ Module *m = executor.kmodule->module;
+ uint64_t istatsMask = 0;
+ std::ostream &of = *istatsFile;
+
+ of.seekp(0, std::ios::end);
+ unsigned istatsSize = of.tellp();
+ of.seekp(0);
+
+ of << "version: 1\n";
+ of << "creator: klee\n";
+ of << "pid: " << sys::Process::GetCurrentUserId() << "\n";
+ of << "cmd: " << m->getModuleIdentifier() << "\n\n";
+ of << "\n";
+
+ StatisticManager &sm = *theStatisticManager;
+ unsigned nStats = sm.getNumStatistics();
+
+ // Max is 13, sadly
+ istatsMask |= 1<<sm.getStatisticID("Queries");
+ istatsMask |= 1<<sm.getStatisticID("QueriesValid");
+ istatsMask |= 1<<sm.getStatisticID("QueriesInvalid");
+ istatsMask |= 1<<sm.getStatisticID("QueryTime");
+ istatsMask |= 1<<sm.getStatisticID("ResolveTime");
+ istatsMask |= 1<<sm.getStatisticID("Instructions");
+ istatsMask |= 1<<sm.getStatisticID("InstructionTimes");
+ istatsMask |= 1<<sm.getStatisticID("InstructionRealTimes");
+ istatsMask |= 1<<sm.getStatisticID("Forks");
+ istatsMask |= 1<<sm.getStatisticID("CoveredInstructions");
+ istatsMask |= 1<<sm.getStatisticID("UncoveredInstructions");
+ istatsMask |= 1<<sm.getStatisticID("States");
+ istatsMask |= 1<<sm.getStatisticID("MinDistToUncovered");
+
+ of << "positions: instr line\n";
+
+ for (unsigned i=0; i<nStats; i++) {
+ if (istatsMask & (1<<i)) {
+ Statistic &s = sm.getStatistic(i);
+ of << "event: " << s.getShortName() << " : "
+ << s.getName() << "\n";
+ }
+ }
+
+ of << "events: ";
+ for (unsigned i=0; i<nStats; i++) {
+ if (istatsMask & (1<<i))
+ of << sm.getStatistic(i).getShortName() << " ";
+ }
+ of << "\n";
+
+ // set state counts, decremented after we process so that we don't
+ // have to zero all records each time.
+ if (istatsMask & (1<<stats::states.getID()))
+ updateStateStatistics(1);
+
+ std::string sourceFile = "";
+
+ CallSiteSummaryTable callSiteStats;
+ if (UseCallPaths)
+ callPathManager.getSummaryStatistics(callSiteStats);
+
+ of << "ob=" << objectFilename << "\n";
+
+ for (Module::iterator fnIt = m->begin(), fn_ie = m->end();
+ fnIt != fn_ie; ++fnIt) {
+ if (!fnIt->isDeclaration()) {
+ of << "fn=" << fnIt->getName() << "\n";
+ for (Function::iterator bbIt = fnIt->begin(), bb_ie = fnIt->end();
+ bbIt != bb_ie; ++bbIt) {
+ for (BasicBlock::iterator it = bbIt->begin(), ie = bbIt->end();
+             it != ie; ++it) {
+ Instruction *instr = &*it;
+ const InstructionInfo &ii = executor.kmodule->infos->getInfo(instr);
+ unsigned index = ii.id;
+ if (ii.file!=sourceFile) {
+ of << "fl=" << ii.file << "\n";
+ sourceFile = ii.file;
+ }
+ of << ii.assemblyLine << " ";
+ of << ii.line << " ";
+ for (unsigned i=0; i<nStats; i++)
+ if (istatsMask&(1<<i))
+ of << sm.getIndexedValue(sm.getStatistic(i), index) << " ";
+ of << "\n";
+
+ if (UseCallPaths &&
+ (isa<CallInst>(instr) || isa<InvokeInst>(instr))) {
+ CallSiteSummaryTable::iterator it = callSiteStats.find(instr);
+ if (it!=callSiteStats.end()) {
+ for (std::map<llvm::Function*, CallSiteInfo>::iterator
+ fit = it->second.begin(), fie = it->second.end();
+ fit != fie; ++fit) {
+ Function *f = fit->first;
+ CallSiteInfo &csi = fit->second;
+ const InstructionInfo &fii =
+ executor.kmodule->infos->getFunctionInfo(f);
+
+ if (fii.file!="" && fii.file!=sourceFile)
+ of << "cfl=" << fii.file << "\n";
+ of << "cfn=" << f->getName() << "\n";
+ of << "calls=" << csi.count << " ";
+ of << fii.assemblyLine << " ";
+ of << fii.line << "\n";
+
+ of << ii.assemblyLine << " ";
+ of << ii.line << " ";
+ for (unsigned i=0; i<nStats; i++) {
+ if (istatsMask&(1<<i)) {
+ Statistic &s = sm.getStatistic(i);
+ uint64_t value;
+
+ // Hack, ignore things that don't make sense on
+ // call paths.
+ if (&s == &stats::uncoveredInstructions) {
+ value = 0;
+ } else {
+ value = csi.statistics.getValue(s);
+ }
+
+ of << value << " ";
+ }
+ }
+ of << "\n";
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (istatsMask & (1<<stats::states.getID()))
+ updateStateStatistics((uint64_t)-1);
+
+  // Clear the end of the file if necessary (no truncate op?).
+ unsigned pos = of.tellp();
+ for (unsigned i=pos; i<istatsSize; ++i)
+ of << '\n';
+
+ of.flush();
+}
+
+///
+
+typedef std::map<Instruction*, std::vector<Function*> > calltargets_ty;
+
+static calltargets_ty callTargets;
+static std::map<Function*, std::vector<Instruction*> > functionCallers;
+static std::map<Function*, unsigned> functionShortestPath;
+
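+// Return the intra-procedural successors of an instruction: the next
+// instruction in its block, or the first instruction of each successor block
+// when `i` is the block terminator.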
+static std::vector<Instruction*> getSuccs(Instruction *i) {
+ BasicBlock *bb = i->getParent();
+ std::vector<Instruction*> res;
+
+ if (i==bb->getTerminator()) {
+ for (succ_iterator it = succ_begin(bb), ie = succ_end(bb); it != ie; ++it)
+ res.push_back(it->begin());
+ } else {
+ res.push_back(++BasicBlock::iterator(i));
+ }
+
+ return res;
+}
+
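+// Estimated minimum distance (in instructions) from `ki` to an uncovered
+// instruction, given the minimum distance available at the return address
+// (0 meaning unreachable). Worked example with hypothetical values: a local
+// distance of 5, a distance-to-return of 2 and minDistAtRA of 4 yield
+// min(5, 2 + 4) = 5.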
+uint64_t klee::computeMinDistToUncovered(const KInstruction *ki,
+ uint64_t minDistAtRA) {
+ StatisticManager &sm = *theStatisticManager;
+ if (minDistAtRA==0) { // unreachable on return, best is local
+ return sm.getIndexedValue(stats::minDistToUncovered,
+ ki->info->id);
+ } else {
+ uint64_t minDistLocal = sm.getIndexedValue(stats::minDistToUncovered,
+ ki->info->id);
+ uint64_t distToReturn = sm.getIndexedValue(stats::minDistToReturn,
+ ki->info->id);
+
+ if (distToReturn==0) { // return unreachable, best is local
+ return minDistLocal;
+ } else if (!minDistLocal) { // no local reachable
+ return distToReturn + minDistAtRA;
+ } else {
+ return std::min(minDistLocal, distToReturn + minDistAtRA);
+ }
+ }
+}
+
+void StatsTracker::computeReachableUncovered() {
+ KModule *km = executor.kmodule;
+ Module *m = km->module;
+ static bool init = true;
+ const InstructionInfoTable &infos = *km->infos;
+ StatisticManager &sm = *theStatisticManager;
+
+ if (init) {
+ init = false;
+
+ // Compute call targets. It would be nice to use alias information
+ // instead of assuming all indirect calls hit all escaping
+ // functions, eh?
+ for (Module::iterator fnIt = m->begin(), fn_ie = m->end();
+ fnIt != fn_ie; ++fnIt) {
+ for (Function::iterator bbIt = fnIt->begin(), bb_ie = fnIt->end();
+ bbIt != bb_ie; ++bbIt) {
+ for (BasicBlock::iterator it = bbIt->begin(), ie = bbIt->end();
+             it != ie; ++it) {
+ if (isa<CallInst>(it) || isa<InvokeInst>(it)) {
+ if (isa<InlineAsm>(it->getOperand(0))) {
+ // We can never call through here so assume no targets
+ // (which should be correct anyhow).
+ callTargets.insert(std::make_pair(it,
+ std::vector<Function*>()));
+ } else if (Function *target = getDirectCallTarget(it)) {
+ callTargets[it].push_back(target);
+ } else {
+ callTargets[it] =
+ std::vector<Function*>(km->escapingFunctions.begin(),
+ km->escapingFunctions.end());
+ }
+ }
+ }
+ }
+ }
+
+    // Compute function callers as the inverse of callTargets.
+ for (calltargets_ty::iterator it = callTargets.begin(),
+ ie = callTargets.end(); it != ie; ++it)
+ for (std::vector<Function*>::iterator fit = it->second.begin(),
+ fie = it->second.end(); fit != fie; ++fit)
+ functionCallers[*fit].push_back(it->first);
+
+ // Initialize minDistToReturn to shortest paths through
+ // functions. 0 is unreachable.
+ std::vector<Instruction *> instructions;
+ for (Module::iterator fnIt = m->begin(), fn_ie = m->end();
+ fnIt != fn_ie; ++fnIt) {
+ if (fnIt->isDeclaration()) {
+ if (fnIt->doesNotReturn()) {
+ functionShortestPath[fnIt] = 0;
+ } else {
+ functionShortestPath[fnIt] = 1; // whatever
+ }
+ } else {
+ functionShortestPath[fnIt] = 0;
+ }
+
+ // Not sure if I should bother to preorder here. XXX I should.
+ for (Function::iterator bbIt = fnIt->begin(), bb_ie = fnIt->end();
+ bbIt != bb_ie; ++bbIt) {
+ for (BasicBlock::iterator it = bbIt->begin(), ie = bbIt->end();
+             it != ie; ++it) {
+ instructions.push_back(it);
+ unsigned id = infos.getInfo(it).id;
+ sm.setIndexedValue(stats::minDistToReturn,
+ id,
+ isa<ReturnInst>(it) || isa<UnwindInst>(it));
+ }
+ }
+ }
+
+ std::reverse(instructions.begin(), instructions.end());
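+    // Processing in reverse program order lets each pass of the
+    // (non-worklisted) fixed point below see mostly-updated successor values,
+    // so it converges faster.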
+
+ // I'm so lazy it's not even worklisted.
+ bool changed;
+ do {
+ changed = false;
+ for (std::vector<Instruction*>::iterator it = instructions.begin(),
+ ie = instructions.end(); it != ie; ++it) {
+ Instruction *inst = *it;
+ unsigned bestThrough = 0;
+
+ if (isa<CallInst>(inst) || isa<InvokeInst>(inst)) {
+ std::vector<Function*> &targets = callTargets[inst];
+ for (std::vector<Function*>::iterator fnIt = targets.begin(),
+ ie = targets.end(); fnIt != ie; ++fnIt) {
+ uint64_t dist = functionShortestPath[*fnIt];
+ if (dist) {
+ dist = 1+dist; // count instruction itself
+ if (bestThrough==0 || dist<bestThrough)
+ bestThrough = dist;
+ }
+ }
+ } else {
+ bestThrough = 1;
+ }
+
+ if (bestThrough) {
+ unsigned id = infos.getInfo(*it).id;
+ uint64_t best, cur = best = sm.getIndexedValue(stats::minDistToReturn, id);
+ std::vector<Instruction*> succs = getSuccs(*it);
+ for (std::vector<Instruction*>::iterator it2 = succs.begin(),
+ ie = succs.end(); it2 != ie; ++it2) {
+ uint64_t dist = sm.getIndexedValue(stats::minDistToReturn,
+ infos.getInfo(*it2).id);
+ if (dist) {
+ uint64_t val = bestThrough + dist;
+ if (best==0 || val<best)
+ best = val;
+ }
+ }
+ if (best != cur) {
+ sm.setIndexedValue(stats::minDistToReturn, id, best);
+ changed = true;
+
+ // Update shortest path if this is the entry point.
+ Function *f = inst->getParent()->getParent();
+ if (inst==f->begin()->begin())
+ functionShortestPath[f] = best;
+ }
+ }
+ }
+ } while (changed);
+ }
+
+ // compute minDistToUncovered, 0 is unreachable
+ std::vector<Instruction *> instructions;
+ for (Module::iterator fnIt = m->begin(), fn_ie = m->end();
+ fnIt != fn_ie; ++fnIt) {
+ // Not sure if I should bother to preorder here.
+ for (Function::iterator bbIt = fnIt->begin(), bb_ie = fnIt->end();
+ bbIt != bb_ie; ++bbIt) {
+ for (BasicBlock::iterator it = bbIt->begin(), ie = bbIt->end();
+           it != ie; ++it) {
+ unsigned id = infos.getInfo(it).id;
+ instructions.push_back(&*it);
+ sm.setIndexedValue(stats::minDistToUncovered,
+ id,
+ sm.getIndexedValue(stats::uncoveredInstructions, id));
+ }
+ }
+ }
+
+ std::reverse(instructions.begin(), instructions.end());
+
+ // I'm so lazy it's not even worklisted.
+ bool changed;
+ do {
+ changed = false;
+ for (std::vector<Instruction*>::iterator it = instructions.begin(),
+ ie = instructions.end(); it != ie; ++it) {
+ Instruction *inst = *it;
+ uint64_t best, cur = best = sm.getIndexedValue(stats::minDistToUncovered,
+ infos.getInfo(inst).id);
+ unsigned bestThrough = 0;
+
+ if (isa<CallInst>(inst) || isa<InvokeInst>(inst)) {
+ std::vector<Function*> &targets = callTargets[inst];
+ for (std::vector<Function*>::iterator fnIt = targets.begin(),
+ ie = targets.end(); fnIt != ie; ++fnIt) {
+ uint64_t dist = functionShortestPath[*fnIt];
+ if (dist) {
+ dist = 1+dist; // count instruction itself
+ if (bestThrough==0 || dist<bestThrough)
+ bestThrough = dist;
+ }
+
+ if (!(*fnIt)->isDeclaration()) {
+ uint64_t calleeDist = sm.getIndexedValue(stats::minDistToUncovered,
+ infos.getFunctionInfo(*fnIt).id);
+ if (calleeDist) {
+ calleeDist = 1+calleeDist; // count instruction itself
+ if (best==0 || calleeDist<best)
+ best = calleeDist;
+ }
+ }
+ }
+ } else {
+ bestThrough = 1;
+ }
+
+ if (bestThrough) {
+ std::vector<Instruction*> succs = getSuccs(inst);
+ for (std::vector<Instruction*>::iterator it2 = succs.begin(),
+ ie = succs.end(); it2 != ie; ++it2) {
+ uint64_t dist = sm.getIndexedValue(stats::minDistToUncovered,
+ infos.getInfo(*it2).id);
+ if (dist) {
+ uint64_t val = bestThrough + dist;
+ if (best==0 || val<best)
+ best = val;
+ }
+ }
+ }
+
+ if (best != cur) {
+ sm.setIndexedValue(stats::minDistToUncovered,
+ infos.getInfo(inst).id,
+ best);
+ changed = true;
+ }
+ }
+ } while (changed);
+
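+  // Refresh each live state's cached minDistToUncoveredOnReturn, walking the
+  // stack from the outermost frame so every frame's value is derived from the
+  // distance already computed at its caller's return address.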
+ for (std::set<ExecutionState*>::iterator it = executor.states.begin(),
+ ie = executor.states.end(); it != ie; ++it) {
+ ExecutionState *es = *it;
+ uint64_t currentFrameMinDist = 0;
+ for (ExecutionState::stack_ty::iterator sfIt = es->stack.begin(),
+ sf_ie = es->stack.end(); sfIt != sf_ie; ++sfIt) {
+ ExecutionState::stack_ty::iterator next = sfIt + 1;
+ KInstIterator kii;
+
+ if (next==es->stack.end()) {
+ kii = es->pc;
+ } else {
+ kii = next->caller;
+ ++kii;
+ }
+
+ sfIt->minDistToUncoveredOnReturn = currentFrameMinDist;
+
+ currentFrameMinDist = computeMinDistToUncovered(kii, currentFrameMinDist);
+ }
+ }
+}
diff --git a/lib/Core/StatsTracker.h b/lib/Core/StatsTracker.h
new file mode 100644
index 00000000..9d22b389
--- /dev/null
+++ b/lib/Core/StatsTracker.h
@@ -0,0 +1,93 @@
+//===-- StatsTracker.h ------------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef KLEE_STATSTRACKER_H
+#define KLEE_STATSTRACKER_H
+
+#include "CallPathManager.h"
+
+#include <iostream>
+#include <set>
+
+namespace llvm {
+ class BranchInst;
+ class Function;
+ class Instruction;
+}
+
+namespace klee {
+ class ExecutionState;
+ class Executor;
+ class InstructionInfoTable;
+ class InterpreterHandler;
+ class KInstruction;
+ class StackFrame;
+
+ class StatsTracker {
+ friend class WriteStatsTimer;
+ friend class WriteIStatsTimer;
+
+ Executor &executor;
+ std::string objectFilename;
+
+ std::ostream *statsFile, *istatsFile;
+ double startWallTime;
+
+ unsigned numBranches;
+ unsigned fullBranches, partialBranches;
+
+ CallPathManager callPathManager;
+
+ bool updateMinDistToUncovered;
+
+ public:
+ static bool useStatistics();
+
+ private:
+ void updateStateStatistics(uint64_t addend);
+ void writeStatsHeader();
+ void writeStatsLine();
+ void writeIStats();
+
+ public:
+ StatsTracker(Executor &_executor, std::string _objectFilename,
+ bool _updateMinDistToUncovered);
+ ~StatsTracker();
+
+ // called after a new StackFrame has been pushed (for callpath tracing)
+ void framePushed(ExecutionState &es, StackFrame *parentFrame);
+
+ // called after a StackFrame has been popped
+ void framePopped(ExecutionState &es);
+
+ // called when some side of a branch has been visited. it is
+ // imperative that this be called when the statistics index is at
+ // the index for the branch itself.
+ void markBranchVisited(ExecutionState *visitedTrue,
+ ExecutionState *visitedFalse);
+
+ // called when execution is done and stats files should be flushed
+ void done();
+
+ // process stats for a single instruction step, es is the state
+ // about to be stepped
+ void stepInstruction(ExecutionState &es);
+
+ /// Return time in seconds since execution start.
+ double elapsed();
+
+ void computeReachableUncovered();
+ };
+
+ uint64_t computeMinDistToUncovered(const KInstruction *ki,
+ uint64_t minDistAtRA);
+
+}
+
+#endif
diff --git a/lib/Core/TimingSolver.cpp b/lib/Core/TimingSolver.cpp
new file mode 100644
index 00000000..70e42836
--- /dev/null
+++ b/lib/Core/TimingSolver.cpp
@@ -0,0 +1,147 @@
+//===-- TimingSolver.cpp --------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TimingSolver.h"
+
+#include "klee/ExecutionState.h"
+#include "klee/Solver.h"
+#include "klee/Statistics.h"
+
+#include "CoreStats.h"
+
+#include "llvm/System/Process.h"
+
+using namespace klee;
+using namespace llvm;
+
+/***/
+
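+// Each query below is wrapped in the same timing pattern: the elapsed (wall)
+// time is sampled with sys::Process::GetTimeUsage() before and after the
+// solver call, and the delta is added in microseconds to stats::solverTime
+// and, scaled to seconds, to the state's queryCost.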
+bool TimingSolver::evaluate(const ExecutionState& state, ref<Expr> expr,
+ Solver::Validity &result) {
+ // Fast path, to avoid timer and OS overhead.
+ if (expr.isConstant()) {
+ result = expr.getConstantValue() ? Solver::True : Solver::False;
+ return true;
+ }
+
+ sys::TimeValue now(0,0),user(0,0),delta(0,0),sys(0,0);
+ sys::Process::GetTimeUsage(now,user,sys);
+
+ if (simplifyExprs)
+ expr = state.constraints.simplifyExpr(expr);
+
+ bool success = solver->evaluate(Query(state.constraints, expr), result);
+
+ sys::Process::GetTimeUsage(delta,user,sys);
+ delta -= now;
+ stats::solverTime += delta.usec();
+ state.queryCost += delta.usec()/1000000.;
+
+ return success;
+}
+
+bool TimingSolver::mustBeTrue(const ExecutionState& state, ref<Expr> expr,
+ bool &result) {
+ // Fast path, to avoid timer and OS overhead.
+ if (expr.isConstant()) {
+ result = expr.getConstantValue() ? true : false;
+ return true;
+ }
+
+ sys::TimeValue now(0,0),user(0,0),delta(0,0),sys(0,0);
+ sys::Process::GetTimeUsage(now,user,sys);
+
+ if (simplifyExprs)
+ expr = state.constraints.simplifyExpr(expr);
+
+ bool success = solver->mustBeTrue(Query(state.constraints, expr), result);
+
+ sys::Process::GetTimeUsage(delta,user,sys);
+ delta -= now;
+ stats::solverTime += delta.usec();
+ state.queryCost += delta.usec()/1000000.;
+
+ return success;
+}
+
+bool TimingSolver::mustBeFalse(const ExecutionState& state, ref<Expr> expr,
+ bool &result) {
+ return mustBeTrue(state, Expr::createNot(expr), result);
+}
+
+bool TimingSolver::mayBeTrue(const ExecutionState& state, ref<Expr> expr,
+ bool &result) {
+ bool res;
+ if (!mustBeFalse(state, expr, res))
+ return false;
+ result = !res;
+ return true;
+}
+
+bool TimingSolver::mayBeFalse(const ExecutionState& state, ref<Expr> expr,
+ bool &result) {
+ bool res;
+ if (!mustBeTrue(state, expr, res))
+ return false;
+ result = !res;
+ return true;
+}
+
+bool TimingSolver::getValue(const ExecutionState& state, ref<Expr> expr,
+ ref<Expr> &result) {
+ // Fast path, to avoid timer and OS overhead.
+ if (expr.isConstant()) {
+ result = expr;
+ return true;
+ }
+
+ sys::TimeValue now(0,0),user(0,0),delta(0,0),sys(0,0);
+ sys::Process::GetTimeUsage(now,user,sys);
+
+ if (simplifyExprs)
+ expr = state.constraints.simplifyExpr(expr);
+
+ bool success = solver->getValue(Query(state.constraints, expr), result);
+
+ sys::Process::GetTimeUsage(delta,user,sys);
+ delta -= now;
+ stats::solverTime += delta.usec();
+ state.queryCost += delta.usec()/1000000.;
+
+ return success;
+}
+
+bool
+TimingSolver::getInitialValues(const ExecutionState& state,
+ const std::vector<const Array*>
+ &objects,
+ std::vector< std::vector<unsigned char> >
+ &result) {
+ if (objects.empty())
+ return true;
+
+ sys::TimeValue now(0,0),user(0,0),delta(0,0),sys(0,0);
+ sys::Process::GetTimeUsage(now,user,sys);
+
+ bool success = solver->getInitialValues(Query(state.constraints,
+ ref<Expr>(0, Expr::Bool)),
+ objects, result);
+
+ sys::Process::GetTimeUsage(delta,user,sys);
+ delta -= now;
+ stats::solverTime += delta.usec();
+ state.queryCost += delta.usec()/1000000.;
+
+ return success;
+}
+
+std::pair< ref<Expr>, ref<Expr> >
+TimingSolver::getRange(const ExecutionState& state, ref<Expr> expr) {
+ return solver->getRange(Query(state.constraints, expr));
+}
diff --git a/lib/Core/TimingSolver.h b/lib/Core/TimingSolver.h
new file mode 100644
index 00000000..875216d9
--- /dev/null
+++ b/lib/Core/TimingSolver.h
@@ -0,0 +1,70 @@
+//===-- TimingSolver.h ------------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef KLEE_TIMINGSOLVER_H
+#define KLEE_TIMINGSOLVER_H
+
+#include "klee/Expr.h"
+#include "klee/Solver.h"
+
+#include <vector>
+
+namespace klee {
+ class ExecutionState;
+ class Solver;
+ class STPSolver;
+
+ /// TimingSolver - A simple class which wraps a solver and handles
+ /// tracking the statistics that we care about.
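+  ///
+  /// Typical usage (sketch; `state` and `cond` are illustrative names):
+  /// \code
+  ///   bool res;
+  ///   if (solver->mustBeTrue(state, cond, res) && res) {
+  ///     // cond provably holds under the state's constraints
+  ///   }
+  /// \endcode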
+ class TimingSolver {
+ public:
+ Solver *solver;
+ STPSolver *stpSolver;
+ bool simplifyExprs;
+
+ public:
+ /// TimingSolver - Construct a new timing solver.
+ ///
+ /// \param _simplifyExprs - Whether expressions should be
+ /// simplified (via the constraint manager interface) prior to
+ /// querying.
+ TimingSolver(Solver *_solver, STPSolver *_stpSolver,
+ bool _simplifyExprs = true)
+ : solver(_solver), stpSolver(_stpSolver), simplifyExprs(_simplifyExprs) {}
+ ~TimingSolver() {
+ delete solver;
+ }
+
+ void setTimeout(double t) {
+ stpSolver->setTimeout(t);
+ }
+
+ bool evaluate(const ExecutionState&, ref<Expr>, Solver::Validity &result);
+
+ bool mustBeTrue(const ExecutionState&, ref<Expr>, bool &result);
+
+ bool mustBeFalse(const ExecutionState&, ref<Expr>, bool &result);
+
+ bool mayBeTrue(const ExecutionState&, ref<Expr>, bool &result);
+
+ bool mayBeFalse(const ExecutionState&, ref<Expr>, bool &result);
+
+ bool getValue(const ExecutionState &, ref<Expr> expr, ref<Expr> &result);
+
+ bool getInitialValues(const ExecutionState&,
+ const std::vector<const Array*> &objects,
+ std::vector< std::vector<unsigned char> > &result);
+
+ virtual std::pair< ref<Expr>, ref<Expr> >
+ getRange(const ExecutionState&, ref<Expr> query);
+ };
+
+}
+
+#endif
diff --git a/lib/Core/UserSearcher.cpp b/lib/Core/UserSearcher.cpp
new file mode 100644
index 00000000..1aff9e5e
--- /dev/null
+++ b/lib/Core/UserSearcher.cpp
@@ -0,0 +1,175 @@
+//===-- UserSearcher.cpp --------------------------------------------------===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Common.h"
+
+#include "UserSearcher.h"
+
+#include "Searcher.h"
+#include "Executor.h"
+
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+using namespace klee;
+
+namespace {
+ cl::opt<bool>
+ UseRandomSearch("use-random-search");
+
+ cl::opt<bool>
+ UseInterleavedRS("use-interleaved-RS");
+
+ cl::opt<bool>
+ UseInterleavedNURS("use-interleaved-NURS");
+
+ cl::opt<bool>
+ UseInterleavedMD2UNURS("use-interleaved-MD2U-NURS");
+
+ cl::opt<bool>
+ UseInterleavedInstCountNURS("use-interleaved-icnt-NURS");
+
+ cl::opt<bool>
+ UseInterleavedCPInstCountNURS("use-interleaved-cpicnt-NURS");
+
+ cl::opt<bool>
+ UseInterleavedQueryCostNURS("use-interleaved-query-cost-NURS");
+
+ cl::opt<bool>
+ UseInterleavedCovNewNURS("use-interleaved-covnew-NURS");
+
+ cl::opt<bool>
+ UseNonUniformRandomSearch("use-non-uniform-random-search");
+
+ cl::opt<bool>
+ UseRandomPathSearch("use-random-path");
+
+ cl::opt<WeightedRandomSearcher::WeightType>
+ WeightType("weight-type", cl::desc("Set the weight type for --use-non-uniform-random-search"),
+ cl::values(clEnumValN(WeightedRandomSearcher::Depth, "none", "use (2^depth)"),
+ clEnumValN(WeightedRandomSearcher::InstCount, "icnt", "use current pc exec count"),
+ clEnumValN(WeightedRandomSearcher::CPInstCount, "cpicnt", "use current pc exec count"),
+ clEnumValN(WeightedRandomSearcher::QueryCost, "query-cost", "use query cost"),
+ clEnumValN(WeightedRandomSearcher::MinDistToUncovered, "md2u", "use min dist to uncovered"),
+ clEnumValN(WeightedRandomSearcher::CoveringNew, "covnew", "use min dist to uncovered + coveringNew flag"),
+ clEnumValEnd));
+
+ cl::opt<bool>
+ UseMerge("use-merge",
+ cl::desc("Enable support for klee_merge() (experimental)"));
+
+ cl::opt<bool>
+ UseBumpMerge("use-bump-merge",
+ cl::desc("Enable support for klee_merge() (extra experimental)"));
+
+ cl::opt<bool>
+ UseIterativeDeepeningTimeSearch("use-iterative-deepening-time-search",
+ cl::desc("(experimental)"));
+
+ cl::opt<bool>
+ UseBatchingSearch("use-batching-search",
+                    cl::desc("Use batching searcher (keep running selected state for N instructions/time, see --batch-instructions and --batch-time)"));
+
+ cl::opt<unsigned>
+ BatchInstructions("batch-instructions",
+ cl::desc("Number of instructions to batch when using --use-batching-search"),
+ cl::init(10000));
+
+ cl::opt<double>
+ BatchTime("batch-time",
+ cl::desc("Amount of time to batch when using --use-batching-search"),
+ cl::init(5.0));
+}
+
+bool klee::userSearcherRequiresMD2U() {
+ return (WeightType==WeightedRandomSearcher::MinDistToUncovered ||
+ WeightType==WeightedRandomSearcher::CoveringNew ||
+ UseInterleavedMD2UNURS ||
+ UseInterleavedCovNewNURS ||
+ UseInterleavedInstCountNURS ||
+ UseInterleavedCPInstCountNURS ||
+ UseInterleavedQueryCostNURS);
+}
+
+// FIXME: Remove.
+bool klee::userSearcherRequiresBranchSequences() {
+ return false;
+}
+
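+// Build the searcher stack requested on the command line: a base searcher
+// (random-path, weighted-random, random, or DFS), optionally interleaved with
+// additional weighted searchers, then wrapped in turn by the batching,
+// merging (or bump-merging), and iterative-deepening searchers when those
+// options are enabled.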
+Searcher *klee::constructUserSearcher(Executor &executor) {
+ Searcher *searcher = 0;
+
+ if (UseRandomPathSearch) {
+ searcher = new RandomPathSearcher(executor);
+ } else if (UseNonUniformRandomSearch) {
+ searcher = new WeightedRandomSearcher(executor, WeightType);
+ } else if (UseRandomSearch) {
+ searcher = new RandomSearcher();
+ } else {
+ searcher = new DFSSearcher();
+ }
+
+ if (UseInterleavedNURS || UseInterleavedMD2UNURS || UseInterleavedRS ||
+ UseInterleavedCovNewNURS || UseInterleavedInstCountNURS ||
+ UseInterleavedCPInstCountNURS || UseInterleavedQueryCostNURS) {
+ std::vector<Searcher *> s;
+ s.push_back(searcher);
+
+ if (UseInterleavedNURS)
+ s.push_back(new WeightedRandomSearcher(executor,
+ WeightedRandomSearcher::Depth));
+ if (UseInterleavedMD2UNURS)
+ s.push_back(new WeightedRandomSearcher(executor,
+ WeightedRandomSearcher::MinDistToUncovered));
+
+ if (UseInterleavedCovNewNURS)
+ s.push_back(new WeightedRandomSearcher(executor,
+ WeightedRandomSearcher::CoveringNew));
+
+ if (UseInterleavedInstCountNURS)
+ s.push_back(new WeightedRandomSearcher(executor,
+ WeightedRandomSearcher::InstCount));
+
+ if (UseInterleavedCPInstCountNURS)
+ s.push_back(new WeightedRandomSearcher(executor,
+ WeightedRandomSearcher::CPInstCount));
+
+ if (UseInterleavedQueryCostNURS)
+ s.push_back(new WeightedRandomSearcher(executor,
+ WeightedRandomSearcher::QueryCost));
+
+ if (UseInterleavedRS)
+ s.push_back(new RandomSearcher());
+
+ searcher = new InterleavedSearcher(s);
+ }
+
+ if (UseBatchingSearch) {
+ searcher = new BatchingSearcher(searcher, BatchTime, BatchInstructions);
+ }
+
+ if (UseMerge) {
+ assert(!UseBumpMerge);
+ searcher = new MergingSearcher(executor, searcher);
+ } else if (UseBumpMerge) {
+ searcher = new BumpMergingSearcher(executor, searcher);
+ }
+
+ if (UseIterativeDeepeningTimeSearch) {
+ searcher = new IterativeDeepeningTimeSearcher(searcher);
+ }
+
+ std::ostream &os = executor.getHandler().getInfoStream();
+
+ os << "BEGIN searcher description\n";
+ searcher->printName(os);
+ os << "END searcher description\n";
+
+ return searcher;
+}
diff --git a/lib/Core/UserSearcher.h b/lib/Core/UserSearcher.h
new file mode 100644
index 00000000..9571bf5b
--- /dev/null
+++ b/lib/Core/UserSearcher.h
@@ -0,0 +1,25 @@
+//===-- UserSearcher.h ------------------------------------------*- C++ -*-===//
+//
+// The KLEE Symbolic Virtual Machine
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef KLEE_USERSEARCHER_H
+#define KLEE_USERSEARCHER_H
+
+namespace klee {
+ class Executor;
+ class Searcher;
+
+ // XXX gross, should be on demand?
+ bool userSearcherRequiresMD2U();
+
+ bool userSearcherRequiresBranchSequences();
+
+ Searcher *constructUserSearcher(Executor &executor);
+}
+
+#endif