author     Andrea Mattavelli <andreamattavelli@gmail.com>   2017-11-22 17:18:07 +0000
committer  Cristian Cadar <c.cadar@imperial.ac.uk>          2018-10-23 18:53:46 +0300
commit     a53fade6e85394ef95dfaaa1c264149e85ea5451 (patch)
tree       e8d10b1cd288c0b013b5dc8039bdb3b8dbf42d8d /lib/Expr
parent     916b72a7955cbb06d1a10640f8c6daea14da523e (diff)
download   klee-a53fade6e85394ef95dfaaa1c264149e85ea5451.tar.gz
Added support for KLEE index-based array optimization
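The optimization rewrites constraints that read a concrete (constant) array
at a symbolic index into equisatisfiable constraints over the index alone.
A minimal standalone sketch of the idea, using assumed example values (the
array contents and the target value below are hypothetical, not taken from
this commit):

    // For concrete arr = {5, 3, 7, 3} and symbolic byte k, the constraint
    // (Read arr k) == 3 is equisatisfiable with (k == 1 | k == 3), which
    // mentions only the index and no longer the array.
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint8_t arr[4] = {5, 3, 7, 3};
      for (uint8_t k = 0; k < 4; ++k)
        assert((arr[k] == 3) == (k == 1 || k == 3));
      return 0;
    }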
Diffstat (limited to 'lib/Expr')
-rw-r--r--  lib/Expr/ArrayExprOptimizer.cpp    135
-rw-r--r--  lib/Expr/ArrayExprRewriter.cpp     141
-rw-r--r--  lib/Expr/ArrayExprVisitor.cpp      171
-rw-r--r--  lib/Expr/AssignmentGenerator.cpp   327
-rw-r--r--  lib/Expr/CMakeLists.txt              4
5 files changed, 778 insertions, 0 deletions
diff --git a/lib/Expr/ArrayExprOptimizer.cpp b/lib/Expr/ArrayExprOptimizer.cpp
new file mode 100644
index 00000000..d3e8cbab
--- /dev/null
+++ b/lib/Expr/ArrayExprOptimizer.cpp
@@ -0,0 +1,135 @@
+#include "klee/ArrayExprOptimizer.h"
+
+#include "klee/ArrayExprRewriter.h"
+#include "klee/util/BitArray.h"
+
+#include <iostream>
+
+using namespace klee;
+
+namespace klee {
+llvm::cl::opt<ArrayOptimizationType> OptimizeArray(
+ "optimize-array",
+ llvm::cl::values(
+ clEnumValN(INDEX, "index", "Index-based transformation"),
+ clEnumValEnd),
+ llvm::cl::init(NONE),
+ llvm::cl::desc("Optimize accesses to either concrete or concrete/symbolic "
+ "arrays. (default=off)"));
+} // namespace klee
+
+void ExprOptimizer::optimizeExpr(const ref<Expr> &e, ref<Expr> &result,
+ bool valueOnly) {
+ unsigned hash = e->hash();
+ if (cacheExprUnapplicable.find(hash) != cacheExprUnapplicable.end()) {
+ return;
+ }
+
+ // Find cached expressions
+ auto cached = cacheExprOptimized.find(hash);
+ if (cached != cacheExprOptimized.end()) {
+ result = cached->second;
+ return;
+ }
+
+ // ----------------------- INDEX-BASED OPTIMIZATION -------------------------
+ if (!valueOnly && OptimizeArray == INDEX) {
+ array2idx_ty arrays;
+ ConstantArrayExprVisitor aev(arrays);
+ aev.visit(e);
+
+ if (arrays.size() == 0 || aev.isIncompatible()) {
+      // We only optimize expressions that read concrete arrays at a
+      // symbolic index. If the expression cannot be optimized, we record
+      // the failure only when the optimizations are not being combined
+ if (OptimizeArray == INDEX) {
+ cacheExprUnapplicable.insert(hash);
+ return;
+ }
+ } else {
+ mapIndexOptimizedExpr_ty idx_valIdx;
+
+    // Compute the indexes such that orig_expr =equisat= (k==i | k==j | ...)
+ if (computeIndexes(arrays, e, idx_valIdx)) {
+ if (idx_valIdx.size() > 0) {
+ // Create new expression on indexes
+ result = ExprRewriter::createOptExpr(e, arrays, idx_valIdx);
+ } else {
+ klee_warning("OPT_I: infeasible branch!");
+ result = ConstantExpr::create(0, Expr::Bool);
+ }
+ // Add new expression to cache
+ if (result.get()) {
+ klee_warning("OPT_I: successful");
+ cacheExprOptimized[hash] = result;
+ } else {
+ klee_warning("OPT_I: unsuccessful");
+ }
+ } else {
+ klee_warning("OPT_I: unsuccessful");
+ cacheExprUnapplicable.insert(hash);
+ }
+ }
+ }
+}
+
+bool ExprOptimizer::computeIndexes(array2idx_ty &arrays, const ref<Expr> &e,
+ mapIndexOptimizedExpr_ty &idx_valIdx) const {
+ bool success = false;
+ // For each constant array found
+ for (auto &element : arrays) {
+ const Array *arr = element.first;
+
+ assert(arr->isConstantArray() && "Array is not concrete");
+ assert(element.second.size() == 1 && "Multiple indexes on the same array");
+
+ IndexTransformationExprVisitor idxt_v(arr);
+ idxt_v.visit(e);
+ assert((idxt_v.getWidth() % arr->range == 0) && "Read is not aligned");
+ Expr::Width width = idxt_v.getWidth() / arr->range;
+
+ if (idxt_v.getMul().get()) {
+      // If we have a MulExpr in the index, we can narrow the search by
+      // skipping all indexes that are not multiples of that value: the
+      // MulExpr interpreter would reject them anyway, since it would find
+      // no integer solution
+      ConstantExpr &ce = static_cast<ConstantExpr &>(*idxt_v.getMul());
+      uint64_t mulVal = (*ce.getAPValue().getRawData());
+      // So far we keep this optimization conservative, but we may try
+      // more aggressive conditions (e.g. mulVal > width)
+ if (width == 1 && mulVal > 1)
+ width = mulVal;
+ }
+
+ // For each concrete value 'i' stored in the array
+ for (size_t aIdx = 0; aIdx < arr->constantValues.size(); aIdx += width) {
+ Assignment *a = new Assignment();
+ v_arr_ty objects;
+ std::vector<std::vector<unsigned char> > values;
+
+ // For each symbolic index Expr(k) found
+ for (auto &index_it : element.second) {
+ ref<Expr> idx = index_it;
+ ref<Expr> val = ConstantExpr::alloc(aIdx, arr->getDomain());
+ // We create a partial assignment on 'k' s.t. Expr(k)==i
+ bool assignmentSuccess =
+ AssignmentGenerator::generatePartialAssignment(idx, val, a);
+ success |= assignmentSuccess;
+
+        // If the assignment makes the expression 'e' evaluate to true
+ ref<Expr> evaluation = a->evaluate(e);
+ if (assignmentSuccess && evaluation->isTrue()) {
+ if (idx_valIdx.find(idx) == idx_valIdx.end()) {
+ idx_valIdx.insert(std::make_pair(idx, std::vector<ref<Expr> >()));
+ }
+ idx_valIdx[idx]
+ .push_back(ConstantExpr::alloc(aIdx, arr->getDomain()));
+ }
+ }
+ delete a;
+ }
+ }
+ return success;
+}
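The MulExpr shortcut in computeIndexes above narrows the enumeration: when
the symbolic index has the form k*c, only array offsets divisible by c can
ever be selected. A standalone sketch with assumed values (arraySize and
stride below are hypothetical, not taken from this commit):

    // With an index of the form k*4 over a 16-byte array, computeIndexes
    // only needs to try offsets 0, 4, 8 and 12; the remaining offsets
    // have no integer solution for k and would be rejected anyway.
    #include <cassert>

    int main() {
      const unsigned arraySize = 16;
      const unsigned stride = 4; // mulVal extracted from the MulExpr
      unsigned tried = 0;
      for (unsigned aIdx = 0; aIdx < arraySize; aIdx += stride)
        ++tried; // mirrors the "aIdx += width" loop above
      assert(tried == arraySize / stride);
      return 0;
    }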
diff --git a/lib/Expr/ArrayExprRewriter.cpp b/lib/Expr/ArrayExprRewriter.cpp
new file mode 100644
index 00000000..d4dc9a21
--- /dev/null
+++ b/lib/Expr/ArrayExprRewriter.cpp
@@ -0,0 +1,141 @@
+#include "klee/ArrayExprRewriter.h"
+
+#include "klee/util/BitArray.h"
+
+#include "llvm/Support/CommandLine.h"
+
+using namespace klee;
+
+ref<Expr> ExprRewriter::createOptExpr(const ref<Expr> &e,
+ const array2idx_ty &arrays,
+ const mapIndexOptimizedExpr_ty &idx_valIdx) {
+ return rewrite(e, arrays, idx_valIdx);
+}
+
+ref<Expr>
+ExprRewriter::rewrite(const ref<Expr> &e,
+ const array2idx_ty &arrays,
+ const mapIndexOptimizedExpr_ty &idx_valIdx) {
+ ref<Expr> notFound;
+ std::vector<ref<Expr> > eqExprs;
+ bool invert = false;
+ for (auto &element : arrays) {
+ const Array* arr = element.first;
+ std::vector<ref<Expr> > indexes = element.second;
+
+ IndexTransformationExprVisitor idxt_v(arr);
+ idxt_v.visit(e);
+
+    assert((idxt_v.getWidth() % arr->range == 0) && "Read is not aligned");
+
+    Expr::Width width = idxt_v.getWidth() / arr->range;
+ if (idxt_v.getMul().get()) {
+      // If we have a MulExpr in the index, we can narrow the search by
+      // skipping all indexes that are not multiples of that value: the
+      // MulExpr interpreter would reject them anyway, since it would find
+      // no integer solution
+      ConstantExpr &ce = static_cast<ConstantExpr &>(*idxt_v.getMul());
+      uint64_t mulVal = ce.getAPValue().getZExtValue();
+      // So far we keep this optimization conservative, but we may try
+      // more aggressive conditions (e.g. mulVal > width)
+ if (width == 1 && mulVal > 1)
+ width = mulVal;
+ }
+
+ for (std::vector<ref<Expr> >::const_iterator index_it = indexes.begin();
+ index_it != indexes.end(); ++index_it) {
+ if (idx_valIdx.find((*index_it)) == idx_valIdx.end()) {
+ continue;
+ }
+ auto opt_indexes = idx_valIdx.at((*index_it));
+ if (opt_indexes.size() == 0) {
+ // We continue with other solutions
+ continue;
+ } else if (opt_indexes.size() == 1) {
+        // Special case: a single index yields a plain EqExpr (e.g. k==i)
+ eqExprs.push_back(createEqExpr((*index_it), opt_indexes[0]));
+ } else {
+ Expr::Width idxWidth = (*index_it).get()->getWidth();
+ unsigned set = 0;
+ BitArray ba(arr->size/width);
+ for (auto &vals : opt_indexes) {
+ ConstantExpr &ce = static_cast<ConstantExpr &>(*vals);
+ llvm::APInt v = ce.getAPValue();
+ ba.set(v.getZExtValue() / width);
+ set++;
+ }
+ if (set > 0 && set < arr->size/width)
+          invert = ((float)set / (float)(arr->size / width)) > 0.5;
+ int start = -1;
+ for (unsigned i = 0; i < arr->size/width; ++i) {
+ if ((!invert && ba.get(i)) || (invert && !ba.get(i))) {
+ if (start < 0)
+ start = i;
+ } else {
+ if (start >= 0) {
+ if (i - start == 1) {
+ eqExprs.push_back(createEqExpr((*index_it), ConstantExpr::create(start*width, idxWidth)));
+ } else {
+ // create range expr
+ ref<Expr> s = ConstantExpr::create(start*width, idxWidth);
+ ref<Expr> e = ConstantExpr::create((i-1)*width, idxWidth);
+ eqExprs.push_back(createRangeExpr((*index_it), s, e));
+ }
+ start = -1;
+ }
+ }
+ }
+ if (start >= 0) {
+ if ((arr->size/width) - start == 1) {
+ eqExprs.push_back(createEqExpr((*index_it), ConstantExpr::create(start*width, idxWidth)));
+ } else {
+ // create range expr
+ ref<Expr> s = ConstantExpr::create(start*width, idxWidth);
+ ref<Expr> e = ConstantExpr::create(((arr->size/width) - 1)*width, idxWidth);
+ eqExprs.push_back(createRangeExpr((*index_it), s, e));
+ }
+ }
+ }
+ }
+ }
+ if (eqExprs.size() == 0) {
+ return notFound;
+ } else if (eqExprs.size() == 1) {
+ if (isa<AndExpr>(eqExprs[0])) {
+ return EqExpr::alloc(ConstantExpr::alloc(invert ? 0 : 1, (eqExprs[0])->getWidth()),
+ eqExprs[0]);
+ }
+ return invert ? NotExpr::alloc(eqExprs[0]) : eqExprs[0];
+ } else {
+    // We found at least two indexes; combine them with an OrExpr (e.g. k==i|k==j)
+ ref<Expr> orExpr = concatenateOrExpr(eqExprs.begin(), eqExprs.end());
+ // Create Eq expression for true branch
+ return EqExpr::alloc(ConstantExpr::alloc(invert ? 0 : 1, (orExpr)->getWidth()), orExpr);
+ }
+}
+
+ref<Expr> ExprRewriter::concatenateOrExpr(
+ const std::vector<ref<Expr> >::const_iterator begin,
+ const std::vector<ref<Expr> >::const_iterator end) {
+ if (begin + 2 == end) {
+ return OrExpr::alloc(ZExtExpr::alloc((*begin), Expr::Int32),
+ ZExtExpr::alloc((*(begin + 1)), Expr::Int32));
+ } else {
+ return OrExpr::alloc(ZExtExpr::alloc((*begin), Expr::Int32),
+ concatenateOrExpr(begin + 1, end));
+ }
+}
+
+ref<Expr> ExprRewriter::createEqExpr(const ref<Expr> &index,
+ const ref<Expr> &valIndex) {
+ return EqExpr::alloc(valIndex, index);
+}
+
+ref<Expr> ExprRewriter::createRangeExpr(const ref<Expr> &index,
+ const ref<Expr> &valStart,
+ const ref<Expr> &valEnd) {
+ return AndExpr::alloc(UleExpr::alloc(valStart, index),
+ UleExpr::alloc(index, valEnd));
+}
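The rewriter above coalesces runs of consecutive matching indexes into a
single range expression (valStart <= k <= valEnd) and, when more than half
of the cells match, inverts the bitmask so the resulting disjunction stays
small. A standalone sketch of the run-coalescing loop with an assumed
bitmask (not taken from this commit):

    // Matching cells {0,1,2,5} out of 8 collapse into one range
    // expression (0 <= k <= 2) plus one equality (k == 5).
    #include <cassert>

    int main() {
      const bool ba[8] = {true, true, true, false, false, true, false, false};
      int start = -1, ranges = 0, singles = 0;
      for (int i = 0; i <= 8; ++i) {
        bool bit = (i < 8) && ba[i]; // i == 8 flushes a trailing run
        if (bit) {
          if (start < 0)
            start = i;
        } else if (start >= 0) {
          if (i - start == 1)
            ++singles; // would emit createEqExpr(k, start)
          else
            ++ranges;  // would emit createRangeExpr(k, start, i - 1)
          start = -1;
        }
      }
      assert(ranges == 1 && singles == 1);
      return 0;
    }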
diff --git a/lib/Expr/ArrayExprVisitor.cpp b/lib/Expr/ArrayExprVisitor.cpp
new file mode 100644
index 00000000..26ddbb95
--- /dev/null
+++ b/lib/Expr/ArrayExprVisitor.cpp
@@ -0,0 +1,171 @@
+#include "klee/util/ArrayExprVisitor.h"
+
+#include "klee/Internal/Support/ErrorHandling.h"
+
+#include <algorithm>
+
+using namespace klee;
+
+//------------------------------ HELPER FUNCTIONS ---------------------------//
+bool ArrayExprHelper::isReadExprAtOffset(ref<Expr> e, const ReadExpr *base,
+ ref<Expr> offset) {
+ const ReadExpr *re = dyn_cast<ReadExpr>(e.get());
+ if (!re || (re->getWidth() != Expr::Int8))
+ return false;
+ return SubExpr::create(re->index, base->index) == offset;
+}
+
+ReadExpr *ArrayExprHelper::hasOrderedReads(const ConcatExpr &ce) {
+ const ReadExpr *base = dyn_cast<ReadExpr>(ce.getKid(0));
+
+ // right now, all Reads are byte reads but some
+ // transformations might change this
+ if (!base || base->getWidth() != Expr::Int8)
+ return NULL;
+
+ // Get stride expr in proper index width.
+ Expr::Width idxWidth = base->index->getWidth();
+ ref<Expr> strideExpr = ConstantExpr::alloc(-1, idxWidth);
+ ref<Expr> offset = ConstantExpr::create(0, idxWidth);
+
+ ref<Expr> e = ce.getKid(1);
+
+ // concat chains are unbalanced to the right
+ while (e->getKind() == Expr::Concat) {
+ offset = AddExpr::create(offset, strideExpr);
+ if (!isReadExprAtOffset(e->getKid(0), base, offset))
+ return NULL;
+ e = e->getKid(1);
+ }
+
+ offset = AddExpr::create(offset, strideExpr);
+ if (!isReadExprAtOffset(e, base, offset))
+ return NULL;
+
+ return cast<ReadExpr>(e.get());
+}
+
+//-------------------------- INDEX-BASED OPTIMIZATION -----------------------//
+ExprVisitor::Action
+ConstantArrayExprVisitor::visitConcat(const ConcatExpr &ce) {
+ ReadExpr *base = ArrayExprHelper::hasOrderedReads(ce);
+ if (base) {
+ // It is an interesting ReadExpr if it contains a concrete array
+ // that is read at a symbolic index
+ if (base->updates.root->isConstantArray() &&
+ !isa<ConstantExpr>(base->index)) {
+ for (const UpdateNode *un = base->updates.head; un; un = un->next) {
+ if (!isa<ConstantExpr>(un->index) || !isa<ConstantExpr>(un->value)) {
+ incompatible = true;
+ return Action::skipChildren();
+ }
+ }
+ IndexCompatibilityExprVisitor compatible;
+ compatible.visit(base->index);
+ if (compatible.isCompatible() &&
+ addedIndexes.find(base->index.get()->hash()) == addedIndexes.end()) {
+ if (arrays.find(base->updates.root) == arrays.end()) {
+ arrays.insert(
+ std::make_pair(base->updates.root, std::vector<ref<Expr> >()));
+ } else {
+ // Another possible index to resolve, currently unsupported
+ incompatible = true;
+ return Action::skipChildren();
+ }
+ arrays.find(base->updates.root)->second.push_back(base->index);
+ addedIndexes.insert(base->index.get()->hash());
+ } else if (compatible.hasInnerReads()) {
+ // This Read has an inner Read, we want to optimize the inner one
+ // to create a cascading effect during assignment evaluation
+ return Action::doChildren();
+ }
+ return Action::skipChildren();
+ }
+ }
+ return Action::doChildren();
+}
+ExprVisitor::Action ConstantArrayExprVisitor::visitRead(const ReadExpr &re) {
+ // It is an interesting ReadExpr if it contains a concrete array
+ // that is read at a symbolic index
+ if (re.updates.root->isConstantArray() && !isa<ConstantExpr>(re.index)) {
+ for (const UpdateNode *un = re.updates.head; un; un = un->next) {
+ if (!isa<ConstantExpr>(un->index) || !isa<ConstantExpr>(un->value)) {
+ incompatible = true;
+ return Action::skipChildren();
+ }
+ }
+ IndexCompatibilityExprVisitor compatible;
+ compatible.visit(re.index);
+ if (compatible.isCompatible() &&
+ addedIndexes.find(re.index.get()->hash()) == addedIndexes.end()) {
+ if (arrays.find(re.updates.root) == arrays.end()) {
+ arrays.insert(
+ std::make_pair(re.updates.root, std::vector<ref<Expr> >()));
+ } else {
+ // Another possible index to resolve, currently unsupported
+ incompatible = true;
+ return Action::skipChildren();
+ }
+ arrays.find(re.updates.root)->second.push_back(re.index);
+ addedIndexes.insert(re.index.get()->hash());
+ } else if (compatible.hasInnerReads()) {
+ // This Read has an inner Read, we want to optimize the inner one
+ // to create a cascading effect during assignment evaluation
+ return Action::doChildren();
+ }
+ return Action::skipChildren();
+ } else if (re.updates.root->isSymbolicArray()) {
+ incompatible = true;
+ }
+
+ return Action::doChildren();
+}
+
+ExprVisitor::Action
+IndexCompatibilityExprVisitor::visitRead(const ReadExpr &re) {
+ if (re.updates.head) {
+ compatible = false;
+ return Action::skipChildren();
+ } else if (re.updates.root->isConstantArray() &&
+ !isa<ConstantExpr>(re.index)) {
+ compatible = false;
+ inner = true;
+ return Action::skipChildren();
+ }
+ return Action::doChildren();
+}
+ExprVisitor::Action IndexCompatibilityExprVisitor::visitURem(const URemExpr &) {
+ compatible = false;
+ return Action::skipChildren();
+}
+ExprVisitor::Action IndexCompatibilityExprVisitor::visitSRem(const SRemExpr &) {
+ compatible = false;
+ return Action::skipChildren();
+}
+ExprVisitor::Action IndexCompatibilityExprVisitor::visitOr(const OrExpr &) {
+ compatible = false;
+ return Action::skipChildren();
+}
+
+ExprVisitor::Action
+IndexTransformationExprVisitor::visitConcat(const ConcatExpr &ce) {
+ if (ReadExpr *re = dyn_cast<ReadExpr>(ce.getKid(0))) {
+ if (re->updates.root->hash() == array->hash() && width < ce.getWidth()) {
+ if (width == Expr::InvalidWidth)
+ width = ce.getWidth();
+ }
+ } else if (ReadExpr *re = dyn_cast<ReadExpr>(ce.getKid(1))) {
+ if (re->updates.root->hash() == array->hash() && width < ce.getWidth()) {
+ if (width == Expr::InvalidWidth)
+ width = ce.getWidth();
+ }
+ }
+ return Action::doChildren();
+}
+ExprVisitor::Action IndexTransformationExprVisitor::visitMul(const MulExpr &e) {
+ if (isa<ConstantExpr>(e.getKid(0)))
+ mul = e.getKid(0);
+  else if (isa<ConstantExpr>(e.getKid(1)))
+ mul = e.getKid(1);
+ return Action::doChildren();
+}
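hasOrderedReads recognizes a right-leaning Concat of byte-wide reads at
consecutive indexes as a single wide read. A standalone sketch of the
offset arithmetic it checks, with an assumed concrete index value (not
taken from this commit):

    // A little-endian 4-byte load at symbolic index k reaches the visitor
    // as Concat(Read(k+3), Concat(Read(k+2), Concat(Read(k+1), Read(k)))).
    // Starting from the first kid, each following byte must sit exactly
    // one index lower (strideExpr == -1); the innermost Read is returned
    // as the base of the wide read.
    #include <cassert>

    int main() {
      const int k = 10;                                 // assumed index value
      const int kidIndex[4] = {k + 3, k + 2, k + 1, k}; // concat kid order
      int offset = 0;
      for (int i = 1; i < 4; ++i) {
        offset += -1;                                // AddExpr(offset, stride)
        assert(kidIndex[i] - kidIndex[0] == offset); // isReadExprAtOffset
      }
      return 0;
    }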
diff --git a/lib/Expr/AssignmentGenerator.cpp b/lib/Expr/AssignmentGenerator.cpp
new file mode 100644
index 00000000..755263f0
--- /dev/null
+++ b/lib/Expr/AssignmentGenerator.cpp
@@ -0,0 +1,327 @@
+#include "klee/AssignmentGenerator.h"
+
+#include "llvm/Support/raw_ostream.h"
+
+using namespace klee;
+
+bool AssignmentGenerator::generatePartialAssignment(const ref<Expr> &e,
+ ref<Expr> &val,
+ Assignment *&a) {
+ return helperGenerateAssignment(e, val, a, e->getWidth(), false);
+}
+
+bool AssignmentGenerator::helperGenerateAssignment(const ref<Expr> &e,
+ ref<Expr> &val,
+ Assignment *&a,
+ Expr::Width width,
+ bool sign) {
+ Expr &ep = *e.get();
+ switch (ep.getKind()) {
+
+ // ARITHMETIC
+ case Expr::Add: {
+ // val = val - kid
+ ref<Expr> kid_val;
+ ref<Expr> kid_expr;
+ if (isa<ConstantExpr>(ep.getKid(0))) {
+ kid_val = ep.getKid(0);
+ kid_expr = ep.getKid(1);
+ } else if (isa<ConstantExpr>(ep.getKid(1))) {
+ kid_val = ep.getKid(1);
+ kid_expr = ep.getKid(0);
+ } else {
+ return false;
+ }
+ if (!ExtractExpr::create(kid_val, kid_val.get()->getWidth() - 1, 1).get()->isZero()) {
+ // FIXME: really bad hack to support those cases in which KLEE creates
+ // Add expressions with negative values
+ val = createAddExpr(kid_val, val);
+ } else {
+ val = createSubExpr(kid_val, val);
+ }
+ return helperGenerateAssignment(kid_expr, val, a, width, sign);
+ }
+ case Expr::Sub: {
+ // val = val + kid
+ ref<Expr> kid_val;
+ ref<Expr> kid_expr;
+ if (isa<ConstantExpr>(ep.getKid(0))) {
+ kid_val = ep.getKid(0);
+ kid_expr = ep.getKid(1);
+ } else if (isa<ConstantExpr>(ep.getKid(1))) {
+ kid_val = ep.getKid(1);
+ kid_expr = ep.getKid(0);
+ } else {
+ return false;
+ }
+ val = createAddExpr(kid_val, val);
+ return helperGenerateAssignment(kid_expr, val, a, width, sign);
+ }
+ case Expr::Mul: {
+ // val = val / kid (check for sign)
+ ref<Expr> kid_val;
+ ref<Expr> kid_expr;
+ if (isa<ConstantExpr>(ep.getKid(0))) {
+ kid_val = ep.getKid(0);
+ kid_expr = ep.getKid(1);
+ } else if (isa<ConstantExpr>(ep.getKid(1))) {
+ kid_val = ep.getKid(1);
+ kid_expr = ep.getKid(0);
+ } else {
+ return false;
+ }
+
+ if (kid_val.get()->isZero()) {
+ return false;
+ } else if (!createDivRem(kid_val, val, sign)->isZero()) {
+ return false;
+ }
+ val = createDivExpr(kid_val, val, sign);
+ return helperGenerateAssignment(kid_expr, val, a, width, sign);
+ }
+ case Expr::UDiv:
+ // val = val * kid
+    // no break: falls through to the SDiv case
+ case Expr::SDiv: {
+ // val = val * kid
+ ref<Expr> kid_val;
+ ref<Expr> kid_expr;
+ if (isa<ConstantExpr>(ep.getKid(0))) {
+ kid_val = ep.getKid(0);
+ kid_expr = ep.getKid(1);
+ } else if (isa<ConstantExpr>(ep.getKid(1))) {
+ kid_val = ep.getKid(1);
+ kid_expr = ep.getKid(0);
+ } else {
+ return false;
+ }
+ val = createMulExpr(kid_val, val);
+ return helperGenerateAssignment(kid_expr, val, a, width, sign);
+ }
+
+ // LOGICAL
+ case Expr::LShr: {
+ if (!isa<ConstantExpr>(ep.getKid(1))) {
+ return false;
+ }
+ ref<Expr> kid_val = ep.getKid(1);
+ val = createShlExpr(val, kid_val);
+ return helperGenerateAssignment(ep.getKid(0), val, a, width, sign);
+ }
+ case Expr::Shl: {
+ if (!isa<ConstantExpr>(ep.getKid(1))) {
+ return false;
+ }
+ ref<Expr> kid_val = ep.getKid(1);
+ val = createLShrExpr(val, kid_val);
+ return helperGenerateAssignment(ep.getKid(0), val, a, width, sign);
+ }
+ case Expr::Not: {
+ return helperGenerateAssignment(ep.getKid(0), val, a, width, sign);
+ }
+ case Expr::And: {
+ // val = val & kid
+ ref<Expr> kid_val;
+ ref<Expr> kid_expr;
+ if (isa<ConstantExpr>(ep.getKid(0))) {
+ kid_val = ep.getKid(0);
+ kid_expr = ep.getKid(1);
+ } else if (isa<ConstantExpr>(ep.getKid(1))) {
+ kid_val = ep.getKid(1);
+ kid_expr = ep.getKid(0);
+ } else {
+ return false;
+ }
+ val = createAndExpr(kid_val, val);
+ return helperGenerateAssignment(kid_expr, val, a, width, sign);
+ }
+
+ // CASTING
+ case Expr::ZExt: {
+ val = createExtractExpr(ep.getKid(0), val);
+ return helperGenerateAssignment(ep.getKid(0), val, a, width, false);
+ }
+ case Expr::SExt: {
+ val = createExtractExpr(ep.getKid(0), val);
+ return helperGenerateAssignment(ep.getKid(0), val, a, width, true);
+ }
+
+ // SPECIAL
+ case Expr::Concat: {
+ ReadExpr *base = hasOrderedReads(&ep);
+ if (base) {
+ return helperGenerateAssignment(ref<Expr>(base), val, a, ep.getWidth(),
+ sign);
+ } else {
+ klee_warning("Not supported");
+ ep.printKind(llvm::errs(), ep.getKind());
+ return false;
+ }
+ }
+ case Expr::Extract: {
+ val = createExtendExpr(ep.getKid(0), val);
+ return helperGenerateAssignment(ep.getKid(0), val, a, width, sign);
+ }
+
+ // READ
+ case Expr::Read: {
+ ReadExpr &re = static_cast<ReadExpr &>(ep);
+ if (isa<ConstantExpr>(re.index)) {
+ if (re.updates.root->isSymbolicArray()) {
+ ConstantExpr &index = static_cast<ConstantExpr &>(*re.index.get());
+ std::vector<unsigned char> c_value =
+ getIndexedValue(getByteValue(val), index, re.updates.root->size);
+ if (c_value.size() == 0) {
+ return false;
+ }
+ if (a->bindings.find(re.updates.root) == a->bindings.end()) {
+ a->bindings.insert(std::make_pair(re.updates.root, c_value));
+ } else {
+ return false;
+ }
+ }
+ } else {
+ return helperGenerateAssignment(re.index, val, a, width, sign);
+ }
+ return true;
+ }
+ default:
+ std::string type_str;
+ llvm::raw_string_ostream rso(type_str);
+ ep.printKind(rso, ep.getKind());
+ klee_warning("%s is not supported", rso.str().c_str());
+ return false;
+ }
+}
+
+bool AssignmentGenerator::isReadExprAtOffset(ref<Expr> e, const ReadExpr *base,
+ ref<Expr> offset) {
+ const ReadExpr *re = dyn_cast<ReadExpr>(e.get());
+ if (!re || (re->getWidth() != Expr::Int8))
+ return false;
+ return SubExpr::create(re->index, base->index) == offset;
+}
+
+ReadExpr *AssignmentGenerator::hasOrderedReads(ref<Expr> e) {
+ assert(e->getKind() == Expr::Concat);
+
+ const ReadExpr *base = dyn_cast<ReadExpr>(e->getKid(0));
+
+ // right now, all Reads are byte reads but some
+ // transformations might change this
+ if (!base || base->getWidth() != Expr::Int8)
+ return NULL;
+
+ // Get stride expr in proper index width.
+ Expr::Width idxWidth = base->index->getWidth();
+ ref<Expr> strideExpr = ConstantExpr::alloc(-1, idxWidth);
+ ref<Expr> offset = ConstantExpr::create(0, idxWidth);
+
+ e = e->getKid(1);
+
+ // concat chains are unbalanced to the right
+ while (e->getKind() == Expr::Concat) {
+ offset = AddExpr::create(offset, strideExpr);
+ if (!isReadExprAtOffset(e->getKid(0), base, offset))
+ return NULL;
+ e = e->getKid(1);
+ }
+
+ offset = AddExpr::create(offset, strideExpr);
+ if (!isReadExprAtOffset(e, base, offset))
+ return NULL;
+
+ return cast<ReadExpr>(e.get());
+}
+
+ref<Expr> AssignmentGenerator::createSubExpr(const ref<Expr> &l, ref<Expr> &r) {
+ return SubExpr::create(r, l);
+}
+
+ref<Expr> AssignmentGenerator::createAddExpr(const ref<Expr> &l, ref<Expr> &r) {
+ return AddExpr::create(r, l);
+}
+
+ref<Expr> AssignmentGenerator::createMulExpr(const ref<Expr> &l, ref<Expr> &r) {
+ return MulExpr::create(r, l);
+}
+
+ref<Expr> AssignmentGenerator::createDivRem(const ref<Expr> &l, ref<Expr> &r,
+ bool sign) {
+ if (sign)
+ return SRemExpr::create(r, l);
+ else
+ return URemExpr::create(r, l);
+}
+
+ref<Expr> AssignmentGenerator::createDivExpr(const ref<Expr> &l, ref<Expr> &r,
+ bool sign) {
+ if (sign)
+ return SDivExpr::create(r, l);
+ else
+ return UDivExpr::create(r, l);
+}
+
+ref<Expr> AssignmentGenerator::createShlExpr(const ref<Expr> &l, ref<Expr> &r) {
+ return ShlExpr::create(r, l);
+}
+
+ref<Expr> AssignmentGenerator::createLShrExpr(const ref<Expr> &l,
+ ref<Expr> &r) {
+ return LShrExpr::create(r, l);
+}
+
+ref<Expr> AssignmentGenerator::createAndExpr(const ref<Expr> &l, ref<Expr> &r) {
+ return AndExpr::create(r, l);
+}
+
+ref<Expr> AssignmentGenerator::createExtractExpr(const ref<Expr> &l,
+ ref<Expr> &r) {
+ return ExtractExpr::create(r, 0, l.get()->getWidth());
+}
+
+ref<Expr> AssignmentGenerator::createExtendExpr(const ref<Expr> &l,
+ ref<Expr> &r) {
+ if (l.get()->getKind() == Expr::ZExt) {
+ return ZExtExpr::create(r, l.get()->getWidth());
+ } else {
+ return SExtExpr::create(r, l.get()->getWidth());
+ }
+}
+
+std::vector<unsigned char> AssignmentGenerator::getByteValue(ref<Expr> &val) {
+ std::vector<unsigned char> toReturn;
+ if (ConstantExpr *value = dyn_cast<ConstantExpr>(val)) {
+ for (unsigned w = 0; w < val.get()->getWidth() / 8; ++w) {
+ ref<ConstantExpr> byte = value->Extract(w * 8, Expr::Int8);
+ unsigned char mem_val;
+ byte->toMemory(&mem_val);
+ toReturn.push_back(mem_val);
+ }
+ }
+ return toReturn;
+}
+
+std::vector<unsigned char>
+AssignmentGenerator::getIndexedValue(const std::vector<unsigned char> &c_val,
+ ConstantExpr &index,
+ const unsigned int size) {
+ std::vector<unsigned char> toReturn;
+ const llvm::APInt index_val = index.getAPValue();
+  assert(!index_val.isNegative() && "Negative index");
+ const uint64_t id = index_val.getZExtValue() / c_val.size();
+ uint64_t arraySize = size / c_val.size();
+ for (uint64_t i = 0; i < arraySize; ++i) {
+ if (i == id) {
+ for (unsigned arr_i = 0; arr_i < c_val.size(); ++arr_i) {
+ toReturn.push_back(c_val[arr_i]);
+ }
+ } else {
+ for (unsigned arr_i = 0; arr_i < c_val.size(); ++arr_i) {
+ toReturn.push_back(0);
+ }
+ }
+ }
+
+ return toReturn;
+}
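helperGenerateAssignment inverts the arithmetic wrapped around a symbolic
read, transforming "f(read) == val" step by step into "read == val'" before
binding the concrete bytes. A standalone worked example with assumed
numbers (not taken from this commit):

    // To satisfy (k * 4) + 2 == 10, the generator peels the Add
    // (val = 10 - 2 = 8), checks that the Mul divides it exactly
    // (8 % 4 == 0, the createDivRem test), peels the Mul (val = 8 / 4 = 2)
    // and finally binds the read k to 2 in the partial assignment.
    #include <cassert>

    int main() {
      unsigned val = 10;
      val -= 2;             // inverse of the Add kid (createSubExpr)
      assert(val % 4 == 0); // createDivRem must be zero, else no solution
      val /= 4;             // inverse of the Mul kid (createDivExpr)
      const unsigned k = val;
      assert(k * 4 + 2 == 10);
      return 0;
    }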
diff --git a/lib/Expr/CMakeLists.txt b/lib/Expr/CMakeLists.txt
index 4c63fe5b..74407295 100644
--- a/lib/Expr/CMakeLists.txt
+++ b/lib/Expr/CMakeLists.txt
@@ -8,7 +8,11 @@
 #===------------------------------------------------------------------------===#
 klee_add_component(kleaverExpr
   ArrayCache.cpp
+  ArrayExprOptimizer.cpp
+  ArrayExprRewriter.cpp
+  ArrayExprVisitor.cpp
   Assigment.cpp
+  AssignmentGenerator.cpp
   Constraints.cpp
   ExprBuilder.cpp
   Expr.cpp