//===-- Memory.cpp --------------------------------------------------------===//
//
//                     The KLEE Symbolic Virtual Machine
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "Common.h"

#include "Memory.h"

#include "Context.h"
#include "klee/Expr.h"
#include "klee/Solver.h"
#include "klee/util/BitArray.h"

#include "ObjectHolder.h"
#include "MemoryManager.h"

#if LLVM_VERSION_CODE >= LLVM_VERSION(3, 3)
#include <llvm/IR/Function.h>
#include <llvm/IR/Instruction.h>
#include <llvm/IR/Value.h>
#else
#include <llvm/Function.h>
#include <llvm/Instruction.h>
#include <llvm/Value.h>
#endif
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"

#include <cassert>
#include <cstring>
#include <iostream>

using namespace llvm;
using namespace klee;

namespace {
  cl::opt<bool>
  UseConstantArrays("use-constant-arrays",
                    cl::init(true));
}

/***/

ObjectHolder::ObjectHolder(const ObjectHolder &b) : os(b.os) {
  if (os) ++os->refCount;
}

ObjectHolder::ObjectHolder(ObjectState *_os) : os(_os) {
  if (os) ++os->refCount;
}

ObjectHolder::~ObjectHolder() {
  if (os && --os->refCount==0) delete os;
}

ObjectHolder &ObjectHolder::operator=(const ObjectHolder &b) {
  if (b.os) ++b.os->refCount;
  if (os && --os->refCount==0) delete os;
  os = b.os;
  return *this;
}

/***/

int MemoryObject::counter = 0;

MemoryObject::~MemoryObject() {
  if (parent)
    parent->markFreed(this);
}

void MemoryObject::getAllocInfo(std::string &result) const {
  llvm::raw_string_ostream info(result);

  info << "MO" << id << "[" << size << "]";

  if (allocSite) {
    info << " allocated at ";
    if (const Instruction *i = dyn_cast<Instruction>(allocSite)) {
      info << i->getParent()->getParent()->getName() << "():";
      info << *i;
    } else if (const GlobalValue *gv = dyn_cast<GlobalValue>(allocSite)) {
      info << "global:" << gv->getName();
    } else {
      info << "value:" << *allocSite;
    }
  } else {
    info << " (no allocation info)";
  }

  info.flush();
}

/***/

ObjectState::ObjectState(const MemoryObject *mo)
  : copyOnWriteOwner(0),
    refCount(0),
    object(mo),
    concreteStore(new uint8_t[mo->size]),
    concreteMask(0),
    flushMask(0),
    knownSymbolics(0),
    updates(0, 0),
    size(mo->size),
    readOnly(false) {
  mo->refCount++;
  if (!UseConstantArrays) {
    // FIXME: Leaked.
    static unsigned id = 0;
    const Array *array = new Array("tmp_arr" + llvm::utostr(++id), size);
    updates = UpdateList(array, 0);
  }
  memset(concreteStore, 0, size);
}

ObjectState::ObjectState(const MemoryObject *mo, const Array *array)
  : copyOnWriteOwner(0),
    refCount(0),
    object(mo),
    concreteStore(new uint8_t[mo->size]),
    concreteMask(0),
    flushMask(0),
    knownSymbolics(0),
    updates(array, 0),
    size(mo->size),
    readOnly(false) {
  mo->refCount++;
  makeSymbolic();
  memset(concreteStore, 0, size);
}

ObjectState::ObjectState(const ObjectState &os)
  : copyOnWriteOwner(0),
    refCount(0),
    object(os.object),
    concreteStore(new uint8_t[os.size]),
    concreteMask(os.concreteMask ? new BitArray(*os.concreteMask, os.size) : 0),
    flushMask(os.flushMask ? new BitArray(*os.flushMask, os.size) : 0),
    knownSymbolics(0),
    updates(os.updates),
    size(os.size),
    readOnly(false) {
  assert(!os.readOnly && "no need to copy read only object?");
  if (object)
    object->refCount++;

  if (os.knownSymbolics) {
    knownSymbolics = new ref<Expr>[size];
    for (unsigned i=0; i<size; i++)
      knownSymbolics[i] = os.knownSymbolics[i];
  }

  memcpy(concreteStore, os.concreteStore, size*sizeof(*concreteStore));
}

ObjectState::~ObjectState() {
  if (concreteMask) delete concreteMask;
  if (flushMask) delete flushMask;
  if (knownSymbolics) delete[] knownSymbolics;
  delete[] concreteStore;

  if (object) {
    assert(object->refCount > 0);
    object->refCount--;
    if (object->refCount == 0) {
      delete object;
    }
  }
}

/***/
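// A note on the function below: the constant array backing an ObjectState is
// created lazily. getUpdates() folds the prefix of the write list whose
// indices and values are all constant directly into the initial contents of a
// fresh constant array; only the remaining writes are re-applied as
// UpdateNodes, so callers see a root array plus a (hopefully short) symbolic
// update tail.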
const UpdateList &ObjectState::getUpdates() const {
  // Constant arrays are created lazily.
  if (!updates.root) {
    // Collect the list of writes, with the oldest writes first.

    // FIXME: We should be able to do this more efficiently, we just need to be
    // careful to get the interaction with the cache right. In particular we
    // should avoid creating UpdateNode instances we never use.
    unsigned NumWrites = updates.head ? updates.head->getSize() : 0;
    std::vector< std::pair< ref<Expr>, ref<Expr> > > Writes(NumWrites);
    const UpdateNode *un = updates.head;
    for (unsigned i = NumWrites; i != 0; un = un->next) {
      --i;
      Writes[i] = std::make_pair(un->index, un->value);
    }

    std::vector< ref<ConstantExpr> > Contents(size);

    // Initialize to zeros.
    for (unsigned i = 0, e = size; i != e; ++i)
      Contents[i] = ConstantExpr::create(0, Expr::Int8);

    // Pull off as many concrete writes as we can.
    unsigned Begin = 0, End = Writes.size();
    for (; Begin != End; ++Begin) {
      // Push concrete writes into the constant array.
      ConstantExpr *Index = dyn_cast<ConstantExpr>(Writes[Begin].first);
      if (!Index)
        break;

      ConstantExpr *Value = dyn_cast<ConstantExpr>(Writes[Begin].second);
      if (!Value)
        break;

      Contents[Index->getZExtValue()] = Value;
    }

    // FIXME: We should unique these, there is no good reason to create multiple
    // ones.

    // Start a new update list.
    // FIXME: Leaked.
    static unsigned id = 0;
    const Array *array = new Array("const_arr" + llvm::utostr(++id), size,
                                   &Contents[0],
                                   &Contents[0] + Contents.size());
    updates = UpdateList(array, 0);

    // Apply the remaining (non-constant) writes.
    for (; Begin != End; ++Begin)
      updates.extend(Writes[Begin].first, Writes[Begin].second);
  }

  return updates;
}

void ObjectState::makeConcrete() {
  if (concreteMask) delete concreteMask;
  if (flushMask) delete flushMask;
  if (knownSymbolics) delete[] knownSymbolics;
  concreteMask = 0;
  flushMask = 0;
  knownSymbolics = 0;
}

void ObjectState::makeSymbolic() {
  assert(!updates.head &&
         "XXX makeSymbolic of objects with symbolic values is unsupported");

  // XXX simplify this, can just delete various arrays I guess
  for (unsigned i=0; i<size; i++) {
    markByteSymbolic(i);
    setKnownSymbolic(i, 0);
    markByteFlushed(i);
  }
}

void ObjectState::initializeToZero() {
  makeConcrete();
  memset(concreteStore, 0, size);
}

void ObjectState::initializeToRandom() {
  makeConcrete();
  for (unsigned i=0; i<size; i++) {
    concreteStore[i] = 0xAB;
  }
}

/*
Cache Invariants
--
isByteKnownSymbolic(i) => !isByteConcrete(i)
isByteConcrete(i) => !isByteKnownSymbolic(i)
!isByteFlushed(i) => (isByteConcrete(i) || isByteKnownSymbolic(i))
*/

void ObjectState::fastRangeCheckOffset(ref<Expr> offset,
                                       unsigned *base_r,
                                       unsigned *size_r) const {
  *base_r = 0;
  *size_r = size;
}

void ObjectState::flushRangeForRead(unsigned rangeBase,
                                    unsigned rangeSize) const {
  if (!flushMask) flushMask = new BitArray(size, true);

  for (unsigned offset=rangeBase; offset<rangeBase+rangeSize; offset++) {
    if (!isByteFlushed(offset)) {
      if (isByteConcrete(offset)) {
        updates.extend(ConstantExpr::create(offset, Expr::Int32),
                       ConstantExpr::create(concreteStore[offset], Expr::Int8));
      } else {
        assert(isByteKnownSymbolic(offset) && "invalid bit set in flushMask");
        updates.extend(ConstantExpr::create(offset, Expr::Int32),
                       knownSymbolics[offset]);
      }

      flushMask->unset(offset);
    }
  }
}

void ObjectState::flushRangeForWrite(unsigned rangeBase,
                                     unsigned rangeSize) {
  if (!flushMask) flushMask = new BitArray(size, true);

  for (unsigned offset=rangeBase; offset<rangeBase+rangeSize; offset++) {
    if (!isByteFlushed(offset)) {
      if (isByteConcrete(offset)) {
        updates.extend(ConstantExpr::create(offset, Expr::Int32),
                       ConstantExpr::create(concreteStore[offset], Expr::Int8));
        markByteSymbolic(offset);
      } else {
        assert(isByteKnownSymbolic(offset) && "invalid bit set in flushMask");
        updates.extend(ConstantExpr::create(offset, Expr::Int32),
                       knownSymbolics[offset]);
        setKnownSymbolic(offset, 0);
      }

      flushMask->unset(offset);
    } else {
      // flushed bytes that are written over still need
      // to be marked out
      if (isByteConcrete(offset)) {
        markByteSymbolic(offset);
      } else if (isByteKnownSymbolic(offset)) {
        setKnownSymbolic(offset, 0);
      }
    }
  }
}

bool ObjectState::isByteConcrete(unsigned offset) const {
  return !concreteMask || concreteMask->get(offset);
}

bool ObjectState::isByteFlushed(unsigned offset) const {
  return flushMask && !flushMask->get(offset);
}

bool ObjectState::isByteKnownSymbolic(unsigned offset) const {
  return knownSymbolics && knownSymbolics[offset].get();
}

void ObjectState::markByteConcrete(unsigned offset) {
  if (concreteMask)
    concreteMask->set(offset);
}

void ObjectState::markByteSymbolic(unsigned offset) {
  if (!concreteMask)
    concreteMask = new BitArray(size, true);
  concreteMask->unset(offset);
}

void ObjectState::markByteUnflushed(unsigned offset) {
  if (flushMask)
    flushMask->set(offset);
}

void ObjectState::markByteFlushed(unsigned offset) {
  if (!flushMask) {
    flushMask = new BitArray(size, false);
  } else {
    flushMask->unset(offset);
  }
}
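// Each byte of an ObjectState is in exactly one of three states, tracked by
// the masks manipulated above (see the Cache Invariants comment): concrete
// (its value lives in concreteStore), known-symbolic (its value lives in
// knownSymbolics), or flushed (its value has been pushed into the update
// list). The markByte* helpers above, setKnownSymbolic below, and the
// flushRange* routines are what keep those invariants intact.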
void ObjectState::setKnownSymbolic(unsigned offset,
                                   Expr *value /* can be null */) {
  if (knownSymbolics) {
    knownSymbolics[offset] = value;
  } else {
    if (value) {
      knownSymbolics = new ref<Expr>[size];
      knownSymbolics[offset] = value;
    }
  }
}

/***/

ref<Expr> ObjectState::read8(unsigned offset) const {
  if (isByteConcrete(offset)) {
    return ConstantExpr::create(concreteStore[offset], Expr::Int8);
  } else if (isByteKnownSymbolic(offset)) {
    return knownSymbolics[offset];
  } else {
    assert(isByteFlushed(offset) && "unflushed byte without cache value");

    return ReadExpr::create(getUpdates(),
                            ConstantExpr::create(offset, Expr::Int32));
  }
}

ref<Expr> ObjectState::read8(ref<Expr> offset) const {
  assert(!isa<ConstantExpr>(offset) &&
         "constant offset passed to symbolic read8");
  unsigned base, size;
  fastRangeCheckOffset(offset, &base, &size);
  flushRangeForRead(base, size);

  if (size>4096) {
    std::string allocInfo;
    object->getAllocInfo(allocInfo);
    klee_warning_once(0, "flushing %d bytes on read, may be slow and/or crash: %s",
                      size,
                      allocInfo.c_str());
  }

  return ReadExpr::create(getUpdates(), ZExtExpr::create(offset, Expr::Int32));
}

void ObjectState::write8(unsigned offset, uint8_t value) {
  //assert(read_only == false && "writing to read-only object!");
  concreteStore[offset] = value;
  setKnownSymbolic(offset, 0);

  markByteConcrete(offset);
  markByteUnflushed(offset);
}

void ObjectState::write8(unsigned offset, ref<Expr> value) {
  // can happen when ExtractExpr special cases
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(value)) {
    write8(offset, (uint8_t) CE->getZExtValue(8));
  } else {
    setKnownSymbolic(offset, value.get());

    markByteSymbolic(offset);
    markByteUnflushed(offset);
  }
}

void ObjectState::write8(ref<Expr> offset, ref<Expr> value) {
  assert(!isa<ConstantExpr>(offset) &&
         "constant offset passed to symbolic write8");
  unsigned base, size;
  fastRangeCheckOffset(offset, &base, &size);
  flushRangeForWrite(base, size);

  if (size>4096) {
    std::string allocInfo;
    object->getAllocInfo(allocInfo);
    klee_warning_once(0, "flushing %d bytes on write, may be slow and/or crash: %s",
                      size,
                      allocInfo.c_str());
  }

  updates.extend(ZExtExpr::create(offset, Expr::Int32), value);
}

/***/

ref<Expr> ObjectState::read(ref<Expr> offset, Expr::Width width) const {
  // Truncate offset to 32-bits.
  offset = ZExtExpr::create(offset, Expr::Int32);

  // Check for reads at constant offsets.
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(offset))
    return read(CE->getZExtValue(32), width);

  // Treat bool specially, it is the only non-byte sized read we allow.
  if (width == Expr::Bool)
    return ExtractExpr::create(read8(offset), 0, Expr::Bool);

  // Otherwise, follow the slow general case.
  unsigned NumBytes = width / 8;
  assert(width == NumBytes * 8 && "Invalid read size!");
  ref<Expr> Res(0);
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned idx = Context::get().isLittleEndian() ? i : (NumBytes - i - 1);
    ref<Expr> Byte = read8(AddExpr::create(offset,
                                           ConstantExpr::create(idx,
                                                                Expr::Int32)));
    Res = i ? ConcatExpr::create(Byte, Res) : Byte;
  }

  return Res;
}
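// Worked example for the byte-composition loops above and below (illustrative
// only; assumes a little-endian Context): a 32-bit read at constant offset 4
// issues read8(4)..read8(7) and builds
//
//   Concat(byte7, Concat(byte6, Concat(byte5, byte4)))
//
// so the lowest-addressed byte lands in the least significant position.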
ref<Expr> ObjectState::read(unsigned offset, Expr::Width width) const {
  // Treat bool specially, it is the only non-byte sized read we allow.
  if (width == Expr::Bool)
    return ExtractExpr::create(read8(offset), 0, Expr::Bool);

  // Otherwise, follow the slow general case.
  unsigned NumBytes = width / 8;
  assert(width == NumBytes * 8 && "Invalid read size!");
  ref<Expr> Res(0);
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned idx = Context::get().isLittleEndian() ? i : (NumBytes - i - 1);
    ref<Expr> Byte = read8(offset + idx);
    Res = i ? ConcatExpr::create(Byte, Res) : Byte;
  }

  return Res;
}

void ObjectState::write(ref<Expr> offset, ref<Expr> value) {
  // Truncate offset to 32-bits.
  offset = ZExtExpr::create(offset, Expr::Int32);

  // Check for writes at constant offsets.
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(offset)) {
    write(CE->getZExtValue(32), value);
    return;
  }

  // Treat bool specially, it is the only non-byte sized write we allow.
  Expr::Width w = value->getWidth();
  if (w == Expr::Bool) {
    write8(offset, ZExtExpr::create(value, Expr::Int8));
    return;
  }

  // Otherwise, follow the slow general case.
  unsigned NumBytes = w / 8;
  assert(w == NumBytes * 8 && "Invalid write size!");
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned idx = Context::get().isLittleEndian() ? i : (NumBytes - i - 1);
    write8(AddExpr::create(offset, ConstantExpr::create(idx, Expr::Int32)),
           ExtractExpr::create(value, 8 * i, Expr::Int8));
  }
}

void ObjectState::write(unsigned offset, ref<Expr> value) {
  // Check for writes of constant values.
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(value)) {
    Expr::Width w = CE->getWidth();
    if (w <= 64) {
      uint64_t val = CE->getZExtValue();
      switch (w) {
      default: assert(0 && "Invalid write size!");
      case Expr::Bool:
      case Expr::Int8:  write8(offset, val); return;
      case Expr::Int16: write16(offset, val); return;
      case Expr::Int32: write32(offset, val); return;
      case Expr::Int64: write64(offset, val); return;
      }
    }
  }

  // Treat bool specially, it is the only non-byte sized write we allow.
  Expr::Width w = value->getWidth();
  if (w == Expr::Bool) {
    write8(offset, ZExtExpr::create(value, Expr::Int8));
    return;
  }

  // Otherwise, follow the slow general case.
  unsigned NumBytes = w / 8;
  assert(w == NumBytes * 8 && "Invalid write size!");
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned idx = Context::get().isLittleEndian() ? i : (NumBytes - i - 1);
    write8(offset + idx, ExtractExpr::create(value, 8 * i, Expr::Int8));
  }
}

void ObjectState::write16(unsigned offset, uint16_t value) {
  unsigned NumBytes = 2;
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned idx = Context::get().isLittleEndian() ? i : (NumBytes - i - 1);
    write8(offset + idx, (uint8_t) (value >> (8 * i)));
  }
}

void ObjectState::write32(unsigned offset, uint32_t value) {
  unsigned NumBytes = 4;
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned idx = Context::get().isLittleEndian() ? i : (NumBytes - i - 1);
    write8(offset + idx, (uint8_t) (value >> (8 * i)));
  }
}

void ObjectState::write64(unsigned offset, uint64_t value) {
  unsigned NumBytes = 8;
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned idx = Context::get().isLittleEndian() ? i : (NumBytes - i - 1);
    write8(offset + idx, (uint8_t) (value >> (8 * i)));
  }
}

void ObjectState::print() {
  std::cerr << "-- ObjectState --\n";
  std::cerr << "\tMemoryObject ID: " << object->id << "\n";
  std::cerr << "\tRoot Object: " << updates.root << "\n";
  std::cerr << "\tSize: " << size << "\n";

  std::cerr << "\tBytes:\n";
  for (unsigned i=0; i<size; i++) {
    std::cerr << "\t\t[" << i << "]"
              << " concrete? " << isByteConcrete(i)
              << " known-sym? " << isByteKnownSymbolic(i)
              << " flushed? " << isByteFlushed(i) << " = ";
    ref<Expr> e = read8(i);
    std::cerr << e << "\n";
  }

  std::cerr << "\tUpdates:\n";
  for (const UpdateNode *un=updates.head; un; un=un->next) {
    std::cerr << "\t\t[" << un->index << "] = " << un->value << "\n";
  }
}
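// Illustrative usage sketch (hypothetical driver code, not part of this file;
// assumes `mo` is a live MemoryObject* obtained from the MemoryManager):
//
//   ObjectState *os = new ObjectState(mo);   // all-concrete, zero-filled
//   os->write32(0, 0xdeadbeef);              // concrete fast path, no UpdateNodes
//   ref<Expr> v = os->read(0, Expr::Int32);  // concat of constant bytes folds
//                                            // back to ConstantExpr 0xdeadbeef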