//===-- Executor.cpp ------------------------------------------------------===//
//
//                     The KLEE Symbolic Virtual Machine
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "Executor.h"

#include "AddressSpace.h"
#include "Context.h"
#include "CoreStats.h"
#include "ExecutionState.h"
#include "ExecutionTree.h"
#include "ExternalDispatcher.h"
#if LLVM_VERSION_CODE <= LLVM_VERSION(14, 0)
#include "GetElementPtrTypeIterator.h"
#endif
#include "ImpliedValue.h"
#include "Memory.h"
#include "MemoryManager.h"
#include "Searcher.h"
#include "SeedInfo.h"
#include "SpecialFunctionHandler.h"
#include "StatsTracker.h"
#include "TimingSolver.h"
#include "UserSearcher.h"

#include "klee/ADT/KTest.h"
#include "klee/ADT/RNG.h"
#include "klee/Config/Version.h"
#include "klee/Core/Interpreter.h"
#include "klee/Expr/ArrayExprOptimizer.h"
#include "klee/Expr/Assignment.h"
#include "klee/Expr/Expr.h"
#include "klee/Expr/ExprPPrinter.h"
#include "klee/Expr/ExprSMTLIBPrinter.h"
#include "klee/Expr/ExprUtil.h"
#include "klee/KDAlloc/kdalloc.h"
#include "klee/Module/Cell.h"
#include "klee/Module/InstructionInfoTable.h"
#include "klee/Module/KCallable.h"
#include "klee/Module/KInstruction.h"
#include "klee/Module/KModule.h"
#include "klee/Solver/Common.h"
#include "klee/Solver/SolverCmdLine.h"
#include "klee/Solver/SolverStats.h"
#include "klee/Statistics/TimerStatIncrementer.h"
#include "klee/Support/Casting.h"
#include "klee/Support/ErrorHandling.h"
#include "klee/Support/FileHandling.h"
#include "klee/Support/ModuleUtil.h"
#include "klee/Support/OptionCategories.h"
#include "klee/System/MemoryUsage.h"
#include "klee/System/Time.h"

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#if LLVM_VERSION_CODE >= LLVM_VERSION(15, 0)
#include "llvm/IR/GetElementPtrTypeIterator.h"
#endif
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#if LLVM_VERSION_CODE >= LLVM_VERSION(10, 0)
#include "llvm/Support/TypeSize.h"
#else
typedef unsigned TypeSize;
#endif
#include "llvm/Support/raw_ostream.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

using namespace llvm;
using namespace klee;

namespace klee {
cl::OptionCategory DebugCat("Debugging options",
                            "These are debugging options.");

cl::OptionCategory ExtCallsCat("External call policy options",
                               "These options impact external calls.");

cl::OptionCategory SeedingCat(
    "Seeding options",
    "These options are related to the use of seeds to start exploration.");

cl::OptionCategory
    TerminationCat("State and overall termination options",
                   "These options control termination of the overall KLEE "
                   "execution and of individual states.");

cl::OptionCategory TestGenCat("Test generation options",
                              "These options impact test generation.");

cl::opt<std::string> MaxTime(
    "max-time",
    cl::desc("Halt execution after the specified duration. "
             "Set to 0s to disable (default=0s)"),
    cl::init("0s"), cl::cat(TerminationCat));

/*** Misc options ***/

cl::opt<bool> SingleObjectResolution(
    "single-object-resolution",
    cl::desc("Try to resolve memory reads/writes to single objects "
             "when offsets are symbolic (default=false)"),
    cl::init(false), cl::cat(MiscCat));

} // namespace klee

namespace {

/*** Test generation options ***/

cl::opt<bool> DumpStatesOnHalt(
    "dump-states-on-halt", cl::init(true),
    cl::desc("Dump test cases for all active states on exit (default=true)"),
    cl::cat(TestGenCat));

cl::opt<bool> OnlyOutputStatesCoveringNew(
    "only-output-states-covering-new", cl::init(false),
    cl::desc("Only output test cases covering new code (default=false)"),
    cl::cat(TestGenCat));

cl::opt<bool> EmitAllErrors(
    "emit-all-errors", cl::init(false),
    cl::desc("Generate test cases for all errors "
             "(default=false, i.e. one per (error,instruction) pair)"),
    cl::cat(TestGenCat));

/* Constraint solving options */

cl::opt<unsigned> MaxSymArraySize(
    "max-sym-array-size",
    cl::desc(
        "If a symbolic array exceeds this size (in bytes), symbolic addresses "
        "into this array are concretized. Set to 0 to disable (default=0)"),
    cl::init(0), cl::cat(SolvingCat));

cl::opt<bool>
    SimplifySymIndices("simplify-sym-indices", cl::init(false),
                       cl::desc("Simplify symbolic accesses using equalities "
                                "from other constraints (default=false)"),
                       cl::cat(SolvingCat));

cl::opt<bool>
    EqualitySubstitution("equality-substitution", cl::init(true),
                         cl::desc("Simplify equality expressions before "
                                  "querying the solver (default=true)"),
                         cl::cat(SolvingCat));

/*** External call policy options ***/

enum class ExternalCallPolicy {
  None,     // No external calls allowed
  Concrete, // Only external calls with concrete arguments allowed
  All,      // All external calls allowed
};

cl::opt<ExternalCallPolicy> ExternalCalls(
    "external-calls", cl::desc("Specify the external call policy"),
    cl::values(
        clEnumValN(
            ExternalCallPolicy::None, "none",
            "No external function calls are allowed. Note that KLEE always "
            "allows some external calls with concrete arguments to go through "
            "(in particular printf and puts), regardless of this option."),
        clEnumValN(ExternalCallPolicy::Concrete, "concrete",
                   "Only external function calls with concrete arguments are "
                   "allowed (default)"),
        clEnumValN(ExternalCallPolicy::All, "all",
                   "All external function calls are allowed. This concretizes "
                   "any symbolic arguments in calls to external functions.")),
    cl::init(ExternalCallPolicy::Concrete), cl::cat(ExtCallsCat));

/*** External call warnings options ***/

enum class ExtCallWarnings {
  None,            // Never warn on external calls
  OncePerFunction, // Only warn once per function on external calls
  All,             // Always warn on external calls
};

cl::opt<ExtCallWarnings> ExternalCallWarnings(
    "external-call-warnings",
    cl::desc("Specify when to warn about external calls"),
    cl::values(
        clEnumValN(ExtCallWarnings::None, "none", "Never warn"),
        clEnumValN(ExtCallWarnings::OncePerFunction, "once-per-function",
                   "Warn once per external function (default)"),
        clEnumValN(ExtCallWarnings::All, "all", "Always warn")),
    cl::init(ExtCallWarnings::OncePerFunction), cl::cat(ExtCallsCat));

cl::opt<std::size_t> ExternalPageThreshold(
    "kdalloc-external-page-threshold", cl::init(1024),
    cl::desc(
        "Threshold for garbage collecting pages used by external calls. 
If " "there is a significant number of infrequently used pages resident in " "memory, these will only be cleaned up if the total number of pages " "used for external calls is above the given threshold (default=1024)."), cl::cat(ExtCallsCat)); /*** Seeding options ***/ cl::opt AlwaysOutputSeeds( "always-output-seeds", cl::init(true), cl::desc( "Dump test cases even if they are driven by seeds only (default=true)"), cl::cat(SeedingCat)); cl::opt OnlyReplaySeeds( "only-replay-seeds", cl::init(false), cl::desc("Discard states that do not have a seed (default=false)."), cl::cat(SeedingCat)); cl::opt OnlySeed("only-seed", cl::init(false), cl::desc("Stop execution after seeding is done without " "doing regular search (default=false)."), cl::cat(SeedingCat)); cl::opt AllowSeedExtension( "allow-seed-extension", cl::init(false), cl::desc("Allow extra values to become symbolic during seeding; " "the seed is extended with zeros (default=false)."), cl::cat(SeedingCat)); cl::opt AllowSeedTruncation( "allow-seed-truncation", cl::init(false), cl::desc("Allow smaller buffers than in seeds (default=false)."), cl::cat(SeedingCat)); cl::opt NamedSeedMatching( "named-seed-matching", cl::init(false), cl::desc("Use names to match symbolic objects to inputs (default=false)."), cl::cat(SeedingCat)); cl::opt SeedTime("seed-time", cl::desc("Amount of time to dedicate to seeds, before normal " "search (default=0s (off))"), cl::cat(SeedingCat)); /*** Termination criteria options ***/ cl::list ExitOnErrorType( "exit-on-error-type", cl::desc("Stop execution after reaching a specified condition (default=false)"), cl::values( clEnumValN(StateTerminationType::Abort, "Abort", "The program reached abort or klee_abort"), clEnumValN(StateTerminationType::Assert, "Assert", "An assertion was hit"), clEnumValN(StateTerminationType::BadVectorAccess, "BadVectorAccess", "Vector accessed out of bounds"), clEnumValN(StateTerminationType::Execution, "Execution", "Trying to execute an unexpected instruction"), clEnumValN(StateTerminationType::External, "External", "External objects referenced"), clEnumValN(StateTerminationType::Free, "Free", "Freeing invalid memory"), clEnumValN(StateTerminationType::Model, "Model", "Memory model limit hit"), clEnumValN(StateTerminationType::Overflow, "Overflow", "An overflow occurred"), clEnumValN(StateTerminationType::Ptr, "Ptr", "Pointer error"), clEnumValN(StateTerminationType::ReadOnly, "ReadOnly", "Write to read-only memory"), clEnumValN(StateTerminationType::ReportError, "ReportError", "klee_report_error called"), clEnumValN(StateTerminationType::InvalidBuiltin, "InvalidBuiltin", "Passing invalid value to compiler builtin"), clEnumValN(StateTerminationType::ImplicitTruncation, "ImplicitTruncation", "Implicit conversion from integer of larger bit width to " "smaller bit width that results in data loss"), clEnumValN(StateTerminationType::ImplicitConversion, "ImplicitConversion", "Implicit conversion between integer types that changes the " "sign of the value"), clEnumValN(StateTerminationType::UnreachableCall, "UnreachableCall", "Control flow reached an unreachable program point"), clEnumValN(StateTerminationType::MissingReturn, "MissingReturn", "Reaching the end of a value-returning function without " "returning a value"), clEnumValN(StateTerminationType::InvalidLoad, "InvalidLoad", "Load of a value which is not in the range of representable " "values for that type"), clEnumValN(StateTerminationType::NullableAttribute, "NullableAttribute", "Violation of nullable attribute detected"), 
        clEnumValN(StateTerminationType::User, "User",
                   "Wrong klee_* function invocation")),
    cl::ZeroOrMore, cl::cat(TerminationCat));

cl::opt<unsigned long long> MaxInstructions(
    "max-instructions",
    cl::desc("Stop execution after this many instructions. "
             "Set to 0 to disable (default=0)"),
    cl::init(0), cl::cat(TerminationCat));

cl::opt<unsigned> MaxForks(
    "max-forks",
    cl::desc("Only fork this many times. Set to -1 to disable (default=-1)"),
    cl::init(~0u), cl::cat(TerminationCat));

cl::opt<unsigned> MaxDepth(
    "max-depth",
    cl::desc("Only allow this many symbolic branches. "
             "Set to 0 to disable (default=0)"),
    cl::init(0), cl::cat(TerminationCat));

cl::opt<unsigned> MaxMemory(
    "max-memory",
    cl::desc("Refuse to fork when above this amount of "
             "memory (in MB) (see -max-memory-inhibit) and terminate "
             "states when an additional 100MB is allocated (default=2000)"),
    cl::init(2000), cl::cat(TerminationCat));

cl::opt<bool> MaxMemoryInhibit(
    "max-memory-inhibit",
    cl::desc("Inhibit forking when above memory cap (see -max-memory) "
             "(default=true)"),
    cl::init(true), cl::cat(TerminationCat));

cl::opt<unsigned> RuntimeMaxStackFrames(
    "max-stack-frames",
    cl::desc("Terminate a state after this many stack frames. Set to 0 to "
             "disable (default=8192)"),
    cl::init(8192), cl::cat(TerminationCat));

cl::opt<double> MaxStaticForkPct(
    "max-static-fork-pct", cl::init(1.),
    cl::desc("Maximum percentage spent by an instruction forking out of the "
             "forking of all instructions (default=1.0 (always))"),
    cl::cat(TerminationCat));

cl::opt<double> MaxStaticSolvePct(
    "max-static-solve-pct", cl::init(1.),
    cl::desc("Maximum percentage of solving time that can be spent by a single "
             "instruction over total solving time for all instructions "
             "(default=1.0 (always))"),
    cl::cat(TerminationCat));

cl::opt<double> MaxStaticCPForkPct(
    "max-static-cpfork-pct", cl::init(1.),
    cl::desc("Maximum percentage spent by an instruction of a call path "
             "forking out of the forking of all instructions in the call path "
             "(default=1.0 (always))"),
    cl::cat(TerminationCat));

cl::opt<double> MaxStaticCPSolvePct(
    "max-static-cpsolve-pct", cl::init(1.),
    cl::desc("Maximum percentage of solving time that can be spent by a single "
             "instruction of a call path over total solving time for all "
             "instructions (default=1.0 (always))"),
    cl::cat(TerminationCat));

cl::opt<unsigned> MaxStaticPctCheckDelay(
    "max-static-pct-check-delay",
    cl::desc("Number of forks after which the --max-static-*-pct checks are "
             "enforced (default=1000)"),
    cl::init(1000), cl::cat(TerminationCat));

cl::opt<std::string> TimerInterval(
    "timer-interval",
    cl::desc("Minimum interval to check timers. "
             "Affects -max-time, -istats-write-interval, "
             "-stats-write-interval, and -uncovered-update-interval "
             "(default=1s)"),
    cl::init("1s"), cl::cat(TerminationCat));

/*** Debugging options ***/

/// The different instruction-logging modes that can be switched on/off
enum PrintDebugInstructionsType {
  STDERR_ALL, ///
  STDERR_SRC,
  STDERR_COMPACT,
  FILE_ALL,    ///
  FILE_SRC,    ///
  FILE_COMPACT ///
};

llvm::cl::bits<PrintDebugInstructionsType> DebugPrintInstructions(
    "debug-print-instructions",
    llvm::cl::desc("Log instructions during execution."),
    llvm::cl::values(
        clEnumValN(STDERR_ALL, "all:stderr",
                   "Log all instructions to stderr "
                   "in format [src, inst_id, "
                   "llvm_inst]"),
        clEnumValN(STDERR_SRC, "src:stderr",
                   "Log all instructions to stderr in format [src, inst_id]"),
        clEnumValN(STDERR_COMPACT, "compact:stderr",
                   "Log all instructions to stderr in format [inst_id]"),
        clEnumValN(FILE_ALL, "all:file",
                   "Log all instructions to file "
                   "instructions.txt in format [src, "
                   "inst_id, llvm_inst]"),
        clEnumValN(FILE_SRC, "src:file",
                   "Log all instructions to file "
                   "instructions.txt in format [src, "
                   "inst_id]"),
        clEnumValN(FILE_COMPACT, "compact:file",
                   "Log all instructions to file instructions.txt in format "
                   "[inst_id]")),
    llvm::cl::CommaSeparated, cl::cat(DebugCat));

#ifdef HAVE_ZLIB_H
cl::opt<bool> DebugCompressInstructions(
    "debug-compress-instructions", cl::init(false),
    cl::desc(
        "Compress the logged instructions in gzip format (default=false)."),
    cl::cat(DebugCat));
#endif

cl::opt<bool> DebugCheckForImpliedValues(
    "debug-check-for-implied-values", cl::init(false),
    cl::desc("Debug the implied value optimization"), cl::cat(DebugCat));

} // namespace

// XXX hack
extern "C" unsigned dumpStates, dumpExecutionTree;
unsigned dumpStates = 0, dumpExecutionTree = 0;

Executor::Executor(LLVMContext &ctx, const InterpreterOptions &opts,
                   InterpreterHandler *ih)
    : Interpreter(opts), interpreterHandler(ih), searcher(0),
      externalDispatcher(new ExternalDispatcher(ctx)), statsTracker(0),
      pathWriter(0), symPathWriter(0), specialFunctionHandler(0),
      timers{time::Span(TimerInterval)}, replayKTest(0), replayPath(0),
      usingSeeds(0), atMemoryLimit(false), inhibitForking(false),
      haltExecution(false), ivcEnabled(false),
      debugLogBuffer(debugBufferString) {

  const time::Span maxTime{MaxTime};
  if (maxTime)
    timers.add(std::make_unique<Timer>(maxTime, [&] {
      klee_message("HaltTimer invoked");
      setHaltExecution(true);
    }));

  coreSolverTimeout = time::Span{MaxCoreSolverTime};
  if (coreSolverTimeout)
    UseForkedCoreSolver = true;

  std::unique_ptr<Solver> coreSolver = klee::createCoreSolver(CoreSolverToUse);
  if (!coreSolver) {
    klee_error("Failed to create core solver\n");
  }

  std::unique_ptr<Solver> solver = constructSolverChain(
      std::move(coreSolver),
      interpreterHandler->getOutputFilename(ALL_QUERIES_SMT2_FILE_NAME),
      interpreterHandler->getOutputFilename(SOLVER_QUERIES_SMT2_FILE_NAME),
      interpreterHandler->getOutputFilename(ALL_QUERIES_KQUERY_FILE_NAME),
      interpreterHandler->getOutputFilename(SOLVER_QUERIES_KQUERY_FILE_NAME));

  this->solver =
      std::make_unique<TimingSolver>(std::move(solver), EqualitySubstitution);
  memory = std::make_unique<MemoryManager>(&arrayCache);

  initializeSearchOptions();

  if (OnlyOutputStatesCoveringNew && !StatsTracker::useIStats())
    klee_error("To use --only-output-states-covering-new, you need to enable "
               "--output-istats.");

  if (DebugPrintInstructions.isSet(FILE_ALL) ||
      DebugPrintInstructions.isSet(FILE_COMPACT) ||
      DebugPrintInstructions.isSet(FILE_SRC)) {
    std::string debug_file_name =
        interpreterHandler->getOutputFilename("instructions.txt");
    std::string error;
#ifdef HAVE_ZLIB_H
    if (!DebugCompressInstructions) {
#endif
      debugInstFile
= klee_open_output_file(debug_file_name, error); #ifdef HAVE_ZLIB_H } else { debug_file_name.append(".gz"); debugInstFile = klee_open_compressed_output_file(debug_file_name, error); } #endif if (!debugInstFile) { klee_error("Could not open file %s : %s", debug_file_name.c_str(), error.c_str()); } } } llvm::Module * Executor::setModule(std::vector> &modules, const ModuleOptions &opts) { assert(!kmodule && !modules.empty() && "can only register one module"); // XXX gross kmodule = std::unique_ptr(new KModule()); // Preparing the final module happens in multiple stages // Link with KLEE intrinsics library before running any optimizations SmallString<128> LibPath(opts.LibraryDir); llvm::sys::path::append(LibPath, "libkleeRuntimeIntrinsic" + opts.OptSuffix + ".bca"); std::string error; if (!klee::loadFile(LibPath.c_str(), modules[0]->getContext(), modules, error)) { klee_error("Could not load KLEE intrinsic file %s", LibPath.c_str()); } // 1.) Link the modules together while (kmodule->link(modules, opts.EntryPoint)) { // 2.) Apply different instrumentation kmodule->instrument(opts); } // 3.) Optimise and prepare for KLEE // Create a list of functions that should be preserved if used std::vector preservedFunctions; specialFunctionHandler = new SpecialFunctionHandler(*this); specialFunctionHandler->prepare(preservedFunctions); preservedFunctions.push_back(opts.EntryPoint.c_str()); // Preserve the free-standing library calls preservedFunctions.push_back("memset"); preservedFunctions.push_back("memcpy"); preservedFunctions.push_back("memcmp"); preservedFunctions.push_back("memmove"); kmodule->optimiseAndPrepare(opts, preservedFunctions); kmodule->checkModule(); // 4.) Manifest the module kmodule->manifest(interpreterHandler, StatsTracker::useStatistics()); specialFunctionHandler->bind(); if (StatsTracker::useStatistics() || userSearcherRequiresMD2U()) { statsTracker = new StatsTracker(*this, interpreterHandler->getOutputFilename("assembly.ll"), userSearcherRequiresMD2U()); } // Initialize the context. 
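  // The Context singleton captures the target's endianness and pointer width
  // from the module's DataLayout; pointer-sized constants (e.g. those built
  // via Expr::createPointer) are created against it, so it has to be
  // initialized before any execution state is constructed.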
DataLayout *TD = kmodule->targetData.get(); Context::initialize(TD->isLittleEndian(), (Expr::Width)TD->getPointerSizeInBits()); return kmodule->module.get(); } Executor::~Executor() { delete externalDispatcher; delete specialFunctionHandler; delete statsTracker; } /***/ void Executor::initializeGlobalObject(ExecutionState &state, ObjectState *os, const Constant *c, unsigned offset) { const auto targetData = kmodule->targetData.get(); if (const ConstantVector *cp = dyn_cast(c)) { unsigned elementSize = targetData->getTypeStoreSize(cp->getType()->getElementType()); for (unsigned i=0, e=cp->getNumOperands(); i != e; ++i) initializeGlobalObject(state, os, cp->getOperand(i), offset + i*elementSize); } else if (isa(c)) { unsigned i, size = targetData->getTypeStoreSize(c->getType()); for (i=0; iwrite8(offset+i, (uint8_t) 0); } else if (const ConstantArray *ca = dyn_cast(c)) { unsigned elementSize = targetData->getTypeStoreSize(ca->getType()->getElementType()); for (unsigned i=0, e=ca->getNumOperands(); i != e; ++i) initializeGlobalObject(state, os, ca->getOperand(i), offset + i*elementSize); } else if (const ConstantStruct *cs = dyn_cast(c)) { const StructLayout *sl = targetData->getStructLayout(cast(cs->getType())); for (unsigned i=0, e=cs->getNumOperands(); i != e; ++i) initializeGlobalObject(state, os, cs->getOperand(i), offset + sl->getElementOffset(i)); } else if (const ConstantDataSequential *cds = dyn_cast(c)) { unsigned elementSize = targetData->getTypeStoreSize(cds->getElementType()); for (unsigned i=0, e=cds->getNumElements(); i != e; ++i) initializeGlobalObject(state, os, cds->getElementAsConstant(i), offset + i*elementSize); } else if (!isa(c) && !isa(c)) { unsigned StoreBits = targetData->getTypeStoreSizeInBits(c->getType()); ref C = evalConstant(c); // Extend the constant if necessary; assert(StoreBits >= C->getWidth() && "Invalid store size!"); if (StoreBits > C->getWidth()) C = C->ZExt(StoreBits); os->write(offset, C); } } MemoryObject * Executor::addExternalObject(ExecutionState &state, void *addr, unsigned size, bool isReadOnly) { auto mo = memory->allocateFixed(reinterpret_cast(addr), size, nullptr); ObjectState *os = bindObjectInState(state, mo, false); for(unsigned i = 0; i < size; i++) os->write8(i, ((uint8_t*)addr)[i]); if(isReadOnly) os->setReadOnly(true); return mo; } extern void *__dso_handle __attribute__ ((__weak__)); void Executor::initializeGlobals(ExecutionState &state) { // allocate and initialize globals, done in two passes since we may // need address of a global in order to initialize some other one. // allocate memory objects for all globals allocateGlobalObjects(state); // initialize aliases first, may be needed for global objects initializeGlobalAliases(); // finally, do the actual initialization initializeGlobalObjects(state); } void Executor::allocateGlobalObjects(ExecutionState &state) { Module *m = kmodule->module.get(); if (m->getModuleInlineAsm() != "") klee_warning("executable has module level assembly (ignoring)"); // represent function globals using the address of the actual llvm function // object. given that we use malloc to allocate memory in states this also // ensures that we won't conflict. we don't need to allocate a memory object // since reading/writing via a function pointer is unsupported anyway. for (Function &f : *m) { ref addr; // If the symbol has external weak linkage then it is implicitly // not defined in this module; if it isn't resolvable then it // should be null. 
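    // Otherwise a small dummy allocation stands in for the function: its
    // address is what function pointers evaluate to in this state, and the
    // address-to-function mapping is recorded in legalFunctions so that an
    // indirect call through such a pointer can later be mapped back to the
    // llvm::Function it denotes.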
if (f.hasExternalWeakLinkage() && !externalDispatcher->resolveSymbol(f.getName().str())) { addr = Expr::createPointer(0); } else { // We allocate an object to represent each function, // its address can be used for function pointers. // TODO: Check whether the object is accessed? auto mo = memory->allocate(8, false, true, &state, &f, 8); addr = Expr::createPointer(mo->address); legalFunctions.emplace(mo->address, &f); } globalAddresses.emplace(&f, addr); } #ifndef WINDOWS int *errno_addr = getErrnoLocation(state); MemoryObject *errnoObj = addExternalObject(state, (void *)errno_addr, sizeof *errno_addr, false); // Copy values from and to program space explicitly errnoObj->isUserSpecified = true; #endif // Disabled, we don't want to promote use of live externals. #ifdef HAVE_CTYPE_EXTERNALS #ifndef WINDOWS #ifndef DARWIN /* from /usr/include/ctype.h: These point into arrays of 384, so they can be indexed by any `unsigned char' value [0,255]; by EOF (-1); or by any `signed char' value [-128,-1). ISO C requires that the ctype functions work for `unsigned */ const uint16_t **addr = __ctype_b_loc(); addExternalObject(state, const_cast(*addr-128), 384 * sizeof **addr, true); addExternalObject(state, addr, sizeof(*addr), true); const int32_t **lower_addr = __ctype_tolower_loc(); addExternalObject(state, const_cast(*lower_addr-128), 384 * sizeof **lower_addr, true); addExternalObject(state, lower_addr, sizeof(*lower_addr), true); const int32_t **upper_addr = __ctype_toupper_loc(); addExternalObject(state, const_cast(*upper_addr-128), 384 * sizeof **upper_addr, true); addExternalObject(state, upper_addr, sizeof(*upper_addr), true); #endif #endif #endif for (const GlobalVariable &v : m->globals()) { std::size_t globalObjectAlignment = getAllocationAlignment(&v); Type *ty = v.getValueType(); std::uint64_t size = 0; if (ty->isSized()) size = kmodule->targetData->getTypeStoreSize(ty); if (v.isDeclaration()) { // FIXME: We have no general way of handling unknown external // symbols. If we really cared about making external stuff work // better we could support user definition, or use the EXE style // hack where we check the object file information. if (!ty->isSized()) { klee_warning("Type for %.*s is not sized", static_cast(v.getName().size()), v.getName().data()); } // XXX - DWD - hardcode some things until we decide how to fix. 
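      // The symbols below are the vtables of libstdc++'s RTTI support classes
      // (__cxxabiv1::__class_type_info and friends). They are typically only
      // declared in the bitcode, so a plausible size is hardcoded here to keep
      // programs that rely on RTTI running.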
#ifndef WINDOWS if (v.getName() == "_ZTVN10__cxxabiv117__class_type_infoE") { size = 0x2C; } else if (v.getName() == "_ZTVN10__cxxabiv120__si_class_type_infoE") { size = 0x2C; } else if (v.getName() == "_ZTVN10__cxxabiv121__vmi_class_type_infoE") { size = 0x2C; } #endif if (size == 0) { klee_warning("Unable to find size for global variable: %.*s (use will " "result in out of bounds access)", static_cast(v.getName().size()), v.getName().data()); } } MemoryObject *mo = memory->allocate(size, /*isLocal=*/false, /*isGlobal=*/true, /*state=*/nullptr, /*allocSite=*/&v, /*alignment=*/globalObjectAlignment); if (!mo) klee_error("out of memory"); globalObjects.emplace(&v, mo); globalAddresses.emplace(&v, mo->getBaseExpr()); } } void Executor::initializeGlobalAlias(const llvm::Constant *c) { // aliasee may either be a global value or constant expression const auto *ga = dyn_cast(c); if (ga) { if (globalAddresses.count(ga)) { // already resolved by previous invocation return; } const llvm::Constant *aliasee = ga->getAliasee(); if (const auto *gv = dyn_cast(aliasee)) { // aliasee is global value auto it = globalAddresses.find(gv); // uninitialized only if aliasee is another global alias if (it != globalAddresses.end()) { globalAddresses.emplace(ga, it->second); return; } } } // resolve aliases in all sub-expressions for (const auto *op : c->operand_values()) { initializeGlobalAlias(cast(op)); } if (ga) { // aliasee is constant expression (or global alias) globalAddresses.emplace(ga, evalConstant(ga->getAliasee())); } } void Executor::initializeGlobalAliases() { const Module *m = kmodule->module.get(); for (const GlobalAlias &a : m->aliases()) { initializeGlobalAlias(&a); } } void Executor::initializeGlobalObjects(ExecutionState &state) { const Module *m = kmodule->module.get(); for (const GlobalVariable &v : m->globals()) { MemoryObject *mo = globalObjects.find(&v)->second; ObjectState *os = bindObjectInState(state, mo, false); if (v.isDeclaration() && mo->size) { // Program already running -> object already initialized. // Read concrete value and write it to our copy. void *addr; if (v.getName() == "__dso_handle") { addr = &__dso_handle; // wtf ? 
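        // __dso_handle is declared weak earlier in this file, so the address
        // visible in the host process is used directly rather than resolving
        // the symbol through the external dispatcher.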
} else { addr = externalDispatcher->resolveSymbol(v.getName().str()); } if (!addr) { klee_error("Unable to load symbol(%.*s) while initializing globals", static_cast(v.getName().size()), v.getName().data()); } for (unsigned offset = 0; offset < mo->size; offset++) { os->write8(offset, static_cast(addr)[offset]); } } else if (v.hasInitializer()) { initializeGlobalObject(state, os, v.getInitializer(), 0); if (v.isConstant()) { os->setReadOnly(true); // initialise constant memory that may be used with external calls state.addressSpace.copyOutConcrete(mo, os); } } else { os->initializeToRandom(); } } } bool Executor::branchingPermitted(const ExecutionState &state) const { if ((MaxMemoryInhibit && atMemoryLimit) || state.forkDisabled || inhibitForking || (MaxForks!=~0u && stats::forks >= MaxForks)) { if (MaxMemoryInhibit && atMemoryLimit) klee_warning_once(0, "skipping fork (memory cap exceeded)"); else if (state.forkDisabled) klee_warning_once(0, "skipping fork (fork disabled on current path)"); else if (inhibitForking) klee_warning_once(0, "skipping fork (fork disabled globally)"); else klee_warning_once(0, "skipping fork (max-forks reached)"); return false; } return true; } void Executor::branch(ExecutionState &state, const std::vector> &conditions, std::vector &result, BranchType reason) { TimerStatIncrementer timer(stats::forkTime); unsigned N = conditions.size(); assert(N); if (!branchingPermitted(state)) { unsigned next = theRNG.getInt32() % N; for (unsigned i=0; ibranch(); addedStates.push_back(ns); result.push_back(ns); executionTree->attach(es->executionTreeNode, ns, es, reason); } } // If necessary redistribute seeds to match conditions, killing // states if necessary due to OnlyReplaySeeds (inefficient but // simple). std::map< ExecutionState*, std::vector >::iterator it = seedMap.find(&state); if (it != seedMap.end()) { std::vector seeds = it->second; seedMap.erase(it); // Assume each seed only satisfies one condition (necessarily true // when conditions are mutually exclusive and their conjunction is // a tautology). for (std::vector::iterator siit = seeds.begin(), siie = seeds.end(); siit != siie; ++siit) { unsigned i; for (i=0; i res; bool success = solver->getValue( state.constraints, siit->assignment.evaluate(conditions[i]), res, state.queryMetaData); assert(success && "FIXME: Unhandled solver failure"); (void) success; if (res->isTrue()) break; } // If we didn't find a satisfying condition randomly pick one // (the seed will be patched). if (i==N) i = theRNG.getInt32() % N; // Extra check in case we're replaying seeds with a max-fork if (result[i]) seedMap[result[i]].push_back(*siit); } if (OnlyReplaySeeds) { for (unsigned i=0; i Executor::maxStaticPctChecks(ExecutionState ¤t, ref condition) { if (isa(condition)) return condition; if (MaxStaticForkPct == 1. && MaxStaticSolvePct == 1. && MaxStaticCPForkPct == 1. && MaxStaticCPSolvePct == 1.) return condition; // These checks are performed only after at least MaxStaticPctCheckDelay forks // have been performed since execution started if (stats::forks < MaxStaticPctCheckDelay) return condition; StatisticManager &sm = *theStatisticManager; CallPathNode *cpn = current.stack.back().callPathNode; bool reached_max_fork_limit = (MaxStaticForkPct < 1. && (sm.getIndexedValue(stats::forks, sm.getIndex()) > stats::forks * MaxStaticForkPct)); bool reached_max_cp_fork_limit = (MaxStaticCPForkPct < 1. 
&& cpn && (cpn->statistics.getValue(stats::forks) > stats::forks * MaxStaticCPForkPct)); bool reached_max_solver_limit = (MaxStaticSolvePct < 1 && (sm.getIndexedValue(stats::solverTime, sm.getIndex()) > stats::solverTime * MaxStaticSolvePct)); bool reached_max_cp_solver_limit = (MaxStaticCPForkPct < 1. && cpn && (cpn->statistics.getValue(stats::solverTime) > stats::solverTime * MaxStaticCPSolvePct)); if (reached_max_fork_limit || reached_max_cp_fork_limit || reached_max_solver_limit || reached_max_cp_solver_limit) { ref value; bool success = solver->getValue(current.constraints, condition, value, current.queryMetaData); assert(success && "FIXME: Unhandled solver failure"); (void)success; std::string msg("skipping fork and concretizing condition (MaxStatic*Pct " "limit reached) at "); llvm::raw_string_ostream os(msg); os << current.prevPC->getSourceLocation(); klee_warning_once(0, "%s", os.str().c_str()); addConstraint(current, EqExpr::create(value, condition)); condition = value; } return condition; } Executor::StatePair Executor::fork(ExecutionState ¤t, ref condition, bool isInternal, BranchType reason) { Solver::Validity res; std::map< ExecutionState*, std::vector >::iterator it = seedMap.find(¤t); bool isSeeding = it != seedMap.end(); if (!isSeeding) condition = maxStaticPctChecks(current, condition); time::Span timeout = coreSolverTimeout; if (isSeeding) timeout *= static_cast(it->second.size()); solver->setTimeout(timeout); bool success = solver->evaluate(current.constraints, condition, res, current.queryMetaData); solver->setTimeout(time::Span()); if (!success) { current.pc = current.prevPC; terminateStateOnSolverError(current, "Query timed out (fork)."); return StatePair(nullptr, nullptr); } if (!isSeeding) { if (replayPath && !isInternal) { assert(replayPositionsize() && "ran out of branches in replay path mode"); bool branch = (*replayPath)[replayPosition++]; if (res==Solver::True) { assert(branch && "hit invalid branch in replay path mode"); } else if (res==Solver::False) { assert(!branch && "hit invalid branch in replay path mode"); } else { // add constraints if(branch) { res = Solver::True; addConstraint(current, condition); } else { res = Solver::False; addConstraint(current, Expr::createIsZero(condition)); } } } else if (res==Solver::Unknown) { assert(!replayKTest && "in replay mode, only one branch can be true."); if (!branchingPermitted(current)) { TimerStatIncrementer timer(stats::forkTime); if (theRNG.getBool()) { addConstraint(current, condition); res = Solver::True; } else { addConstraint(current, Expr::createIsZero(condition)); res = Solver::False; } ++stats::inhibitedForks; } } } // Fix branch in only-replay-seed mode, if we don't have both true // and false seeds. if (isSeeding && (current.forkDisabled || OnlyReplaySeeds) && res == Solver::Unknown) { bool trueSeed=false, falseSeed=false; // Is seed extension still ok here? for (std::vector::iterator siit = it->second.begin(), siie = it->second.end(); siit != siie; ++siit) { ref res; bool success = solver->getValue(current.constraints, siit->assignment.evaluate(condition), res, current.queryMetaData); assert(success && "FIXME: Unhandled solver failure"); (void) success; if (res->isTrue()) { trueSeed = true; } else { falseSeed = true; } if (trueSeed && falseSeed) break; } if (!(trueSeed && falseSeed)) { assert(trueSeed || falseSeed); res = trueSeed ? Solver::True : Solver::False; addConstraint(current, trueSeed ? 
condition : Expr::createIsZero(condition)); } } // XXX - even if the constraint is provable one way or the other we // can probably benefit by adding this constraint and allowing it to // reduce the other constraints. For example, if we do a binary // search on a particular value, and then see a comparison against // the value it has been fixed at, we should take this as a nice // hint to just use the single constraint instead of all the binary // search ones. If that makes sense. if (res==Solver::True) { if (!isInternal) { if (pathWriter) { current.pathOS << "1"; } } return StatePair(¤t, nullptr); } else if (res==Solver::False) { if (!isInternal) { if (pathWriter) { current.pathOS << "0"; } } return StatePair(nullptr, ¤t); } else { TimerStatIncrementer timer(stats::forkTime); ExecutionState *falseState, *trueState = ¤t; ++stats::forks; falseState = trueState->branch(); addedStates.push_back(falseState); if (it != seedMap.end()) { std::vector seeds = it->second; it->second.clear(); std::vector &trueSeeds = seedMap[trueState]; std::vector &falseSeeds = seedMap[falseState]; for (std::vector::iterator siit = seeds.begin(), siie = seeds.end(); siit != siie; ++siit) { ref res; bool success = solver->getValue(current.constraints, siit->assignment.evaluate(condition), res, current.queryMetaData); assert(success && "FIXME: Unhandled solver failure"); (void) success; if (res->isTrue()) { trueSeeds.push_back(*siit); } else { falseSeeds.push_back(*siit); } } bool swapInfo = false; if (trueSeeds.empty()) { if (¤t == trueState) swapInfo = true; seedMap.erase(trueState); } if (falseSeeds.empty()) { if (¤t == falseState) swapInfo = true; seedMap.erase(falseState); } if (swapInfo) { std::swap(trueState->coveredNew, falseState->coveredNew); std::swap(trueState->coveredLines, falseState->coveredLines); } } executionTree->attach(current.executionTreeNode, falseState, trueState, reason); stats::incBranchStat(reason, 1); if (pathWriter) { // Need to update the pathOS.id field of falseState, otherwise the same id // is used for both falseState and trueState. falseState->pathOS = pathWriter->open(current.pathOS); if (!isInternal) { trueState->pathOS << "1"; falseState->pathOS << "0"; } } if (symPathWriter) { falseState->symPathOS = symPathWriter->open(current.symPathOS); if (!isInternal) { trueState->symPathOS << "1"; falseState->symPathOS << "0"; } } addConstraint(*trueState, condition); addConstraint(*falseState, Expr::createIsZero(condition)); // Kinda gross, do we even really still want this option? if (MaxDepth && MaxDepth<=trueState->depth) { terminateStateEarly(*trueState, "max-depth exceeded.", StateTerminationType::MaxDepth); terminateStateEarly(*falseState, "max-depth exceeded.", StateTerminationType::MaxDepth); return StatePair(nullptr, nullptr); } return StatePair(trueState, falseState); } } void Executor::addConstraint(ExecutionState &state, ref condition) { if (ConstantExpr *CE = dyn_cast(condition)) { if (!CE->isTrue()) llvm::report_fatal_error("attempt to add invalid constraint"); return; } // Check to see if this constraint violates seeds. 
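  // If a seed's current assignment makes the new constraint provably false,
  // the seed is patched (its assignment is adjusted so the constraint can be
  // satisfied) and a warning is emitted, so that seeded exploration can
  // continue along this path.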
std::map< ExecutionState*, std::vector >::iterator it = seedMap.find(&state); if (it != seedMap.end()) { bool warn = false; for (std::vector::iterator siit = it->second.begin(), siie = it->second.end(); siit != siie; ++siit) { bool res; bool success = solver->mustBeFalse(state.constraints, siit->assignment.evaluate(condition), res, state.queryMetaData); assert(success && "FIXME: Unhandled solver failure"); (void) success; if (res) { siit->patchSeed(state, condition, solver.get()); warn = true; } } if (warn) klee_warning("seeds patched for violating constraint"); } state.addConstraint(condition); if (ivcEnabled) doImpliedValueConcretization(state, condition, ConstantExpr::alloc(1, Expr::Bool)); } const Cell& Executor::eval(KInstruction *ki, unsigned index, ExecutionState &state) const { assert(index < ki->inst->getNumOperands()); int vnumber = ki->operands[index]; assert(vnumber != -1 && "Invalid operand to eval(), not a value or constant!"); // Determine if this is a constant or not. if (vnumber < 0) { unsigned index = -vnumber - 2; return kmodule->constantTable[index]; } else { unsigned index = vnumber; StackFrame &sf = state.stack.back(); return sf.locals[index]; } } void Executor::bindLocal(KInstruction *target, ExecutionState &state, ref value) { getDestCell(state, target).value = value; } void Executor::bindArgument(KFunction *kf, unsigned index, ExecutionState &state, ref value) { getArgumentCell(state, kf, index).value = value; } ref Executor::toUnique(const ExecutionState &state, ref &e) { ref result = e; if (!isa(e)) { ref value; bool isTrue = false; e = optimizer.optimizeExpr(e, true); solver->setTimeout(coreSolverTimeout); if (solver->getValue(state.constraints, e, value, state.queryMetaData)) { ref cond = EqExpr::create(e, value); cond = optimizer.optimizeExpr(cond, false); if (solver->mustBeTrue(state.constraints, cond, isTrue, state.queryMetaData) && isTrue) result = value; } solver->setTimeout(time::Span()); } return result; } /* Concretize the given expression, and return a possible constant value. 'reason' is just a documentation string stating the reason for concretization. 
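   The constant is taken from a matching seed when one evaluates the
   expression to a constant value; otherwise the solver is asked for some
   satisfying value. Either way the choice is pinned by adding an EqExpr
   constraint, so the remainder of the path stays consistent with the chosen
   constant.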
*/ ref Executor::toConstant(ExecutionState &state, ref e, const char *reason) { e = ConstraintManager::simplifyExpr(state.constraints, e); if (ConstantExpr *CE = dyn_cast(e)) return CE; /* If no seed evaluation results in a constant, call the solver */ ref cvalue = getValueFromSeeds(state, e); if (!cvalue) { [[maybe_unused]] bool success = solver->getValue(state.constraints, e, cvalue, state.queryMetaData); assert(success && "FIXME: Unhandled solver failure"); } std::string str; llvm::raw_string_ostream os(str); os << "silently concretizing (reason: " << reason << ") expression " << e << " to value " << cvalue << " (" << (*(state.pc)).info->file << ":" << (*(state.pc)).info->line << ")"; if (ExternalCallWarnings == ExtCallWarnings::All) klee_warning("%s", os.str().c_str()); else klee_warning_once(reason, "%s", os.str().c_str()); addConstraint(state, EqExpr::create(e, cvalue)); return cvalue; } ref Executor::getValueFromSeeds(ExecutionState &state, ref e) { auto found = seedMap.find(&state); if (found == seedMap.end()) return nullptr; auto seeds = found->second; for (auto const &seed : seeds) { auto value = seed.assignment.evaluate(e); if (isa(value)) return value; } return nullptr; } void Executor::executeGetValue(ExecutionState &state, ref e, KInstruction *target) { e = ConstraintManager::simplifyExpr(state.constraints, e); std::map< ExecutionState*, std::vector >::iterator it = seedMap.find(&state); if (it==seedMap.end() || isa(e)) { ref value; e = optimizer.optimizeExpr(e, true); bool success = solver->getValue(state.constraints, e, value, state.queryMetaData); assert(success && "FIXME: Unhandled solver failure"); (void) success; bindLocal(target, state, value); } else { std::set< ref > values; for (std::vector::iterator siit = it->second.begin(), siie = it->second.end(); siit != siie; ++siit) { ref cond = siit->assignment.evaluate(e); cond = optimizer.optimizeExpr(cond, true); ref value; bool success = solver->getValue(state.constraints, cond, value, state.queryMetaData); assert(success && "FIXME: Unhandled solver failure"); (void) success; values.insert(value); } std::vector< ref > conditions; for (std::set< ref >::iterator vit = values.begin(), vie = values.end(); vit != vie; ++vit) conditions.push_back(EqExpr::create(e, *vit)); std::vector branches; branch(state, conditions, branches, BranchType::GetVal); std::vector::iterator bit = branches.begin(); for (std::set< ref >::iterator vit = values.begin(), vie = values.end(); vit != vie; ++vit) { ExecutionState *es = *bit; if (es) bindLocal(target, *es, *vit); ++bit; } } } void Executor::printDebugInstructions(ExecutionState &state) { // print nothing if option unset if (DebugPrintInstructions.getBits() == 0) return; // set output stream (stderr/file) llvm::raw_ostream *stream = nullptr; if (DebugPrintInstructions.isSet(STDERR_ALL) || DebugPrintInstructions.isSet(STDERR_SRC) || DebugPrintInstructions.isSet(STDERR_COMPACT)) stream = &llvm::errs(); else stream = &debugLogBuffer; // print: // [all] src location:asm line:state ID:instruction // [compact] asm line:state ID // [src] src location:asm line:state ID if (!DebugPrintInstructions.isSet(STDERR_COMPACT) && !DebugPrintInstructions.isSet(FILE_COMPACT)) { (*stream) << " " << state.pc->getSourceLocation() << ':'; } (*stream) << state.pc->info->assemblyLine << ':' << state.getID(); if (DebugPrintInstructions.isSet(STDERR_ALL) || DebugPrintInstructions.isSet(FILE_ALL)) (*stream) << ':' << *(state.pc->inst); (*stream) << '\n'; // flush to file if (DebugPrintInstructions.isSet(FILE_ALL) || 
DebugPrintInstructions.isSet(FILE_COMPACT) || DebugPrintInstructions.isSet(FILE_SRC)) { debugLogBuffer.flush(); (*debugInstFile) << debugLogBuffer.str(); debugBufferString = ""; } } void Executor::stepInstruction(ExecutionState &state) { printDebugInstructions(state); if (statsTracker) statsTracker->stepInstruction(state); ++stats::instructions; ++state.steppedInstructions; state.prevPC = state.pc; ++state.pc; if (stats::instructions == MaxInstructions) haltExecution = true; } static inline const llvm::fltSemantics *fpWidthToSemantics(unsigned width) { switch (width) { case Expr::Int32: return &llvm::APFloat::IEEEsingle(); case Expr::Int64: return &llvm::APFloat::IEEEdouble(); case Expr::Fl80: return &llvm::APFloat::x87DoubleExtended(); default: return 0; } } MemoryObject *Executor::serializeLandingpad(ExecutionState &state, const llvm::LandingPadInst &lpi, bool &stateTerminated) { stateTerminated = false; std::vector serialized; for (unsigned current_clause_id = 0; current_clause_id < lpi.getNumClauses(); ++current_clause_id) { if (lpi.isCatch(current_clause_id)) { // catch-clause serialized.push_back(0); std::uint64_t ti_addr = 0; llvm::Constant *catchClause = lpi.getClause(current_clause_id); llvm::Constant *typeInfo = catchClause->stripPointerCasts(); if (auto *gv = dyn_cast(typeInfo)) { ti_addr = globalAddresses[gv]->getZExtValue(); } else if (auto *clause_bitcast = dyn_cast(catchClause)) { auto *clause_type = dyn_cast(clause_bitcast->getOperand(0)); ti_addr = globalAddresses[clause_type]->getZExtValue(); } else if (catchClause->isNullValue()) { ti_addr = 0; } else { terminateStateOnExecError( state, "Internal: Clause is not a bitcast or null (catch-all)"); stateTerminated = true; return nullptr; } const std::size_t old_size = serialized.size(); serialized.resize(old_size + 8); memcpy(serialized.data() + old_size, &ti_addr, sizeof(ti_addr)); } else if (lpi.isFilter(current_clause_id)) { llvm::Constant *filter_clause = lpi.getClause(current_clause_id); if (filter_clause->isNullValue()) { // special handling for a catch-all filter clause, i.e., "[0 x i8*]" // for this case we serialize 1 element.. serialized.push_back(1); // which is a 64bit-wide 0. 
serialized.resize(serialized.size() + 8, 0); } else { const auto *ca = cast(filter_clause); // serialize `num_elements+1` as unsigned char unsigned const num_elements = ca->getNumOperands(); unsigned char serialized_num_elements = 0; if (num_elements >= std::numeric_limits::max()) { terminateStateOnExecError( state, "Internal: too many elements in landingpad filter"); stateTerminated = true; return nullptr; } serialized_num_elements = num_elements; serialized.push_back(serialized_num_elements + 1); // serialize the exception-types occurring in this filter-clause for (llvm::Value const *v : ca->operands()) { llvm::GlobalValue const *clause_value = nullptr; if (auto const *bitcast = dyn_cast(v)) { clause_value = dyn_cast(bitcast->getOperand(0)); } if (auto const *gv = dyn_cast(v)) { clause_value = gv; } if (!clause_value) { terminateStateOnExecError(state, "Internal: expected value inside a " "filter-clause bitcast to be a GlobalValue"); stateTerminated = true; return nullptr; } std::uint64_t const ti_addr = globalAddresses[clause_value]->getZExtValue(); const std::size_t old_size = serialized.size(); serialized.resize(old_size + 8); memcpy(serialized.data() + old_size, &ti_addr, sizeof(ti_addr)); } } } } MemoryObject *mo = memory->allocate(serialized.size(), true, false, &state, nullptr, 1); ObjectState *os = bindObjectInState(state, mo, false); for (unsigned i = 0; i < serialized.size(); i++) { os->write8(i, serialized[i]); } return mo; } void Executor::unwindToNextLandingpad(ExecutionState &state) { UnwindingInformation *ui = state.unwindingInformation.get(); assert(ui && "unwinding without unwinding information"); std::size_t startIndex; std::size_t lowestStackIndex; bool popFrames; if (auto *sui = dyn_cast(ui)) { startIndex = sui->unwindingProgress; lowestStackIndex = 0; popFrames = false; } else if (auto *cui = dyn_cast(ui)) { startIndex = state.stack.size() - 1; lowestStackIndex = cui->catchingStackIndex; popFrames = true; } else { assert(false && "invalid UnwindingInformation subclass"); } for (std::size_t i = startIndex; i > lowestStackIndex; i--) { auto const &sf = state.stack.at(i); Instruction *inst = sf.caller ? sf.caller->inst : nullptr; if (popFrames) { state.popFrame(); if (statsTracker != nullptr) { statsTracker->framePopped(state); } } if (InvokeInst *invoke = dyn_cast(inst)) { // we found the next invoke instruction in the call stack, handle it // depending on the current phase. 
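      // Unwinding proceeds in two phases, mirroring the Itanium C++ ABI: in
      // the search phase the landingpad's clauses are serialized and the
      // personality function is invoked to decide whether this landingpad
      // handles the exception; in the cleanup phase frames are actually
      // popped and control is transferred to the landingpad.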
if (auto *sui = dyn_cast(ui)) { // in the search phase, run personality function to check if this // landingpad catches the exception LandingPadInst *lpi = invoke->getUnwindDest()->getLandingPadInst(); assert(lpi && "unwind target of an invoke instruction did not lead to " "a landingpad"); // check if this is a pure cleanup landingpad first if (lpi->isCleanup() && lpi->getNumClauses() == 0) { // pure cleanup lpi, this can't be a handler, so skip it continue; } bool stateTerminated = false; MemoryObject *clauses_mo = serializeLandingpad(state, *lpi, stateTerminated); assert((stateTerminated != bool(clauses_mo)) && "illegal serializeLandingpad result"); if (stateTerminated) { return; } assert(sui->serializedLandingpad == nullptr && "serializedLandingpad should be reset"); sui->serializedLandingpad = clauses_mo; llvm::Function *personality_fn = kmodule->module->getFunction("_klee_eh_cxx_personality"); KFunction *kf = kmodule->functionMap[personality_fn]; state.pushFrame(state.prevPC, kf); state.pc = kf->instructions; bindArgument(kf, 0, state, sui->exceptionObject); bindArgument(kf, 1, state, clauses_mo->getSizeExpr()); bindArgument(kf, 2, state, clauses_mo->getBaseExpr()); if (statsTracker) { statsTracker->framePushed(state, &state.stack[state.stack.size() - 2]); } // make sure we remember our search progress afterwards sui->unwindingProgress = i - 1; } else { // in the cleanup phase, redirect control flow transferToBasicBlock(invoke->getUnwindDest(), invoke->getParent(), state); } // we are done, stop search/unwinding here return; } } // no further invoke instruction/landingpad found if (isa(ui)) { // in phase 1, simply stop unwinding. this will return // control flow back to _Unwind_RaiseException, which will // return the correct error. // clean up unwinding state state.unwindingInformation.reset(); } else { // in phase 2, this represent a situation that should // not happen, as we only progressed to phase 2 because // we found a handler in phase 1. // therefore terminate the state. terminateStateOnExecError(state, "Missing landingpad in phase 2 of unwinding"); } } ref Executor::getEhTypeidFor(ref type_info) { // FIXME: Handling getEhTypeidFor is non-deterministic and depends on the // order states have been processed and executed. 
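  // A type_info pointer is appended to eh_typeids the first time it is seen,
  // and its 1-based position is returned; 0 is reserved for "no landingpad
  // clause matched". So the first type queried gets id 1, the next distinct
  // type id 2, and repeated queries for the same type return the same id.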
auto eh_type_iterator = std::find(std::begin(eh_typeids), std::end(eh_typeids), type_info); if (eh_type_iterator == std::end(eh_typeids)) { eh_typeids.push_back(type_info); eh_type_iterator = std::prev(std::end(eh_typeids)); } // +1 because typeids must always be positive, so they can be distinguished // from 'no landingpad clause matched' which has value 0 auto res = ConstantExpr::create(eh_type_iterator - std::begin(eh_typeids) + 1, Expr::Int32); return res; } void Executor::executeCall(ExecutionState &state, KInstruction *ki, Function *f, std::vector> &arguments) { Instruction *i = ki->inst; if (isa_and_nonnull(i)) return; if (f && f->isDeclaration()) { switch (f->getIntrinsicID()) { case Intrinsic::not_intrinsic: { // state may be destroyed by this call, cannot touch callExternalFunction(state, ki, kmodule->functionMap[f], arguments); break; } case Intrinsic::fabs: { ref arg = toConstant(state, arguments[0], "floating point"); if (!fpWidthToSemantics(arg->getWidth())) return terminateStateOnExecError( state, "Unsupported intrinsic llvm.fabs call"); llvm::APFloat Res(*fpWidthToSemantics(arg->getWidth()), arg->getAPValue()); Res = llvm::abs(Res); bindLocal(ki, state, ConstantExpr::alloc(Res.bitcastToAPInt())); break; } case Intrinsic::fma: case Intrinsic::fmuladd: { // Both fma and fmuladd support float, double and fp80. Note, that fp80 // is not mentioned in the documentation of fmuladd, nevertheless, it is // still supported. For details see // https://github.com/klee/klee/pull/1507/files#r894993332 if (isa(i->getOperand(0)->getType())) return terminateStateOnExecError( state, f->getName() + " with vectors is not supported"); ref op1 = toConstant(state, eval(ki, 1, state).value, "floating point"); ref op2 = toConstant(state, eval(ki, 2, state).value, "floating point"); ref op3 = toConstant(state, eval(ki, 3, state).value, "floating point"); if (!fpWidthToSemantics(op1->getWidth()) || !fpWidthToSemantics(op2->getWidth()) || !fpWidthToSemantics(op3->getWidth())) return terminateStateOnExecError( state, "Unsupported " + f->getName() + " call"); // (op1 * op2) + op3 APFloat Res(*fpWidthToSemantics(op1->getWidth()), op1->getAPValue()); Res.fusedMultiplyAdd( APFloat(*fpWidthToSemantics(op2->getWidth()), op2->getAPValue()), APFloat(*fpWidthToSemantics(op3->getWidth()), op3->getAPValue()), APFloat::rmNearestTiesToEven); bindLocal(ki, state, ConstantExpr::alloc(Res.bitcastToAPInt())); break; } #if LLVM_VERSION_CODE >= LLVM_VERSION(12, 0) case Intrinsic::abs: { if (isa(i->getOperand(0)->getType())) return terminateStateOnExecError( state, "llvm.abs with vectors is not supported"); ref op = eval(ki, 1, state).value; ref poison = eval(ki, 2, state).value; assert(poison->getWidth() == 1 && "Second argument is not an i1"); unsigned bw = op->getWidth(); uint64_t moneVal = APInt(bw, -1, true).getZExtValue(); uint64_t sminVal = APInt::getSignedMinValue(bw).getZExtValue(); ref zero = ConstantExpr::create(0, bw); ref mone = ConstantExpr::create(moneVal, bw); ref smin = ConstantExpr::create(sminVal, bw); if (poison->isTrue()) { ref issmin = EqExpr::create(op, smin); if (issmin->isTrue()) return terminateStateOnExecError( state, "llvm.abs called with poison and INT_MIN"); } // conditions to flip the sign: INT_MIN < op < 0 ref negative = SltExpr::create(op, zero); ref notsmin = NeExpr::create(op, smin); ref cond = AndExpr::create(negative, notsmin); // flip and select the result ref flip = MulExpr::create(op, mone); ref result = SelectExpr::create(cond, flip, op); bindLocal(ki, state, result); break; } case 
Intrinsic::smax: case Intrinsic::smin: case Intrinsic::umax: case Intrinsic::umin: { if (isa(i->getOperand(0)->getType()) || isa(i->getOperand(1)->getType())) return terminateStateOnExecError( state, "llvm.{s,u}{max,min} with vectors is not supported"); ref op1 = eval(ki, 1, state).value; ref op2 = eval(ki, 2, state).value; ref cond = nullptr; if (f->getIntrinsicID() == Intrinsic::smax) cond = SgtExpr::create(op1, op2); else if (f->getIntrinsicID() == Intrinsic::smin) cond = SltExpr::create(op1, op2); else if (f->getIntrinsicID() == Intrinsic::umax) cond = UgtExpr::create(op1, op2); else // (f->getIntrinsicID() == Intrinsic::umin) cond = UltExpr::create(op1, op2); ref result = SelectExpr::create(cond, op1, op2); bindLocal(ki, state, result); break; } #endif case Intrinsic::fshr: case Intrinsic::fshl: { ref op1 = eval(ki, 1, state).value; ref op2 = eval(ki, 2, state).value; ref op3 = eval(ki, 3, state).value; unsigned w = op1->getWidth(); assert(w == op2->getWidth() && "type mismatch"); assert(w == op3->getWidth() && "type mismatch"); ref c = ConcatExpr::create(op1, op2); // op3 = zeroExtend(op3 % w) op3 = URemExpr::create(op3, ConstantExpr::create(w, w)); op3 = ZExtExpr::create(op3, w+w); if (f->getIntrinsicID() == Intrinsic::fshl) { // shift left and take top half ref s = ShlExpr::create(c, op3); bindLocal(ki, state, ExtractExpr::create(s, w, w)); } else { // shift right and take bottom half // note that LShr and AShr will have same behaviour ref s = LShrExpr::create(c, op3); bindLocal(ki, state, ExtractExpr::create(s, 0, w)); } break; } // va_arg is handled by caller and intrinsic lowering, see comment for // ExecutionState::varargs case Intrinsic::vastart: { StackFrame &sf = state.stack.back(); // varargs can be zero if no varargs were provided if (!sf.varargs) return; // FIXME: This is really specific to the architecture, not the pointer // size. This happens to work for x86-32 and x86-64, however. Expr::Width WordSize = Context::get().getPointerWidth(); if (WordSize == Expr::Int32) { executeMemoryOperation(state, true, arguments[0], sf.varargs->getBaseExpr(), 0); } else { assert(WordSize == Expr::Int64 && "Unknown word size!"); // x86-64 has quite complicated calling convention. However, // instead of implementing it, we can do a simple hack: just // make a function believe that all varargs are on stack. executeMemoryOperation( state, true, arguments[0], ConstantExpr::create(48, 32), 0); // gp_offset executeMemoryOperation( state, true, AddExpr::create(arguments[0], ConstantExpr::create(4, 64)), ConstantExpr::create(304, 32), 0); // fp_offset executeMemoryOperation( state, true, AddExpr::create(arguments[0], ConstantExpr::create(8, 64)), sf.varargs->getBaseExpr(), 0); // overflow_arg_area executeMemoryOperation( state, true, AddExpr::create(arguments[0], ConstantExpr::create(16, 64)), ConstantExpr::create(0, 64), 0); // reg_save_area } break; } #ifdef SUPPORT_KLEE_EH_CXX case Intrinsic::eh_typeid_for: { bindLocal(ki, state, getEhTypeidFor(arguments.at(0))); break; } #endif case Intrinsic::vaend: // va_end is a noop for the interpreter. // // FIXME: We should validate that the target didn't do something bad // with va_end, however (like call it twice). break; case Intrinsic::vacopy: // va_copy should have been lowered. // // FIXME: It would be nice to check for errors in the usage of this as // well. 
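    // Note that there is no break above: an llvm.va_copy that reaches the
    // interpreter (i.e. was not removed by intrinsic lowering) falls through
    // to the unimplemented-intrinsic handling below.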
default: klee_warning("unimplemented intrinsic: %s", f->getName().data()); terminateStateOnExecError(state, "unimplemented intrinsic"); return; } if (InvokeInst *ii = dyn_cast(i)) { transferToBasicBlock(ii->getNormalDest(), i->getParent(), state); } } else { // Check if maximum stack size was reached. // We currently only count the number of stack frames if (RuntimeMaxStackFrames && state.stack.size() > RuntimeMaxStackFrames) { terminateStateEarly(state, "Maximum stack size reached.", StateTerminationType::OutOfStackMemory); klee_warning("Maximum stack size reached."); return; } // FIXME: I'm not really happy about this reliance on prevPC but it is ok, I // guess. This just done to avoid having to pass KInstIterator everywhere // instead of the actual instruction, since we can't make a KInstIterator // from just an instruction (unlike LLVM). KFunction *kf = kmodule->functionMap[f]; state.pushFrame(state.prevPC, kf); state.pc = kf->instructions; if (statsTracker) statsTracker->framePushed(state, &state.stack[state.stack.size() - 2]); // TODO: support zeroext, signext, sret attributes unsigned callingArgs = arguments.size(); unsigned funcArgs = f->arg_size(); if (!f->isVarArg()) { if (callingArgs > funcArgs) { klee_warning_once(f, "calling %s with extra arguments.", f->getName().data()); } else if (callingArgs < funcArgs) { terminateStateOnUserError(state, "calling function with too few arguments"); return; } } else { if (callingArgs < funcArgs) { terminateStateOnUserError(state, "calling function with too few arguments"); return; } // Only x86-32 and x86-64 are supported Expr::Width WordSize = Context::get().getPointerWidth(); assert(((WordSize == Expr::Int32) || (WordSize == Expr::Int64)) && "Unknown word size!"); uint64_t size = 0; // total size of variadic arguments bool requires16ByteAlignment = false; uint64_t offsets[callingArgs]; // offsets of variadic arguments uint64_t argWidth; // width of current variadic argument const CallBase &cb = cast(*i); for (unsigned k = funcArgs; k < callingArgs; k++) { if (cb.isByValArgument(k)) { Type *t = cb.getParamByValType(k); argWidth = kmodule->targetData->getTypeSizeInBits(t); } else { argWidth = arguments[k]->getWidth(); } #if LLVM_VERSION_CODE >= LLVM_VERSION(11, 0) MaybeAlign ma = cb.getParamAlign(k); unsigned alignment = ma ? ma->value() : 0; #else unsigned alignment = cb.getParamAlignment(k); #endif if (WordSize == Expr::Int32 && !alignment) alignment = 4; else { // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a // 16 byte boundary if alignment needed by type exceeds 8 byte // boundary. if (!alignment && argWidth > Expr::Int64) { alignment = 16; requires16ByteAlignment = true; } if (!alignment) alignment = 8; } size = llvm::alignTo(size, alignment); offsets[k] = size; if (WordSize == Expr::Int32) size += Expr::getMinBytesForWidth(argWidth); else { // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: // l->overflow_arg_area + sizeof(type) // Step 10. Align l->overflow_arg_area upwards to an 8 byte boundary. size += llvm::alignTo(argWidth, WordSize) / 8; } } StackFrame &sf = state.stack.back(); MemoryObject *mo = sf.varargs = memory->allocate(size, true, false, &state, state.prevPC->inst, (requires16ByteAlignment ? 16 : 8)); if (!mo && size) { terminateStateOnExecError(state, "out of memory (varargs)"); return; } if (mo) { if ((WordSize == Expr::Int64) && (mo->address & 15) && requires16ByteAlignment) { // Both 64bit Linux/Glibc and 64bit MacOSX should align to 16 bytes. 
klee_warning_once(
              0, "While allocating varargs: malloc did not align to 16 bytes.");
        }
        ObjectState *os = bindObjectInState(state, mo, true);

        for (unsigned k = funcArgs; k < callingArgs; k++) {
          if (!cb.isByValArgument(k)) {
            os->write(offsets[k], arguments[k]);
          } else {
            ConstantExpr *CE = dyn_cast<ConstantExpr>(arguments[k]);
            assert(CE); // byval argument needs to be a concrete pointer

            ObjectPair op;
            state.addressSpace.resolveOne(CE, op);
            const ObjectState *osarg = op.second;
            assert(osarg);
            for (unsigned i = 0; i < osarg->size; i++)
              os->write(offsets[k] + i, osarg->read8(i));
          }
        }
      }
    }

    unsigned numFormals = f->arg_size();
    for (unsigned k = 0; k < numFormals; k++)
      bindArgument(kf, k, state, arguments[k]);
  }
}

void Executor::transferToBasicBlock(BasicBlock *dst, BasicBlock *src,
                                    ExecutionState &state) {
  // Note that in general phi nodes can reuse phi values from the same
  // block but the incoming value is the eval() result *before* the
  // execution of any phi nodes. this is pathological and doesn't
  // really seem to occur, but just in case we run the PhiCleanerPass
  // which makes sure this cannot happen and so it is safe to just
  // eval things in order. The PhiCleanerPass also makes sure that all
  // incoming blocks have the same order for each PHINode so we only
  // have to compute the index once.
  //
  // With that done we simply set an index in the state so that PHI
  // instructions know which argument to eval, set the pc, and continue.

  // XXX this lookup has to go ?
  KFunction *kf = state.stack.back().kf;
  unsigned entry = kf->basicBlockEntry[dst];
  state.pc = &kf->instructions[entry];
  if (state.pc->inst->getOpcode() == Instruction::PHI) {
    PHINode *first = static_cast<PHINode *>(state.pc->inst);
    state.incomingBBIndex = first->getBasicBlockIndex(src);
  }
}

/// Compute the true target of a function call, resolving LLVM aliases
/// and bitcasts.
Function *Executor::getTargetFunction(Value *calledVal) {
  SmallPtrSet<const GlobalValue *, 3> Visited;

  Constant *c = dyn_cast<Constant>(calledVal);
  if (!c)
    return 0;

  while (true) {
    if (GlobalValue *gv = dyn_cast<GlobalValue>(c)) {
      if (!Visited.insert(gv).second)
        return 0;

      if (Function *f = dyn_cast<Function>(gv))
        return f;
      else if (GlobalAlias *ga = dyn_cast<GlobalAlias>(gv))
        c = ga->getAliasee();
      else
        return 0;
    } else if (llvm::ConstantExpr *ce = dyn_cast<llvm::ConstantExpr>(c)) {
      if (ce->getOpcode() == Instruction::BitCast)
        c = ce->getOperand(0);
      else
        return 0;
    } else
      return 0;
  }
}

void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) {
  Instruction *i = ki->inst;
  switch (i->getOpcode()) {
    // Control flow
  case Instruction::Ret: {
    ReturnInst *ri = cast<ReturnInst>(i);
    KInstIterator kcaller = state.stack.back().caller;
    Instruction *caller = kcaller ?
kcaller->inst : nullptr; bool isVoidReturn = (ri->getNumOperands() == 0); ref result = ConstantExpr::alloc(0, Expr::Bool); if (!isVoidReturn) { result = eval(ki, 0, state).value; } if (state.stack.size() <= 1) { assert(!caller && "caller set on initial stack frame"); terminateStateOnExit(state); } else { state.popFrame(); if (statsTracker) statsTracker->framePopped(state); if (InvokeInst *ii = dyn_cast(caller)) { transferToBasicBlock(ii->getNormalDest(), caller->getParent(), state); } else { state.pc = kcaller; ++state.pc; } #ifdef SUPPORT_KLEE_EH_CXX if (ri->getFunction()->getName() == "_klee_eh_cxx_personality") { assert(dyn_cast(result) && "result from personality fn must be a concrete value"); auto *sui = dyn_cast_or_null( state.unwindingInformation.get()); assert(sui && "return from personality function outside of " "search phase unwinding"); // unbind the MO we used to pass the serialized landingpad state.addressSpace.unbindObject(sui->serializedLandingpad); sui->serializedLandingpad = nullptr; if (result->isZero()) { // this lpi doesn't handle the exception, continue the search unwindToNextLandingpad(state); } else { // a clause (or a catch-all clause or filter clause) matches: // remember the stack index and switch to clean-up phase state.unwindingInformation = std::make_unique( sui->exceptionObject, cast(result), sui->unwindingProgress); // this pointer is now invalidated sui = nullptr; // continue the unwinding process (which will now start with the // cleanup phase) unwindToNextLandingpad(state); } // never return normally from the personality fn break; } #endif // SUPPORT_KLEE_EH_CXX if (!isVoidReturn) { Type *t = caller->getType(); if (t != Type::getVoidTy(i->getContext())) { // may need to do coercion due to bitcasts Expr::Width from = result->getWidth(); Expr::Width to = getWidthForLLVMType(t); if (from != to) { const CallBase &cb = cast(*caller); // XXX need to check other param attrs ? bool isSExt = cb.hasRetAttr(llvm::Attribute::SExt); if (isSExt) { result = SExtExpr::create(result, to); } else { result = ZExtExpr::create(result, to); } } bindLocal(kcaller, state, result); } } else { // We check that the return value has no users instead of // checking the type, since C defaults to returning int for // undeclared functions. if (!caller->use_empty()) { terminateStateOnExecError(state, "return void when caller expected a result"); } } } break; } case Instruction::Br: { BranchInst *bi = cast(i); if (bi->isUnconditional()) { transferToBasicBlock(bi->getSuccessor(0), bi->getParent(), state); } else { // FIXME: Find a way that we don't have this hidden dependency. assert(bi->getCondition() == bi->getOperand(0) && "Wrong operand index!"); ref cond = eval(ki, 0, state).value; cond = optimizer.optimizeExpr(cond, false); Executor::StatePair branches = fork(state, cond, false, BranchType::Conditional); // NOTE: There is a hidden dependency here, markBranchVisited // requires that we still be in the context of the branch // instruction (it reuses its statistic id). Should be cleaned // up with convenient instruction specific data. 
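      // Either side of the fork may be null when the solver proves the branch
      // condition (in)feasible, hence the null checks before the transfers
      // below.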
if (statsTracker && state.stack.back().kf->trackCoverage)
        statsTracker->markBranchVisited(branches.first, branches.second);

      if (branches.first)
        transferToBasicBlock(bi->getSuccessor(0), bi->getParent(),
                             *branches.first);
      if (branches.second)
        transferToBasicBlock(bi->getSuccessor(1), bi->getParent(),
                             *branches.second);
    }
    break;
  }
  case Instruction::IndirectBr: {
    // implements indirect branch to a label within the current function
    const auto bi = cast<IndirectBrInst>(i);
    auto address = eval(ki, 0, state).value;
    address = toUnique(state, address);

    // concrete address
    if (const auto CE = dyn_cast<ConstantExpr>(address.get())) {
      const auto bb_address =
          (BasicBlock *)CE->getZExtValue(Context::get().getPointerWidth());
      transferToBasicBlock(bb_address, bi->getParent(), state);
      break;
    }

    // symbolic address
    const auto numDestinations = bi->getNumDestinations();
    std::vector<BasicBlock *> targets;
    targets.reserve(numDestinations);
    std::vector<ref<Expr>> expressions;
    expressions.reserve(numDestinations);

    ref<Expr> errorCase = ConstantExpr::alloc(1, Expr::Bool);
    SmallPtrSet<BasicBlock *, 5> destinations;
    // collect and check destinations from label list
    for (unsigned k = 0; k < numDestinations; ++k) {
      // filter duplicates
      const auto d = bi->getDestination(k);
      if (destinations.count(d))
        continue;
      destinations.insert(d);

      // create address expression
      const auto PE = Expr::createPointer(reinterpret_cast<std::uint64_t>(d));
      ref<Expr> e = EqExpr::create(address, PE);

      // exclude address from errorCase
      errorCase = AndExpr::create(errorCase, Expr::createIsZero(e));

      // check feasibility
      bool result;
      bool success __attribute__((unused)) =
          solver->mayBeTrue(state.constraints, e, result, state.queryMetaData);
      assert(success && "FIXME: Unhandled solver failure");
      if (result) {
        targets.push_back(d);
        expressions.push_back(e);
      }
    }
    // check errorCase feasibility
    bool result;
    bool success __attribute__((unused)) = solver->mayBeTrue(
        state.constraints, errorCase, result, state.queryMetaData);
    assert(success && "FIXME: Unhandled solver failure");
    if (result) {
      expressions.push_back(errorCase);
    }

    // fork states
    std::vector<ExecutionState *> branches;
    branch(state, expressions, branches, BranchType::Indirect);

    // terminate error state
    if (result) {
      terminateStateOnExecError(*branches.back(),
                                "indirectbr: illegal label address");
      branches.pop_back();
    }

    // branch states to resp. target blocks
    assert(targets.size() == branches.size());
    for (std::vector<ExecutionState *>::size_type k = 0; k < branches.size();
         ++k) {
      if (branches[k]) {
        transferToBasicBlock(targets[k], bi->getParent(), *branches[k]);
      }
    }

    break;
  }
  case Instruction::Switch: {
    SwitchInst *si = cast<SwitchInst>(i);
    ref<Expr> cond = eval(ki, 0, state).value;
    BasicBlock *bb = si->getParent();

    cond = toUnique(state, cond);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(cond)) {
      // Somewhat gross to create these all the time, but fine till we
      // switch to an internal rep.
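      // (findCaseValue falls back to the default case when no case value
      // matches, so a concrete condition always resolves to exactly one
      // successor here.)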
llvm::IntegerType *Ty = cast(si->getCondition()->getType()); ConstantInt *ci = ConstantInt::get(Ty, CE->getZExtValue()); unsigned index = si->findCaseValue(ci)->getSuccessorIndex(); transferToBasicBlock(si->getSuccessor(index), si->getParent(), state); } else { // Handle possible different branch targets // We have the following assumptions: // - each case value is mutual exclusive to all other values // - order of case branches is based on the order of the expressions of // the case values, still default is handled last std::vector bbOrder; std::map > branchTargets; std::map, BasicBlock *> expressionOrder; // Iterate through all non-default cases and order them by expressions for (auto i : si->cases()) { ref value = evalConstant(i.getCaseValue()); BasicBlock *caseSuccessor = i.getCaseSuccessor(); expressionOrder.insert(std::make_pair(value, caseSuccessor)); } // Track default branch values ref defaultValue = ConstantExpr::alloc(1, Expr::Bool); // iterate through all non-default cases but in order of the expressions for (std::map, BasicBlock *>::iterator it = expressionOrder.begin(), itE = expressionOrder.end(); it != itE; ++it) { ref match = EqExpr::create(cond, it->first); // skip if case has same successor basic block as default case // (should work even with phi nodes as a switch is a single terminating instruction) if (it->second == si->getDefaultDest()) continue; // Make sure that the default value does not contain this target's value defaultValue = AndExpr::create(defaultValue, Expr::createIsZero(match)); // Check if control flow could take this case bool result; match = optimizer.optimizeExpr(match, false); bool success = solver->mayBeTrue(state.constraints, match, result, state.queryMetaData); assert(success && "FIXME: Unhandled solver failure"); (void) success; if (result) { BasicBlock *caseSuccessor = it->second; // Handle the case that a basic block might be the target of multiple // switch cases. // Currently we generate an expression containing all switch-case // values for the same target basic block. 
We spare us forking too // many times but we generate more complex condition expressions // TODO Add option to allow to choose between those behaviors std::pair >::iterator, bool> res = branchTargets.insert(std::make_pair( caseSuccessor, ConstantExpr::alloc(0, Expr::Bool))); res.first->second = OrExpr::create(match, res.first->second); // Only add basic blocks which have not been target of a branch yet if (res.second) { bbOrder.push_back(caseSuccessor); } } } // Check if control could take the default case defaultValue = optimizer.optimizeExpr(defaultValue, false); bool res; bool success = solver->mayBeTrue(state.constraints, defaultValue, res, state.queryMetaData); assert(success && "FIXME: Unhandled solver failure"); (void) success; if (res) { std::pair >::iterator, bool> ret = branchTargets.insert( std::make_pair(si->getDefaultDest(), defaultValue)); if (ret.second) { bbOrder.push_back(si->getDefaultDest()); } } // Fork the current state with each state having one of the possible // successors of this switch std::vector< ref > conditions; for (std::vector::iterator it = bbOrder.begin(), ie = bbOrder.end(); it != ie; ++it) { conditions.push_back(branchTargets[*it]); } std::vector branches; branch(state, conditions, branches, BranchType::Switch); std::vector::iterator bit = branches.begin(); for (std::vector::iterator it = bbOrder.begin(), ie = bbOrder.end(); it != ie; ++it) { ExecutionState *es = *bit; if (es) transferToBasicBlock(*it, bb, *es); ++bit; } } break; } case Instruction::Unreachable: // Note that this is not necessarily an internal bug, llvm will // generate unreachable instructions in cases where it knows the // program will crash. So it is effectively a SEGV or internal // error. terminateStateOnExecError(state, "reached \"unreachable\" instruction"); break; case Instruction::Invoke: case Instruction::Call: { // Ignore debug intrinsic calls if (isa(i)) break; const CallBase &cb = cast(*i); Value *fp = cb.getCalledOperand(); unsigned numArgs = cb.arg_size(); Function *f = getTargetFunction(fp); // evaluate arguments std::vector< ref > arguments; arguments.reserve(numArgs); for (unsigned j=0; j(fp)) { //TODO: move to `executeCall` if (ExternalCalls != ExternalCallPolicy::None) { KInlineAsm callable(asmValue); callExternalFunction(state, ki, &callable, arguments); } else { terminateStateOnExecError(state, "external calls disallowed (in particular inline asm)"); } break; } if (f) { const FunctionType *fType = f->getFunctionType(); #if LLVM_VERSION_MAJOR >= 15 const FunctionType *fpType = cb.getFunctionType(); #else const FunctionType *fpType = dyn_cast(fp->getType()->getPointerElementType()); #endif // special case the call with a bitcast case if (fType != fpType) { assert(fType && fpType && "unable to get function type"); // XXX check result coercion // XXX this really needs thought and validation unsigned i=0; for (std::vector< ref >::iterator ai = arguments.begin(), ie = arguments.end(); ai != ie; ++ai) { Expr::Width to, from = (*ai)->getWidth(); if (igetNumParams()) { to = getWidthForLLVMType(fType->getParamType(i)); if (from != to) { // XXX need to check other param attrs ? 
bool isSExt = cb.paramHasAttr(i, llvm::Attribute::SExt); if (isSExt) { arguments[i] = SExtExpr::create(arguments[i], to); } else { arguments[i] = ZExtExpr::create(arguments[i], to); } } } i++; } } executeCall(state, ki, f, arguments); } else { ref v = eval(ki, 0, state).value; ExecutionState *free = &state; bool hasInvalid = false, first = true; /* XXX This is wasteful, no need to do a full evaluate since we have already got a value. But in the end the caches should handle it for us, albeit with some overhead. */ do { v = optimizer.optimizeExpr(v, true); ref value; bool success = solver->getValue(free->constraints, v, value, free->queryMetaData); assert(success && "FIXME: Unhandled solver failure"); (void) success; StatePair res = fork(*free, EqExpr::create(v, value), true, BranchType::Call); if (res.first) { uint64_t addr = value->getZExtValue(); auto it = legalFunctions.find(addr); if (it != legalFunctions.end()) { f = it->second; // Don't give warning on unique resolution if (res.second || !first) klee_warning_once(reinterpret_cast(addr), "resolved symbolic function pointer to: %s", f->getName().data()); executeCall(*res.first, ki, f, arguments); } else { if (!hasInvalid) { terminateStateOnExecError(state, "invalid function pointer"); hasInvalid = true; } } } first = false; free = res.second; } while (free); } break; } case Instruction::PHI: { ref result = eval(ki, state.incomingBBIndex, state).value; bindLocal(ki, state, result); break; } // Special instructions case Instruction::Select: { // NOTE: It is not required that operands 1 and 2 be of scalar type. ref cond = eval(ki, 0, state).value; ref tExpr = eval(ki, 1, state).value; ref fExpr = eval(ki, 2, state).value; ref result = SelectExpr::create(cond, tExpr, fExpr); bindLocal(ki, state, result); break; } case Instruction::VAArg: terminateStateOnExecError(state, "unexpected VAArg instruction"); break; // Arithmetic / logical case Instruction::Add: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; bindLocal(ki, state, AddExpr::create(left, right)); break; } case Instruction::Sub: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; bindLocal(ki, state, SubExpr::create(left, right)); break; } case Instruction::Mul: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; bindLocal(ki, state, MulExpr::create(left, right)); break; } case Instruction::UDiv: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = UDivExpr::create(left, right); bindLocal(ki, state, result); break; } case Instruction::SDiv: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = SDivExpr::create(left, right); bindLocal(ki, state, result); break; } case Instruction::URem: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = URemExpr::create(left, right); bindLocal(ki, state, result); break; } case Instruction::SRem: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = SRemExpr::create(left, right); bindLocal(ki, state, result); break; } case Instruction::And: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = AndExpr::create(left, right); bindLocal(ki, state, result); break; } case Instruction::Or: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = OrExpr::create(left, right); bindLocal(ki, state, result); break; } case Instruction::Xor: { ref left = eval(ki, 0, 
state).value; ref right = eval(ki, 1, state).value; ref result = XorExpr::create(left, right); bindLocal(ki, state, result); break; } case Instruction::Shl: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = ShlExpr::create(left, right); bindLocal(ki, state, result); break; } case Instruction::LShr: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = LShrExpr::create(left, right); bindLocal(ki, state, result); break; } case Instruction::AShr: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = AShrExpr::create(left, right); bindLocal(ki, state, result); break; } // Compare case Instruction::ICmp: { CmpInst *ci = cast(i); ICmpInst *ii = cast(ci); switch(ii->getPredicate()) { case ICmpInst::ICMP_EQ: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = EqExpr::create(left, right); bindLocal(ki, state, result); break; } case ICmpInst::ICMP_NE: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = NeExpr::create(left, right); bindLocal(ki, state, result); break; } case ICmpInst::ICMP_UGT: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = UgtExpr::create(left, right); bindLocal(ki, state,result); break; } case ICmpInst::ICMP_UGE: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = UgeExpr::create(left, right); bindLocal(ki, state, result); break; } case ICmpInst::ICMP_ULT: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = UltExpr::create(left, right); bindLocal(ki, state, result); break; } case ICmpInst::ICMP_ULE: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = UleExpr::create(left, right); bindLocal(ki, state, result); break; } case ICmpInst::ICMP_SGT: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = SgtExpr::create(left, right); bindLocal(ki, state, result); break; } case ICmpInst::ICMP_SGE: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = SgeExpr::create(left, right); bindLocal(ki, state, result); break; } case ICmpInst::ICMP_SLT: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = SltExpr::create(left, right); bindLocal(ki, state, result); break; } case ICmpInst::ICMP_SLE: { ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = SleExpr::create(left, right); bindLocal(ki, state, result); break; } default: terminateStateOnExecError(state, "invalid ICmp predicate"); } break; } // Memory instructions... 
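  // Illustrative example for the Alloca case below (hypothetical IR): for
  // "alloca i32, i64 %n" the element store size is 4 bytes, so the requested
  // size becomes 4 * zext(%n, pointer width) before calling executeAlloc().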
case Instruction::Alloca: { AllocaInst *ai = cast(i); unsigned elementSize = kmodule->targetData->getTypeStoreSize(ai->getAllocatedType()); ref size = Expr::createPointer(elementSize); if (ai->isArrayAllocation()) { ref count = eval(ki, 0, state).value; count = Expr::createZExtToPointerWidth(count); size = MulExpr::create(size, count); } executeAlloc(state, size, true, ki); break; } case Instruction::Load: { ref base = eval(ki, 0, state).value; executeMemoryOperation(state, false, base, 0, ki); break; } case Instruction::Store: { ref base = eval(ki, 1, state).value; ref value = eval(ki, 0, state).value; executeMemoryOperation(state, true, base, value, 0); break; } case Instruction::GetElementPtr: { KGEPInstruction *kgepi = static_cast(ki); ref base = eval(ki, 0, state).value; ref original_base = base; for (std::vector< std::pair >::iterator it = kgepi->indices.begin(), ie = kgepi->indices.end(); it != ie; ++it) { uint64_t elementSize = it->second; ref index = eval(ki, it->first, state).value; base = AddExpr::create(base, MulExpr::create(Expr::createSExtToPointerWidth(index), Expr::createPointer(elementSize))); } if (kgepi->offset) base = AddExpr::create(base, Expr::createPointer(kgepi->offset)); if (SingleObjectResolution) { if (isa(original_base) && !isa(base)) { // the initial base address was a constant expression, the final is not: // store the mapping between constant address and the non-const // reference in the state ref c_orig_base = dyn_cast(original_base); ObjectPair op; if (state.addressSpace.resolveOne(c_orig_base, op)) { // store the address of the MemoryObject associated with this GEP // instruction state.base_mos[op.first->address].insert(base); ref r = ConstantExpr::alloc(op.first->address, Expr::Int64); state.base_addrs[base] = r; } else { // this case should not happen - we have a GEP instruction with const // base address, so we should be able to find an exact memory object // match klee_warning("Failed to find a memory object for address %" PRIx64, c_orig_base->getZExtValue()); } } else if (!isa(original_base)) { auto base_it = state.base_addrs.find(original_base); if (base_it != state.base_addrs.end()) { // we need to update the current entry with a new value uint64_t address = base_it->second->getZExtValue(); auto refs_it = state.base_mos[address].find(base_it->first); if (refs_it != state.base_mos[address].end()) { state.base_mos[address].erase(refs_it); } state.base_mos[address].insert(base); state.base_addrs[base] = base_it->second; state.base_addrs.erase(base_it->first); } } } bindLocal(ki, state, base); break; } // Conversion case Instruction::Trunc: { CastInst *ci = cast(i); ref result = ExtractExpr::create(eval(ki, 0, state).value, 0, getWidthForLLVMType(ci->getType())); bindLocal(ki, state, result); break; } case Instruction::ZExt: { CastInst *ci = cast(i); ref result = ZExtExpr::create(eval(ki, 0, state).value, getWidthForLLVMType(ci->getType())); bindLocal(ki, state, result); break; } case Instruction::SExt: { CastInst *ci = cast(i); ref result = SExtExpr::create(eval(ki, 0, state).value, getWidthForLLVMType(ci->getType())); bindLocal(ki, state, result); break; } case Instruction::IntToPtr: { CastInst *ci = cast(i); Expr::Width pType = getWidthForLLVMType(ci->getType()); ref arg = eval(ki, 0, state).value; bindLocal(ki, state, ZExtExpr::create(arg, pType)); break; } case Instruction::PtrToInt: { CastInst *ci = cast(i); Expr::Width iType = getWidthForLLVMType(ci->getType()); ref arg = eval(ki, 0, state).value; bindLocal(ki, state, ZExtExpr::create(arg, 
iType)); break; } case Instruction::BitCast: { ref result = eval(ki, 0, state).value; bindLocal(ki, state, result); break; } // Floating point instructions case Instruction::FNeg: { ref arg = toConstant(state, eval(ki, 0, state).value, "floating point"); if (!fpWidthToSemantics(arg->getWidth())) return terminateStateOnExecError(state, "Unsupported FNeg operation"); llvm::APFloat Res(*fpWidthToSemantics(arg->getWidth()), arg->getAPValue()); Res = llvm::neg(Res); bindLocal(ki, state, ConstantExpr::alloc(Res.bitcastToAPInt())); break; } case Instruction::FAdd: { ref left = toConstant(state, eval(ki, 0, state).value, "floating point"); ref right = toConstant(state, eval(ki, 1, state).value, "floating point"); if (!fpWidthToSemantics(left->getWidth()) || !fpWidthToSemantics(right->getWidth())) return terminateStateOnExecError(state, "Unsupported FAdd operation"); llvm::APFloat Res(*fpWidthToSemantics(left->getWidth()), left->getAPValue()); Res.add(APFloat(*fpWidthToSemantics(right->getWidth()),right->getAPValue()), APFloat::rmNearestTiesToEven); bindLocal(ki, state, ConstantExpr::alloc(Res.bitcastToAPInt())); break; } case Instruction::FSub: { ref left = toConstant(state, eval(ki, 0, state).value, "floating point"); ref right = toConstant(state, eval(ki, 1, state).value, "floating point"); if (!fpWidthToSemantics(left->getWidth()) || !fpWidthToSemantics(right->getWidth())) return terminateStateOnExecError(state, "Unsupported FSub operation"); llvm::APFloat Res(*fpWidthToSemantics(left->getWidth()), left->getAPValue()); Res.subtract(APFloat(*fpWidthToSemantics(right->getWidth()), right->getAPValue()), APFloat::rmNearestTiesToEven); bindLocal(ki, state, ConstantExpr::alloc(Res.bitcastToAPInt())); break; } case Instruction::FMul: { ref left = toConstant(state, eval(ki, 0, state).value, "floating point"); ref right = toConstant(state, eval(ki, 1, state).value, "floating point"); if (!fpWidthToSemantics(left->getWidth()) || !fpWidthToSemantics(right->getWidth())) return terminateStateOnExecError(state, "Unsupported FMul operation"); llvm::APFloat Res(*fpWidthToSemantics(left->getWidth()), left->getAPValue()); Res.multiply(APFloat(*fpWidthToSemantics(right->getWidth()), right->getAPValue()), APFloat::rmNearestTiesToEven); bindLocal(ki, state, ConstantExpr::alloc(Res.bitcastToAPInt())); break; } case Instruction::FDiv: { ref left = toConstant(state, eval(ki, 0, state).value, "floating point"); ref right = toConstant(state, eval(ki, 1, state).value, "floating point"); if (!fpWidthToSemantics(left->getWidth()) || !fpWidthToSemantics(right->getWidth())) return terminateStateOnExecError(state, "Unsupported FDiv operation"); llvm::APFloat Res(*fpWidthToSemantics(left->getWidth()), left->getAPValue()); Res.divide(APFloat(*fpWidthToSemantics(right->getWidth()), right->getAPValue()), APFloat::rmNearestTiesToEven); bindLocal(ki, state, ConstantExpr::alloc(Res.bitcastToAPInt())); break; } case Instruction::FRem: { ref left = toConstant(state, eval(ki, 0, state).value, "floating point"); ref right = toConstant(state, eval(ki, 1, state).value, "floating point"); if (!fpWidthToSemantics(left->getWidth()) || !fpWidthToSemantics(right->getWidth())) return terminateStateOnExecError(state, "Unsupported FRem operation"); llvm::APFloat Res(*fpWidthToSemantics(left->getWidth()), left->getAPValue()); Res.mod( APFloat(*fpWidthToSemantics(right->getWidth()), right->getAPValue())); bindLocal(ki, state, ConstantExpr::alloc(Res.bitcastToAPInt())); break; } case Instruction::FPTrunc: { FPTruncInst *fi = cast(i); Expr::Width 
resultType = getWidthForLLVMType(fi->getType()); ref arg = toConstant(state, eval(ki, 0, state).value, "floating point"); if (!fpWidthToSemantics(arg->getWidth()) || resultType > arg->getWidth()) return terminateStateOnExecError(state, "Unsupported FPTrunc operation"); llvm::APFloat Res(*fpWidthToSemantics(arg->getWidth()), arg->getAPValue()); bool losesInfo = false; Res.convert(*fpWidthToSemantics(resultType), llvm::APFloat::rmNearestTiesToEven, &losesInfo); bindLocal(ki, state, ConstantExpr::alloc(Res)); break; } case Instruction::FPExt: { FPExtInst *fi = cast(i); Expr::Width resultType = getWidthForLLVMType(fi->getType()); ref arg = toConstant(state, eval(ki, 0, state).value, "floating point"); if (!fpWidthToSemantics(arg->getWidth()) || arg->getWidth() > resultType) return terminateStateOnExecError(state, "Unsupported FPExt operation"); llvm::APFloat Res(*fpWidthToSemantics(arg->getWidth()), arg->getAPValue()); bool losesInfo = false; Res.convert(*fpWidthToSemantics(resultType), llvm::APFloat::rmNearestTiesToEven, &losesInfo); bindLocal(ki, state, ConstantExpr::alloc(Res)); break; } case Instruction::FPToUI: { FPToUIInst *fi = cast(i); Expr::Width resultType = getWidthForLLVMType(fi->getType()); ref arg = toConstant(state, eval(ki, 0, state).value, "floating point"); if (!fpWidthToSemantics(arg->getWidth()) || resultType > 64) return terminateStateOnExecError(state, "Unsupported FPToUI operation"); llvm::APFloat Arg(*fpWidthToSemantics(arg->getWidth()), arg->getAPValue()); uint64_t value = 0; bool isExact = true; #if LLVM_VERSION_CODE >= LLVM_VERSION(16, 0) auto valueRef = llvm::MutableArrayRef(value); #else auto valueRef = makeMutableArrayRef(value); #endif Arg.convertToInteger(valueRef, resultType, false, llvm::APFloat::rmTowardZero, &isExact); bindLocal(ki, state, ConstantExpr::alloc(value, resultType)); break; } case Instruction::FPToSI: { FPToSIInst *fi = cast(i); Expr::Width resultType = getWidthForLLVMType(fi->getType()); ref arg = toConstant(state, eval(ki, 0, state).value, "floating point"); if (!fpWidthToSemantics(arg->getWidth()) || resultType > 64) return terminateStateOnExecError(state, "Unsupported FPToSI operation"); llvm::APFloat Arg(*fpWidthToSemantics(arg->getWidth()), arg->getAPValue()); uint64_t value = 0; bool isExact = true; #if LLVM_VERSION_CODE >= LLVM_VERSION(16, 0) auto valueRef = llvm::MutableArrayRef(value); #else auto valueRef = makeMutableArrayRef(value); #endif Arg.convertToInteger(valueRef, resultType, true, llvm::APFloat::rmTowardZero, &isExact); bindLocal(ki, state, ConstantExpr::alloc(value, resultType)); break; } case Instruction::UIToFP: { UIToFPInst *fi = cast(i); Expr::Width resultType = getWidthForLLVMType(fi->getType()); ref arg = toConstant(state, eval(ki, 0, state).value, "floating point"); const llvm::fltSemantics *semantics = fpWidthToSemantics(resultType); if (!semantics) return terminateStateOnExecError(state, "Unsupported UIToFP operation"); llvm::APFloat f(*semantics, 0); f.convertFromAPInt(arg->getAPValue(), false, llvm::APFloat::rmNearestTiesToEven); bindLocal(ki, state, ConstantExpr::alloc(f)); break; } case Instruction::SIToFP: { SIToFPInst *fi = cast(i); Expr::Width resultType = getWidthForLLVMType(fi->getType()); ref arg = toConstant(state, eval(ki, 0, state).value, "floating point"); const llvm::fltSemantics *semantics = fpWidthToSemantics(resultType); if (!semantics) return terminateStateOnExecError(state, "Unsupported SIToFP operation"); llvm::APFloat f(*semantics, 0); f.convertFromAPInt(arg->getAPValue(), true, 
llvm::APFloat::rmNearestTiesToEven); bindLocal(ki, state, ConstantExpr::alloc(f)); break; } case Instruction::FCmp: { FCmpInst *fi = cast(i); ref left = toConstant(state, eval(ki, 0, state).value, "floating point"); ref right = toConstant(state, eval(ki, 1, state).value, "floating point"); if (!fpWidthToSemantics(left->getWidth()) || !fpWidthToSemantics(right->getWidth())) return terminateStateOnExecError(state, "Unsupported FCmp operation"); APFloat LHS(*fpWidthToSemantics(left->getWidth()),left->getAPValue()); APFloat RHS(*fpWidthToSemantics(right->getWidth()),right->getAPValue()); APFloat::cmpResult CmpRes = LHS.compare(RHS); bool Result = false; switch( fi->getPredicate() ) { // Predicates which only care about whether or not the operands are NaNs. case FCmpInst::FCMP_ORD: Result = (CmpRes != APFloat::cmpUnordered); break; case FCmpInst::FCMP_UNO: Result = (CmpRes == APFloat::cmpUnordered); break; // Ordered comparisons return false if either operand is NaN. Unordered // comparisons return true if either operand is NaN. case FCmpInst::FCMP_UEQ: Result = (CmpRes == APFloat::cmpUnordered || CmpRes == APFloat::cmpEqual); break; case FCmpInst::FCMP_OEQ: Result = (CmpRes != APFloat::cmpUnordered && CmpRes == APFloat::cmpEqual); break; case FCmpInst::FCMP_UGT: Result = (CmpRes == APFloat::cmpUnordered || CmpRes == APFloat::cmpGreaterThan); break; case FCmpInst::FCMP_OGT: Result = (CmpRes != APFloat::cmpUnordered && CmpRes == APFloat::cmpGreaterThan); break; case FCmpInst::FCMP_UGE: Result = (CmpRes == APFloat::cmpUnordered || (CmpRes == APFloat::cmpGreaterThan || CmpRes == APFloat::cmpEqual)); break; case FCmpInst::FCMP_OGE: Result = (CmpRes != APFloat::cmpUnordered && (CmpRes == APFloat::cmpGreaterThan || CmpRes == APFloat::cmpEqual)); break; case FCmpInst::FCMP_ULT: Result = (CmpRes == APFloat::cmpUnordered || CmpRes == APFloat::cmpLessThan); break; case FCmpInst::FCMP_OLT: Result = (CmpRes != APFloat::cmpUnordered && CmpRes == APFloat::cmpLessThan); break; case FCmpInst::FCMP_ULE: Result = (CmpRes == APFloat::cmpUnordered || (CmpRes == APFloat::cmpLessThan || CmpRes == APFloat::cmpEqual)); break; case FCmpInst::FCMP_OLE: Result = (CmpRes != APFloat::cmpUnordered && (CmpRes == APFloat::cmpLessThan || CmpRes == APFloat::cmpEqual)); break; case FCmpInst::FCMP_UNE: Result = (CmpRes == APFloat::cmpUnordered || CmpRes != APFloat::cmpEqual); break; case FCmpInst::FCMP_ONE: Result = (CmpRes != APFloat::cmpUnordered && CmpRes != APFloat::cmpEqual); break; default: assert(0 && "Invalid FCMP predicate!"); break; case FCmpInst::FCMP_FALSE: Result = false; break; case FCmpInst::FCMP_TRUE: Result = true; break; } bindLocal(ki, state, ConstantExpr::alloc(Result, Expr::Bool)); break; } case Instruction::InsertValue: { KGEPInstruction *kgepi = static_cast(ki); ref agg = eval(ki, 0, state).value; ref val = eval(ki, 1, state).value; ref l = NULL, r = NULL; unsigned lOffset = kgepi->offset*8, rOffset = kgepi->offset*8 + val->getWidth(); if (lOffset > 0) l = ExtractExpr::create(agg, 0, lOffset); if (rOffset < agg->getWidth()) r = ExtractExpr::create(agg, rOffset, agg->getWidth() - rOffset); ref result; if (l && r) result = ConcatExpr::create(r, ConcatExpr::create(val, l)); else if (l) result = ConcatExpr::create(val, l); else if (r) result = ConcatExpr::create(r, val); else result = val; bindLocal(ki, state, result); break; } case Instruction::ExtractValue: { KGEPInstruction *kgepi = static_cast(ki); ref agg = eval(ki, 0, state).value; ref result = ExtractExpr::create(agg, kgepi->offset*8, 
getWidthForLLVMType(i->getType())); bindLocal(ki, state, result); break; } case Instruction::Fence: { // Ignore for now break; } case Instruction::InsertElement: { InsertElementInst *iei = cast(i); ref vec = eval(ki, 0, state).value; ref newElt = eval(ki, 1, state).value; ref idx = eval(ki, 2, state).value; ConstantExpr *cIdx = dyn_cast(idx); if (cIdx == NULL) { terminateStateOnExecError( state, "InsertElement, support for symbolic index not implemented"); return; } uint64_t iIdx = cIdx->getZExtValue(); #if LLVM_VERSION_MAJOR >= 11 const auto *vt = cast(iei->getType()); #else const llvm::VectorType *vt = iei->getType(); #endif unsigned EltBits = getWidthForLLVMType(vt->getElementType()); if (iIdx >= vt->getNumElements()) { // Out of bounds write terminateStateOnProgramError(state, "Out of bounds write when inserting element", StateTerminationType::BadVectorAccess); return; } const unsigned elementCount = vt->getNumElements(); llvm::SmallVector, 8> elems; elems.reserve(elementCount); for (unsigned i = elementCount; i != 0; --i) { auto of = i - 1; unsigned bitOffset = EltBits * of; elems.push_back( of == iIdx ? newElt : ExtractExpr::create(vec, bitOffset, EltBits)); } assert(Context::get().isLittleEndian() && "FIXME:Broken for big endian"); ref Result = ConcatExpr::createN(elementCount, elems.data()); bindLocal(ki, state, Result); break; } case Instruction::ExtractElement: { ExtractElementInst *eei = cast(i); ref vec = eval(ki, 0, state).value; ref idx = eval(ki, 1, state).value; ConstantExpr *cIdx = dyn_cast(idx); if (cIdx == NULL) { terminateStateOnExecError( state, "ExtractElement, support for symbolic index not implemented"); return; } uint64_t iIdx = cIdx->getZExtValue(); #if LLVM_VERSION_MAJOR >= 11 const auto *vt = cast(eei->getVectorOperandType()); #else const llvm::VectorType *vt = eei->getVectorOperandType(); #endif unsigned EltBits = getWidthForLLVMType(vt->getElementType()); if (iIdx >= vt->getNumElements()) { // Out of bounds read terminateStateOnProgramError(state, "Out of bounds read when extracting element", StateTerminationType::BadVectorAccess); return; } unsigned bitOffset = EltBits * iIdx; ref Result = ExtractExpr::create(vec, bitOffset, EltBits); bindLocal(ki, state, Result); break; } case Instruction::ShuffleVector: // Should never happen due to Scalarizer pass removing ShuffleVector // instructions. 
terminateStateOnExecError(state, "Unexpected ShuffleVector instruction"); break; #ifdef SUPPORT_KLEE_EH_CXX case Instruction::Resume: { auto *cui = dyn_cast_or_null( state.unwindingInformation.get()); if (!cui) { terminateStateOnExecError( state, "resume-instruction executed outside of cleanup phase unwinding"); break; } ref arg = eval(ki, 0, state).value; ref exceptionPointer = ExtractExpr::create(arg, 0, Expr::Int64); ref selectorValue = ExtractExpr::create(arg, Expr::Int64, Expr::Int32); if (!dyn_cast(exceptionPointer) || !dyn_cast(selectorValue)) { terminateStateOnExecError( state, "resume-instruction called with non constant expression"); break; } if (!Expr::createIsZero(selectorValue)->isTrue()) { klee_warning("resume-instruction called with non-0 selector value"); } if (!EqExpr::create(exceptionPointer, cui->exceptionObject)->isTrue()) { terminateStateOnExecError( state, "resume-instruction called with unexpected exception pointer"); break; } unwindToNextLandingpad(state); break; } case Instruction::LandingPad: { auto *cui = dyn_cast_or_null( state.unwindingInformation.get()); if (!cui) { terminateStateOnExecError( state, "Executing landing pad but not in unwinding phase 2"); break; } ref exceptionPointer = cui->exceptionObject; ref selectorValue; // check on which frame we are currently if (state.stack.size() - 1 == cui->catchingStackIndex) { // we are in the target stack frame, return the selector value // that was returned by the personality fn in phase 1 and stop unwinding. selectorValue = cui->selectorValue; // stop unwinding by cleaning up our unwinding information. state.unwindingInformation.reset(); // this would otherwise now be a dangling pointer cui = nullptr; } else { // we are not yet at the target stack frame. the landingpad might have // a cleanup clause or not, anyway, we give it the selector value "0", // which represents a cleanup, and expect it to handle it. // This is explicitly allowed by LLVM, see // https://llvm.org/docs/ExceptionHandling.html#id18 selectorValue = ConstantExpr::create(0, Expr::Int32); } // we have to return a {i8*, i32} ref result = ConcatExpr::create( ZExtExpr::create(selectorValue, Expr::Int32), exceptionPointer); bindLocal(ki, state, result); break; } #endif // SUPPORT_KLEE_EH_CXX case Instruction::AtomicRMW: terminateStateOnExecError(state, "Unexpected Atomic instruction, should be " "lowered by LowerAtomicInstructionPass"); break; case Instruction::AtomicCmpXchg: terminateStateOnExecError(state, "Unexpected AtomicCmpXchg instruction, should be " "lowered by LowerAtomicInstructionPass"); break; // Other instructions... 
// Unhandled default: terminateStateOnExecError(state, "illegal instruction"); break; } } void Executor::updateStates(ExecutionState *current) { if (searcher) { searcher->update(current, addedStates, removedStates); } states.insert(addedStates.begin(), addedStates.end()); addedStates.clear(); for (std::vector::iterator it = removedStates.begin(), ie = removedStates.end(); it != ie; ++it) { ExecutionState *es = *it; std::set::iterator it2 = states.find(es); assert(it2!=states.end()); states.erase(it2); std::map >::iterator it3 = seedMap.find(es); if (it3 != seedMap.end()) seedMap.erase(it3); executionTree->remove(es->executionTreeNode); delete es; } removedStates.clear(); } template void Executor::computeOffsetsSeqTy(KGEPInstruction *kgepi, ref &constantOffset, uint64_t index, const TypeIt it) { #if LLVM_VERSION_CODE <= LLVM_VERSION(14, 0) assert(it->getNumContainedTypes() == 1 && "Sequential type must contain one subtype"); uint64_t elementSize = kmodule->targetData->getTypeStoreSize(it->getContainedType(0)); #else assert(it.isSequential() && "Called with non-sequential type"); // Get the size of a single element std::uint64_t elementSize = kmodule->targetData->getTypeStoreSize(it.getIndexedType()); #endif const Value *operand = it.getOperand(); if (const Constant *c = dyn_cast(operand)) { ref index = evalConstant(c)->SExt(Context::get().getPointerWidth()); ref addend = index->Mul( ConstantExpr::alloc(elementSize, Context::get().getPointerWidth())); constantOffset = constantOffset->Add(addend); } else { kgepi->indices.emplace_back(index, elementSize); } } template void Executor::computeOffsets(KGEPInstruction *kgepi, TypeIt ib, TypeIt ie) { ref constantOffset = ConstantExpr::alloc(0, Context::get().getPointerWidth()); uint64_t index = 1; for (TypeIt ii = ib; ii != ie; ++ii) { #if LLVM_VERSION_CODE <= LLVM_VERSION(14, 0) if (StructType *st = dyn_cast(*ii)) { #else if (StructType *st = ii.getStructTypeOrNull()) { #endif const StructLayout *sl = kmodule->targetData->getStructLayout(st); const ConstantInt *ci = cast(ii.getOperand()); uint64_t addend = sl->getElementOffset((unsigned) ci->getZExtValue()); constantOffset = constantOffset->Add(ConstantExpr::alloc(addend, Context::get().getPointerWidth())); #if LLVM_VERSION_CODE <= LLVM_VERSION(14, 0) } else if (ii->isArrayTy() || ii->isVectorTy() || ii->isPointerTy()) { #else } else if (ii.isSequential()) { #endif computeOffsetsSeqTy(kgepi, constantOffset, index, ii); } else assert("invalid type" && 0); index++; } kgepi->offset = constantOffset->getZExtValue(); } void Executor::bindInstructionConstants(KInstruction *KI) { if (GetElementPtrInst *gepi = dyn_cast(KI->inst)) { KGEPInstruction *kgepi = static_cast(KI); #if LLVM_VERSION_CODE <= LLVM_VERSION(14, 0) computeOffsets(kgepi, klee::gep_type_begin(gepi), klee::gep_type_end(gepi)); #else computeOffsets(kgepi, llvm::gep_type_begin(gepi), llvm::gep_type_end(gepi)); #endif } else if (InsertValueInst *ivi = dyn_cast(KI->inst)) { KGEPInstruction *kgepi = static_cast(KI); #if LLVM_VERSION_CODE <= LLVM_VERSION(10, 0) computeOffsets(kgepi, iv_type_begin(ivi), iv_type_end(ivi)); assert(kgepi->indices.empty() && "InsertValue constant offset expected"); #else llvm::Value *agg = ivi->getAggregateOperand(); llvm::Type *current_type = agg->getType(); std::uint64_t offset = 0; for (auto index : ivi->indices()) { if (StructType *st = dyn_cast(current_type)) { const StructLayout *sl = kmodule->targetData->getStructLayout(st); std::uint64_t addend = sl->getElementOffset(index); offset = offset + addend; } else if 
(current_type->isArrayTy() || current_type->isVectorTy() || current_type->isPointerTy()) { std::uint64_t elementSize = kmodule->targetData->getTypeStoreSize( current_type->getArrayElementType()); offset += elementSize * index; } else { assert(0 && "Unknown type"); } current_type = GetElementPtrInst::getTypeAtIndex(current_type, index); } kgepi->offset = offset; #endif } else if (ExtractValueInst *evi = dyn_cast(KI->inst)) { KGEPInstruction *kgepi = static_cast(KI); #if LLVM_VERSION_CODE <= LLVM_VERSION(10, 0) computeOffsets(kgepi, ev_type_begin(evi), ev_type_end(evi)); assert(kgepi->indices.empty() && "ExtractValue constant offset expected"); #else llvm::Value *agg = evi->getAggregateOperand(); llvm::Type *current_type = agg->getType(); uint64_t offset = 0; for (auto index : evi->indices()) { if (StructType *st = dyn_cast(current_type)) { const StructLayout *sl = kmodule->targetData->getStructLayout(st); uint64_t addend = sl->getElementOffset(index); offset = offset + addend; } else if (current_type->isArrayTy() || current_type->isVectorTy() || current_type->isPointerTy()) { uint64_t elementSize = kmodule->targetData->getTypeStoreSize( current_type->getArrayElementType()); offset += elementSize * index; } else { assert(0 && "Unknown type"); } current_type = GetElementPtrInst::getTypeAtIndex(current_type, index); } kgepi->offset = offset; #endif } } void Executor::bindModuleConstants() { for (auto &kfp : kmodule->functions) { KFunction *kf = kfp.get(); for (unsigned i=0; inumInstructions; ++i) bindInstructionConstants(kf->instructions[i]); } kmodule->constantTable = std::unique_ptr(new Cell[kmodule->constants.size()]); for (unsigned i=0; iconstants.size(); ++i) { Cell &c = kmodule->constantTable[i]; c.value = evalConstant(kmodule->constants[i]); } } bool Executor::checkMemoryUsage() { if (!MaxMemory) return true; // We need to avoid calling GetTotalMallocUsage() often because it // is O(elts on freelist). This is really bad since we start // to pummel the freelist once we hit the memory cap. if ((stats::instructions & 0xFFFFU) != 0) // every 65536 instructions return true; // check memory limit const auto mallocUsage = util::GetTotalMallocUsage() >> 20U; const auto mmapUsage = memory->getUsedDeterministicSize() >> 20U; const auto totalUsage = mallocUsage + mmapUsage; atMemoryLimit = totalUsage > MaxMemory; // inhibit forking if (!atMemoryLimit) return true; // only terminate states when threshold (+100MB) exceeded if (totalUsage <= MaxMemory + 100) return true; // just guess at how many to kill const auto numStates = states.size(); auto toKill = std::max(1UL, numStates - numStates * MaxMemory / totalUsage); klee_warning("killing %lu states (over memory cap: %luMB)", toKill, totalUsage); // randomly select states for early termination std::vector arr(states.begin(), states.end()); // FIXME: expensive for (unsigned i = 0, N = arr.size(); N && i < toKill; ++i, --N) { unsigned idx = theRNG.getInt32() % N; // Make two pulls to try and not hit a state that // covered new code. 
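    // (If the first randomly picked state covered new code, we re-roll once,
    // so such states are less likely to end up as the final pick.)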
if (arr[idx]->coveredNew) idx = theRNG.getInt32() % N; std::swap(arr[idx], arr[N - 1]); terminateStateEarly(*arr[N - 1], "Memory limit exceeded.", StateTerminationType::OutOfMemory); } return false; } void Executor::doDumpStates() { if (!DumpStatesOnHalt || states.empty()) { interpreterHandler->incPathsExplored(states.size()); return; } klee_message("halting execution, dumping remaining states"); for (const auto &state : states) terminateStateEarly(*state, "Execution halting.", StateTerminationType::Interrupted); updateStates(nullptr); } void Executor::run(ExecutionState &initialState) { bindModuleConstants(); // Delay init till now so that ticks don't accrue during optimization and such. timers.reset(); states.insert(&initialState); if (usingSeeds) { std::vector &v = seedMap[&initialState]; for (std::vector::const_iterator it = usingSeeds->begin(), ie = usingSeeds->end(); it != ie; ++it) v.push_back(SeedInfo(*it)); int lastNumSeeds = usingSeeds->size()+10; time::Point lastTime, startTime = lastTime = time::getWallTime(); ExecutionState *lastState = 0; while (!seedMap.empty()) { if (haltExecution) { doDumpStates(); return; } std::map >::iterator it = seedMap.upper_bound(lastState); if (it == seedMap.end()) it = seedMap.begin(); lastState = it->first; ExecutionState &state = *lastState; KInstruction *ki = state.pc; stepInstruction(state); executeInstruction(state, ki); timers.invoke(); if (::dumpStates) dumpStates(); if (::dumpExecutionTree) dumpExecutionTree(); updateStates(&state); if ((stats::instructions % 1000) == 0) { int numSeeds = 0, numStates = 0; for (std::map >::iterator it = seedMap.begin(), ie = seedMap.end(); it != ie; ++it) { numSeeds += it->second.size(); numStates++; } const auto time = time::getWallTime(); const time::Span seedTime(SeedTime); if (seedTime && time > startTime + seedTime) { klee_warning("seed time expired, %d seeds remain over %d states", numSeeds, numStates); break; } else if (numSeeds<=lastNumSeeds-10 || time - lastTime >= time::seconds(10)) { lastTime = time; lastNumSeeds = numSeeds; klee_message("%d seeds remaining over: %d states", numSeeds, numStates); } } } klee_message("seeding done (%d states remain)", (int) states.size()); if (OnlySeed) { doDumpStates(); return; } } searcher = constructUserSearcher(*this); std::vector newStates(states.begin(), states.end()); searcher->update(0, newStates, std::vector()); // main interpreter loop while (!states.empty() && !haltExecution) { ExecutionState &state = searcher->selectState(); KInstruction *ki = state.pc; stepInstruction(state); executeInstruction(state, ki); timers.invoke(); if (::dumpStates) dumpStates(); if (::dumpExecutionTree) dumpExecutionTree(); updateStates(&state); if (!checkMemoryUsage()) { // update searchers when states were terminated early due to memory pressure updateStates(nullptr); } } delete searcher; searcher = nullptr; doDumpStates(); } std::string Executor::getAddressInfo(ExecutionState &state, ref address) const{ std::string Str; llvm::raw_string_ostream info(Str); info << "\taddress: " << address << "\n"; uint64_t example; if (ConstantExpr *CE = dyn_cast(address)) { example = CE->getZExtValue(); } else { ref value; bool success = solver->getValue(state.constraints, address, value, state.queryMetaData); assert(success && "FIXME: Unhandled solver failure"); (void) success; example = value->getZExtValue(); info << "\texample: " << example << "\n"; std::pair, ref> res = solver->getRange(state.constraints, address, state.queryMetaData); info << "\trange: [" << res.first << ", " << 
res.second <<"]\n"; } MemoryObject hack((unsigned) example); MemoryMap::iterator lower = state.addressSpace.objects.upper_bound(&hack); info << "\tnext: "; if (lower==state.addressSpace.objects.end()) { info << "none\n"; } else { const MemoryObject *mo = lower->first; std::string alloc_info; mo->getAllocInfo(alloc_info); info << "object at " << mo->address << " of size " << mo->size << "\n" << "\t\t" << alloc_info << "\n"; } if (lower!=state.addressSpace.objects.begin()) { --lower; info << "\tprev: "; if (lower==state.addressSpace.objects.end()) { info << "none\n"; } else { const MemoryObject *mo = lower->first; std::string alloc_info; mo->getAllocInfo(alloc_info); info << "object at " << mo->address << " of size " << mo->size << "\n" << "\t\t" << alloc_info << "\n"; } } return info.str(); } void Executor::terminateState(ExecutionState &state, StateTerminationType reason) { if (replayKTest && replayPosition!=replayKTest->numObjects) { klee_warning_once(replayKTest, "replay did not consume all objects in test input."); } interpreterHandler->incPathsExplored(); executionTree->setTerminationType(state, reason); std::vector::iterator it = std::find(addedStates.begin(), addedStates.end(), &state); if (it==addedStates.end()) { state.pc = state.prevPC; removedStates.push_back(&state); } else { // never reached searcher, just delete immediately std::map< ExecutionState*, std::vector >::iterator it3 = seedMap.find(&state); if (it3 != seedMap.end()) seedMap.erase(it3); addedStates.erase(it); executionTree->remove(state.executionTreeNode); delete &state; } } static bool shouldWriteTest(const ExecutionState &state) { return !OnlyOutputStatesCoveringNew || state.coveredNew; } static std::string terminationTypeFileExtension(StateTerminationType type) { std::string ret; #undef TTYPE #undef TTMARK #define TTYPE(N,I,S) case StateTerminationType::N: ret = (S); break; #define TTMARK(N,I) switch (type) { TERMINATION_TYPES } return ret; }; void Executor::terminateStateOnExit(ExecutionState &state) { ++stats::terminationExit; if (shouldWriteTest(state) || (AlwaysOutputSeeds && seedMap.count(&state))) interpreterHandler->processTestCase( state, nullptr, terminationTypeFileExtension(StateTerminationType::Exit).c_str()); interpreterHandler->incPathsCompleted(); terminateState(state, StateTerminationType::Exit); } void Executor::terminateStateEarly(ExecutionState &state, const Twine &message, StateTerminationType reason) { if (reason <= StateTerminationType::EARLY) { assert(reason > StateTerminationType::EXIT); ++stats::terminationEarly; } if ((reason <= StateTerminationType::EARLY && shouldWriteTest(state)) || (AlwaysOutputSeeds && seedMap.count(&state))) { interpreterHandler->processTestCase( state, (message + "\n").str().c_str(), terminationTypeFileExtension(reason).c_str()); } terminateState(state, reason); } void Executor::terminateStateEarlyAlgorithm(ExecutionState &state, const llvm::Twine &message, StateTerminationType reason) { assert(reason > StateTerminationType::EXECERR && reason <= StateTerminationType::EARLYALGORITHM); ++stats::terminationEarlyAlgorithm; terminateStateEarly(state, message, reason); } void Executor::terminateStateEarlyUser(ExecutionState &state, const llvm::Twine &message) { ++stats::terminationEarlyUser; terminateStateEarly(state, message, StateTerminationType::SilentExit); } const InstructionInfo & Executor::getLastNonKleeInternalInstruction(const ExecutionState &state, Instruction ** lastInstruction) { // unroll the stack of the applications state and find // the last instruction 
which is not inside a KLEE internal function ExecutionState::stack_ty::const_reverse_iterator it = state.stack.rbegin(), itE = state.stack.rend(); // don't check beyond the outermost function (i.e. main()) itE--; const InstructionInfo * ii = 0; if (kmodule->internalFunctions.count(it->kf->function) == 0){ ii = state.prevPC->info; *lastInstruction = state.prevPC->inst; // Cannot return yet because even though // it->function is not an internal function it might of // been called from an internal function. } // Wind up the stack and check if we are in a KLEE internal function. // We visit the entire stack because we want to return a CallInstruction // that was not reached via any KLEE internal functions. for (;it != itE; ++it) { // check calling instruction and if it is contained in a KLEE internal function const Function * f = (*it->caller).inst->getParent()->getParent(); if (kmodule->internalFunctions.count(f)){ ii = 0; continue; } if (!ii){ ii = (*it->caller).info; *lastInstruction = (*it->caller).inst; } } if (!ii) { // something went wrong, play safe and return the current instruction info *lastInstruction = state.prevPC->inst; return *state.prevPC->info; } return *ii; } bool shouldExitOn(StateTerminationType reason) { auto it = std::find(ExitOnErrorType.begin(), ExitOnErrorType.end(), reason); return it != ExitOnErrorType.end(); } void Executor::terminateStateOnError(ExecutionState &state, const llvm::Twine &messaget, StateTerminationType terminationType, const llvm::Twine &info, const char *suffix) { std::string message = messaget.str(); static std::set< std::pair > emittedErrors; Instruction * lastInst; const InstructionInfo &ii = getLastNonKleeInternalInstruction(state, &lastInst); if (EmitAllErrors || emittedErrors.insert(std::make_pair(lastInst, message)).second) { if (!ii.file.empty()) { klee_message("ERROR: %s:%d: %s", ii.file.c_str(), ii.line, message.c_str()); } else { klee_message("ERROR: (location information missing) %s", message.c_str()); } if (!EmitAllErrors) klee_message("NOTE: now ignoring this error at this location"); std::string MsgString; llvm::raw_string_ostream msg(MsgString); msg << "Error: " << message << '\n'; if (!ii.file.empty()) { msg << "File: " << ii.file << '\n' << "Line: " << ii.line << '\n' << "assembly.ll line: " << ii.assemblyLine << '\n' << "State: " << state.getID() << '\n'; } msg << "Stack: \n"; state.dumpStack(msg); std::string info_str = info.str(); if (!info_str.empty()) msg << "Info: \n" << info_str; const std::string ext = terminationTypeFileExtension(terminationType); // use user provided suffix from klee_report_error() const char * file_suffix = suffix ? 
suffix : ext.c_str();

    interpreterHandler->processTestCase(state, msg.str().c_str(), file_suffix);
  }

  terminateState(state, terminationType);

  if (shouldExitOn(terminationType))
    haltExecution = true;
}

void Executor::terminateStateOnExecError(ExecutionState &state,
                                         const llvm::Twine &message,
                                         StateTerminationType reason) {
  assert(reason > StateTerminationType::USERERR &&
         reason <= StateTerminationType::EXECERR);
  ++stats::terminationExecutionError;
  terminateStateOnError(state, message, reason, "");
}

void Executor::terminateStateOnProgramError(ExecutionState &state,
                                            const llvm::Twine &message,
                                            StateTerminationType reason,
                                            const llvm::Twine &info,
                                            const char *suffix) {
  assert(reason > StateTerminationType::SOLVERERR &&
         reason <= StateTerminationType::PROGERR);
  ++stats::terminationProgramError;
  terminateStateOnError(state, message, reason, info, suffix);
}

void Executor::terminateStateOnSolverError(ExecutionState &state,
                                           const llvm::Twine &message) {
  ++stats::terminationSolverError;
  terminateStateOnError(state, message, StateTerminationType::Solver, "");
}

void Executor::terminateStateOnUserError(ExecutionState &state,
                                         const llvm::Twine &message,
                                         bool writeErr) {
  ++stats::terminationUserError;
  if (writeErr) {
    terminateStateOnError(state, message, StateTerminationType::User, "");
  } else {
    terminateState(state, StateTerminationType::User);
  }
}

// XXX shoot me
static const char *okExternalsList[] = { "printf", "fprintf", "puts",
                                         "getpid" };
static std::set<std::string> okExternals(
    okExternalsList,
    okExternalsList + (sizeof(okExternalsList) / sizeof(okExternalsList[0])));

void Executor::callExternalFunction(ExecutionState &state, KInstruction *target,
                                    KCallable *callable,
                                    std::vector<ref<Expr>> &arguments) {
  // check if specialFunctionHandler wants it
  if (const auto *func = dyn_cast<KFunction>(callable)) {
    if (specialFunctionHandler->handle(state, func->function, target,
                                       arguments))
      return;
  }

  if (ExternalCalls == ExternalCallPolicy::None &&
      !okExternals.count(callable->getName().str())) {
    klee_warning("Disallowed call to external function: %s\n",
                 callable->getName().str().c_str());
    terminateStateOnUserError(state, "external calls disallowed");
    return;
  }

  // normal external function handling path
  // allocate 512 bits for each argument (+return value) to support
  // fp80's and SIMD vectors as parameters for external calls;
  // we could iterate through all the arguments first and determine the exact
  // size we need, but this is faster, and the memory usage isn't significant.
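  // (Expr::MaxWidth is 512 bits, i.e. 64 bytes per slot; a call with three
  // arguments therefore reserves (3 + 1) * 64 = 256 bytes.)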
  size_t allocatedBytes = Expr::MaxWidth / 8 * (arguments.size() + 1);
  uint64_t *args = (uint64_t *)alloca(allocatedBytes);
  memset(args, 0, allocatedBytes);
  unsigned wordIndex = 2;
  for (std::vector<ref<Expr>>::iterator ai = arguments.begin(),
                                        ae = arguments.end();
       ai != ae; ++ai) {
    if (ExternalCalls == ExternalCallPolicy::All) {
      // don't bother checking uniqueness
      *ai = optimizer.optimizeExpr(*ai, true);
      ref<ConstantExpr> cvalue = getValueFromSeeds(state, *ai);
      /* If no seed evaluation results in a constant, call the solver */
      if (!cvalue) {
        [[maybe_unused]] bool success = solver->getValue(
            state.constraints, *ai, cvalue, state.queryMetaData);
        assert(success && "FIXME: Unhandled solver failure");
      }
      cvalue->toMemory(&args[wordIndex]);
      ObjectPair op;
      // Checking to see if the argument is a pointer to something
      if (cvalue->getWidth() == Context::get().getPointerWidth() &&
          state.addressSpace.resolveOne(cvalue, op)) {
        op.second->flushToConcreteStore(solver.get(), state);
      }
      wordIndex += (cvalue->getWidth() + 63) / 64;
    } else {
      ref<Expr> arg = toUnique(state, *ai);
      if (ConstantExpr *ce = dyn_cast<ConstantExpr>(arg)) {
        // fp80 must be aligned to 16 according to the System V AMD 64 ABI
        if (ce->getWidth() == Expr::Fl80 && wordIndex & 0x01)
          wordIndex++;

        // XXX kick toMemory functions from here
        ce->toMemory(&args[wordIndex]);
        wordIndex += (ce->getWidth() + 63) / 64;
      } else {
        terminateStateOnExecError(state,
                                  "external call with symbolic argument: " +
                                      callable->getName());
        return;
      }
    }
  }

  // Prepare external memory for invoking the function
  static std::size_t residentPages = 0;
  double avgNeededPages = 0;
  if (MemoryManager::isDeterministic) {
    auto const minflt = [] {
      struct rusage ru = {};
      [[maybe_unused]] int ret = getrusage(RUSAGE_SELF, &ru);
      assert(!ret && "getrusage failed");
      assert(ru.ru_minflt >= 0);
      return ru.ru_minflt;
    };

    auto tmp = minflt();
    std::size_t neededPages = state.addressSpace.copyOutConcretes();
    auto newPages = minflt() - tmp;
    assert(newPages >= 0);
    residentPages += newPages;
    assert(residentPages >= neededPages &&
           "allocator too full, assumption that each object occupies its own "
           "page is no longer true");

    // average of pages needed for an external function call
    // exponential moving average with alpha = 1/4
    static double avgNeededPages_ = residentPages;
    avgNeededPages_ = (3.0 * avgNeededPages_ + neededPages) / 4.0;
    avgNeededPages = avgNeededPages_;
  } else {
    state.addressSpace.copyOutConcretes();
  }

#ifndef WINDOWS
  // Update external errno state with local state value
  int *errno_addr = getErrnoLocation(state);
  ObjectPair result;
  bool resolved = state.addressSpace.resolveOne(
      ConstantExpr::create((uint64_t)errno_addr, Expr::Int64), result);
  if (!resolved)
    klee_error("Could not resolve memory object for errno");
  ref<Expr> errValueExpr = result.second->read(0, sizeof(*errno_addr) * 8);
  ConstantExpr *errnoValue = dyn_cast<ConstantExpr>(errValueExpr);
  if (!errnoValue) {
    terminateStateOnExecError(state,
                              "external call with errno value symbolic: " +
                                  callable->getName());
    return;
  }

  externalDispatcher->setLastErrno(
      errnoValue->getZExtValue(sizeof(*errno_addr) * 8));
#endif

  if (ExternalCallWarnings != ExtCallWarnings::None) {
    std::string TmpStr;
    llvm::raw_string_ostream os(TmpStr);
    os << "calling external: " << callable->getName().str() << "(";
    for (unsigned i = 0; i < arguments.size(); i++) {
      os << arguments[i];
      if (i != arguments.size() - 1)
        os << ", ";
    }
    os << ") at " << state.pc->getSourceLocation();

    if (ExternalCallWarnings == ExtCallWarnings::All)
      klee_warning("%s", os.str().c_str());
    else
      klee_warning_once(callable->getValue(), "%s", os.str().c_str());
  }

  bool success = externalDispatcher->executeCall(callable, target->inst, args);
  if (!success) {
    terminateStateOnExecError(state,
"failed external call: " + callable->getName(), StateTerminationType::External); return; } if (!state.addressSpace.copyInConcretes()) { terminateStateOnExecError(state, "external modified read-only object", StateTerminationType::External); return; } if (MemoryManager::isDeterministic && residentPages > ExternalPageThreshold && residentPages > 2 * avgNeededPages) { if (memory->markMappingsAsUnneeded()) { residentPages = 0; } } #ifndef WINDOWS // Update errno memory object with the errno value from the call int error = externalDispatcher->getLastErrno(); state.addressSpace.copyInConcrete(result.first, result.second, (uint64_t)&error); #endif Type *resultType = target->inst->getType(); if (resultType != Type::getVoidTy(kmodule->module->getContext())) { ref e = ConstantExpr::fromMemory((void*) args, getWidthForLLVMType(resultType)); bindLocal(target, state, e); } } /***/ ref Executor::replaceReadWithSymbolic(ExecutionState &state, ref e) { unsigned n = interpreterOpts.MakeConcreteSymbolic; if (!n || replayKTest || replayPath) return e; // right now, we don't replace symbolics (is there any reason to?) if (!isa(e)) return e; if (n != 1 && random() % n) return e; // create a new fresh location, assert it is equal to concrete value in e // and return it. static unsigned id; const Array *array = arrayCache.CreateArray("rrws_arr" + llvm::utostr(++id), Expr::getMinBytesForWidth(e->getWidth())); ref res = Expr::createTempRead(array, e->getWidth()); ref eq = NotOptimizedExpr::create(EqExpr::create(e, res)); llvm::errs() << "Making symbolic: " << eq << "\n"; state.addConstraint(eq); return res; } ObjectState *Executor::bindObjectInState(ExecutionState &state, const MemoryObject *mo, bool isLocal, const Array *array) { ObjectState *os = array ? new ObjectState(mo, array) : new ObjectState(mo); state.addressSpace.bindObject(mo, os); // Its possible that multiple bindings of the same mo in the state // will put multiple copies on this list, but it doesn't really // matter because all we use this list for is to unbind the object // on function return. if (isLocal) state.stack.back().allocas.push_back(mo); return os; } void Executor::executeAlloc(ExecutionState &state, ref size, bool isLocal, KInstruction *target, bool zeroMemory, const ObjectState *reallocFrom, size_t allocationAlignment) { size = toUnique(state, size); if (ConstantExpr *CE = dyn_cast(size)) { const llvm::Value *allocSite = state.prevPC->inst; if (allocationAlignment == 0) { allocationAlignment = getAllocationAlignment(allocSite); } MemoryObject *mo = memory->allocate(CE->getZExtValue(), isLocal, /*isGlobal=*/false, &state, allocSite, allocationAlignment); if (!mo) { bindLocal(target, state, ConstantExpr::alloc(0, Context::get().getPointerWidth())); } else { ObjectState *os = bindObjectInState(state, mo, isLocal); if (zeroMemory) { os->initializeToZero(); } else { os->initializeToRandom(); } bindLocal(target, state, mo->getBaseExpr()); if (reallocFrom) { unsigned count = std::min(reallocFrom->size, os->size); for (unsigned i=0; iwrite(i, reallocFrom->read8(i)); const MemoryObject *reallocObject = reallocFrom->getObject(); state.deallocate(reallocObject); state.addressSpace.unbindObject(reallocObject); } } } else { // XXX For now we just pick a size. Ideally we would support // symbolic sizes fully but even if we don't it would be better to // "smartly" pick a value, for example we could fork and pick the // min and max values and perhaps some intermediate (reasonable // value). 
    //
    // It would also be nice to recognize the case when size has
    // exactly two values and just fork (but we need to get rid of
    // return argument first). This shows up in pcre when llvm
    // collapses the size expression with a select.

    size = optimizer.optimizeExpr(size, true);

    // Check if in seed mode, then try to replicate size from a seed
    ref<ConstantExpr> example = getValueFromSeeds(state, size);
    if (!example) {
      bool success = solver->getValue(state.constraints, size, example,
                                      state.queryMetaData);
      assert(success && "FIXME: Unhandled solver failure");
      (void)success;

      // Try and start with a small example.
      Expr::Width W = example->getWidth();
      while (example->Ugt(ConstantExpr::alloc(128, W))->isTrue()) {
        ref<ConstantExpr> tmp = example->LShr(ConstantExpr::alloc(1, W));
        bool res;
        [[maybe_unused]] bool success =
            solver->mayBeTrue(state.constraints, EqExpr::create(tmp, size),
                              res, state.queryMetaData);
        assert(success && "FIXME: Unhandled solver failure");
        if (!res)
          break;
        example = tmp;
      }
    }

    StatePair fixedSize =
        fork(state, EqExpr::create(example, size), true, BranchType::Alloc);

    if (fixedSize.second) {
      // Check for exactly two values
      ref<ConstantExpr> tmp;
      bool success = solver->getValue(fixedSize.second->constraints, size, tmp,
                                      fixedSize.second->queryMetaData);
      assert(success && "FIXME: Unhandled solver failure");
      (void)success;
      bool res;
      success = solver->mustBeTrue(fixedSize.second->constraints,
                                   EqExpr::create(tmp, size), res,
                                   fixedSize.second->queryMetaData);
      assert(success && "FIXME: Unhandled solver failure");
      (void)success;
      if (res) {
        executeAlloc(*fixedSize.second, tmp, isLocal, target, zeroMemory,
                     reallocFrom);
      } else {
        // See if a *really* big value is possible. If so assume
        // malloc will fail for it, so let's fork and return 0.
        StatePair hugeSize =
            fork(*fixedSize.second,
                 UltExpr::create(
                     ConstantExpr::alloc(1U << 31, example->getWidth()), size),
                 true, BranchType::Alloc);
        if (hugeSize.first) {
          klee_message("NOTE: found huge malloc, returning 0");
          bindLocal(target, *hugeSize.first,
                    ConstantExpr::alloc(0, Context::get().getPointerWidth()));
        }

        if (hugeSize.second) {
          std::string Str;
          llvm::raw_string_ostream info(Str);
          ExprPPrinter::printOne(info, "  size expr", size);
          info << "  concretization : " << example << "\n";
          info << "  unbound example: " << tmp << "\n";
          terminateStateOnProgramError(*hugeSize.second,
                                       "concretized symbolic size",
                                       StateTerminationType::Model,
                                       info.str());
        }
      }
    }

    if (fixedSize.first) // can be zero when fork fails
      executeAlloc(*fixedSize.first, example, isLocal, target, zeroMemory,
                   reallocFrom);
  }
}

void Executor::executeFree(ExecutionState &state, ref<Expr> address,
                           KInstruction *target) {
  address = optimizer.optimizeExpr(address, true);
  StatePair zeroPointer =
      fork(state, Expr::createIsZero(address), true, BranchType::Free);
  if (zeroPointer.first) {
    if (target)
      bindLocal(target, *zeroPointer.first, Expr::createPointer(0));
  }
  if (zeroPointer.second) { // address != 0
    ExactResolutionList rl;
    resolveExact(*zeroPointer.second, address, rl, "free");

    for (Executor::ExactResolutionList::iterator it = rl.begin(),
                                                 ie = rl.end();
         it != ie; ++it) {
      const MemoryObject *mo = it->first.first;
      if (mo->isLocal) {
        terminateStateOnProgramError(*it->second, "free of alloca",
                                     StateTerminationType::Free,
                                     getAddressInfo(*it->second, address));
      } else if (mo->isGlobal) {
        terminateStateOnProgramError(*it->second, "free of global",
                                     StateTerminationType::Free,
                                     getAddressInfo(*it->second, address));
      } else {
        it->second->deallocate(mo);
        it->second->addressSpace.unbindObject(mo);
        if (target)
          bindLocal(target, *it->second,
                    Expr::createPointer(0));
      }
    }
  }
}

void Executor::resolveExact(ExecutionState &state, ref<Expr> p,
                            ExactResolutionList &results,
                            const std::string &name) {
  p = optimizer.optimizeExpr(p, true);
  // XXX we may want to be capping this?
  ResolutionList rl;
  state.addressSpace.resolve(state, solver.get(), p, rl);

  ExecutionState *unbound = &state;
  for (ResolutionList::iterator it = rl.begin(), ie = rl.end(); it != ie;
       ++it) {
    ref<Expr> inBounds = EqExpr::create(p, it->first->getBaseExpr());

    StatePair branches =
        fork(*unbound, inBounds, true, BranchType::ResolvePointer);

    if (branches.first)
      results.push_back(std::make_pair(*it, branches.first));

    unbound = branches.second;
    if (!unbound) // Fork failure
      break;
  }

  if (unbound) {
    auto CE = dyn_cast<ConstantExpr>(p);
    if (MemoryManager::isDeterministic && CE) {
      using kdalloc::LocationInfo;
      auto ptr = reinterpret_cast<void *>(CE->getZExtValue());
      auto li = unbound->heapAllocator.locationInfo(ptr, 1);
      if (li == LocationInfo::LI_AllocatedOrQuarantined &&
          li.getBaseAddress() == ptr && name == "free") {
        terminateStateOnProgramError(*unbound, "memory error: double free",
                                     StateTerminationType::Ptr,
                                     getAddressInfo(*unbound, p));
        return;
      }
    }
    terminateStateOnProgramError(
        *unbound, "memory error: invalid pointer: " + name,
        StateTerminationType::Ptr, getAddressInfo(*unbound, p));
  }
}

void Executor::executeMemoryOperation(ExecutionState &state, bool isWrite,
                                      ref<Expr> address,
                                      ref<Expr> value /* undef if read */,
                                      KInstruction *target /* undef if write */) {
  Expr::Width type = (isWrite ? value->getWidth()
                              : getWidthForLLVMType(target->inst->getType()));
  unsigned bytes = Expr::getMinBytesForWidth(type);

  if (SimplifySymIndices) {
    if (!isa<ConstantExpr>(address))
      address = ConstraintManager::simplifyExpr(state.constraints, address);
    if (isWrite && !isa<ConstantExpr>(value))
      value = ConstraintManager::simplifyExpr(state.constraints, value);
  }

  address = optimizer.optimizeExpr(address, true);

  ObjectPair op;
  bool success;
  solver->setTimeout(coreSolverTimeout);

  bool resolveSingleObject = SingleObjectResolution;

  if (resolveSingleObject && !isa<ConstantExpr>(address)) {
    // Address is symbolic
    resolveSingleObject = false;
    auto base_it = state.base_addrs.find(address);
    if (base_it != state.base_addrs.end()) {
      // Concrete address found in the map, now find the associated memory
      // object
      if (!state.addressSpace.resolveOne(state, solver.get(), base_it->second,
                                         op, success) ||
          !success) {
        klee_warning("Failed to resolve concrete address from the base_addrs "
                     "map to a memory object");
      } else {
        // We have resolved the stored concrete address to a memory object.
        // Now let's see if we can prove an overflow - we are only interested
        // in two cases: either we overflow and it's a bug or we don't and we
        // carry on; in this mode we are not interested in trying out other
        // memory objects
        resolveSingleObject = true;
      }
    }
  } else {
    resolveSingleObject = false;
  }

  if (!resolveSingleObject) {
    if (!state.addressSpace.resolveOne(state, solver.get(), address, op,
                                       success)) {
      address = toConstant(state, address, "resolveOne failure");
      success = state.addressSpace.resolveOne(cast<ConstantExpr>(address), op);
    }
    solver->setTimeout(time::Span());

    if (success) {
      const MemoryObject *mo = op.first;

      if (MaxSymArraySize && mo->size >= MaxSymArraySize) {
        address = toConstant(state, address, "max-sym-array-size");
      }

      ref<Expr> offset = mo->getOffsetExpr(address);
      ref<Expr> check = mo->getBoundsCheckOffset(offset, bytes);
      check = optimizer.optimizeExpr(check, true);

      bool inBounds;
      solver->setTimeout(coreSolverTimeout);
      bool success = solver->mustBeTrue(state.constraints, check, inBounds,
                                        state.queryMetaData);
      solver->setTimeout(time::Span());
      if (!success) {
        state.pc = state.prevPC;
        terminateStateOnSolverError(state, "Query timed out (bounds check).");
        return;
      }

      if (inBounds) {
        const ObjectState *os = op.second;
        if (isWrite) {
          if (os->readOnly) {
            terminateStateOnProgramError(state,
                                         "memory error: object read only",
                                         StateTerminationType::ReadOnly);
          } else {
            ObjectState *wos = state.addressSpace.getWriteable(mo, os);
            wos->write(offset, value);
          }
        } else {
          ref<Expr> result = os->read(offset, type);

          if (interpreterOpts.MakeConcreteSymbolic)
            result = replaceReadWithSymbolic(state, result);

          bindLocal(target, state, result);
        }

        return;
      }
    }
  }

  // we are on an error path (no resolution, multiple resolution, one
  // resolution with out of bounds), or we do a single object resolution
  address = optimizer.optimizeExpr(address, true);
  ResolutionList rl;
  bool incomplete = false;

  if (!resolveSingleObject) {
    solver->setTimeout(coreSolverTimeout);
    incomplete = state.addressSpace.resolve(state, solver.get(), address, rl,
                                            0, coreSolverTimeout);
    solver->setTimeout(time::Span());
  } else {
    rl.push_back(op); // we already have the object pair, no need to look for it
  }

  // XXX there is some query wastage here. who cares?
  ExecutionState *unbound = &state;

  for (ResolutionList::iterator i = rl.begin(), ie = rl.end(); i != ie; ++i) {
    const MemoryObject *mo = i->first;
    const ObjectState *os = i->second;
    ref<Expr> inBounds = mo->getBoundsCheckPointer(address, bytes);

    StatePair branches = fork(*unbound, inBounds, true, BranchType::MemOp);
    ExecutionState *bound = branches.first;

    // bound can be 0 on failure or overlapped
    if (bound) {
      if (isWrite) {
        if (os->readOnly) {
          terminateStateOnProgramError(*bound,
                                       "memory error: object read only",
                                       StateTerminationType::ReadOnly);
        } else {
          ObjectState *wos = bound->addressSpace.getWriteable(mo, os);
          wos->write(mo->getOffsetExpr(address), value);
        }
      } else {
        ref<Expr> result = os->read(mo->getOffsetExpr(address), type);
        bindLocal(target, *bound, result);
      }
    }

    unbound = branches.second;
    if (!unbound)
      break;
  }

  // XXX should we distinguish out of bounds and overlapped cases?
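  // Editorial note (added): at this point every object the address could
  // resolve to has been handled by the forks above; `unbound` is the residual
  // state in which the access hits none of them. The code below classifies
  // that residual case as a solver timeout, a null-page access, a likely
  // use-after-free (only with the deterministic kdalloc allocator, by asking
  // it about the pointed-to location), or a generic out-of-bounds pointer.
  // This comment summarises existing behaviour only.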
  if (unbound) {
    if (incomplete) {
      terminateStateOnSolverError(*unbound, "Query timed out (resolve).");
    } else {
      if (auto CE = dyn_cast<ConstantExpr>(address)) {
        std::uintptr_t ptrval = CE->getZExtValue();
        auto ptr = reinterpret_cast<void *>(ptrval);
        if (ptrval < MemoryManager::pageSize) {
          terminateStateOnProgramError(
              *unbound, "memory error: null page access",
              StateTerminationType::Ptr, getAddressInfo(*unbound, address));
          return;
        } else if (MemoryManager::isDeterministic) {
          using kdalloc::LocationInfo;
          auto li = unbound->heapAllocator.locationInfo(ptr, bytes);
          if (li == LocationInfo::LI_AllocatedOrQuarantined) {
            // In case there is no size mismatch (checked by resolving for base
            // address), the object is quarantined.
            auto base = reinterpret_cast<std::uintptr_t>(li.getBaseAddress());
            auto baseExpr = Expr::createPointer(base);
            ObjectPair op;
            if (!unbound->addressSpace.resolveOne(baseExpr, op)) {
              terminateStateOnProgramError(
                  *unbound, "memory error: use after free",
                  StateTerminationType::Ptr,
                  getAddressInfo(*unbound, address));
              return;
            }
          }
        }
      }

      terminateStateOnProgramError(
          *unbound, "memory error: out of bound pointer",
          StateTerminationType::Ptr, getAddressInfo(*unbound, address));
    }
  }
}

void Executor::executeMakeSymbolic(ExecutionState &state,
                                   const MemoryObject *mo,
                                   const std::string &name) {
  // Create a new object state for the memory object (instead of a copy).
  if (!replayKTest) {
    // Find a unique name for this array. First try the original name,
    // or if that fails try adding a unique identifier.
    unsigned id = 0;
    std::string uniqueName = name;
    while (!state.arrayNames.insert(uniqueName).second) {
      uniqueName = name + "_" + llvm::utostr(++id);
    }
    const Array *array = arrayCache.CreateArray(uniqueName, mo->size);
    bindObjectInState(state, mo, false, array);
    state.addSymbolic(mo, array);

    auto found = seedMap.find(&state);
    if (found != seedMap.end()) { // In seed mode we need to add this as binding
      for (SeedInfo &si : found->second) {
        KTestObject *obj = si.getNextInput(mo, NamedSeedMatching);

        if (!obj) {
          if (AllowSeedExtension) {
            std::vector<unsigned char> &values = si.assignment.bindings[array];
            values = std::vector<unsigned char>(mo->size, '\0');
          } else /*if (!AllowSeedExtension)*/ {
            terminateStateOnUserError(state,
                                      "ran out of inputs during seeding");
            break;
          }
        } else {
          /* The condition below implies obj->numBytes != mo->size */
          if ((obj->numBytes < mo->size && !AllowSeedExtension) ||
              (obj->numBytes > mo->size && !AllowSeedTruncation)) {
            std::stringstream msg;
            msg << "replace size mismatch: " << mo->name << "[" << mo->size
                << "]"
                << " vs " << obj->name << "[" << obj->numBytes << "]"
                << " in test\n";
            terminateStateOnUserError(state, msg.str());
            break;
          } else {
            /* Either sizes are equal or seed extension/truncation is allowed */
            std::vector<unsigned char> &values = si.assignment.bindings[array];
            values.insert(values.begin(), obj->bytes,
                          obj->bytes + std::min(obj->numBytes, mo->size));
            for (unsigned i = obj->numBytes; i < mo->size; ++i)
              values.push_back('\0');
          }
        }
      }
    }
  } else {
    ObjectState *os = bindObjectInState(state, mo, false);
    if (replayPosition >= replayKTest->numObjects) {
      terminateStateOnUserError(state, "replay count mismatch");
    } else {
      KTestObject *obj = &replayKTest->objects[replayPosition++];
      if (obj->numBytes != mo->size) {
        terminateStateOnUserError(state, "replay size mismatch");
      } else {
        for (unsigned i = 0; i < mo->size; i++)
          os->write8(i, obj->bytes[i]);
      }
    }
  }
}

/***/

void Executor::runFunctionAsMain(Function *f, int argc, char **argv,
                                 char **envp) {
  std::vector<ref<Expr>> arguments;

  // force deterministic initialization of memory objects
  srand(1);
  srandom(1);

  MemoryObject
      *argvMO = 0;

  // In order to make uclibc happy and be closer to what the system is
  // doing we lay out the environments at the end of the argv array
  // (both are terminated by a null). There is also a final terminating
  // null that uclibc seems to expect, possibly the ELF header?

  int envc;
  for (envc = 0; envp[envc]; ++envc)
    ;

  unsigned NumPtrBytes = Context::get().getPointerWidth() / 8;
  KFunction *kf = kmodule->functionMap[f];
  assert(kf);
  Function::arg_iterator ai = f->arg_begin(), ae = f->arg_end();
  if (ai != ae) {
    arguments.push_back(ConstantExpr::alloc(argc, Expr::Int32));
    if (++ai != ae) {
      Instruction *first = &*(f->begin()->begin());
      argvMO =
          memory->allocate((argc + 1 + envc + 1 + 1) * NumPtrBytes,
                           /*isLocal=*/false, /*isGlobal=*/true,
                           /*state=*/nullptr, /*allocSite=*/first,
                           /*alignment=*/8);

      if (!argvMO)
        klee_error("Could not allocate memory for function arguments");

      arguments.push_back(argvMO->getBaseExpr());

      if (++ai != ae) {
        uint64_t envp_start = argvMO->address + (argc + 1) * NumPtrBytes;
        arguments.push_back(Expr::createPointer(envp_start));

        if (++ai != ae)
          klee_error("invalid main function (expect 0-3 arguments)");
      }
    }
  }

  ExecutionState *state =
      new ExecutionState(kmodule->functionMap[f], memory.get());

  if (pathWriter)
    state->pathOS = pathWriter->open();
  if (symPathWriter)
    state->symPathOS = symPathWriter->open();

  if (statsTracker)
    statsTracker->framePushed(*state, 0);

  assert(arguments.size() == f->arg_size() && "wrong number of arguments");
  for (unsigned i = 0, e = f->arg_size(); i != e; ++i)
    bindArgument(kf, i, *state, arguments[i]);

  if (argvMO) {
    ObjectState *argvOS = bindObjectInState(*state, argvMO, false);

    for (int i = 0; i < argc + 1 + envc + 1; i++) {
      if (i == argc || i >= argc + 1 + envc) {
        // Write NULL pointer
        argvOS->write(i * NumPtrBytes, Expr::createPointer(0));
      } else {
        char *s = i < argc ? argv[i] : envp[i - (argc + 1)];
        int j, len = strlen(s);

        MemoryObject *arg =
            memory->allocate(len + 1, /*isLocal=*/false, /*isGlobal=*/true,
                             state, /*allocSite=*/state->pc->inst,
                             /*alignment=*/8);
        if (!arg)
          klee_error("Could not allocate memory for function arguments");
        ObjectState *os = bindObjectInState(*state, arg, false);
        for (j = 0; j < len + 1; j++)
          os->write8(j, s[j]);

        // Write pointer to newly allocated and initialised argv/envp c-string
        argvOS->write(i * NumPtrBytes, arg->getBaseExpr());
      }
    }
  }

  initializeGlobals(*state);

  executionTree = createExecutionTree(
      *state, userSearcherRequiresInMemoryExecutionTree(),
      *interpreterHandler);
  run(*state);
  executionTree = nullptr;

  // hack to clear memory objects
  memory = nullptr;

  globalObjects.clear();
  globalAddresses.clear();

  if (statsTracker)
    statsTracker->done();
}

unsigned Executor::getPathStreamID(const ExecutionState &state) {
  assert(pathWriter);
  return state.pathOS.getID();
}

unsigned Executor::getSymbolicPathStreamID(const ExecutionState &state) {
  assert(symPathWriter);
  return state.symPathOS.getID();
}

void Executor::getConstraintLog(const ExecutionState &state, std::string &res,
                                Interpreter::LogType logFormat) {
  switch (logFormat) {
  case STP: {
    Query query(state.constraints, ConstantExpr::alloc(0, Expr::Bool));
    res = solver->getConstraintLog(query);
  } break;

  case KQUERY: {
    std::string Str;
    llvm::raw_string_ostream info(Str);
    ExprPPrinter::printConstraints(info, state.constraints);
    res = info.str();
  } break;

  case SMTLIB2: {
    std::string Str;
    llvm::raw_string_ostream info(Str);
    ExprSMTLIBPrinter printer;
    printer.setOutput(info);
    Query query(state.constraints, ConstantExpr::alloc(0, Expr::Bool));
    printer.setQuery(query);
    printer.generateOutput();
    res = info.str();
  } break;

  default:
    klee_warning("Executor::getConstraintLog() : Log format not supported!");
  }
}

bool Executor::getSymbolicSolution(const ExecutionState
&state, std::vector< std::pair > > &res) { solver->setTimeout(coreSolverTimeout); ConstraintSet extendedConstraints(state.constraints); ConstraintManager cm(extendedConstraints); // Go through each byte in every test case and attempt to restrict // it to the constraints contained in cexPreferences. (Note: // usually this means trying to make it an ASCII character (0-127) // and therefore human readable. It is also possible to customize // the preferred constraints. See test/Features/PreferCex.c for // an example) While this process can be very expensive, it can // also make understanding individual test cases much easier. for (auto& pi: state.cexPreferences) { bool mustBeTrue; // Attempt to bound byte to constraints held in cexPreferences bool success = solver->mustBeTrue(extendedConstraints, Expr::createIsZero(pi), mustBeTrue, state.queryMetaData); // If it isn't possible to add the condition without making the entire list // UNSAT, then just continue to the next condition if (!success) break; // If the particular constraint operated on in this iteration through // the loop isn't implied then add it to the list of constraints. if (!mustBeTrue) cm.addConstraint(pi); } std::vector< std::vector > values; std::vector objects; for (unsigned i = 0; i != state.symbolics.size(); ++i) objects.push_back(state.symbolics[i].second); bool success = solver->getInitialValues(extendedConstraints, objects, values, state.queryMetaData); solver->setTimeout(time::Span()); if (!success) { klee_warning("unable to compute initial values (invalid constraints?)!"); ExprPPrinter::printQuery(llvm::errs(), state.constraints, ConstantExpr::alloc(0, Expr::Bool)); return false; } for (unsigned i = 0; i != state.symbolics.size(); ++i) res.push_back(std::make_pair(state.symbolics[i].first->name, values[i])); return true; } void Executor::getCoveredLines(const ExecutionState &state, std::map > &res) { res = state.coveredLines; } void Executor::doImpliedValueConcretization(ExecutionState &state, ref e, ref value) { abort(); // FIXME: Broken until we sort out how to do the write back. if (DebugCheckForImpliedValues) ImpliedValue::checkForImpliedValues(solver->solver.get(), e, value); ImpliedValueList results; ImpliedValue::getImpliedValues(e, value, results); for (ImpliedValueList::iterator it = results.begin(), ie = results.end(); it != ie; ++it) { ReadExpr *re = it->first.get(); if (ConstantExpr *CE = dyn_cast(re->index)) { // FIXME: This is the sole remaining usage of the Array object // variable. Kill me. const MemoryObject *mo = 0; //re->updates.root->object; const ObjectState *os = state.addressSpace.findObject(mo); if (!os) { // object has been free'd, no need to concretize (although as // in other cases we would like to concretize the outstanding // reads, but we have no facility for that yet) } else { assert(!os->readOnly && "not possible? read only object with static read?"); ObjectState *wos = state.addressSpace.getWriteable(mo, os); wos->write(CE, it->second); } } } } Expr::Width Executor::getWidthForLLVMType(llvm::Type *type) const { return kmodule->targetData->getTypeSizeInBits(type); } size_t Executor::getAllocationAlignment(const llvm::Value *allocSite) const { // FIXME: 8 was the previous default. We shouldn't hard code this // and should fetch the default from elsewhere. 
  const size_t forcedAlignment = 8;
  size_t alignment = 0;
  llvm::Type *type = NULL;
  std::string allocationSiteName(allocSite->getName().str());
  if (const GlobalObject *GO = dyn_cast<GlobalObject>(allocSite)) {
    alignment = GO->getAlignment();
    if (const GlobalVariable *globalVar = dyn_cast<GlobalVariable>(GO)) {
      // All GlobalVariables have pointer type
      assert(globalVar->getType()->isPointerTy() &&
             "globalVar's type is not a pointer");
      type = globalVar->getValueType();
    } else {
      type = GO->getType();
    }
  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(allocSite)) {
#if LLVM_VERSION_CODE <= LLVM_VERSION(10, 0)
    alignment = AI->getAlignment();
#else
    alignment = AI->getAlign().value();
#endif
    type = AI->getAllocatedType();
  } else if (isa<InvokeInst>(allocSite) || isa<CallInst>(allocSite)) {
    // FIXME: Model the semantics of the call to use the right alignment
    const CallBase &cb = cast<CallBase>(*allocSite);
    llvm::Function *fn =
        klee::getDirectCallTarget(cb, /*moduleIsFullyLinked=*/true);
    if (fn)
      allocationSiteName = fn->getName().str();

    klee_warning_once(fn != NULL ? fn : allocSite,
                      "Alignment of memory from call \"%s\" is not "
                      "modelled. Using alignment of %zu.",
                      allocationSiteName.c_str(), forcedAlignment);
    alignment = forcedAlignment;
  } else {
    llvm_unreachable("Unhandled allocation site");
  }

  if (alignment == 0) {
    assert(type != NULL);
    // No specified alignment. Get the alignment for the type.
    if (type->isSized()) {
#if LLVM_VERSION_CODE >= LLVM_VERSION(16, 0)
      alignment = kmodule->targetData->getPrefTypeAlign(type).value();
#else
      alignment = kmodule->targetData->getPrefTypeAlignment(type);
#endif
    } else {
      klee_warning_once(allocSite,
                        "Cannot determine memory alignment for "
                        "\"%s\". Using alignment of %zu.",
                        allocationSiteName.c_str(), forcedAlignment);
      alignment = forcedAlignment;
    }
  }

  // Currently we require the alignment to be a power of 2
  if (!bits64::isPowerOfTwo(alignment)) {
    klee_warning_once(allocSite,
                      "Alignment of %zu requested for %s but this is "
                      "not supported. Using alignment of %zu",
                      alignment, allocSite->getName().str().c_str(),
                      forcedAlignment);
    alignment = forcedAlignment;
  }
  assert(bits64::isPowerOfTwo(alignment) &&
         "Returned alignment must be a power of two");
  return alignment;
}

void Executor::prepareForEarlyExit() {
  if (statsTracker) {
    // Make sure stats get flushed out
    statsTracker->done();
  }
}

/// Returns the errno location in memory
int *Executor::getErrnoLocation(const ExecutionState &state) const {
#if !defined(__APPLE__) && !defined(__FreeBSD__)
  /* From /usr/include/errno.h: it [errno] is a per-thread variable.
   */
  return __errno_location();
#else
  return __error();
#endif
}

void Executor::dumpExecutionTree() {
  if (!::dumpExecutionTree)
    return;

  char name[32];
  snprintf(name, sizeof(name), "exec_tree%08d.dot", (int)stats::instructions);
  auto os = interpreterHandler->openOutputFile(name);
  if (os)
    executionTree->dump(*os);

  ::dumpExecutionTree = 0;
}

void Executor::dumpStates() {
  if (!::dumpStates)
    return;

  auto os = interpreterHandler->openOutputFile("states.txt");

  if (os) {
    for (ExecutionState *es : states) {
      *os << "(" << es << ",";
      *os << "[";
      auto next = es->stack.begin();
      ++next;
      for (auto sfIt = es->stack.begin(), sf_ie = es->stack.end();
           sfIt != sf_ie; ++sfIt) {
        *os << "('" << sfIt->kf->function->getName().str() << "',";
        if (next == es->stack.end()) {
          *os << es->prevPC->info->line << "), ";
        } else {
          *os << next->caller->info->line << "), ";
          ++next;
        }
      }
      *os << "], ";

      StackFrame &sf = es->stack.back();
      uint64_t md2u =
          computeMinDistToUncovered(es->pc, sf.minDistToUncoveredOnReturn);
      uint64_t icnt = theStatisticManager->getIndexedValue(
          stats::instructions, es->pc->info->id);
      uint64_t cpicnt =
          sf.callPathNode->statistics.getValue(stats::instructions);

      *os << "{";
      *os << "'depth' : " << es->depth << ", ";
      *os << "'queryCost' : " << es->queryMetaData.queryCost << ", ";
      *os << "'coveredNew' : " << es->coveredNew << ", ";
      *os << "'instsSinceCovNew' : " << es->instsSinceCovNew << ", ";
      *os << "'md2u' : " << md2u << ", ";
      *os << "'icnt' : " << icnt << ", ";
      *os << "'CPicnt' : " << cpicnt << ", ";
      *os << "}";
      *os << ")\n";
    }
  }

  ::dumpStates = 0;
}

///

Interpreter *Interpreter::create(LLVMContext &ctx,
                                 const InterpreterOptions &opts,
                                 InterpreterHandler *ih) {
  return new Executor(ctx, opts, ih);
}
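// Editorial note (added): Interpreter::create above is the factory used by
// embedders such as the klee driver. A minimal, illustrative call sequence
// (names taken from klee/Core/Interpreter.h; exact signatures may differ
// between versions) looks roughly like:
//
//   Interpreter::InterpreterOptions opts;
//   std::unique_ptr<Interpreter> interp(
//       Interpreter::create(ctx, opts, handler));
//   interp->setModule(loadedModules, moduleOpts); // hypothetical setup call
//   interp->runFunctionAsMain(mainFn, argc, argv, envp);
//
// This sketch is not part of the original file.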