From 8e8732d482e42e363f0f4c51794ed966701371e7 Mon Sep 17 00:00:00 2001
From: Dan Liew
Date: Wed, 24 May 2017 16:32:47 +0100
Subject: Implement basic support for vectorized instructions.

We use LLVM's Scalarizer pass to remove most vectorized code so that the
Executor only needs to support the InsertElement and ExtractElement
instructions. This pass was not available in LLVM 3.4, so to support that
LLVM version the pass has been back-ported.

Assertions have been added to check that the Executor is not receiving
vector operand types that it can't handle.

There are a few limitations to this implementation:

* The InsertElement and ExtractElement index cannot be symbolic.
* There is no support for LLVM versions older than 3.4.
---
 lib/Core/Executor.cpp                              | 218 ++++++-
 lib/Core/Executor.h                                |   1 +
 lib/Module/CMakeLists.txt                          |   1 +
 lib/Module/KModule.cpp                             |   9 +
 lib/Module/Passes.h                                |   9 +
 lib/Module/Scalarizer.cpp                          | 651 +++++++++++++++++++++
 test/VectorInstructions/extract_element.c          |  36 ++
 test/VectorInstructions/extract_element_symbolic.c |  21 +
 .../floating_point_ops_constant.c                  | 152 +++++
 test/VectorInstructions/insert_element.c           |  46 ++
 test/VectorInstructions/insert_element_symbolic.c  |  22 +
 test/VectorInstructions/integer_ops_constant.c     | 201 +++++++
 .../integer_ops_signed_symbolic.c                  | 130 ++++
 .../integer_ops_unsigned_symbolic.c                | 129 ++++
 test/VectorInstructions/shuffle_element.c          |  83 +++
 15 files changed, 1697 insertions(+), 12 deletions(-)
 create mode 100644 lib/Module/Scalarizer.cpp
 create mode 100644 test/VectorInstructions/extract_element.c
 create mode 100644 test/VectorInstructions/extract_element_symbolic.c
 create mode 100644 test/VectorInstructions/floating_point_ops_constant.c
 create mode 100644 test/VectorInstructions/insert_element.c
 create mode 100644 test/VectorInstructions/insert_element_symbolic.c
 create mode 100644 test/VectorInstructions/integer_ops_constant.c
 create mode 100644 test/VectorInstructions/integer_ops_signed_symbolic.c
 create mode 100644 test/VectorInstructions/integer_ops_unsigned_symbolic.c
 create mode 100644 test/VectorInstructions/shuffle_element.c

diff --git a/lib/Core/Executor.cpp b/lib/Core/Executor.cpp
index c087b79b..96181a3d 100644
--- a/lib/Core/Executor.cpp
+++ b/lib/Core/Executor.cpp
@@ -279,6 +279,7 @@ namespace {
   cl::values(
       clEnumValN(Executor::Abort, "Abort", "The program crashed"),
       clEnumValN(Executor::Assert, "Assert", "An assertion was hit"),
+      clEnumValN(Executor::BadVectorAccess, "BadVectorAccess", "Vector accessed out of bounds"),
       clEnumValN(Executor::Exec, "Exec", "Trying to execute an unexpected instruction"),
       clEnumValN(Executor::External, "External", "External objects referenced"),
       clEnumValN(Executor::Free, "Free", "Freeing invalid memory"),
@@ -333,6 +334,7 @@ namespace klee {
 const char *Executor::TerminateReasonNames[] = {
   [ Abort ] = "abort",
   [ Assert ] = "assert",
+  [ BadVectorAccess ] = "bad_vector_access",
   [ Exec ] = "exec",
   [ External ] = "external",
   [ Free ] = "free",
@@ -1102,8 +1104,18 @@ ref<klee::ConstantExpr> Executor::evalConstant(const Constant *c) {
     }
     ref<Expr> res = ConcatExpr::createN(kids.size(), kids.data());
     return cast<ConstantExpr>(res);
+  } else if (const ConstantVector *cv = dyn_cast<ConstantVector>(c)) {
+    llvm::SmallVector<ref<Expr>, 8> kids;
+    const size_t numOperands = cv->getNumOperands();
+    kids.reserve(numOperands);
+    for (unsigned i = 0; i < numOperands; ++i) {
+      kids.push_back(evalConstant(cv->getOperand(i)));
+    }
+    ref<Expr> res = ConcatExpr::createN(numOperands, kids.data());
+    assert(isa<ConstantExpr>(res) &&
+           "result of constant vector built is not a constant");
+    return cast<ConstantExpr>(res);
   } else {
-    // Constant{Vector}
     llvm::report_fatal_error("invalid argument to evalConstant()");
   }
 }
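(Aside: a standalone sketch, not part of the patch itself. The evalConstant()
branch above concatenates vector elements with ConcatExpr so that element 0
lands in the most significant bits; the InsertElement/ExtractElement handling
further down has to flip indices into bit offsets accordingly. The 4 x 8-bit
example below is purely illustrative of that layout:

  #include <assert.h>
  #include <stdint.h>

  int main(void) {
    /* A hypothetical constant <4 x i8> vector {0, 1, 2, 3}, concatenated
       MSB-first the way evalConstant() builds it with ConcatExpr. */
    const unsigned EltBits = 8, NumElts = 4;
    uint8_t elems[4] = {0, 1, 2, 3};
    uint32_t vec = 0;
    for (unsigned i = 0; i < NumElts; ++i)
      vec |= (uint32_t)elems[i] << (EltBits * (NumElts - i - 1));
    assert(vec == 0x00010203u); /* element 0 is leftmost */
    /* Reading element i back mirrors the bitOffset computation used by the
       ExtractElement/InsertElement handling in this patch. */
    for (unsigned i = 0; i < NumElts; ++i) {
      unsigned bitOffset = EltBits * (NumElts - i - 1);
      assert((uint8_t)(vec >> bitOffset) == elems[i]);
    }
    return 0;
  })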
@@ -1910,6 +1922,12 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) {
 
     // Special instructions
   case Instruction::Select: {
+    assert(i->getOperand(0)->getType()->isIntegerTy() &&
+           "Invalid operand type");
+    assert((i->getOperand(1)->getType() == i->getOperand(2)->getType()) &&
+           "true and false operands to Select have mismatching types");
+    // NOTE: We do not check that operands 1 and 2 are not of vector type
+    // because the scalarizer pass might not remove these.
     ref<Expr> cond = eval(ki, 0, state).value;
     ref<Expr> tExpr = eval(ki, 1, state).value;
     ref<Expr> fExpr = eval(ki, 2, state).value;
@@ -1925,6 +1943,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) {
 
     // Arithmetic / logical
   case Instruction::Add: {
+    assert(i->getOperand(0)->getType()->isIntegerTy() &&
+           "Invalid operand type");
+    assert(i->getOperand(1)->getType()->isIntegerTy() &&
+           "Invalid operand type");
     ref<Expr> left = eval(ki, 0, state).value;
     ref<Expr> right = eval(ki, 1, state).value;
     bindLocal(ki, state, AddExpr::create(left, right));
     break;
   }
 
@@ -1932,6 +1954,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) {
   case Instruction::Sub: {
+    assert(i->getOperand(0)->getType()->isIntegerTy() &&
+           "Invalid operand type");
+    assert(i->getOperand(1)->getType()->isIntegerTy() &&
+           "Invalid operand type");
     ref<Expr> left = eval(ki, 0, state).value;
     ref<Expr> right = eval(ki, 1, state).value;
     bindLocal(ki, state, SubExpr::create(left, right));
     break;
   }
 
@@ -1939,6 +1965,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) {
   case Instruction::Mul: {
+    assert(i->getOperand(0)->getType()->isIntegerTy() &&
+           "Invalid operand type");
+    assert(i->getOperand(1)->getType()->isIntegerTy() &&
+           "Invalid operand type");
     ref<Expr> left = eval(ki, 0, state).value;
     ref<Expr> right = eval(ki, 1, state).value;
     bindLocal(ki, state, MulExpr::create(left, right));
     break;
   }
 
@@ -1946,6 +1976,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) {
   case Instruction::UDiv: {
+    assert(i->getOperand(0)->getType()->isIntegerTy() &&
+           "Invalid operand type");
+    assert(i->getOperand(1)->getType()->isIntegerTy() &&
+           "Invalid operand type");
     ref<Expr> left = eval(ki, 0, state).value;
     ref<Expr> right = eval(ki, 1, state).value;
     ref<Expr> result = UDivExpr::create(left, right);
     bindLocal(ki, state, result);
     break;
   }
 
@@ -1954,6 +1988,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) {
   case Instruction::SDiv: {
+    assert(i->getOperand(0)->getType()->isIntegerTy() &&
+           "Invalid operand type");
+    assert(i->getOperand(1)->getType()->isIntegerTy() &&
+           "Invalid operand type");
     ref<Expr> left = eval(ki, 0, state).value;
     ref<Expr> right = eval(ki, 1, state).value;
     ref<Expr> result = SDivExpr::create(left, right);
     bindLocal(ki, state, result);
     break;
   }
 
@@ -1962,14 +2000,22 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) {
   case Instruction::URem: {
+    assert(i->getOperand(0)->getType()->isIntegerTy() &&
+           "Invalid operand type");
+    assert(i->getOperand(1)->getType()->isIntegerTy() &&
+           "Invalid operand type");
     ref<Expr> left = eval(ki, 0, state).value;
     ref<Expr> right = eval(ki, 1, state).value;
     ref<Expr> result = URemExpr::create(left, right);
     bindLocal(ki, state, result);
     break;
   }
- 
+
   case Instruction::SRem: {
+    assert(i->getOperand(0)->getType()->isIntegerTy() &&
+           "Invalid operand type");
+    assert(i->getOperand(1)->getType()->isIntegerTy() &&
+           "Invalid operand type");
     ref<Expr> left = eval(ki, 0, state).value;
     ref<Expr> right = eval(ki, 1, state).value;
     ref<Expr> result = SRemExpr::create(left,
right); @@ -1978,6 +2024,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::And: { + assert(i->getOperand(0)->getType()->isIntegerTy() && + "Invalid operand type"); + assert(i->getOperand(1)->getType()->isIntegerTy() && + "Invalid operand type"); ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = AndExpr::create(left, right); @@ -1986,6 +2036,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::Or: { + assert(i->getOperand(0)->getType()->isIntegerTy() && + "Invalid operand type"); + assert(i->getOperand(1)->getType()->isIntegerTy() && + "Invalid operand type"); ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = OrExpr::create(left, right); @@ -1994,6 +2048,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::Xor: { + assert(i->getOperand(0)->getType()->isIntegerTy() && + "Invalid operand type"); + assert(i->getOperand(1)->getType()->isIntegerTy() && + "Invalid operand type"); ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = XorExpr::create(left, right); @@ -2002,6 +2060,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::Shl: { + assert(i->getOperand(0)->getType()->isIntegerTy() && + "Invalid operand type"); + assert(i->getOperand(1)->getType()->isIntegerTy() && + "Invalid operand type"); ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = ShlExpr::create(left, right); @@ -2010,6 +2072,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::LShr: { + assert(i->getOperand(0)->getType()->isIntegerTy() && + "Invalid operand type"); + assert(i->getOperand(1)->getType()->isIntegerTy() && + "Invalid operand type"); ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = LShrExpr::create(left, right); @@ -2018,6 +2084,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::AShr: { + assert(i->getOperand(0)->getType()->isIntegerTy() && + "Invalid operand type"); + assert(i->getOperand(1)->getType()->isIntegerTy() && + "Invalid operand type"); ref left = eval(ki, 0, state).value; ref right = eval(ki, 1, state).value; ref result = AShrExpr::create(left, right); @@ -2028,9 +2098,15 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { // Compare case Instruction::ICmp: { + assert((i->getOperand(0)->getType()->isIntegerTy() || + i->getOperand(0)->getType()->isPointerTy()) && + "Invalid operand type"); + assert((i->getOperand(1)->getType()->isIntegerTy() || + i->getOperand(1)->getType()->isPointerTy()) && + "Invalid operand type"); CmpInst *ci = cast(i); ICmpInst *ii = cast(ci); - + switch(ii->getPredicate()) { case ICmpInst::ICMP_EQ: { ref left = eval(ki, 0, state).value; @@ -2167,6 +2243,8 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { // Conversion case Instruction::Trunc: { + assert(i->getOperand(0)->getType()->isIntegerTy() && + "Invalid operand type"); CastInst *ci = cast(i); ref result = ExtractExpr::create(eval(ki, 0, state).value, 0, @@ -2175,6 +2253,8 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { break; } case Instruction::ZExt: { + assert(i->getOperand(0)->getType()->isIntegerTy() && + "Invalid operand type"); CastInst *ci = 
cast(i); ref result = ZExtExpr::create(eval(ki, 0, state).value, getWidthForLLVMType(ci->getType())); @@ -2182,6 +2262,8 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { break; } case Instruction::SExt: { + assert(i->getOperand(0)->getType()->isIntegerTy() && + "Invalid operand type"); CastInst *ci = cast(i); ref result = SExtExpr::create(eval(ki, 0, state).value, getWidthForLLVMType(ci->getType())); @@ -2190,13 +2272,17 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::IntToPtr: { + assert(i->getOperand(0)->getType()->isIntegerTy() && + "Invalid operand type"); CastInst *ci = cast(i); Expr::Width pType = getWidthForLLVMType(ci->getType()); ref arg = eval(ki, 0, state).value; bindLocal(ki, state, ZExtExpr::create(arg, pType)); break; - } + } case Instruction::PtrToInt: { + assert(i->getOperand(0)->getType()->isPointerTy() && + "Invalid operand type"); CastInst *ci = cast(i); Expr::Width iType = getWidthForLLVMType(ci->getType()); ref arg = eval(ki, 0, state).value; @@ -2213,6 +2299,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { // Floating point instructions case Instruction::FAdd: { + assert(i->getOperand(0)->getType()->isFloatingPointTy() && + "Invalid operand type"); + assert(i->getOperand(1)->getType()->isFloatingPointTy() && + "Invalid operand type"); ref left = toConstant(state, eval(ki, 0, state).value, "floating point"); ref right = toConstant(state, eval(ki, 1, state).value, @@ -2233,6 +2323,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::FSub: { + assert(i->getOperand(0)->getType()->isFloatingPointTy() && + "Invalid operand type"); + assert(i->getOperand(1)->getType()->isFloatingPointTy() && + "Invalid operand type"); ref left = toConstant(state, eval(ki, 0, state).value, "floating point"); ref right = toConstant(state, eval(ki, 1, state).value, @@ -2250,8 +2344,12 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { bindLocal(ki, state, ConstantExpr::alloc(Res.bitcastToAPInt())); break; } - + case Instruction::FMul: { + assert(i->getOperand(0)->getType()->isFloatingPointTy() && + "Invalid operand type"); + assert(i->getOperand(1)->getType()->isFloatingPointTy() && + "Invalid operand type"); ref left = toConstant(state, eval(ki, 0, state).value, "floating point"); ref right = toConstant(state, eval(ki, 1, state).value, @@ -2272,6 +2370,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::FDiv: { + assert(i->getOperand(0)->getType()->isFloatingPointTy() && + "Invalid operand type"); + assert(i->getOperand(1)->getType()->isFloatingPointTy() && + "Invalid operand type"); ref left = toConstant(state, eval(ki, 0, state).value, "floating point"); ref right = toConstant(state, eval(ki, 1, state).value, @@ -2292,6 +2394,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::FRem: { + assert(i->getOperand(0)->getType()->isFloatingPointTy() && + "Invalid operand type"); + assert(i->getOperand(1)->getType()->isFloatingPointTy() && + "Invalid operand type"); ref left = toConstant(state, eval(ki, 0, state).value, "floating point"); ref right = toConstant(state, eval(ki, 1, state).value, @@ -2312,6 +2418,8 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::FPTrunc: { + assert(i->getOperand(0)->getType()->isFloatingPointTy() && + "Invalid operand type"); 
FPTruncInst *fi = cast(i); Expr::Width resultType = getWidthForLLVMType(fi->getType()); ref arg = toConstant(state, eval(ki, 0, state).value, @@ -2333,6 +2441,8 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::FPExt: { + assert(i->getOperand(0)->getType()->isFloatingPointTy() && + "Invalid operand type"); FPExtInst *fi = cast(i); Expr::Width resultType = getWidthForLLVMType(fi->getType()); ref arg = toConstant(state, eval(ki, 0, state).value, @@ -2353,6 +2463,8 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::FPToUI: { + assert(i->getOperand(0)->getType()->isFloatingPointTy() && + "Invalid operand type"); FPToUIInst *fi = cast(i); Expr::Width resultType = getWidthForLLVMType(fi->getType()); ref arg = toConstant(state, eval(ki, 0, state).value, @@ -2374,6 +2486,8 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::FPToSI: { + assert(i->getOperand(0)->getType()->isFloatingPointTy() && + "Invalid operand type"); FPToSIInst *fi = cast(i); Expr::Width resultType = getWidthForLLVMType(fi->getType()); ref arg = toConstant(state, eval(ki, 0, state).value, @@ -2395,6 +2509,8 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::UIToFP: { + assert(i->getOperand(0)->getType()->isIntegerTy() && + "Invalid operand type"); UIToFPInst *fi = cast(i); Expr::Width resultType = getWidthForLLVMType(fi->getType()); ref arg = toConstant(state, eval(ki, 0, state).value, @@ -2411,6 +2527,8 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::SIToFP: { + assert(i->getOperand(0)->getType()->isIntegerTy() && + "Invalid operand type"); SIToFPInst *fi = cast(i); Expr::Width resultType = getWidthForLLVMType(fi->getType()); ref arg = toConstant(state, eval(ki, 0, state).value, @@ -2427,6 +2545,10 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { } case Instruction::FCmp: { + assert(i->getOperand(0)->getType()->isFloatingPointTy() && + "Invalid operand type"); + assert(i->getOperand(1)->getType()->isFloatingPointTy() && + "Invalid operand type"); FCmpInst *fi = cast(i); ref left = toConstant(state, eval(ki, 0, state).value, "floating point"); @@ -2566,16 +2688,88 @@ void Executor::executeInstruction(ExecutionState &state, KInstruction *ki) { break; } #endif + case Instruction::InsertElement: { + InsertElementInst *iei = cast(i); + ref vec = eval(ki, 0, state).value; + ref newElt = eval(ki, 1, state).value; + ref idx = eval(ki, 2, state).value; + + ConstantExpr *cIdx = dyn_cast(idx); + if (cIdx == NULL) { + terminateStateOnError( + state, "InsertElement, support for symbolic index not implemented", + Unhandled); + return; + } + uint64_t iIdx = cIdx->getZExtValue(); + const llvm::VectorType *vt = iei->getType(); + unsigned EltBits = getWidthForLLVMType(vt->getElementType()); - // Other instructions... 
-  // Unhandled
-  case Instruction::ExtractElement:
-  case Instruction::InsertElement:
+
+    if (iIdx >= vt->getNumElements()) {
+      // Out of bounds write
+      terminateStateOnError(state, "Out of bounds write when inserting element",
+                            BadVectorAccess);
+      return;
+    }
+
+    const unsigned elementCount = vt->getNumElements();
+    llvm::SmallVector<ref<Expr>, 8> elems;
+    elems.reserve(elementCount);
+    for (unsigned i = 0; i < elementCount; ++i) {
+      // evalConstant() will use ConcatExpr to build vectors with the
+      // zero-th element leftmost (most significant bits), followed
+      // by the next element (second leftmost) and so on. This means
+      // that we have to adjust the index so we read left to right
+      // rather than right to left.
+      unsigned bitOffset = EltBits * (elementCount - i - 1);
+      elems.push_back(i == iIdx ? newElt
+                                : ExtractExpr::create(vec, bitOffset, EltBits));
+    }
+
+    ref<Expr> Result = ConcatExpr::createN(elementCount, elems.data());
+    bindLocal(ki, state, Result);
+    break;
+  }
+  case Instruction::ExtractElement: {
+    ExtractElementInst *eei = cast<ExtractElementInst>(i);
+    ref<Expr> vec = eval(ki, 0, state).value;
+    ref<Expr> idx = eval(ki, 1, state).value;
+
+    ConstantExpr *cIdx = dyn_cast<ConstantExpr>(idx);
+    if (cIdx == NULL) {
+      terminateStateOnError(
+          state, "ExtractElement, support for symbolic index not implemented",
+          Unhandled);
+      return;
+    }
+    uint64_t iIdx = cIdx->getZExtValue();
+    const llvm::VectorType *vt = eei->getVectorOperandType();
+    unsigned EltBits = getWidthForLLVMType(vt->getElementType());
+
+    if (iIdx >= vt->getNumElements()) {
+      // Out of bounds read
+      terminateStateOnError(state, "Out of bounds read when extracting element",
+                            BadVectorAccess);
+      return;
+    }
+
+    // evalConstant() will use ConcatExpr to build vectors with the
+    // zero-th element leftmost (most significant bits), followed
+    // by the next element (second leftmost) and so on. This means
+    // that we have to adjust the index so we read left to right
+    // rather than right to left.
+    unsigned bitOffset = EltBits * (vt->getNumElements() - iIdx - 1);
+    ref<Expr> Result = ExtractExpr::create(vec, bitOffset, EltBits);
+    bindLocal(ki, state, Result);
+    break;
+  }
   case Instruction::ShuffleVector:
-    terminateStateOnError(state, "XXX vector instructions unhandled",
-                          Unhandled);
+    // Should never happen due to Scalarizer pass removing ShuffleVector
+    // instructions.
+    terminateStateOnExecError(state, "Unexpected ShuffleVector instruction");
     break;
-
+  // Other instructions...
+  // Unhandled
   default:
     terminateStateOnExecError(state, "illegal instruction");
     break;

diff --git a/lib/Core/Executor.h b/lib/Core/Executor.h
index c3f6e705..b3bb6864 100644
--- a/lib/Core/Executor.h
+++ b/lib/Core/Executor.h
@@ -105,6 +105,7 @@ public:
   enum TerminateReason {
     Abort,
     Assert,
+    BadVectorAccess,
     Exec,
     External,
     Free,
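(Aside: a rough C-level analogue, not code from this patch, of what the
Scalarizer pass registered in KModule::prepare below leaves behind for the
Executor. After scalarization, a vector add is just element reads
(ExtractElement), scalar adds, and element writes (InsertElement); the
function name and values here are hypothetical:

  #include <assert.h>
  #include <stdint.h>

  typedef uint32_t v4ui __attribute__((vector_size(16)));

  /* Illustrative only: the real pass rewrites LLVM IR, not C. */
  static v4ui add_scalarized(v4ui a, v4ui b) {
    v4ui c;
    for (unsigned i = 0; i < 4; ++i)
      c[i] = a[i] + b[i]; /* ExtractElement x2, scalar add, InsertElement */
    return c;
  }

  int main(void) {
    v4ui a = {1, 2, 3, 4};
    v4ui b = {10, 20, 30, 40};
    v4ui c = add_scalarized(a, b);
    assert(c[0] == 11 && c[1] == 22 && c[2] == 33 && c[3] == 44);
    return 0;
  })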
diff --git a/lib/Module/CMakeLists.txt b/lib/Module/CMakeLists.txt
index 006443a9..fc481780 100644
--- a/lib/Module/CMakeLists.txt
+++ b/lib/Module/CMakeLists.txt
@@ -17,6 +17,7 @@ klee_add_component(kleeModule
   Optimize.cpp
   PhiCleaner.cpp
   RaiseAsm.cpp
+  Scalarizer.cpp
 )
 
 set(LLVM_COMPONENTS
diff --git a/lib/Module/KModule.cpp b/lib/Module/KModule.cpp
index 005c8869..bee9a0b4 100644
--- a/lib/Module/KModule.cpp
+++ b/lib/Module/KModule.cpp
@@ -307,6 +307,15 @@ void KModule::prepare(const Interpreter::ModuleOptions &opts,
   // module.
   LegacyLLVMPassManagerTy pm;
   pm.add(new RaiseAsmPass());
+#if LLVM_VERSION_CODE >= LLVM_VERSION(3,4)
+  // This pass will scalarize as much code as possible so that the Executor
+  // does not need to handle operands of vector type for most instructions
+  // other than InsertElementInst and ExtractElementInst.
+  //
+  // NOTE: Must come before division/overshift checks because those passes
+  // don't know how to handle vector instructions.
+  pm.add(createScalarizerPass());
+#endif
   if (opts.CheckDivZero) pm.add(new DivCheckPass());
   if (opts.CheckOvershift) pm.add(new OvershiftCheckPass());
   // FIXME: This false here is to work around a bug in
diff --git a/lib/Module/Passes.h b/lib/Module/Passes.h
index 4f1a1453..293766c1 100644
--- a/lib/Module/Passes.h
+++ b/lib/Module/Passes.h
@@ -180,6 +180,15 @@ private:
                          llvm::BasicBlock *defaultBlock);
 };
 
+// This is the interface to a back-ported LLVM pass.
+// Newer versions of LLVM already have this in-tree
+// and we are not supporting vector instructions for
+// LLVM 2.9. Therefore this interface is only needed for
+// LLVM 3.4.
+#if LLVM_VERSION_CODE == LLVM_VERSION(3,4)
+llvm::FunctionPass *createScalarizerPass();
+#endif
+
 }
 
 #endif
diff --git a/lib/Module/Scalarizer.cpp b/lib/Module/Scalarizer.cpp
new file mode 100644
index 00000000..0d8e1f48
--- /dev/null
+++ b/lib/Module/Scalarizer.cpp
@@ -0,0 +1,651 @@
+//===--- Scalarizer.cpp - Scalarize vector operations ---------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass converts vector operations into scalar operations, in order
+// to expose optimization opportunities on the individual scalar operations.
+// It is mainly intended for targets that do not have vector units, but it
+// may also be useful for revectorizing code to different vector widths.
+//
+//===----------------------------------------------------------------------===//
+#include "klee/Config/Version.h"
+
+// This is taken from r195471 in LLVM. This unfortunately was introduced just
+// after LLVM branched for 3.4 so it has been copied into KLEE's source tree.
+// We only use this for LLVM 3.4 because newer LLVMs have this pass in-tree.
+#if LLVM_VERSION_CODE == LLVM_VERSION(3,4)
+
+#define DEBUG_TYPE "scalarizer"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/InstVisitor.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+
+using namespace llvm;
+
+namespace {
+// Used to store the scattered form of a vector.
+typedef SmallVector<Value*, 8> ValueVector;
+
+// Used to map a vector Value to its scattered form. We use std::map
+// because we want iterators to persist across insertion and because the
+// values are relatively large.
+typedef std::map<Value*, ValueVector> ScatterMap;
+
+// Lists Instructions that have been replaced with scalar implementations,
+// along with a pointer to their scattered forms.
+typedef SmallVector<std::pair<Instruction*, ValueVector*>, 16> GatherList;
+
+// Provides a very limited vector-like interface for lazily accessing one
+// component of a scattered vector or vector pointer.
+class Scatterer {
+public:
+  // Scatter V into Size components. If new instructions are needed,
+  // insert them before BBI in BB. If Cache is nonnull, use it to cache
+  // the results.
+ Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v, + ValueVector *cachePtr = 0); + + // Return component I, creating a new Value for it if necessary. + Value *operator[](unsigned I); + + // Return the number of components. + unsigned size() const { return Size; } + +private: + BasicBlock *BB; + BasicBlock::iterator BBI; + Value *V; + ValueVector *CachePtr; + PointerType *PtrTy; + ValueVector Tmp; + unsigned Size; +}; + +// FCmpSpliiter(FCI)(Builder, X, Y, Name) uses Builder to create an FCmp +// called Name that compares X and Y in the same way as FCI. +struct FCmpSplitter { + FCmpSplitter(FCmpInst &fci) : FCI(fci) {} + Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1, + const Twine &Name) const { + return Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name); + } + FCmpInst &FCI; +}; + +// ICmpSpliiter(ICI)(Builder, X, Y, Name) uses Builder to create an ICmp +// called Name that compares X and Y in the same way as ICI. +struct ICmpSplitter { + ICmpSplitter(ICmpInst &ici) : ICI(ici) {} + Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1, + const Twine &Name) const { + return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name); + } + ICmpInst &ICI; +}; + +// BinarySpliiter(BO)(Builder, X, Y, Name) uses Builder to create +// a binary operator like BO called Name with operands X and Y. +struct BinarySplitter { + BinarySplitter(BinaryOperator &bo) : BO(bo) {} + Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1, + const Twine &Name) const { + return Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name); + } + BinaryOperator &BO; +}; + +// GEPSpliiter()(Builder, X, Y, Name) uses Builder to create +// a single GEP called Name with operands X and Y. +struct GEPSplitter { + GEPSplitter() {} + Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1, + const Twine &Name) const { + return Builder.CreateGEP(Op0, Op1, Name); + } +}; + +// Information about a load or store that we're scalarizing. +struct VectorLayout { + VectorLayout() : VecTy(0), ElemTy(0), VecAlign(0), ElemSize(0) {} + + // Return the alignment of element I. + uint64_t getElemAlign(unsigned I) { + return MinAlign(VecAlign, I * ElemSize); + } + + // The type of the vector. + VectorType *VecTy; + + // The type of each element. + Type *ElemTy; + + // The alignment of the vector. + uint64_t VecAlign; + + // The size of each element. + uint64_t ElemSize; +}; + +class Scalarizer : public FunctionPass, + public InstVisitor { +public: + static char ID; + + Scalarizer() : + FunctionPass(ID) { + // HACK: + //initializeScalarizerPass(*PassRegistry::getPassRegistry()); + } + + virtual bool doInitialization(Module &M); + virtual bool runOnFunction(Function &F); + + // InstVisitor methods. They return true if the instruction was scalarized, + // false if nothing changed. 
+ bool visitInstruction(Instruction &) { return false; } + bool visitSelectInst(SelectInst &SI); + bool visitICmpInst(ICmpInst &); + bool visitFCmpInst(FCmpInst &); + bool visitBinaryOperator(BinaryOperator &); + bool visitGetElementPtrInst(GetElementPtrInst &); + bool visitCastInst(CastInst &); + bool visitBitCastInst(BitCastInst &); + bool visitShuffleVectorInst(ShuffleVectorInst &); + bool visitPHINode(PHINode &); + bool visitLoadInst(LoadInst &); + bool visitStoreInst(StoreInst &); + +private: + Scatterer scatter(Instruction *, Value *); + void gather(Instruction *, const ValueVector &); + bool canTransferMetadata(unsigned Kind); + void transferMetadata(Instruction *, const ValueVector &); + bool getVectorLayout(Type *, unsigned, VectorLayout &); + bool finish(); + + template bool splitBinary(Instruction &, const T &); + + ScatterMap Scattered; + GatherList Gathered; + unsigned ParallelLoopAccessMDKind; + const DataLayout *TDL; +}; + +char Scalarizer::ID = 0; +} // end anonymous namespace + +bool ScalarizeLoadStore = true; // HACK +/* +// This is disabled by default because having separate loads and stores makes +// it more likely that the -combiner-alias-analysis limits will be reached. +static cl::opt ScalarizeLoadStore + ("scalarize-load-store", cl::Hidden, cl::init(false), + cl::desc("Allow the scalarizer pass to scalarize loads and store")); + +INITIALIZE_PASS(Scalarizer, "scalarizer", "Scalarize vector operations", + false, false) +*/ + +Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v, + ValueVector *cachePtr) + : BB(bb), BBI(bbi), V(v), CachePtr(cachePtr) { + Type *Ty = V->getType(); + PtrTy = dyn_cast(Ty); + if (PtrTy) + Ty = PtrTy->getElementType(); + Size = Ty->getVectorNumElements(); + if (!CachePtr) + Tmp.resize(Size, 0); + else if (CachePtr->empty()) + CachePtr->resize(Size, 0); + else + assert(Size == CachePtr->size() && "Inconsistent vector sizes"); +} + +// Return component I, creating a new Value for it if necessary. +Value *Scatterer::operator[](unsigned I) { + ValueVector &CV = (CachePtr ? *CachePtr : Tmp); + // Try to reuse a previous value. + if (CV[I]) + return CV[I]; + IRBuilder<> Builder(BB, BBI); + if (PtrTy) { + if (!CV[0]) { + Type *Ty = + PointerType::get(PtrTy->getElementType()->getVectorElementType(), + PtrTy->getAddressSpace()); + CV[0] = Builder.CreateBitCast(V, Ty, V->getName() + ".i0"); + } + if (I != 0) + CV[I] = Builder.CreateConstGEP1_32(CV[0], I, + V->getName() + ".i" + Twine(I)); + } else { + // Search through a chain of InsertElementInsts looking for element I. + // Record other elements in the cache. The new V is still suitable + // for all uncached indices. 
+ for (;;) { + InsertElementInst *Insert = dyn_cast(V); + if (!Insert) + break; + ConstantInt *Idx = dyn_cast(Insert->getOperand(2)); + if (!Idx) + break; + unsigned J = Idx->getZExtValue(); + CV[J] = Insert->getOperand(1); + V = Insert->getOperand(0); + if (I == J) + return CV[J]; + } + CV[I] = Builder.CreateExtractElement(V, Builder.getInt32(I), + V->getName() + ".i" + Twine(I)); + } + return CV[I]; +} + +bool Scalarizer::doInitialization(Module &M) { + ParallelLoopAccessMDKind = + M.getContext().getMDKindID("llvm.mem.parallel_loop_access"); + return false; +} + +bool Scalarizer::runOnFunction(Function &F) { + TDL = getAnalysisIfAvailable(); + for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) { + BasicBlock *BB = BBI; + for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) { + Instruction *I = II; + bool Done = visit(I); + ++II; + if (Done && I->getType()->isVoidTy()) + I->eraseFromParent(); + } + } + return finish(); +} + +// Return a scattered form of V that can be accessed by Point. V must be a +// vector or a pointer to a vector. +Scatterer Scalarizer::scatter(Instruction *Point, Value *V) { + if (Argument *VArg = dyn_cast(V)) { + // Put the scattered form of arguments in the entry block, + // so that it can be used everywhere. + Function *F = VArg->getParent(); + BasicBlock *BB = &F->getEntryBlock(); + return Scatterer(BB, BB->begin(), V, &Scattered[V]); + } + if (Instruction *VOp = dyn_cast(V)) { + // Put the scattered form of an instruction directly after the + // instruction. + BasicBlock *BB = VOp->getParent(); + return Scatterer(BB, llvm::next(BasicBlock::iterator(VOp)), + V, &Scattered[V]); + } + // In the fallback case, just put the scattered before Point and + // keep the result local to Point. + return Scatterer(Point->getParent(), Point, V); +} + +// Replace Op with the gathered form of the components in CV. Defer the +// deletion of Op and creation of the gathered form to the end of the pass, +// so that we can avoid creating the gathered form if all uses of Op are +// replaced with uses of CV. +void Scalarizer::gather(Instruction *Op, const ValueVector &CV) { + // Since we're not deleting Op yet, stub out its operands, so that it + // doesn't make anything live unnecessarily. + for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I) + Op->setOperand(I, UndefValue::get(Op->getOperand(I)->getType())); + + transferMetadata(Op, CV); + + // If we already have a scattered form of Op (created from ExtractElements + // of Op itself), replace them with the new form. + ValueVector &SV = Scattered[Op]; + if (!SV.empty()) { + for (unsigned I = 0, E = SV.size(); I != E; ++I) { + Instruction *Old = cast(SV[I]); + CV[I]->takeName(Old); + Old->replaceAllUsesWith(CV[I]); + Old->eraseFromParent(); + } + } + SV = CV; + Gathered.push_back(GatherList::value_type(Op, &SV)); +} + +// Return true if it is safe to transfer the given metadata tag from +// vector to scalar instructions. +bool Scalarizer::canTransferMetadata(unsigned Tag) { + return (Tag == LLVMContext::MD_tbaa + || Tag == LLVMContext::MD_fpmath + || Tag == LLVMContext::MD_tbaa_struct + || Tag == LLVMContext::MD_invariant_load + || Tag == ParallelLoopAccessMDKind); +} + +// Transfer metadata from Op to the instructions in CV if it is known +// to be safe to do so. 
+void Scalarizer::transferMetadata(Instruction *Op, const ValueVector &CV) { + SmallVector, 4> MDs; + Op->getAllMetadataOtherThanDebugLoc(MDs); + for (unsigned I = 0, E = CV.size(); I != E; ++I) { + if (Instruction *New = dyn_cast(CV[I])) { + for (SmallVectorImpl >::iterator + MI = MDs.begin(), ME = MDs.end(); MI != ME; ++MI) + if (canTransferMetadata(MI->first)) + New->setMetadata(MI->first, MI->second); + New->setDebugLoc(Op->getDebugLoc()); + } + } +} + +// Try to fill in Layout from Ty, returning true on success. Alignment is +// the alignment of the vector, or 0 if the ABI default should be used. +bool Scalarizer::getVectorLayout(Type *Ty, unsigned Alignment, + VectorLayout &Layout) { + if (!TDL) + return false; + + // Make sure we're dealing with a vector. + Layout.VecTy = dyn_cast(Ty); + if (!Layout.VecTy) + return false; + + // Check that we're dealing with full-byte elements. + Layout.ElemTy = Layout.VecTy->getElementType(); + if (TDL->getTypeSizeInBits(Layout.ElemTy) != + TDL->getTypeStoreSizeInBits(Layout.ElemTy)) + return false; + + if (Alignment) + Layout.VecAlign = Alignment; + else + Layout.VecAlign = TDL->getABITypeAlignment(Layout.VecTy); + Layout.ElemSize = TDL->getTypeStoreSize(Layout.ElemTy); + return true; +} + +// Scalarize two-operand instruction I, using Split(Builder, X, Y, Name) +// to create an instruction like I with operands X and Y and name Name. +template +bool Scalarizer::splitBinary(Instruction &I, const Splitter &Split) { + VectorType *VT = dyn_cast(I.getType()); + if (!VT) + return false; + + unsigned NumElems = VT->getNumElements(); + IRBuilder<> Builder(I.getParent(), &I); + Scatterer Op0 = scatter(&I, I.getOperand(0)); + Scatterer Op1 = scatter(&I, I.getOperand(1)); + assert(Op0.size() == NumElems && "Mismatched binary operation"); + assert(Op1.size() == NumElems && "Mismatched binary operation"); + ValueVector Res; + Res.resize(NumElems); + for (unsigned Elem = 0; Elem < NumElems; ++Elem) + Res[Elem] = Split(Builder, Op0[Elem], Op1[Elem], + I.getName() + ".i" + Twine(Elem)); + gather(&I, Res); + return true; +} + +bool Scalarizer::visitSelectInst(SelectInst &SI) { + VectorType *VT = dyn_cast(SI.getType()); + if (!VT) + return false; + + unsigned NumElems = VT->getNumElements(); + IRBuilder<> Builder(SI.getParent(), &SI); + Scatterer Op1 = scatter(&SI, SI.getOperand(1)); + Scatterer Op2 = scatter(&SI, SI.getOperand(2)); + assert(Op1.size() == NumElems && "Mismatched select"); + assert(Op2.size() == NumElems && "Mismatched select"); + ValueVector Res; + Res.resize(NumElems); + + if (SI.getOperand(0)->getType()->isVectorTy()) { + Scatterer Op0 = scatter(&SI, SI.getOperand(0)); + assert(Op0.size() == NumElems && "Mismatched select"); + for (unsigned I = 0; I < NumElems; ++I) + Res[I] = Builder.CreateSelect(Op0[I], Op1[I], Op2[I], + SI.getName() + ".i" + Twine(I)); + } else { + Value *Op0 = SI.getOperand(0); + for (unsigned I = 0; I < NumElems; ++I) + Res[I] = Builder.CreateSelect(Op0, Op1[I], Op2[I], + SI.getName() + ".i" + Twine(I)); + } + gather(&SI, Res); + return true; +} + +bool Scalarizer::visitICmpInst(ICmpInst &ICI) { + return splitBinary(ICI, ICmpSplitter(ICI)); +} + +bool Scalarizer::visitFCmpInst(FCmpInst &FCI) { + return splitBinary(FCI, FCmpSplitter(FCI)); +} + +bool Scalarizer::visitBinaryOperator(BinaryOperator &BO) { + return splitBinary(BO, BinarySplitter(BO)); +} + +bool Scalarizer::visitGetElementPtrInst(GetElementPtrInst &GEPI) { + return splitBinary(GEPI, GEPSplitter()); +} + +bool Scalarizer::visitCastInst(CastInst &CI) { + 
VectorType *VT = dyn_cast(CI.getDestTy()); + if (!VT) + return false; + + unsigned NumElems = VT->getNumElements(); + IRBuilder<> Builder(CI.getParent(), &CI); + Scatterer Op0 = scatter(&CI, CI.getOperand(0)); + assert(Op0.size() == NumElems && "Mismatched cast"); + ValueVector Res; + Res.resize(NumElems); + for (unsigned I = 0; I < NumElems; ++I) + Res[I] = Builder.CreateCast(CI.getOpcode(), Op0[I], VT->getElementType(), + CI.getName() + ".i" + Twine(I)); + gather(&CI, Res); + return true; +} + +bool Scalarizer::visitBitCastInst(BitCastInst &BCI) { + VectorType *DstVT = dyn_cast(BCI.getDestTy()); + VectorType *SrcVT = dyn_cast(BCI.getSrcTy()); + if (!DstVT || !SrcVT) + return false; + + unsigned DstNumElems = DstVT->getNumElements(); + unsigned SrcNumElems = SrcVT->getNumElements(); + IRBuilder<> Builder(BCI.getParent(), &BCI); + Scatterer Op0 = scatter(&BCI, BCI.getOperand(0)); + ValueVector Res; + Res.resize(DstNumElems); + + if (DstNumElems == SrcNumElems) { + for (unsigned I = 0; I < DstNumElems; ++I) + Res[I] = Builder.CreateBitCast(Op0[I], DstVT->getElementType(), + BCI.getName() + ".i" + Twine(I)); + } else if (DstNumElems > SrcNumElems) { + // -> . Convert each t1 to and copy the + // individual elements to the destination. + unsigned FanOut = DstNumElems / SrcNumElems; + Type *MidTy = VectorType::get(DstVT->getElementType(), FanOut); + unsigned ResI = 0; + for (unsigned Op0I = 0; Op0I < SrcNumElems; ++Op0I) { + Value *V = Op0[Op0I]; + Instruction *VI; + // Look through any existing bitcasts before converting to . + // In the best case, the resulting conversion might be a no-op. + while ((VI = dyn_cast(V)) && + VI->getOpcode() == Instruction::BitCast) + V = VI->getOperand(0); + V = Builder.CreateBitCast(V, MidTy, V->getName() + ".cast"); + Scatterer Mid = scatter(&BCI, V); + for (unsigned MidI = 0; MidI < FanOut; ++MidI) + Res[ResI++] = Mid[MidI]; + } + } else { + // -> . Convert each group of into a t2. 
+ unsigned FanIn = SrcNumElems / DstNumElems; + Type *MidTy = VectorType::get(SrcVT->getElementType(), FanIn); + unsigned Op0I = 0; + for (unsigned ResI = 0; ResI < DstNumElems; ++ResI) { + Value *V = UndefValue::get(MidTy); + for (unsigned MidI = 0; MidI < FanIn; ++MidI) + V = Builder.CreateInsertElement(V, Op0[Op0I++], Builder.getInt32(MidI), + BCI.getName() + ".i" + Twine(ResI) + + ".upto" + Twine(MidI)); + Res[ResI] = Builder.CreateBitCast(V, DstVT->getElementType(), + BCI.getName() + ".i" + Twine(ResI)); + } + } + gather(&BCI, Res); + return true; +} + +bool Scalarizer::visitShuffleVectorInst(ShuffleVectorInst &SVI) { + VectorType *VT = dyn_cast(SVI.getType()); + if (!VT) + return false; + + unsigned NumElems = VT->getNumElements(); + Scatterer Op0 = scatter(&SVI, SVI.getOperand(0)); + Scatterer Op1 = scatter(&SVI, SVI.getOperand(1)); + ValueVector Res; + Res.resize(NumElems); + + for (unsigned I = 0; I < NumElems; ++I) { + int Selector = SVI.getMaskValue(I); + if (Selector < 0) + Res[I] = UndefValue::get(VT->getElementType()); + else if (unsigned(Selector) < Op0.size()) + Res[I] = Op0[Selector]; + else + Res[I] = Op1[Selector - Op0.size()]; + } + gather(&SVI, Res); + return true; +} + +bool Scalarizer::visitPHINode(PHINode &PHI) { + VectorType *VT = dyn_cast(PHI.getType()); + if (!VT) + return false; + + unsigned NumElems = VT->getNumElements(); + IRBuilder<> Builder(PHI.getParent(), &PHI); + ValueVector Res; + Res.resize(NumElems); + + unsigned NumOps = PHI.getNumOperands(); + for (unsigned I = 0; I < NumElems; ++I) + Res[I] = Builder.CreatePHI(VT->getElementType(), NumOps, + PHI.getName() + ".i" + Twine(I)); + + for (unsigned I = 0; I < NumOps; ++I) { + Scatterer Op = scatter(&PHI, PHI.getIncomingValue(I)); + BasicBlock *IncomingBlock = PHI.getIncomingBlock(I); + for (unsigned J = 0; J < NumElems; ++J) + cast(Res[J])->addIncoming(Op[J], IncomingBlock); + } + gather(&PHI, Res); + return true; +} + +bool Scalarizer::visitLoadInst(LoadInst &LI) { + if (!ScalarizeLoadStore) + return false; + if (!LI.isSimple()) + return false; + + VectorLayout Layout; + if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout)) + return false; + + unsigned NumElems = Layout.VecTy->getNumElements(); + IRBuilder<> Builder(LI.getParent(), &LI); + Scatterer Ptr = scatter(&LI, LI.getPointerOperand()); + ValueVector Res; + Res.resize(NumElems); + + for (unsigned I = 0; I < NumElems; ++I) + Res[I] = Builder.CreateAlignedLoad(Ptr[I], Layout.getElemAlign(I), + LI.getName() + ".i" + Twine(I)); + gather(&LI, Res); + return true; +} + +bool Scalarizer::visitStoreInst(StoreInst &SI) { + if (!ScalarizeLoadStore) + return false; + if (!SI.isSimple()) + return false; + + VectorLayout Layout; + Value *FullValue = SI.getValueOperand(); + if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout)) + return false; + + unsigned NumElems = Layout.VecTy->getNumElements(); + IRBuilder<> Builder(SI.getParent(), &SI); + Scatterer Ptr = scatter(&SI, SI.getPointerOperand()); + Scatterer Val = scatter(&SI, FullValue); + + ValueVector Stores; + Stores.resize(NumElems); + for (unsigned I = 0; I < NumElems; ++I) { + unsigned Align = Layout.getElemAlign(I); + Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], Align); + } + transferMetadata(&SI, Stores); + return true; +} + +// Delete the instructions that we scalarized. If a full vector result +// is still needed, recreate it using InsertElements. 
+bool Scalarizer::finish() { + if (Gathered.empty()) + return false; + for (GatherList::iterator GMI = Gathered.begin(), GME = Gathered.end(); + GMI != GME; ++GMI) { + Instruction *Op = GMI->first; + ValueVector &CV = *GMI->second; + if (!Op->use_empty()) { + // The value is still needed, so recreate it using a series of + // InsertElements. + Type *Ty = Op->getType(); + Value *Res = UndefValue::get(Ty); + unsigned Count = Ty->getVectorNumElements(); + IRBuilder<> Builder(Op->getParent(), Op); + for (unsigned I = 0; I < Count; ++I) + Res = Builder.CreateInsertElement(Res, CV[I], Builder.getInt32(I), + Op->getName() + ".upto" + Twine(I)); + Res->takeName(Op); + Op->replaceAllUsesWith(Res); + } + Op->eraseFromParent(); + } + Gathered.clear(); + Scattered.clear(); + return true; +} + +namespace klee { + llvm::FunctionPass *createScalarizerPass() { + return new Scalarizer(); + } +} + +#endif diff --git a/test/VectorInstructions/extract_element.c b/test/VectorInstructions/extract_element.c new file mode 100644 index 00000000..008691e3 --- /dev/null +++ b/test/VectorInstructions/extract_element.c @@ -0,0 +1,36 @@ +// REQUIRES: geq-llvm-3.4 +// RUN: %llvmgcc %s -emit-llvm -O0 -g -c -o %t1.bc +// RUN: rm -rf %t.klee-out +// NOTE: Have to pass `--optimize=false` to avoid vector operations being +// constant folded away. +// RUN: %klee --output-dir=%t.klee-out --optimize=false %t1.bc > %t.stdout.log 2> %t.stderr.log +// RUN: FileCheck -check-prefix=CHECK-STDOUT -input-file=%t.stdout.log %s +// RUN: FileCheck -check-prefix=CHECK-STDERR -input-file=%t.stderr.log %s +#include +#include +#include + +typedef uint32_t v4ui __attribute__ ((vector_size (16))); +int main() { + v4ui f = { 0, 1, 2, 3 }; + // Performing these reads should be ExtractElement instructions + // CHECK-STDOUT: f[0]=0 + printf("f[0]=%u\n", f[0]); + // CHECK-STDOUT-NEXT: f[1]=1 + printf("f[1]=%u\n", f[1]); + // CHECK-STDOUT-NEXT: f[2]=2 + printf("f[2]=%u\n", f[2]); + // CHECK-STDOUT-NEXT: f[3]=3 + printf("f[3]=%u\n", f[3]); + // CHECK-STDERR-NOT: extract_element.c:[[@LINE+1]]: ASSERTION FAIL + assert(f[0] == 0); + // CHECK-STDERR-NOT: extract_element.c:[[@LINE+1]]: ASSERTION FAIL + assert(f[1] == 1); + // CHECK-STDERR-NOT: extract_element.c:[[@LINE+1]]: ASSERTION FAIL + assert(f[2] == 2); + // CHECK-STDERR-NOT: extract_element.c:[[@LINE+1]]: ASSERTION FAIL + assert(f[3] == 3); + // CHECK-STDERR: extract_element.c:[[@LINE+1]]: Out of bounds read when extracting element + printf("f[4]=%u\n", f[4]); // Out of bounds + return 0; +} diff --git a/test/VectorInstructions/extract_element_symbolic.c b/test/VectorInstructions/extract_element_symbolic.c new file mode 100644 index 00000000..f75bad62 --- /dev/null +++ b/test/VectorInstructions/extract_element_symbolic.c @@ -0,0 +1,21 @@ +// REQUIRES: geq-llvm-3.4 +// RUN: %llvmgcc %s -emit-llvm -O0 -g -c -o %t1.bc +// RUN: rm -rf %t.klee-out +// RUN: not %klee --output-dir=%t.klee-out --exit-on-error %t1.bc > %t.log 2>&1 +// RUN: FileCheck -input-file=%t.log %s +#include "klee/klee.h" +#include +#include +#include + +typedef uint32_t v4ui __attribute__ ((vector_size (16))); +int main() { + v4ui f = { 0, 1, 2, 3 }; + unsigned index = 0; + klee_make_symbolic(&index, sizeof(unsigned), "index"); + // Performing read should be ExtractElement instruction. + // For now this is an expected limitation. 
+  // CHECK: extract_element_symbolic.c:[[@LINE+1]]: ExtractElement, support for symbolic index not implemented
+  uint32_t readValue = f[index];
+  return readValue % 255;
+}
diff --git a/test/VectorInstructions/floating_point_ops_constant.c b/test/VectorInstructions/floating_point_ops_constant.c
new file mode 100644
index 00000000..4e567a30
--- /dev/null
+++ b/test/VectorInstructions/floating_point_ops_constant.c
@@ -0,0 +1,152 @@
+// REQUIRES: geq-llvm-3.4
+// RUN: %llvmgcc %s -emit-llvm -O0 -g -c -o %t1.bc
+// RUN: rm -rf %t.klee-out
+// NOTE: Have to pass `--optimize=false` to avoid vector operations being
+// constant folded away.
+// RUN: %klee --output-dir=%t.klee-out --optimize=false --exit-on-error %t1.bc
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+
+typedef float v4float __attribute__((vector_size(16)));
+typedef double v4double __attribute__((vector_size(32)));
+
+#define ASSERT_EL(C, OP, A, B, INDEX) assert(C[INDEX] == (A[INDEX] OP B[INDEX]))
+#define ASSERT_ELV4(C, OP, A, B) \
+  do { \
+    ASSERT_EL(C, OP, A, B, 0); \
+    ASSERT_EL(C, OP, A, B, 1); \
+    ASSERT_EL(C, OP, A, B, 2); \
+    ASSERT_EL(C, OP, A, B, 3); \
+  } while (0);
+
+#define ASSERT_EL_TRUTH(C, OP, A, B, INDEX) \
+  assert(C[INDEX] ? (A[INDEX] OP B[INDEX]) : (!(A[INDEX] OP B[INDEX])))
+#define ASSERT_EL_TRUTH_V4(C, OP, A, B) \
+  do { \
+    ASSERT_EL_TRUTH(C, OP, A, B, 0); \
+    ASSERT_EL_TRUTH(C, OP, A, B, 1); \
+    ASSERT_EL_TRUTH(C, OP, A, B, 2); \
+    ASSERT_EL_TRUTH(C, OP, A, B, 3); \
+  } while (0);
+
+#define ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, INDEX) \
+  assert(C[INDEX] == (CONDITION ? A[INDEX] : B[INDEX]))
+#define ASSERT_EL_TERNARY_SCALAR_CONDITION_V4(C, CONDITION, A, B) \
+  do { \
+    ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 0); \
+    ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 1); \
+    ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 2); \
+    ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 3); \
+  } while (0);
+
+int main() {
+  // Float tests
+  {
+    v4float a = {1.0f, 2.5f, -3.5f, 4.0f};
+    v4float b = {-10.0f, 20.2f, -30.5f, 40.1f};
+
+    // Test addition
+    v4float c = a + b;
+    ASSERT_ELV4(c, +, a, b);
+
+    // Test subtraction
+    c = b - a;
+    ASSERT_ELV4(c, -, b, a);
+
+    // Test multiplication
+    c = a * b;
+    ASSERT_ELV4(c, *, a, b);
+
+    // Test division
+    c = a / b;
+    ASSERT_ELV4(c, /, a, b);
+
+    // NOTE: Can't use `ASSERT_ELV4` due to semantics
+    // of GCC vector extensions. See
+    // https://gcc.gnu.org/onlinedocs/gcc/Vector-Extensions.html
+
+    // Test ==
+    c = a == b;
+    ASSERT_EL_TRUTH_V4(c, ==, a, b);
+
+    // Test <
+    c = a < b;
+    ASSERT_EL_TRUTH_V4(c, <, a, b);
+
+    // Test <=
+    c = a <= b;
+    ASSERT_EL_TRUTH_V4(c, <=, a, b);
+
+    // Test >
+    c = a > b;
+    ASSERT_EL_TRUTH_V4(c, >, a, b);
+
+    // Test >=
+    c = a >= b;
+    ASSERT_EL_TRUTH_V4(c, >=, a, b);
+
+    // Test !=
+    c = a != b;
+    ASSERT_EL_TRUTH_V4(c, !=, a, b);
+
+    // Test ternary operator
+    c = 0 ? a : b;
+    ASSERT_EL_TERNARY_SCALAR_CONDITION_V4(c, 0, a, b);
+    c = 1 ? a : b;
+    ASSERT_EL_TERNARY_SCALAR_CONDITION_V4(c, 1, a, b);
+  }
+
+  // double tests
+  {
+    v4double a = {1.0, 2.5, -3.5, 4.0};
+    v4double b = {-10.0, 20.2, -30.5, 40.1};
+
+    // Test addition
+    v4double c = a + b;
+    ASSERT_ELV4(c, +, a, b);
+
+    // Test subtraction
+    c = b - a;
+    ASSERT_ELV4(c, -, b, a);
+
+    // Test multiplication
+    c = a * b;
+    ASSERT_ELV4(c, *, a, b);
+
+    // Test division
+    c = a / b;
+    ASSERT_ELV4(c, /, a, b);
+
+    // Test ==
+    c = a == b;
+    ASSERT_EL_TRUTH_V4(c, ==, a, b);
+
+    // Test <
+    c = a < b;
+    ASSERT_EL_TRUTH_V4(c, <, a, b);
+
+    // Test <=
+    c = a <= b;
+    ASSERT_EL_TRUTH_V4(c, <=, a, b);
+
+    // Test >
+    c = a > b;
+    ASSERT_EL_TRUTH_V4(c, >, a, b);
+
+    // Test >=
+    c = a >= b;
+    ASSERT_EL_TRUTH_V4(c, >=, a, b);
+
+    // Test !=
+    c = a != b;
+    ASSERT_EL_TRUTH_V4(c, !=, a, b);
+
+    // Test ternary operator
+    c = 0 ? a : b;
+    ASSERT_EL_TERNARY_SCALAR_CONDITION_V4(c, 0, a, b);
+    c = 1 ? a : b;
+    ASSERT_EL_TERNARY_SCALAR_CONDITION_V4(c, 1, a, b);
+  }
+  return 0;
+}
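(Aside: the NOTE in the test above about `ASSERT_ELV4` and GCC vector-extension
semantics refers to the fact that element-wise comparisons do not yield 0/1 but
a 0/all-ones mask, which is why truthiness is checked via ASSERT_EL_TRUTH_V4. A
minimal standalone demonstration, illustrative only and not part of the patch:

  #include <assert.h>
  #include <stdint.h>

  typedef int32_t v4si __attribute__((vector_size(16)));

  int main(void) {
    v4si a = {1, 2, 3, 4};
    v4si b = {1, 0, 3, 0};
    v4si c = a == b; /* element-wise comparison */
    /* A true element compares as all-ones (-1), not 1, so c[i] cannot be
       compared directly against the scalar result of (a[i] == b[i]). */
    assert(c[0] == -1 && c[1] == 0 && c[2] == -1 && c[3] == 0);
    return 0;
  })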
diff --git a/test/VectorInstructions/insert_element.c b/test/VectorInstructions/insert_element.c
new file mode 100644
index 00000000..8397dceb
--- /dev/null
+++ b/test/VectorInstructions/insert_element.c
@@ -0,0 +1,46 @@
+// REQUIRES: geq-llvm-3.4
+// RUN: %llvmgcc %s -emit-llvm -O0 -g -c -o %t1.bc
+// RUN: rm -rf %t.klee-out
+// NOTE: Have to pass `--optimize=false` to avoid vector operations being
+// constant folded away.
+// RUN: %klee --output-dir=%t.klee-out --optimize=false %t1.bc > %t.stdout.log 2> %t.stderr.log
+// RUN: FileCheck -check-prefix=CHECK-STDOUT -input-file=%t.stdout.log %s
+// RUN: FileCheck -check-prefix=CHECK-STDERR -input-file=%t.stderr.log %s
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+
+typedef uint32_t v4ui __attribute__ ((vector_size (16)));
+int main() {
+  v4ui f = { 0, 1, 2, 3 };
+  klee_print_expr("f:=", f);
+  // Performing these writes should be InsertElement instructions
+  f[0] = 255;
+  klee_print_expr("f after write to [0]", f);
+  f[1] = 128;
+  klee_print_expr("f after write to [1]", f);
+  f[2] = 1;
+  klee_print_expr("f after write to [2]", f);
+  f[3] = 19;
+  klee_print_expr("f after write to [3]", f);
+  // CHECK-STDOUT: f[0]=255
+  printf("f[0]=%u\n", f[0]);
+  // CHECK-STDOUT: f[1]=128
+  printf("f[1]=%u\n", f[1]);
+  // CHECK-STDOUT: f[2]=1
+  printf("f[2]=%u\n", f[2]);
+  // CHECK-STDOUT: f[3]=19
+  printf("f[3]=%u\n", f[3]);
+  // CHECK-STDERR-NOT: insert_element.c:[[@LINE+1]]: ASSERTION FAIL
+  assert(f[0] == 255);
+  // CHECK-STDERR-NOT: insert_element.c:[[@LINE+1]]: ASSERTION FAIL
+  assert(f[1] == 128);
+  // CHECK-STDERR-NOT: insert_element.c:[[@LINE+1]]: ASSERTION FAIL
+  assert(f[2] == 1);
+  // CHECK-STDERR-NOT: insert_element.c:[[@LINE+1]]: ASSERTION FAIL
+  assert(f[3] == 19);
+
+  // CHECK-STDERR: insert_element.c:[[@LINE+1]]: Out of bounds write when inserting element
+  f[4] = 255; // Out of bounds write
+  return 0;
+}
diff --git a/test/VectorInstructions/insert_element_symbolic.c b/test/VectorInstructions/insert_element_symbolic.c
new file mode 100644
index 00000000..76df899c
--- /dev/null
+++ b/test/VectorInstructions/insert_element_symbolic.c
@@ -0,0 +1,22 @@
+// REQUIRES: geq-llvm-3.4
+// RUN: %llvmgcc %s -emit-llvm -O0 -g -c -o %t1.bc
+// RUN: rm -rf %t.klee-out
+// RUN: not %klee --output-dir=%t.klee-out --exit-on-error %t1.bc > %t.log 2>&1
+// RUN: FileCheck -input-file=%t.log %s
+#include "klee/klee.h"
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+
+typedef uint32_t v4ui __attribute__ ((vector_size (16)));
+int main() {
+  v4ui f = { 0, 1, 2, 3 };
+  unsigned index = 0;
+  klee_make_symbolic(&index,
sizeof(unsigned), "index"); + // Performing write should be InsertElement instructions. + // For now this is an expected limitation. + // CHECK: insert_element_symbolic.c:[[@LINE+1]]: InsertElement, support for symbolic index not implemented + f[index] = 255; + klee_print_expr("f after write to [0]", f); + return 0; +} diff --git a/test/VectorInstructions/integer_ops_constant.c b/test/VectorInstructions/integer_ops_constant.c new file mode 100644 index 00000000..189ad4ee --- /dev/null +++ b/test/VectorInstructions/integer_ops_constant.c @@ -0,0 +1,201 @@ +// REQUIRES: geq-llvm-3.4 +// RUN: %llvmgcc %s -emit-llvm -O0 -g -c -o %t1.bc +// RUN: rm -rf %t.klee-out +// NOTE: Have to pass `--optimize=false` to avoid vector operations being +// constant folded away. +// RUN: %klee --output-dir=%t.klee-out --optimize=false --exit-on-error %t1.bc +#include +#include +#include + +typedef uint32_t v4ui __attribute__((vector_size(16))); +typedef int32_t v4si __attribute__((vector_size(16))); + +#define ASSERT_EL(C, OP, A, B, INDEX) assert(C[INDEX] == (A[INDEX] OP B[INDEX])) +#define ASSERT_ELV4(C, OP, A, B) \ + do { \ + ASSERT_EL(C, OP, A, B, 0); \ + ASSERT_EL(C, OP, A, B, 1); \ + ASSERT_EL(C, OP, A, B, 2); \ + ASSERT_EL(C, OP, A, B, 3); \ + } while (0); + +#define ASSERT_EL_TRUTH(C, OP, A, B, INDEX) \ + assert(C[INDEX] ? (A[INDEX] OP B[INDEX]) : (!(A[INDEX] OP B[INDEX]))) +#define ASSERT_EL_TRUTH_V4(C, OP, A, B) \ + do { \ + ASSERT_EL_TRUTH(C, OP, A, B, 0); \ + ASSERT_EL_TRUTH(C, OP, A, B, 1); \ + ASSERT_EL_TRUTH(C, OP, A, B, 2); \ + ASSERT_EL_TRUTH(C, OP, A, B, 3); \ + } while (0); + +#define ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, INDEX) \ + assert(C[INDEX] == (CONDITION ? A[INDEX] : B[INDEX])) +#define ASSERT_EL_TERNARY_SCALAR_CONDITION_V4(C, CONDITION, A, B) \ + do { \ + ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 0); \ + ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 1); \ + ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 2); \ + ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 3); \ + } while (0); + +int main() { + // Unsigned tests + { + v4ui a = {0, 1, 2, 3}; + v4ui b = {10, 20, 30, 3}; + + // Test addition + v4ui c = a + b; + ASSERT_ELV4(c, +, a, b); + + // Test subtraction + c = b - a; + ASSERT_ELV4(c, -, b, a); + + // Test multiplication + c = a * b; + ASSERT_ELV4(c, *, a, b); + + // Test division + c = a / b; + ASSERT_ELV4(c, /, a, b); + + // Test mod + c = a % b; + ASSERT_ELV4(c, %, a, b); + + // Test bitwise and + c = a & b; + ASSERT_ELV4(c, &, a, b); + + // Test bitwise or + c = a | b; + ASSERT_ELV4(c, |, a, b); + + // Test bitwise xor + c = a ^ b; + ASSERT_ELV4(c, ^, a, b); + + // Test left shift + c = b << a; + ASSERT_ELV4(c, <<, b, a); + + // Test logic right shift + c = b >> a; + ASSERT_ELV4(c, >>, b, a); + + // NOTE: Can't use `ASSERT_ELV4` due to semantics + // of GCC vector extensions. See + // https://gcc.gnu.org/onlinedocs/gcc/Vector-Extensions.html + + // Test == + c = a == b; + ASSERT_EL_TRUTH_V4(c, ==, a, b); + + // Test < + c = a < b; + ASSERT_EL_TRUTH_V4(c, <, a, b); + + // Test <= + c = a <= b; + ASSERT_EL_TRUTH_V4(c, <=, a, b); + + // Test > + c = a > b; + ASSERT_EL_TRUTH_V4(c, >, a, b); + + // Test >= + c = a > b; + ASSERT_EL_TRUTH_V4(c, >, a, b); + + // Test != + c = a != b; + ASSERT_EL_TRUTH_V4(c, !=, a, b); + + // Test ternary operator + c = 0 ? a : b; + ASSERT_EL_TERNARY_SCALAR_CONDITION_V4(c, 0, a, b); + c = 1 ? 
a : b; + ASSERT_EL_TERNARY_SCALAR_CONDITION_V4(c, 1, a, b); + } + + // Signed tests + { + v4si a = {-1, 2, -3, 4}; + v4si b = {-10, 20, -30, 40}; + + // Test addition + v4si c = a + b; + ASSERT_ELV4(c, +, a, b); + + // Test subtraction + c = b - a; + ASSERT_ELV4(c, -, b, a); + + // Test multiplication + c = a * b; + ASSERT_ELV4(c, *, a, b); + + // Test division + c = a / b; + ASSERT_ELV4(c, /, a, b); + + // Test mod + c = a % b; + ASSERT_ELV4(c, %, a, b); + + // Test bitwise and + c = a & b; + ASSERT_ELV4(c, &, a, b); + + // Test bitwise or + c = a | b; + ASSERT_ELV4(c, |, a, b); + + // Test bitwise xor + c = a ^ b; + ASSERT_ELV4(c, ^, a, b); + + // Test left shift + v4si shifts = {1, 2, 3, 4}; + c = b << shifts; + ASSERT_ELV4(c, <<, b, shifts); + + // Test arithmetic right shift + c = b >> shifts; + ASSERT_ELV4(c, >>, b, shifts); + + // Test == + c = a == b; + ASSERT_EL_TRUTH_V4(c, ==, a, b); + + // Test < + c = a < b; + ASSERT_EL_TRUTH_V4(c, <, a, b); + + // Test <= + c = a <= b; + ASSERT_EL_TRUTH_V4(c, <=, a, b); + + // Test > + c = a > b; + ASSERT_EL_TRUTH_V4(c, >, a, b); + + // Test >= + c = a > b; + ASSERT_EL_TRUTH_V4(c, >, a, b); + + // Test != + c = a != b; + ASSERT_EL_TRUTH_V4(c, !=, a, b); + + // Test ternary operator + c = 0 ? a : b; + ASSERT_EL_TERNARY_SCALAR_CONDITION_V4(c, 0, a, b); + c = 1 ? a : b; + ASSERT_EL_TERNARY_SCALAR_CONDITION_V4(c, 1, a, b); + } + return 0; +} diff --git a/test/VectorInstructions/integer_ops_signed_symbolic.c b/test/VectorInstructions/integer_ops_signed_symbolic.c new file mode 100644 index 00000000..80f4e420 --- /dev/null +++ b/test/VectorInstructions/integer_ops_signed_symbolic.c @@ -0,0 +1,130 @@ +// REQUIRES: geq-llvm-3.4 +// RUN: %llvmgcc %s -emit-llvm -O0 -g -c -o %t1.bc +// RUN: rm -rf %t.klee-out +// NOTE: Have to pass `--optimize=false` to avoid vector operations being +// optimized away. +// RUN: %klee --output-dir=%t.klee-out --optimize=false --exit-on-error %t1.bc +#include "klee/klee.h" +#include +#include +#include + +typedef int32_t v4si __attribute__((vector_size(16))); + +#define ASSERT_EL(C, OP, A, B, INDEX) assert(C[INDEX] == (A[INDEX] OP B[INDEX])) +#define ASSERT_ELV4(C, OP, A, B) \ + do { \ + ASSERT_EL(C, OP, A, B, 0); \ + ASSERT_EL(C, OP, A, B, 1); \ + ASSERT_EL(C, OP, A, B, 2); \ + ASSERT_EL(C, OP, A, B, 3); \ + } while (0); + +#define ASSERT_EL_TRUTH(C, OP, A, B, INDEX) \ + assert(C[INDEX] ? (A[INDEX] OP B[INDEX]) : (!(A[INDEX] OP B[INDEX]))) +#define ASSERT_EL_TRUTH_V4(C, OP, A, B) \ + do { \ + ASSERT_EL_TRUTH(C, OP, A, B, 0); \ + ASSERT_EL_TRUTH(C, OP, A, B, 1); \ + ASSERT_EL_TRUTH(C, OP, A, B, 2); \ + ASSERT_EL_TRUTH(C, OP, A, B, 3); \ + } while (0); + +#define ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, INDEX) \ + assert(C[INDEX] == (CONDITION ? 
+#define ASSERT_EL_TERNARY_SCALAR_CONDITION_V4(C, CONDITION, A, B)              \
+  do {                                                                         \
+    ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 0);                 \
+    ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 1);                 \
+    ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 2);                 \
+    ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 3);                 \
+  } while (0)
+
+int main() {
+  int32_t data[8];
+  klee_make_symbolic(&data, sizeof(data), "signed_data");
+  v4si a = {data[0], data[1], data[2], data[3]};
+  v4si b = {data[4], data[5], data[6], data[7]};
+
+  // Test addition
+  v4si c = a + b;
+  ASSERT_ELV4(c, +, a, b);
+
+  // Test subtraction
+  c = b - a;
+  ASSERT_ELV4(c, -, b, a);
+
+  // Test multiplication
+  c = a * b;
+  ASSERT_ELV4(c, *, a, b);
+
+  // Test division
+  if ((data[4] != 0) & (data[5] != 0) & (data[6] != 0) & (data[7] != 0)) {
+    c = a / b;
+    ASSERT_ELV4(c, /, a, b);
+
+    // Test mod
+    c = a % b;
+    ASSERT_ELV4(c, %, a, b);
+    return 0;
+  }
+
+  // Test bitwise and
+  c = a & b;
+  ASSERT_ELV4(c, &, a, b);
+
+  // Test bitwise or
+  c = a | b;
+  ASSERT_ELV4(c, |, a, b);
+
+  // Test bitwise xor
+  c = a ^ b;
+  ASSERT_ELV4(c, ^, a, b);
+
+  // Test left shift
+  if ((data[0] < 32) & (data[1] < 32) & (data[2] < 32) & (data[3] < 32) &
+      (data[0] >= 0) & (data[1] >= 0) & (data[2] >= 0) & (data[3] >= 0)) {
+    c = b << a;
+    ASSERT_ELV4(c, <<, b, a);
+
+    // Test arithmetic right shift
+    c = b >> a;
+    ASSERT_ELV4(c, >>, b, a);
+    return 0;
+  }
+
+  // NOTE: Can't use `ASSERT_ELV4` for the comparisons below because GCC's
+  // vector extensions make each result element 0 or -1, not 0 or 1. See
+  // https://gcc.gnu.org/onlinedocs/gcc/Vector-Extensions.html
+
+  // Test ==
+  c = a == b;
+  ASSERT_EL_TRUTH_V4(c, ==, a, b);
+
+  // Test <
+  c = a < b;
+  ASSERT_EL_TRUTH_V4(c, <, a, b);
+
+  // Test <=
+  c = a <= b;
+  ASSERT_EL_TRUTH_V4(c, <=, a, b);
+
+  // Test >
+  c = a > b;
+  ASSERT_EL_TRUTH_V4(c, >, a, b);
+
+  // Test >=
+  c = a >= b;
+  ASSERT_EL_TRUTH_V4(c, >=, a, b);
+
+  // Test !=
+  c = a != b;
+  ASSERT_EL_TRUTH_V4(c, !=, a, b);
+
+  // Test ternary operator
+  int condition = 0;
+  klee_make_symbolic(&condition, sizeof(condition), "signed_condition");
+  c = condition ? a : b;
+  ASSERT_EL_TERNARY_SCALAR_CONDITION_V4(c, condition, a, b);
+  return 0;
+}
diff --git a/test/VectorInstructions/integer_ops_unsigned_symbolic.c b/test/VectorInstructions/integer_ops_unsigned_symbolic.c
new file mode 100644
index 00000000..21ac90ce
--- /dev/null
+++ b/test/VectorInstructions/integer_ops_unsigned_symbolic.c
@@ -0,0 +1,129 @@
+// REQUIRES: geq-llvm-3.4
+// RUN: %llvmgcc %s -emit-llvm -O0 -g -c -o %t1.bc
+// RUN: rm -rf %t.klee-out
+// NOTE: Have to pass `--optimize=false` to avoid vector operations being
+// optimized away.
+// RUN: %klee --output-dir=%t.klee-out --optimize=false --exit-on-error %t1.bc
+#include "klee/klee.h"
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+
+typedef uint32_t v4ui __attribute__((vector_size(16)));
+
+#define ASSERT_EL(C, OP, A, B, INDEX) assert(C[INDEX] == (A[INDEX] OP B[INDEX]))
+#define ASSERT_ELV4(C, OP, A, B)                                               \
+  do {                                                                         \
+    ASSERT_EL(C, OP, A, B, 0);                                                 \
+    ASSERT_EL(C, OP, A, B, 1);                                                 \
+    ASSERT_EL(C, OP, A, B, 2);                                                 \
+    ASSERT_EL(C, OP, A, B, 3);                                                 \
+  } while (0)
+
+#define ASSERT_EL_TRUTH(C, OP, A, B, INDEX)                                    \
+  assert(C[INDEX] ? (A[INDEX] OP B[INDEX]) : (!(A[INDEX] OP B[INDEX])))
+#define ASSERT_EL_TRUTH_V4(C, OP, A, B)                                        \
+  do {                                                                         \
+    ASSERT_EL_TRUTH(C, OP, A, B, 0);                                           \
+    ASSERT_EL_TRUTH(C, OP, A, B, 1);                                           \
+    ASSERT_EL_TRUTH(C, OP, A, B, 2);                                           \
+    ASSERT_EL_TRUTH(C, OP, A, B, 3);                                           \
+  } while (0)
+
+#define ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, INDEX)          \
+  assert(C[INDEX] == (CONDITION ? A[INDEX] : B[INDEX]))
+#define ASSERT_EL_TERNARY_SCALAR_CONDITION_V4(C, CONDITION, A, B)              \
+  do {                                                                         \
+    ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 0);                 \
+    ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 1);                 \
+    ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 2);                 \
+    ASSERT_EL_TERNARY_SCALAR_CONDITION(C, CONDITION, A, B, 3);                 \
+  } while (0)
+
+int main() {
+  uint32_t data[8];
+  klee_make_symbolic(&data, sizeof(data), "unsigned_data");
+  v4ui a = {data[0], data[1], data[2], data[3]};
+  v4ui b = {data[4], data[5], data[6], data[7]};
+
+  // Test addition
+  v4ui c = a + b;
+  ASSERT_ELV4(c, +, a, b);
+
+  // Test subtraction
+  c = b - a;
+  ASSERT_ELV4(c, -, b, a);
+
+  // Test multiplication
+  c = a * b;
+  ASSERT_ELV4(c, *, a, b);
+
+  // Test division
+  if ((data[4] != 0) & (data[5] != 0) & (data[6] != 0) & (data[7] != 0)) {
+    c = a / b;
+    ASSERT_ELV4(c, /, a, b);
+
+    // Test mod
+    c = a % b;
+    ASSERT_ELV4(c, %, a, b);
+    return 0;
+  }
+
+  // Test bitwise and
+  c = a & b;
+  ASSERT_ELV4(c, &, a, b);
+
+  // Test bitwise or
+  c = a | b;
+  ASSERT_ELV4(c, |, a, b);
+
+  // Test bitwise xor
+  c = a ^ b;
+  ASSERT_ELV4(c, ^, a, b);
+
+  // Test left shift
+  if ((data[0] < 32) & (data[1] < 32) & (data[2] < 32) & (data[3] < 32)) {
+    c = b << a;
+    ASSERT_ELV4(c, <<, b, a);
+
+    // Test logical right shift
+    c = b >> a;
+    ASSERT_ELV4(c, >>, b, a);
+    return 0;
+  }
+
+  // NOTE: Can't use `ASSERT_ELV4` for the comparisons below because GCC's
+  // vector extensions make each result element 0 or -1, not 0 or 1. See
+  // https://gcc.gnu.org/onlinedocs/gcc/Vector-Extensions.html
+
+  // Test ==
+  c = a == b;
+  ASSERT_EL_TRUTH_V4(c, ==, a, b);
+
+  // Test <
+  c = a < b;
+  ASSERT_EL_TRUTH_V4(c, <, a, b);
+
+  // Test <=
+  c = a <= b;
+  ASSERT_EL_TRUTH_V4(c, <=, a, b);
+
+  // Test >
+  c = a > b;
+  ASSERT_EL_TRUTH_V4(c, >, a, b);
+
+  // Test >=
+  c = a >= b;
+  ASSERT_EL_TRUTH_V4(c, >=, a, b);
+
+  // Test !=
+  c = a != b;
+  ASSERT_EL_TRUTH_V4(c, !=, a, b);
+
+  // Test ternary operator
+  int condition = 0;
+  klee_make_symbolic(&condition, sizeof(condition), "unsigned_condition");
+  c = condition ? a : b;
+  ASSERT_EL_TERNARY_SCALAR_CONDITION_V4(c, condition, a, b);
+  return 0;
+}
diff --git a/test/VectorInstructions/shuffle_element.c b/test/VectorInstructions/shuffle_element.c
new file mode 100644
index 00000000..cf991b03
--- /dev/null
+++ b/test/VectorInstructions/shuffle_element.c
@@ -0,0 +1,83 @@
+// REQUIRES: geq-llvm-3.4
+// RUN: %llvmgcc %s -emit-llvm -O0 -g -c -o %t1.bc
+// RUN: rm -rf %t.klee-out
+// NOTE: Have to pass `--optimize=false` to avoid vector operations being
+// constant folded away.
+// RUN: %klee --output-dir=%t.klee-out --optimize=false --exit-on-error %t1.bc
+#include "klee/klee.h"
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define ASSERT_EQ_V4(X, Y)                                                     \
+  assert(X[0] == Y[0]);                                                        \
+  assert(X[1] == Y[1]);                                                        \
+  assert(X[2] == Y[2]);                                                        \
+  assert(X[3] == Y[3]);
+
+#define ASSERT_EQ_V8(X, Y)                                                     \
+  assert(X[0] == Y[0]);                                                        \
+  assert(X[1] == Y[1]);                                                        \
+  assert(X[2] == Y[2]);                                                        \
+  assert(X[3] == Y[3]);                                                        \
+  assert(X[4] == Y[4]);                                                        \
+  assert(X[5] == Y[5]);                                                        \
+  assert(X[6] == Y[6]);                                                        \
+  assert(X[7] == Y[7]);
+
+typedef uint32_t v4ui __attribute__((vector_size(16)));
+typedef uint32_t v8ui __attribute__((vector_size(32)));
+int main() {
+  v4ui f = {0, 19, 129, 255};
+  klee_make_symbolic(&f, sizeof(v4ui), "f");
+  klee_print_expr("f:=", f);
+
+  // Test single vector shuffle.
+
+  // Extract the same element uniformly
+  for (int index = 0; index < 4; ++index) {
+    v4ui mask = {index, index, index, index};
+    v4ui result = __builtin_shufflevector(f, mask);
+    v4ui expectedResult = {f[index], f[index], f[index], f[index]};
+    ASSERT_EQ_V4(result, expectedResult);
+  }
+
+  // Reverse the vector
+  v4ui mask = {3, 2, 1, 0};
+  v4ui result = __builtin_shufflevector(f, mask);
+  v4ui expectedResult = {f[3], f[2], f[1], f[0]};
+  ASSERT_EQ_V4(result, expectedResult);
+
+  // Extract a single element (Clang requires the indices to be constants so we can't use a for loop).
+#define TEST_SINGLE_EXTRACT(I)                                                 \
+  do {                                                                         \
+    uint32_t v __attribute__((vector_size(sizeof(uint32_t)))) =                \
+        __builtin_shufflevector(f, f, I);                                      \
+    uint32_t expected = f[I % 4];                                              \
+    assert(v[0] == expected);                                                  \
+  } while (0)
+  TEST_SINGLE_EXTRACT(0);
+  TEST_SINGLE_EXTRACT(1);
+  TEST_SINGLE_EXTRACT(2);
+  TEST_SINGLE_EXTRACT(3);
+  TEST_SINGLE_EXTRACT(4);
+  TEST_SINGLE_EXTRACT(5);
+  TEST_SINGLE_EXTRACT(6);
+  TEST_SINGLE_EXTRACT(7);
+
+  // Test two vector shuffle
+  v4ui a = {0, 1, 2, 3};
+  v4ui b = {4, 5, 6, 7};
+
+  {
+    // [a] + [b]
+    v8ui result = __builtin_shufflevector(a, b, 0, 1, 2, 3, 4, 5, 6, 7);
+    v8ui expected = {a[0], a[1], a[2], a[3], b[0], b[1], b[2], b[3]};
+    ASSERT_EQ_V8(result, expected);
+  }
+
+
+
+  return 0;
+}
-- 
cgit 1.4.1