Diffstat (limited to 'lib/Core')
-rw-r--r--  lib/Core/Executor.cpp       66
-rw-r--r--  lib/Core/MemoryManager.cpp  22
-rw-r--r--  lib/Core/MemoryManager.h     6
3 files changed, 59 insertions, 35 deletions
diff --git a/lib/Core/Executor.cpp b/lib/Core/Executor.cpp
index 709eb3a5..e349ff79 100644
--- a/lib/Core/Executor.cpp
+++ b/lib/Core/Executor.cpp
@@ -1293,13 +1293,13 @@ void Executor::executeCall(ExecutionState &state,
     KFunction *kf = kmodule->functionMap[f];
     state.pushFrame(state.prevPC, kf);
     state.pc = kf->instructions;
-        
+
     if (statsTracker)
       statsTracker->framePushed(state, &state.stack[state.stack.size()-2]);
- 
+
      // TODO: support "byval" parameter attribute
      // TODO: support zeroext, signext, sret attributes
-        
+
     unsigned callingArgs = arguments.size();
     unsigned funcArgs = f->arg_size();
     if (!f->isVarArg()) {
@@ -1319,56 +1319,64 @@ void Executor::executeCall(ExecutionState &state,
                               "user.err");
         return;
       }
-            
+
       StackFrame &sf = state.stack.back();
       unsigned size = 0;
+      bool requires16ByteAlignment = false;
       for (unsigned i = funcArgs; i < callingArgs; i++) {
         // FIXME: This is really specific to the architecture, not the pointer
-        // size. This happens to work fir x86-32 and x86-64, however.
+        // size. This happens to work for x86-32 and x86-64, however.
         if (WordSize == Expr::Int32) {
           size += Expr::getMinBytesForWidth(arguments[i]->getWidth());
         } else {
           Expr::Width argWidth = arguments[i]->getWidth();
-          // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
-          // byte boundary if alignment needed by type exceeds 8 byte boundary.
+          // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a
+          // 16 byte boundary if alignment needed by type exceeds 8 byte
+          // boundary.
           //
           // Alignment requirements for scalar types is the same as their size
           if (argWidth > Expr::Int64) {
              size = llvm::RoundUpToAlignment(size, 16);
+             requires16ByteAlignment = true;
           }
           size += llvm::RoundUpToAlignment(argWidth, WordSize) / 8;
         }
       }
 
-      MemoryObject *mo = sf.varargs = memory->allocate(size, true, false, 
-                                                       state.prevPC->inst);
-      if (!mo) {
+      MemoryObject *mo = sf.varargs =
+          memory->allocate(size, true, false, state.prevPC->inst,
+                           (requires16ByteAlignment ? 16 : 8));
+      if (!mo && size) {
         terminateStateOnExecError(state, "out of memory (varargs)");
         return;
       }
 
-      if ((WordSize == Expr::Int64) && (mo->address & 15)) {
-        // Both 64bit Linux/Glibc and 64bit MacOSX should align to 16 bytes.
-        klee_warning_once(0, "While allocating varargs: malloc did not align to 16 bytes.");
-      }
+      if (mo) {
+        if ((WordSize == Expr::Int64) && (mo->address & 15) &&
+            requires16ByteAlignment) {
+          // Both 64bit Linux/Glibc and 64bit MacOSX should align to 16 bytes.
+          klee_warning_once(
+              0, "While allocating varargs: malloc did not align to 16 bytes.");
+        }
 
-      ObjectState *os = bindObjectInState(state, mo, true);
-      unsigned offset = 0;
-      for (unsigned i = funcArgs; i < callingArgs; i++) {
-        // FIXME: This is really specific to the architecture, not the pointer
-        // size. This happens to work fir x86-32 and x86-64, however.
-        if (WordSize == Expr::Int32) {
-          os->write(offset, arguments[i]);
-          offset += Expr::getMinBytesForWidth(arguments[i]->getWidth());
-        } else {
-          assert(WordSize == Expr::Int64 && "Unknown word size!");
+        ObjectState *os = bindObjectInState(state, mo, true);
+        unsigned offset = 0;
+        for (unsigned i = funcArgs; i < callingArgs; i++) {
+          // FIXME: This is really specific to the architecture, not the pointer
+          // size. This happens to work for x86-32 and x86-64, however.
+          if (WordSize == Expr::Int32) {
+            os->write(offset, arguments[i]);
+            offset += Expr::getMinBytesForWidth(arguments[i]->getWidth());
+          } else {
+            assert(WordSize == Expr::Int64 && "Unknown word size!");
 
-          Expr::Width argWidth = arguments[i]->getWidth();
-          if (argWidth > Expr::Int64) {
-             offset = llvm::RoundUpToAlignment(offset, 16);
+            Expr::Width argWidth = arguments[i]->getWidth();
+            if (argWidth > Expr::Int64) {
+              offset = llvm::RoundUpToAlignment(offset, 16);
+            }
+            os->write(offset, arguments[i]);
+            offset += llvm::RoundUpToAlignment(argWidth, WordSize) / 8;
           }
-          os->write(offset, arguments[i]);
-          offset += llvm::RoundUpToAlignment(argWidth, WordSize) / 8;
         }
       }
     }
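
Note on the hunk above: the AMD64 ABI (3.5.7p5, step 7) requires any variadic argument whose type needs more than 8-byte alignment (for example __int128 or long double) to start on a 16-byte boundary in the overflow argument area, which is what the new requires16ByteAlignment flag tracks. The following is a self-contained sketch of the same size computation, not KLEE code: roundUp is an illustrative stand-in for llvm::RoundUpToAlignment and the argument widths are made-up example values.

#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in for llvm::RoundUpToAlignment: round value up to a multiple of align.
static uint64_t roundUp(uint64_t value, uint64_t align) {
  return (value + align - 1) / align * align;
}

int main() {
  const uint64_t wordSize = 64; // bits, x86-64
  // Bit widths of three hypothetical extra varargs: int, __int128, int.
  std::vector<uint64_t> argWidths = {32, 128, 32};

  uint64_t size = 0;
  bool requires16ByteAlignment = false;
  for (uint64_t w : argWidths) {
    if (w > 64) { // needs more than 8-byte alignment -> 16-byte boundary
      size = roundUp(size, 16);
      requires16ByteAlignment = true;
    }
    size += roundUp(w, wordSize) / 8; // each slot is a multiple of 8 bytes
  }
  // Prints: varargs area: 40 bytes, needs 16-byte base alignment: 1
  std::cout << "varargs area: " << size
            << " bytes, needs 16-byte base alignment: "
            << requires16ByteAlignment << "\n";
}

For these example widths the area is 40 bytes and must itself start on a 16-byte boundary, which is why the alignment hint is now passed down to memory->allocate().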
diff --git a/lib/Core/MemoryManager.cpp b/lib/Core/MemoryManager.cpp
index 7c76d480..02bbe678 100644
--- a/lib/Core/MemoryManager.cpp
+++ b/lib/Core/MemoryManager.cpp
@@ -32,13 +32,25 @@ MemoryManager::~MemoryManager() {
   }
 }
 
-MemoryObject *MemoryManager::allocate(uint64_t size, bool isLocal, 
+MemoryObject *MemoryManager::allocate(uint64_t size, bool isLocal,
                                       bool isGlobal,
-                                      const llvm::Value *allocSite) {
+                                      const llvm::Value *allocSite,
+                                      size_t alignment) {
   if (size>10*1024*1024)
-    klee_warning_once(0, "Large alloc: %u bytes.  KLEE may run out of memory.", (unsigned) size);
-  
-  uint64_t address = (uint64_t) (unsigned long) malloc((unsigned) size);
+    klee_warning_once(0, "Large alloc: %lu bytes.  KLEE may run out of memory.",
+                      size);
+
+  uint64_t address = 0;
+  // Use malloc for the standard case
+  if (alignment <= 8)
+    address = (uint64_t)malloc(size);
+  else {
+    int res = posix_memalign((void **)&address, alignment, size);
+    if (res != 0) {
+      klee_warning("Allocating aligned memory failed.");
+      address = 0;
+    }
+  }
   if (!address)
     return 0;
   
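For context on the change above: posix_memalign requires the alignment to be a power of two that is a multiple of sizeof(void *), leaves the output pointer unmodified on failure, and reports errors through a non-zero positive return value (EINVAL or ENOMEM) rather than a negative value or errno. Below is a minimal standalone sketch of the same malloc-vs-posix_memalign split; allocateAligned is an illustrative helper, not part of the MemoryManager API.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Plain malloc for the default word-aligned case, posix_memalign when a
// stricter alignment (e.g. 16 bytes for varargs) is requested.
static void *allocateAligned(size_t size, size_t alignment) {
  if (alignment <= 8)
    return std::malloc(size);

  void *address = nullptr;
  // A non-zero return value means failure; 'address' is left unmodified.
  int res = posix_memalign(&address, alignment, size);
  if (res != 0) {
    std::fprintf(stderr, "aligned allocation failed (error %d)\n", res);
    return nullptr;
  }
  return address;
}

int main() {
  void *p = allocateAligned(64, 16);
  std::printf("16-byte aligned: %d\n", p && ((uintptr_t)p & 15) == 0);
  std::free(p);
}
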
diff --git a/lib/Core/MemoryManager.h b/lib/Core/MemoryManager.h
index 01683443..d80e44af 100644
--- a/lib/Core/MemoryManager.h
+++ b/lib/Core/MemoryManager.h
@@ -31,8 +31,12 @@ namespace klee {
     MemoryManager(ArrayCache *arrayCache) : arrayCache(arrayCache) {}
     ~MemoryManager();
 
+    /**
+     * Returns a memory object that contains a handle to real virtual process
+     * memory.
+     */
     MemoryObject *allocate(uint64_t size, bool isLocal, bool isGlobal,
-                           const llvm::Value *allocSite);
+                           const llvm::Value *allocSite, size_t alignment = 8);
     MemoryObject *allocateFixed(uint64_t address, uint64_t size,
                                 const llvm::Value *allocSite);
     void deallocate(const MemoryObject *mo);
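
The default argument keeps existing allocate() call sites source-compatible; only the varargs path in Executor::executeCall passes an explicit alignment. A toy sketch of that effect follows; MemoryManagerSketch and the trivial MemoryObject below are made-up stand-ins, not KLEE's real classes.

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct MemoryObject { uint64_t address = 0; };

// Made-up stand-in for klee::MemoryManager, only to show the new signature.
struct MemoryManagerSketch {
  MemoryObject *allocate(uint64_t size, bool isLocal, bool isGlobal,
                         const void *allocSite, size_t alignment = 8) {
    std::printf("allocate(%llu bytes, alignment=%zu)\n",
                (unsigned long long)size, alignment);
    return nullptr; // real allocation elided in this sketch
  }
};

int main() {
  MemoryManagerSketch memory;
  memory.allocate(32, true, false, nullptr);     // old call sites: default 8
  memory.allocate(48, true, false, nullptr, 16); // varargs with >8-byte types
}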