diff --git a/.build_number b/.build_number
index e9bae3d88..d1d4b2156 100644
--- a/.build_number
+++ b/.build_number
@@ -1 +1 @@
-1463
+1464
diff --git a/src/libtriton/ast/astContext.cpp b/src/libtriton/ast/astContext.cpp
index d65b0ea58..88ad4880b 100644
--- a/src/libtriton/ast/astContext.cpp
+++ b/src/libtriton/ast/astContext.cpp
@@ -45,10 +45,13 @@ namespace triton {
 
     SharedAbstractNode AstContext::collect(const SharedAbstractNode& node) {
       /*
-       * We keep a shared reference of nodes in a deep AST. Instead of keeping
-       * each node (which does not scales), we only keep one reference at each
-       * deep step of 10000. Thus, it will avoid the stack recursion on destructor
-       * calls of shared_ptr.
+       * We keep a reference to each node whose depth in the AST is a multiple
+       * of 10000. Thus, when the root node is destroyed, the recursive destruction
+       * of shared_ptr stops as soon as it reaches such a depth, because those nodes
+       * are still referenced by the AST manager. Their destruction is deferred to
+       * the next allocation of nodes, and so on: ASTs are destroyed in steps of
+       * 10000 levels, which avoids the stack overflow while remaining scalable.
+       *
        * See: #753.
        */
       triton::uint32 lvl = node->getLevel();
diff --git a/src/libtriton/engines/symbolic/symbolicEngine.cpp b/src/libtriton/engines/symbolic/symbolicEngine.cpp
index 59796c878..bb864b91c 100644
--- a/src/libtriton/engines/symbolic/symbolicEngine.cpp
+++ b/src/libtriton/engines/symbolic/symbolicEngine.cpp
@@ -148,22 +148,15 @@ namespace triton {
 
 
       /* Gets an aligned entry. */
-      inline SharedSymbolicExpression SymbolicEngine::getAlignedMemory(triton::uint64 address, triton::uint32 size) {
-        return this->alignedMemoryReference[std::make_pair(address, size)].lock();
+      const SharedSymbolicExpression& SymbolicEngine::getAlignedMemory(triton::uint64 address, triton::uint32 size) {
+        return this->alignedMemoryReference[std::make_pair(address, size)];
       }
 
 
       /* Checks if the aligned memory is recored. */
       bool SymbolicEngine::isAlignedMemory(triton::uint64 address, triton::uint32 size) {
         if (this->alignedMemoryReference.find(std::make_pair(address, size)) != this->alignedMemoryReference.end()) {
-          /* Also check if the symbolic expression is alive */
-          if (this->alignedMemoryReference[std::make_pair(address, size)].lock()) {
-            return true;
-          }
-          /* Also check if the symbolic expression is alive */
-          else {
-            this->removeAlignedMemory(address, size);
-          }
+          return true;
         }
         return false;
       }
@@ -783,8 +776,7 @@ namespace triton {
          * If the memory access is aligned, don't split the memory.
          */
         if (this->modes->isModeEnabled(triton::modes::ALIGNED_MEMORY) && this->isAlignedMemory(address, size)) {
-          triton::ast::SharedAbstractNode anode = this->getAlignedMemory(address, size)->getAst();
-          return anode;
+          return this->getAlignedMemory(address, size)->getAst();
         }
 
         /* If the memory access is 1 byte long, just return the appropriate 8-bit vector */
diff --git a/src/libtriton/includes/triton/symbolicEngine.hpp b/src/libtriton/includes/triton/symbolicEngine.hpp
index 039318ed4..f29daf9fc 100644
--- a/src/libtriton/includes/triton/symbolicEngine.hpp
+++ b/src/libtriton/includes/triton/symbolicEngine.hpp
@@ -95,7 +95,7 @@ namespace triton {
          * **item1**: <address:size>
          * **item2**: shared symbolic expression
          */
-        std::map<std::pair<triton::uint64, triton::uint32>, WeakSymbolicExpression> alignedMemoryReference;
+        std::map<std::pair<triton::uint64, triton::uint32>, SharedSymbolicExpression> alignedMemoryReference;
 
         /*! \brief map of address -> symbolic expression
          *
@@ -128,7 +128,7 @@ namespace triton {
         triton::usize getUniqueSymVarId(void);
 
         //! Gets an aligned entry.
-        inline SharedSymbolicExpression getAlignedMemory(triton::uint64 address, triton::uint32 size);
+        const SharedSymbolicExpression& getAlignedMemory(triton::uint64 address, triton::uint32 size);
 
         //! Adds an aligned entry.
         void addAlignedMemory(triton::uint64 address, triton::uint32 size, const SharedSymbolicExpression& expr);
diff --git a/src/testers/unittests/test_symbolic_optimizations.py b/src/testers/unittests/test_symbolic_optimizations.py
new file mode 100644
index 000000000..8699da092
--- /dev/null
+++ b/src/testers/unittests/test_symbolic_optimizations.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# coding: utf-8
+"""Test Symbolic Optimizations."""
+
+import unittest
+from triton import *
+
+
+class TestSymbolicOptimizations(unittest.TestCase):
+
+    """Testing ALIGNED_MEMORY."""
+
+    def setUp(self):
+        self.ctx = TritonContext(ARCH.X86_64)
+
+
+    def test_without_optim(self):
+        self.ctx.setMode(MODE.ALIGNED_MEMORY, False)
+
+        self.ctx.processing(Instruction(b"\x48\xc7\xc0\x01\x00\x00\x00")) # mov rax, 1
+        self.ctx.processing(Instruction(b"\x48\x89\x03"))                 # mov [rbx], rax
+        self.ctx.processing(Instruction(b"\x48\x8b\x0b"))                 # mov rcx, [rbx]
+
+        rcx = self.ctx.getMemoryAst(MemoryAccess(0, CPUSIZE.QWORD))
+        self.assertEqual(rcx.getType(), AST_NODE.CONCAT)
+        self.assertEqual(rcx.evaluate(), 1)
+        return
+
+
+    def test_with_optim(self):
+        self.ctx.setMode(MODE.ALIGNED_MEMORY, True)
+
+        self.ctx.processing(Instruction(b"\x48\xc7\xc0\x01\x00\x00\x00")) # mov rax, 1
+        self.ctx.processing(Instruction(b"\x48\x89\x03"))                 # mov [rbx], rax
+        self.ctx.processing(Instruction(b"\x48\x8b\x0b"))                 # mov rcx, [rbx]
+
+        rcx = self.ctx.getMemoryAst(MemoryAccess(0, CPUSIZE.QWORD))
+        self.assertEqual(rcx.getType(), AST_NODE.REFERENCE)
+        self.assertEqual(rcx.evaluate(), 1)
+        return
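
The comment added to AstContext::collect() describes how ASTs are destroyed in steps of 10000 depth levels. The following minimal, self-contained C++ sketch illustrates that idea; it is illustrative only (the Node type, the chain length and the explicit cleanup loop are not Triton code). A deep chain of shared_ptr is destroyed recursively, so a chain of a few hundred thousand nodes can overflow the stack; keeping an extra reference every 10000 levels in a side container bounds each destruction cascade to 10000 levels, and the retained nodes are released later, step by step.

#include <memory>
#include <vector>

struct Node {
  std::shared_ptr<Node> child;
  explicit Node(std::shared_ptr<Node> c) : child(std::move(c)) {}
  // ~Node() destroys `child`, which may destroy its own child, and so on:
  // without mitigation the recursion depth equals the chain length.
};

int main() {
  std::vector<std::shared_ptr<Node>> collected;  // plays the role of the AST manager
  std::shared_ptr<Node> root;

  for (unsigned depth = 1; depth <= 200001; ++depth) {
    root = std::make_shared<Node>(root);
    if (depth % 10000 == 0)
      collected.push_back(root);                 // keep one reference every 10000 levels
  }

  // Dropping the root only destroys nodes down to the deepest collected one,
  // so at most 10000 destructors are nested on the stack.
  root.reset();

  // Releasing the retained nodes from the deepest to the shallowest frees the
  // rest of the chain in bounded steps of 10000 levels as well. (In Triton this
  // deferred cleanup happens as new nodes are allocated.)
  while (!collected.empty())
    collected.pop_back();

  return 0;
}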
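
The switch from WeakSymbolicExpression to SharedSymbolicExpression in alignedMemoryReference is what lets isAlignedMemory() shrink to a plain find() and getAlignedMemory() return a const reference. The sketch below contrasts the two caching schemes in isolation; Expr, Key, isCachedWeak and isCachedShared are made-up names for illustration, not Triton APIs. The trade-off of the new scheme is that cached expressions stay alive until they are explicitly removed from the map.

#include <cstdint>
#include <map>
#include <memory>
#include <utility>

struct Expr {};  // stands in for a symbolic expression

using Key = std::pair<std::uint64_t, std::uint32_t>;  // (address, size)

// Old scheme: the map holds weak_ptr, so an entry may outlive its expression.
// A lookup must re-check liveness and evict stale entries, as the removed
// isAlignedMemory() code did.
bool isCachedWeak(std::map<Key, std::weak_ptr<Expr>>& cache, const Key& k) {
  auto it = cache.find(k);
  if (it == cache.end())
    return false;
  if (it->second.lock())   // expression still alive?
    return true;
  cache.erase(it);         // stale entry: drop it and report a miss
  return false;
}

// New scheme: the map owns the expressions, so presence implies liveness and a
// lookup can safely hand out a reference into the map.
bool isCachedShared(const std::map<Key, std::shared_ptr<Expr>>& cache, const Key& k) {
  return cache.find(k) != cache.end();
}

int main() {
  std::map<Key, std::weak_ptr<Expr>> weakCache;
  std::map<Key, std::shared_ptr<Expr>> sharedCache;

  {
    auto e = std::make_shared<Expr>();
    weakCache[{0x1000, 8}] = e;
    sharedCache[{0x1000, 8}] = e;
  }                                                   // `e` goes out of scope here

  bool w = isCachedWeak(weakCache, {0x1000, 8});      // false: the weak entry expired
  bool s = isCachedShared(sharedCache, {0x1000, 8});  // true: the map keeps it alive
  return (!w && s) ? 0 : 1;
}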