diff --git a/arch/Pate/PPC.hs b/arch/Pate/PPC.hs index f4b61935..19eebb98 100644 --- a/arch/Pate/PPC.hs +++ b/arch/Pate/PPC.hs @@ -29,7 +29,7 @@ where import Control.Lens ( (^.), (^?) ) import qualified Control.Lens as L -import Control.Applicative ( (<|>) ) +import Control.Applicative ( (<|>), Const (..) ) import qualified Control.Monad.Catch as CMC import qualified Data.BitVector.Sized as BVS import qualified Data.ByteString.Char8 as BSC @@ -85,6 +85,11 @@ import Data.Macaw.CFG.Core import qualified What4.JSON as W4S import qualified Data.Macaw.CFG as MC import qualified Pate.SimState as PS +import qualified Data.Parameterized.Map as MapF +import qualified Data.Macaw.AbsDomain.AbsState as MA +import qualified Data.Set as Set +import qualified Pate.Memory.MemTrace as PMT +import Data.Data (Typeable) -- | There is just one dedicated register on ppc64 data PPC64DedicatedRegister tp where @@ -175,6 +180,7 @@ instance PA.ValidArch PPC.PPC32 where PPC.PPC_FPSCR -> True PPC.PPC_VSCR -> True PPC.PPC_XER -> True + PPC.PPC_LNK -> True _ -> False displayRegister = display argumentNameFrom = argumentNameFromGeneric @@ -265,22 +271,64 @@ handleExternalCall = PVE.ExternalDomain $ \sym -> do argumentMapping :: (1 <= SP.AddrWidth v) => PVO.ArgumentMapping (PPC.AnyPPC v) argumentMapping = undefined + +specializedBufferWrite :: forall arch v. (arch ~ PPC.AnyPPC v, 16 <= SP.AddrWidth v, MS.SymArchConstraints arch) => PA.StubOverride arch +specializedBufferWrite = + PA.mkEventOverride "CFE_SB_TransmitBuffer" mkEvent 0x30 (gpr 3) (gpr 3) + where + mkEvent :: CB.IsSymInterface sym => sym -> PMT.MemChunk sym (MC.ArchAddrWidth arch) -> IO (WI.SymBV sym (MC.ArchAddrWidth arch)) + mkEvent sym chunk = do + let ptrW = MC.memWidthNatRepr @(MC.ArchAddrWidth arch) + let concreteExpect :: Integer = 0x300 + offset <- WI.bvLit sym ptrW (BVS.mkBV ptrW 4) + let bytes = WI.knownNat @2 + PMT.BytesBV _ bv <- PMT.readFromChunk sym MC.BigEndian chunk offset bytes + let bits = WI.bvWidth bv + expect_value <- WI.bvLit sym bits (BVS.mkBV bits concreteExpect) + is_expect <- WI.isEq sym bv expect_value + zero <- WI.bvLit sym ptrW (BVS.zero ptrW) + expect_value' <- WI.bvLit sym ptrW (BVS.mkBV ptrW concreteExpect) + -- if we the bytes are as expected then return them + -- otherise, return zero + WI.baseTypeIte sym is_expect expect_value' zero + +-- FIXME: flags to make it equal? +specializeSocketRead :: forall arch v. 
(Typeable arch, arch ~ PPC.AnyPPC v, 16 <= SP.AddrWidth v, MS.SymArchConstraints arch) => PA.StubOverride arch +specializeSocketRead = + PA.mkReadOverride "OS_SocketRecvFrom" (PPa.PatchPairSingle PB.PatchedRepr (Const f)) addrs lengths (gpr 3) (gpr 4) (gpr 5) (gpr 3) + where + addrs :: PPa.PatchPairC (Maybe (PA.MemLocation (MC.ArchAddrWidth arch))) + addrs = PPa.PatchPairC (Just (PA.MemIndPointer (PA.MemPointer 0x00013044) 0x7c)) (Just (PA.MemIndPointer (PA.MemPointer 0x00013044) 0x7c)) + + lengths :: PPa.PatchPairC (Maybe (MC.MemWord (MC.ArchAddrWidth arch))) + lengths = PPa.PatchPairC (Just 0x30) (Just 0x30) + + f :: PA.MemChunkModify (MC.ArchAddrWidth arch) + f = PA.modifyConcreteChunk MC.BigEndian WI.knownNat (0x300 :: Integer) 2 4 + -- FIXME: clagged directly from ARM, registers may not be correct -stubOverrides :: (MS.SymArchConstraints (PPC.AnyPPC v), 1 <= SP.AddrWidth v, 16 <= SP.AddrWidth v) => PA.ArchStubOverrides (PPC.AnyPPC v) -stubOverrides = PA.ArchStubOverrides (PA.mkDefaultStubOverride "__pate_stub" r0 ) $ \fs -> +stubOverrides :: (Typeable (PPC.AnyPPC v), MS.SymArchConstraints (PPC.AnyPPC v), 1 <= SP.AddrWidth v, 16 <= SP.AddrWidth v) => PA.ArchStubOverrides (PPC.AnyPPC v) +stubOverrides = PA.ArchStubOverrides (PA.mkDefaultStubOverride "__pate_stub" r3 ) $ \fs -> lookup (PBl.fnSymBase fs) override_list where override_list = - [ ("malloc", PA.mkMallocOverride r0 r0) + [ ("malloc", PA.mkMallocOverride r3 r3) -- FIXME: arguments are interpreted differently for calloc - , ("calloc", PA.mkMallocOverride r0 r0) + , ("calloc", PA.mkMallocOverride r3 r3) -- FIXME: arguments are interpreted differently for reallolc - , ("realloc", PA.mkMallocOverride r0 r0) - , ("clock", PA.mkClockOverride r0) - , ("write", PA.mkWriteOverride "write" r0 r1 r2 r0) + , ("realloc", PA.mkMallocOverride r3 r3) + , ("clock", PA.mkClockOverride r3) + , ("write", PA.mkWriteOverride "write" r3 r4 r5 r3) -- FIXME: fixup arguments for fwrite - , ("fwrite", PA.mkWriteOverride "fwrite" r0 r1 r2 r0) - , ("printf", PA.mkObservableOverride "printf" r0 r1) + , ("fwrite", PA.mkWriteOverride "fwrite" r3 r4 r5 r3) + , ("printf", PA.mkObservableOverride "printf" r3 r4) + + -- FIXME: added for relaySAT challenge problem + , ("CFE_SB_AllocateMessageBuffer", PA.mkMallocOverride' (Just (PA.MemIndPointer (PA.MemPointer 0x00013044) 0x7c)) r3 r3) + , ("OS_SocketRecvFrom", specializeSocketRead) + , ("CFE_SB_TransmitBuffer", specializedBufferWrite) + -- END relaySAT + -- FIXME: default stubs below here ] ++ (map mkDefault $ @@ -325,14 +373,22 @@ stubOverrides = PA.ArchStubOverrides (PA.mkDefaultStubOverride "__pate_stub" r0 , "console_println" -- FIXME: observable? , "console_error" -- FIXME: observable? , "console_print" -- FIXME: observable? 
+ , "CFE_EVS_SendEvent" + , "CFE_ES_PerfLogAdd" ]) mkNOPStub nm = (nm, PA.mkNOPStub nm) - mkDefault nm = (nm, PA.mkDefaultStubOverride nm r0) + mkDefault nm = (nm, PA.mkDefaultStubOverride nm r3) + + -- r0 = gpr 0 + -- r1 = gpr 1 + -- r2 = gpr 2 + r3 = gpr 3 + r4 = gpr 4 + r5 = gpr 5 + -- r6 = gpr 6 + -- r7 = gpr 7 - r0 = gpr 0 - r1 = gpr 1 - r2 = gpr 2 instance MCS.HasArchTermEndCase (PPC.PPCTermStmt v) where archTermCase = \case diff --git a/src/Pate/Arch.hs b/src/Pate/Arch.hs index bbd3a371..39ec95d5 100644 --- a/src/Pate/Arch.hs +++ b/src/Pate/Arch.hs @@ -15,8 +15,11 @@ {-# LANGUAGE QuantifiedConstraints #-} {-# LANGUAGE ImpredicativeTypes #-} {-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE ViewPatterns #-} {-# OPTIONS_GHC -fno-warn-orphans #-} +{-# LANGUAGE LambdaCase #-} module Pate.Arch ( SomeValidArch(..), @@ -43,7 +46,16 @@ module Pate.Arch ( withStubOverride, mergeLoaders, idStubOverride, - serializeRegister + serializeRegister, + mkReadOverride, + noMemChunkModify, + modifyConcreteChunk, + MemChunkModify, + mkEventOverride, + mkMallocOverride', + mkArgPassthroughOverride, + MemLocation(..), + freshAtLocation ) where import Control.Lens ( (&), (.~), (^.) ) @@ -102,6 +114,18 @@ import qualified Data.Parameterized.TraversableFC as TFC import qualified Data.Aeson as JSON import Data.Aeson ( (.=) ) import Data.Parameterized.Classes (ShowF(..)) +import qualified Pate.PatchPair as PPa +import Data.Parameterized.WithRepr (withRepr) +import Data.Parameterized.Classes +import qualified Control.Monad.IO.Class as IO +import qualified System.IO as IO +import Data.Functor.Const +import qualified What4.ExprHelpers as WEH +import Numeric.Natural +import qualified What4.Expr.ArrayUpdateMap as AUM +import qualified Data.Parameterized.TraversableF as TF +import Data.Maybe +import Pate.Memory -- | The type of architecture-specific dedicated registers -- @@ -254,16 +278,21 @@ data StubOverride arch = (forall sym. LCB.IsSymInterface sym => sym -> + MI.ArchitectureInfo arch -> PVC.WrappedSolver sym IO -> IO (StateTransformer sym arch)) +-- | In general the stub override may use both the original and patched states as its input, producing a pair +-- of post-states representing the stub semantics. However the 'StateTransformer' pattern captures the +-- typical case where a stub operates on each state independently. data StateTransformer sym arch = - StateTransformer (forall v bin. PB.KnownBinary bin => PS.SimState sym arch v bin -> IO (PS.SimState sym arch v bin)) + StateTransformer2 (forall v. PPa.PatchPair (PS.SimState sym arch v) -> IO (PPa.PatchPair (PS.SimState sym arch v))) + | StateTransformer (forall bin v. PB.KnownBinary bin => PS.SimState sym arch v bin -> IO (PS.SimState sym arch v bin)) mkStubOverride :: forall arch. - (forall sym bin v.W4.IsSymExprBuilder sym => PB.KnownBinary bin => sym -> PS.SimState sym arch v bin -> IO (PS.SimState sym arch v bin)) -> + (forall sym bin v. 
LCB.IsSymInterface sym => PB.KnownBinary bin => sym -> PS.SimState sym arch v bin -> IO (PS.SimState sym arch v bin)) -> StubOverride arch -mkStubOverride f = StubOverride $ \sym _ -> return $ StateTransformer (\st -> f sym st) +mkStubOverride f = StubOverride $ \sym _ _ -> return $ StateTransformer $ \st -> f sym st idStubOverride :: StubOverride arch idStubOverride = mkStubOverride $ \_ -> return @@ -271,14 +300,19 @@ idStubOverride = mkStubOverride $ \_ -> return withStubOverride :: LCB.IsSymInterface sym => sym -> + MI.ArchitectureInfo arch -> PVC.WrappedSolver sym IO -> StubOverride arch -> - ((forall bin. PB.KnownBinary bin => PS.SimState sym arch v bin -> IO (PS.SimState sym arch v bin)) -> IO a) -> + ((PPa.PatchPair (PS.SimState sym arch v) -> IO (PPa.PatchPair (PS.SimState sym arch v))) -> IO a) -> IO a -withStubOverride sym wsolver (StubOverride ov) f = do - StateTransformer ov' <- ov sym wsolver - f ov' - +withStubOverride sym archInfo wsolver (StubOverride ov) f = do + ov sym archInfo wsolver >>= \case + StateTransformer2 ov' -> f ov' + StateTransformer ov' -> + let ov'' stPair = case stPair of + PPa.PatchPair stO stP -> PPa.PatchPair <$> ov' stO <*> ov' stP + PPa.PatchPairSingle bin st -> withRepr bin $ PPa.PatchPairSingle bin <$> ov' st + in f ov'' data ArchStubOverrides arch = ArchStubOverrides (StubOverride arch) (PB.FunctionSymbol -> Maybe (StubOverride arch)) @@ -310,15 +344,73 @@ mkMallocOverride :: MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ register for fresh pointer -} -> StubOverride arch mkMallocOverride _rLen rOut = mkStubOverride $ \sym st -> do + (fresh_ptr, st') <- freshAlloc sym Nothing st + return (st' { PS.simRegs = ((PS.simRegs st) & (MC.boundValue rOut) .~ fresh_ptr) }) + +mkMallocOverride' :: + forall arch. + 16 <= MC.ArchAddrWidth arch => + MS.SymArchConstraints arch => + Typeable arch => + Maybe (MemLocation (MC.ArchAddrWidth arch)) -> + MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ write fresh pointer here -} -> + MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ register for fresh pointer -} -> + StubOverride arch +mkMallocOverride' bufOverride rPtrLoc rOut = StubOverride $ \sym archInfo _wsolver -> do + alloc_success <- W4.freshConstant sym (W4.safeSymbol "alloc_success") W4.BaseBoolRepr + return $ StateTransformer $ \(st :: PS.SimState sym arch v bin) -> do + let w_mem = MC.memWidthNatRepr @(MC.ArchAddrWidth arch) + let ptr = PSR.macawRegValue $ (PS.simRegs st) ^. MC.boundValue rPtrLoc + + (fresh_ptr, st') <- case bufOverride of + Just ov -> do + (fresh_ptr, st') <- freshAtLocation sym st (MI.archEndianness archInfo) (Just 4) ov + return (PSR.ptrToEntry fresh_ptr, st' ) + Nothing -> freshAlloc sym Nothing st + + -- IO.liftIO $ IO.putStrLn (show $ (PS.simRegs st) ^. 
MC.boundValue rPtrLoc) + --ptrOffset <- W4.bvLit sym w_mem (BVS.mkBV w_mem (toInteger bufferOv)) + --region <- W4.natLit sym 0 + --let ptr = (CLM.LLVMPointer region ptrOffset) + --let memRepr = PMT.memWidthMemRepr (MI.archEndianness archInfo) w_mem + --mem' <- PMT.writeMemState @_ @arch sym (W4.truePred sym) (PMT.memState $ PS.simMem st) ptr memRepr (PSR.macawRegValue fresh_ptr) + --return (st' { PS.simMem = (PS.simMem st){PMT.memState = mem'}, PS.simRegs = ((PS.simRegs st) & (MC.boundValue rOut) .~ fresh_ptr) }) + + nullPtr <- IO.liftIO $ CLM.mkNullPointer sym w_mem + + ptr' <- IO.liftIO $ PMT.muxPtr sym alloc_success (PSR.macawRegValue fresh_ptr) nullPtr + return (st' { PS.simRegs = ((PS.simRegs st) & (MC.boundValue rOut) .~ (PSR.ptrToEntry ptr')) }) + + -- | Return a freshly-allocated pointer with an optional region override. +-- New max region is set to the given region + 1 if it is greater than the current max +-- region (i.e. the new max region is guaranted to not clash with the returned pointer) +freshAlloc :: + forall sym arch v bin. + LCB.IsSymInterface sym => + MS.SymArchConstraints arch => + sym -> + Maybe Integer -> + PS.SimState sym arch v bin -> + IO (PSR.MacawRegEntry sym (MT.BVType (MC.ArchAddrWidth arch)), PS.SimState sym arch v bin) +freshAlloc sym mregion st = do let mr = PS.simMaxRegion st let w = MC.memWidthNatRepr @(MC.ArchAddrWidth arch) - mr_nat <- W4.integerToNat sym (PS.unSE mr) + this_region <- case mregion of + Just region -> W4.intLit sym region + Nothing -> return $ PS.unSE mr zero <- W4.bvLit sym w (BVS.mkBV w 0) - let fresh_ptr = PSR.ptrToEntry (CLM.LLVMPointer mr_nat zero) - mr_inc <- PS.forScopedExpr sym mr $ \sym' mr' -> do + next_region_nat <- W4.integerToNat sym this_region + let fresh_ptr = PSR.ptrToEntry (CLM.LLVMPointer next_region_nat zero) + next_region <- PS.forScopedExpr sym mr $ \sym' mr' -> do + cur_max <- case mregion of + Just region -> do + region_sym <- W4.intLit sym' region + W4.intMax sym' mr' region_sym + Nothing -> return mr' one <- W4.intLit sym' 1 - W4.intAdd sym' mr' one - return (st { PS.simMaxRegion = mr_inc, PS.simRegs = ((PS.simRegs st) & (MC.boundValue rOut) .~ fresh_ptr) }) + W4.intAdd sym' cur_max one + return (fresh_ptr, st { PS.simMaxRegion = next_region }) + -- | Defines an override for @clock@ that returns a value representing the current time. -- Takes a single register used to store the return value. 
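Aside (illustrative only, not part of the patch): the 'freshAlloc' helper added above hands out a pointer in the current max region (or an explicit override) and then bumps the max region past it so later allocations cannot clash. The sketch below models that bookkeeping over plain Integers instead of symbolic What4 terms; 'AllocState' and 'freshRegion' are hypothetical stand-ins for the scoped expressions used in the patch.

import Data.Maybe (fromMaybe)

-- | Simplified allocator state: just the next unused region number.
newtype AllocState = AllocState { maxRegion :: Integer }
  deriving Show

-- | Pick the region for a fresh allocation (either the current max region
-- or an explicit override) and bump the max region past it, mirroring the
-- 'W4.intMax' / 'W4.intAdd' steps in 'freshAlloc'.
freshRegion :: Maybe Integer -> AllocState -> (Integer, AllocState)
freshRegion mregion (AllocState mr) =
  let thisRegion = fromMaybe mr mregion
      mr' = max mr thisRegion + 1
  in (thisRegion, AllocState mr')

-- >>> freshRegion Nothing (AllocState 3)
-- (3,AllocState {maxRegion = 4})
-- >>> freshRegion (Just 7) (AllocState 3)
-- (7,AllocState {maxRegion = 8})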
@@ -331,7 +423,7 @@ mkClockOverride :: MS.SymArchConstraints arch => MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ return register -} -> StubOverride arch -mkClockOverride rOut = StubOverride $ \sym _ -> do +mkClockOverride rOut = StubOverride $ \sym _ _ -> do let w = MC.memWidthNatRepr @(MC.ArchAddrWidth arch) fresh_bv <- W4.freshConstant sym (W4.safeSymbol "current_time") (W4.BaseBVRepr w) return $ StateTransformer $ \st -> do @@ -339,6 +431,25 @@ mkClockOverride rOut = StubOverride $ \sym _ -> do let ptr = PSR.ptrToEntry (CLM.LLVMPointer zero_nat fresh_bv) return (st { PS.simRegs = ((PS.simRegs st) & (MC.boundValue rOut) .~ ptr) }) +applyConcreteOverride :: + LCB.IsSymInterface sym => + 1 <= ptrW => + MC.MemWidth ptrW => + sym -> + PB.WhichBinaryRepr bin -> + PPa.PatchPairC (Maybe (MC.MemWord ptrW)) -> + PSR.MacawRegEntry sym (MT.BVType ptrW) -> + IO (PSR.MacawRegEntry sym (MT.BVType ptrW)) +applyConcreteOverride sym bin overridePair e = case PPa.getC bin overridePair of + Just (Just override) -> do + let ptrW = PSR.macawRegBVWidth e + ptrOffset <- W4.bvLit sym ptrW (BVS.mkBV ptrW (toInteger override)) + region <- W4.natLit sym 0 + let ptr = (CLM.LLVMPointer region ptrOffset) + return $ e { PSR.macawRegValue = ptr } + _ -> return e + + mkObservableOverride :: forall arch. 16 <= MC.ArchAddrWidth arch => @@ -347,10 +458,9 @@ mkObservableOverride :: MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ r0 -} -> MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ r1 -} -> StubOverride arch -mkObservableOverride nm r0_reg r1_reg = StubOverride $ \sym _wsolver -> do +mkObservableOverride nm r0_reg r1_reg = StubOverride $ \sym _archInfo _wsolver -> do let w_mem = MC.memWidthNatRepr @(MC.ArchAddrWidth arch) let bv_repr = W4.BaseBVRepr w_mem - -- FIXME: this is wrong, since this value needs to read from memory bv_fn <- W4U.mkUninterpretedSymFn sym ("written_" ++ show nm) (Ctx.empty Ctx.:> bv_repr) (W4.BaseBVRepr w_mem) return $ StateTransformer $ \st -> do @@ -363,6 +473,267 @@ mkObservableOverride nm r0_reg r1_reg = StubOverride $ \sym _wsolver -> do let ptr = PSR.ptrToEntry (CLM.LLVMPointer zero_nat fresh_bv) return (st' { PS.simRegs = ((PS.simRegs st') & (MC.boundValue r0_reg) .~ ptr ) }) +mkEventOverride :: + forall arch ptrW. + 16 <= MC.ArchAddrWidth arch => + MS.SymArchConstraints arch => + ptrW ~ (MC.ArchAddrWidth arch) => + T.Text {- ^ name of call -} -> + (forall sym. LCB.IsSymInterface sym => sym -> PMT.MemChunk sym ptrW -> IO (W4.SymBV sym ptrW)) {- ^ compute return value for read chunk -} -> + (MC.MemWord (MC.ArchAddrWidth arch)) {- ^ concrete length to read -} -> + MC.ArchReg arch (MT.BVType ptrW) {- ^ buf -} -> + MC.ArchReg arch (MT.BVType ptrW) {- ^ return register -} -> + StubOverride arch +mkEventOverride nm readChunk len buf_reg rOut = StubOverride $ \(sym :: sym) _archInfo _wsolver -> return $ StateTransformer $ \(st :: PS.SimState sym arch v bin) -> do + -- let (bin :: PB.WhichBinaryRepr bin) = knownRepr + let ptrW = MC.memWidthNatRepr @ptrW + --(CLM.LLVMPointer _ len_bv) <- PSR.macawRegValue <$> (applyConcreteOverride sym bin lenOverridePair $ (PS.simRegs st) ^. MC.boundValue len_reg) + + --(fresh_buf, st') <- IO.liftIO $ freshAlloc sym (Just 4) st + let fresh_buf = (PS.simRegs st) ^. 
MC.boundValue buf_reg + let st' = st + + let buf_ptr = PSR.macawRegValue fresh_buf + let mem = PS.simMem st + len_sym <- W4.bvLit sym ptrW (BVS.mkBV ptrW (fromIntegral len)) + bytes_chunk <- PMT.readChunk sym (PMT.memState mem) buf_ptr len_sym + written <- readChunk sym bytes_chunk + mem' <- PMT.addExternalCallEvent sym nm (Ctx.empty Ctx.:> written) mem + result <- PSR.bvToEntry sym written + return (st' { PS.simMem = mem', PS.simRegs = ((PS.simRegs st) & (MC.boundValue rOut) .~ result ) }) + + +newtype MemChunkModify ptrW = + MemChunkModify { mkMemChunk :: (forall sym. LCB.IsSymInterface sym => sym -> PMT.MemChunk sym ptrW -> IO (PMT.MemChunk sym ptrW)) } + +noMemChunkModify :: MemChunkModify ptrW +noMemChunkModify = MemChunkModify $ \_ chunk -> return chunk + +modifyConcreteChunk :: + 1 <= ptrW => + Integral x => + MC.Endianness -> + W4.NatRepr ptrW -> + x {- ^ concrete contents (clamped to number of bytes ) -} -> + Natural {- ^ number of bytes -} -> + Natural {- ^ offset into base chunk -} -> + MemChunkModify ptrW +modifyConcreteChunk endianness ptrW x bytes offset = MemChunkModify $ \sym chunk -> do + offsetBV <- W4.bvLit sym ptrW (BVS.mkBV ptrW (fromIntegral offset)) + chunk' <- PMT.concreteMemChunk sym endianness ptrW x bytes + PMT.copyMemChunkInto sym chunk offsetBV chunk' + +-- Indirect pointers with a concrete base +data MemLocation ptrW = + MemPointer (MC.MemWord ptrW) + | MemIndPointer (MemLocation ptrW) (MC.MemWord ptrW) + +-- Create an indirect pointer by creating a fresh allocation for each level of indirection +freshAtLocation :: + forall sym arch ptrW v bin. + LCB.IsSymInterface sym => + MS.SymArchConstraints arch => + Typeable arch => + ptrW ~ MD.ArchAddrWidth arch => + sym -> + PS.SimState sym arch v bin -> + MC.Endianness -> + Maybe Integer {- ^ override for starting region -} -> + MemLocation ptrW -> + IO ( CLM.LLVMPtr sym ptrW, PS.SimState sym arch v bin) +freshAtLocation sym st endianness mregion loc = do + (base, st') <- freshAlloc sym mregion st + st'' <- go (fmap (+1) mregion) loc (PSR.macawRegValue base) st' + return (PSR.macawRegValue base, st'') + where + go :: + Maybe Integer -> + MemLocation ptrW -> + CLM.LLVMPtr sym ptrW -> + PS.SimState sym arch v bin -> + IO (PS.SimState sym arch v bin) + go mregion' loc' val st' = case loc' of + MemPointer mw -> do + let w_mem = MC.memWidthNatRepr @ptrW + ptrOffset <- W4.bvLit sym w_mem (BVS.mkBV w_mem (fromIntegral mw)) + region <- W4.natLit sym 0 + let ptr = CLM.LLVMPointer region ptrOffset + let memRepr = PMT.memWidthMemRepr endianness w_mem + mem' <- PMT.writeMem sym ptr val memRepr (PS.simMem st') + --mem' <- PMT.writeMemState @_ @arch sym (W4.truePred sym) (PMT.memState $ PS.simMem st) ptr memRepr val + return $ (st' { PS.simMem = mem' }) + MemIndPointer loc'' locOffset -> do + let w_mem = MC.memWidthNatRepr @ptrW + (fresh_loc_ptr, st'') <- freshAlloc sym mregion' st' + st''' <- go (fmap (+1) mregion') loc'' (PSR.macawRegValue fresh_loc_ptr) st'' + let (CLM.LLVMPointer region ptrOffset) = (PSR.macawRegValue fresh_loc_ptr) + offset_bv <- W4.bvLit sym w_mem (BVS.mkBV w_mem (fromIntegral locOffset)) + ptrOffset' <- W4.bvAdd sym ptrOffset offset_bv + let ptr = (CLM.LLVMPointer region ptrOffset') + let memRepr = PMT.memWidthMemRepr endianness w_mem + mem' <- PMT.writeMem sym ptr val memRepr (PS.simMem st''') + --mem' <- PMT.writeMemState @_ @arch sym (W4.truePred sym) (PMT.memState $ PS.simMem st''') ptr memRepr val + return $ st''' { PS.simMem = mem' } + +resolveMemLocation :: + forall sym ptrW. 
+ W4.IsSymExprBuilder sym => + MC.MemWidth ptrW => + sym -> + PMT.MemTraceImpl sym ptrW -> + MC.Endianness -> + MemLocation ptrW -> + IO (CLM.LLVMPtr sym ptrW) +resolveMemLocation sym mem endianness loc = case loc of + MemPointer mw -> do + let w_mem = MC.memWidthNatRepr @ptrW + ptrOffset <- W4.bvLit sym w_mem (BVS.mkBV w_mem (fromIntegral mw)) + region <- W4.natLit sym 0 + let ptr = (CLM.LLVMPointer region ptrOffset) + let memRepr = PMT.memWidthMemRepr endianness w_mem + PMT.readMemState sym (PMT.memState mem) (PMT.memBaseMemory mem) ptr memRepr + MemIndPointer loc' locOffset -> do + let w_mem = MC.memWidthNatRepr @ptrW + (CLM.LLVMPointer region ptrOffset) <- resolveMemLocation sym mem endianness loc' + offset_bv <- W4.bvLit sym w_mem (BVS.mkBV w_mem (fromIntegral locOffset)) + ptrOffset' <- W4.bvAdd sym ptrOffset offset_bv + let ptr = (CLM.LLVMPointer region ptrOffset') + let memRepr = PMT.memWidthMemRepr endianness w_mem + PMT.readMemState sym (PMT.memState mem) (PMT.memBaseMemory mem) ptr memRepr + +-- | Stub that reads the same uninterpreted chunk into both +-- original and patched programs, modulo optionally overwriting the uninterpreted +-- bytes with contents. Does nothing if the number of available bytes is zero or less. +mkReadOverride :: + forall arch. + 16 <= MC.ArchAddrWidth arch => + MS.SymArchConstraints arch => + Typeable arch => + T.Text {- ^ name of call -} -> + PPa.PatchPairC (MemChunkModify (MC.ArchAddrWidth arch)) {- ^ mutator for incoming symbolic data -} -> + PPa.PatchPairC (Maybe (MemLocation (MC.ArchAddrWidth arch))) {- ^ concrete override for buffer address (pointer to buffer pointer) -} -> + PPa.PatchPairC (Maybe (MC.MemWord (MC.ArchAddrWidth arch))) {- ^ concrete override for length -} -> + MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ source (i.e. file descriptor, socket, etc) -} -> + MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ buf -} -> + MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ len -} -> + MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ return register -} -> + StubOverride arch +mkReadOverride nm chunkOverrides bufferOverrides lenOverrides src_reg buf_reg len_reg rOut = StubOverride $ \(sym :: sym) archInfo wsolver -> return $ StateTransformer2 $ \ stPair -> do + let w_mem = MC.memWidthNatRepr @(MC.ArchAddrWidth arch) + let bv_repr = W4.BaseBVRepr w_mem + let byte_repr = W4.BaseBVRepr (W4.knownNat @8) + -- undefined number of actual available bytes from the read "source" + -- FIXME: formally this should be an interpreted function on the source register, not just an arbitrary shared value + -- FIXME: this does not capture the fact that usually the return value is signed, where negative values represent + -- various error conditions (aside from simply having no more content available). + + -- FIXME: this makes "bytes_available" globally unique, which is not quite right. We need a model of + -- the input stream and have this be a function of reading a particular part of that stream. 
+ + bytes_available_fn <- W4U.mkUninterpretedSymFn sym (show nm ++ "_bytes_available") Ctx.empty W4.BaseBoolRepr + bytes_available <- W4.applySymFn sym bytes_available_fn Ctx.empty + -- W4.freshConstant sym (W4.safeSymbol "bytes_available") W4.BaseBoolRepr + zero <- IO.liftIO $ W4.bvLit sym w_mem (BVS.zero w_mem) + -- for simplicity we assume that the symbolic chunk is exactly as large as the requested length (the max + -- length of both sides) + -- we then make the entire write into memory conditional on the symbolic predicate 'bytes_available' + -- FIXME: this is an over-simplification to reduce the state explosion into exactly two cases: either + -- we get all of the requested bytes or we don't get any + -- One idea for representing the case where we have fewer than expected but nonzero bytes would be to + -- have each of the individual bytes written to memory conditionally either from the chunk or simply read + -- back from memory. This allows us to still represent this as a single large write. + available_bytes_ <- PPa.runPatchPairT $ PPa.joinPatchPred (\x y -> IO.liftIO $ WEH.bvUMax sym x y) $ \bin -> do + st <- PPa.get bin stPair + (CLM.LLVMPointer _ len_bv) <- IO.liftIO $ (PSR.macawRegValue <$> (applyConcreteOverride sym bin lenOverrides $ (PS.simRegs st) ^. MC.boundValue len_reg)) + return len_bv + available_bytes <- IO.liftIO $ PVC.resolveSingletonSymbolicAsDefault wsolver available_bytes_ + + let arr_repr = W4.BaseArrayRepr (Ctx.singleton bv_repr) byte_repr + + -- FIXME: as with "bytes_available" this should not be globally unique, but rather a function + -- of where in the stream we're reading from + chunk_arr_fn <- W4U.mkUninterpretedSymFn sym (show nm ++ "_undefined_read") Ctx.empty arr_repr + chunk_arr <- IO.liftIO $ W4.applySymFn sym chunk_arr_fn Ctx.empty + --chunk_arr <- IO.liftIO $ W4.freshConstant sym (W4.safeSymbol "undefined_read") arr_repr + let chunk = PMT.MemChunk chunk_arr zero available_bytes + PPa.runPatchPairT $ PPa.forBins $ \bin -> do + --chunk_arr <- IO.liftIO $ W4.freshConstant sym W4.emptySymbol arr_repr + --let chunk = PMT.MemChunk chunk_arr zero available_bytes + st <- PPa.get bin stPair + chunk' <- case PPa.getC bin chunkOverrides of + Just ov -> IO.liftIO $ mkMemChunk ov sym chunk + Nothing -> return chunk + let src = (PS.simRegs st) ^. MC.boundValue src_reg + + -- FIXME: this region is picked arbitrarily and concretely so that we + -- can find this buffer again when we call the corresponding override + + + -- FIXME: we should be able to deduce the address by examining + -- the buffer expression itself + -- here we invent a fresh buffer pointer (instead of using the + -- given one) so we know that it's necessarily disjoint from anything else + + -- fresh_buf, st') <- IO.liftIO $ freshAlloc sym (Just 4) st + (buf, st'') <- case PPa.getC bin bufferOverrides of + Just (Just bufferOv) -> IO.liftIO $ do + (fresh_buf, st') <- freshAtLocation sym st (MI.archEndianness archInfo) (Just 4) bufferOv + return $ (PSR.ptrToEntry fresh_buf, st') + _ -> do + return ((PS.simRegs st) ^. 
MC.boundValue buf_reg, st) + + IO.liftIO $ readTransformer sym archInfo src buf rOut bytes_available chunk' st'' + + +{- + return $ readTransformer sym wsolver src_reg buf_reg len_reg rOut $ \bin _ -> case PPa.getC bin chunkOverrides of + Just mkr -> do + -- only apply the override if some content was returned + -- NB: otherwise this override will force there to always be content available to + -- read, which incorrectly will cause this stub to avoid legitimate program paths + -- which handle reaching the end-of-input + return chunk + -- nonzero_contents <- W4.bvUlt sym zero available_bytes + -- chunk' <- mkMemChunk mkr sym chunk + -- PMT.muxMemChunk sym nonzero_contents chunk' chunk + Nothing -> return chunk +-} + +readTransformer :: + forall sym arch bin v. + 16 <= MC.ArchAddrWidth arch => + MS.SymArchConstraints arch => + LCB.IsSymInterface sym => + sym -> + MI.ArchitectureInfo arch -> + PSR.MacawRegEntry sym (MT.BVType (MC.ArchAddrWidth arch)) {- ^ source (i.e. file descriptor, socket, etc) -} -> + PSR.MacawRegEntry sym (MT.BVType (MC.ArchAddrWidth arch)) {- ^ buf -} -> + MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ return register -} -> + W4.Pred sym {- ^ condition for read -} -> + PMT.MemChunk sym (MC.ArchAddrWidth arch) -> + PS.SimState sym arch v bin -> + IO (PS.SimState sym arch v bin) +readTransformer sym archInfo _src_val buf rOut cond chunk st = do + let ptrW = MC.memWidthNatRepr @(MC.ArchAddrWidth arch) + -- let (bin :: PB.WhichBinaryRepr bin) = knownRepr + -- let src_val = (PS.simRegs st) ^. MC.boundValue src_reg + -- chunk <- mkChunk bin src_val + -- let (CLM.LLVMPointer _ len_bv) = PSR.macawRegValue $ (PS.simRegs st) ^. MC.boundValue len_reg + --len_bv' <- PVC.resolveSingletonSymbolicAsDefault wsolver len_bv + let buf_ptr = PSR.macawRegValue buf + -- let buf_ptr = PSR.macawRegValue $ (PS.simRegs st) ^. MC.boundValue buf_reg + let mem = PS.simMem st + mem' <- IO.liftIO $ PMT.writeChunk sym (MI.archEndianness archInfo) chunk buf_ptr cond mem + zero <- IO.liftIO $ W4.bvLit sym ptrW (BVS.zero ptrW) + -- either the chunk is written or it's not and we return zero bytes written + written <- IO.liftIO $ W4.baseTypeIte sym cond (PMT.memChunkLen chunk) zero + zero_nat <- IO.liftIO $ W4.natLit sym 0 + let written_result = PSR.ptrToEntry (CLM.LLVMPointer zero_nat written) + -- FIXME: currently these are all unsigned values, but likely this return value will be treated as signed + -- where negative values represent different error conditions + return (st { PS.simMem = mem', PS.simRegs = ((PS.simRegs st) & (MC.boundValue rOut) .~ written_result ) }) + +-- SymExpr sym (BaseArrayType (Ctx.EmptyCtx Ctx.::> BaseBVType ptrW) (BaseBVType 8)) mkWriteOverride :: forall arch. 16 <= MC.ArchAddrWidth arch => @@ -373,7 +744,7 @@ mkWriteOverride :: MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ len -} -> MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ return register -} -> StubOverride arch -mkWriteOverride nm fd_reg buf_reg flen rOut = StubOverride $ \sym wsolver -> do +mkWriteOverride nm fd_reg buf_reg flen rOut = StubOverride $ \sym archInfo wsolver -> do let w_mem = MC.memWidthNatRepr @(MC.ArchAddrWidth arch) return $ StateTransformer $ \st -> do let buf_ptr = PSR.macawRegValue $ (PS.simRegs st) ^. 
MC.boundValue buf_reg @@ -388,7 +759,7 @@ mkWriteOverride nm fd_reg buf_reg flen rOut = StubOverride $ \sym wsolver -> do , Just W4.LeqProof <- W4.isPosNat w -> do let mem = PS.simMem st -- endianness doesn't really matter, as long as we're consistent - let memrepr = MC.BVMemRepr w MC.LittleEndian + let memrepr = MC.BVMemRepr w (MI.archEndianness archInfo) (CLM.LLVMPointer region val_bv) <- PMT.readMemState sym (PMT.memState mem) (PMT.memBaseMemory mem) buf_ptr memrepr mem' <- PMT.addExternalCallEvent sym nm (Ctx.empty Ctx.:> fd_bv Ctx.:> val_bv Ctx.:> W4.natToIntegerPure region) mem -- globally-unique @@ -398,7 +769,7 @@ mkWriteOverride nm fd_reg buf_reg flen rOut = StubOverride $ \sym wsolver -> do _ -> do let mem = PS.simMem st bytes_chunk <- PMT.readChunk sym (PMT.memState mem) buf_ptr len_bv - let memrepr = MC.BVMemRepr (W4.knownNat @8) MC.LittleEndian + let memrepr = MC.BVMemRepr (W4.knownNat @8) (MI.archEndianness archInfo) (CLM.LLVMPointer _region first_byte) <- PMT.readMemState sym (PMT.memState mem) (PMT.memBaseMemory mem) buf_ptr memrepr one <- W4.bvLit sym w_mem (BVS.mkBV w_mem 1) len_minus_one <- W4.bvSub sym len_bv one @@ -431,7 +802,7 @@ mkDefaultStubOverride :: String -> MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ return register -} -> StubOverride arch -mkDefaultStubOverride nm rOut = StubOverride $ \sym _ -> do +mkDefaultStubOverride nm rOut = StubOverride $ \sym _ _ -> do let w = MC.memWidthNatRepr @(MC.ArchAddrWidth arch) fresh_bv <- W4.freshConstant sym (W4.safeSymbol nm) (W4.BaseBVRepr w) return $ StateTransformer $ \st -> do @@ -439,6 +810,17 @@ mkDefaultStubOverride nm rOut = StubOverride $ \sym _ -> do let ptr = PSR.ptrToEntry (CLM.LLVMPointer zero_nat fresh_bv) return (st { PS.simRegs = ((PS.simRegs st) & (MC.boundValue rOut) .~ ptr) }) +-- | Default override returns the same arbitrary value for both binaries +mkArgPassthroughOverride :: + forall arch. + MS.SymArchConstraints arch => + MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ argument register -} -> + MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ return register -} -> + StubOverride arch +mkArgPassthroughOverride rArg rOut = StubOverride $ \_ _ _ -> return $ StateTransformer $ \st -> do + let arg = (PS.simRegs st) ^. 
MC.boundValue rArg + return (st { PS.simRegs = ((PS.simRegs st) & (MC.boundValue rOut) .~ arg) }) + -- | Default override returns the same arbitrary value for both binaries, based on the input -- registers mkDefaultStubOverrideArg :: @@ -448,7 +830,7 @@ mkDefaultStubOverrideArg :: [Some (MC.ArchReg arch)] {- ^ argument registers -} -> MC.ArchReg arch (MT.BVType (MC.ArchAddrWidth arch)) {- ^ return register -} -> StubOverride arch -mkDefaultStubOverrideArg nm rArgs rOut = StubOverride $ \sym _ -> do +mkDefaultStubOverrideArg nm rArgs rOut = StubOverride $ \sym _ _ -> do let w = MC.memWidthNatRepr @(MC.ArchAddrWidth arch) return $ StateTransformer $ \st -> do @@ -481,7 +863,7 @@ mkNOPStub :: MS.SymArchConstraints arch => String -> StubOverride arch -mkNOPStub _nm = StubOverride $ \_sym _ -> return $ StateTransformer $ \st -> return st +mkNOPStub _nm = StubOverride $ \_sym _ _ -> return $ StateTransformer $ \st -> return st -- | A witness to the validity of an architecture, along with any -- architecture-specific data required for the verifier diff --git a/src/Pate/EventTrace.hs b/src/Pate/EventTrace.hs index 5f8a3138..e8d31253 100644 --- a/src/Pate/EventTrace.hs +++ b/src/Pate/EventTrace.hs @@ -50,6 +50,9 @@ module Pate.EventTrace , compareExternalCallData , ExternalCallData(..) , MemChunk(..) +, copyMemChunkInto +, concreteMemChunk +, muxMemChunk ) where @@ -97,6 +100,9 @@ import qualified What4.Expr.ArrayUpdateMap as AUM import Data.Data (Typeable) import GHC.TypeLits import qualified Data.Macaw.Memory as MC +import qualified What4.Concrete as W4C +import Data.Foldable (foldlM) +import Data.Parameterized (Some(..)) -- Needed since SymBV is a type alias newtype SymBV' sym w = SymBV' { unSymBV :: SymBV sym w } @@ -409,6 +415,81 @@ instance PEM.ExprMappable sym (MemChunk sym ptrW) where instance W4S.SerializableExprs sym => W4S.W4Serializable sym (MemChunk sym ptrW) where w4Serialize (MemChunk a b c) = W4S.object ["mem_arr" .== a, "base" .== b, "len" .== c] +-- | Overwrite the contents of a base memory chunk with new contents at a +-- given offset. +copyMemChunkInto :: + IsSymExprBuilder sym => + 1 <= ptrW => + sym -> + MemChunk sym ptrW {- ^ base chunk -} -> + SymBV sym ptrW {- ^ index into base chunk to copy contents -} -> + MemChunk sym ptrW {- ^ new contents -} -> + IO (MemChunk sym ptrW) +copyMemChunkInto sym baseChunk startIdx srcChunk = do + baseIdx <- bvAdd sym (memChunkBase baseChunk) startIdx + arr' <- case asConcrete (memChunkLen srcChunk) of + Just (W4C.ConcreteBV ptrW srcLenBV) -> do + let srcLen = BV.asUnsigned srcLenBV + upds <- forM [0 .. srcLen] $ \idx -> do + idxSym <- bvLit sym ptrW (BV.mkBV ptrW idx) + srcIdx <- bvAdd sym (memChunkBase srcChunk) idxSym + -- chunk base + starting index + byte index + baseIdx' <- bvAdd sym baseIdx idxSym + srcByte <- arrayLookup sym (memChunkArr srcChunk) (Ctx.singleton srcIdx) + return (Ctx.singleton baseIdx', srcByte) + foldlM (\a (baseIdx', srcByte) -> arrayUpdate sym a baseIdx' srcByte) (memChunkArr baseChunk) upds + _ -> + arrayCopy sym (memChunkArr baseChunk) baseIdx (memChunkArr baseChunk) (memChunkBase baseChunk) (memChunkLen srcChunk) + baseTop <- bvAdd sym baseIdx (memChunkLen srcChunk) + -- we may extend the size of the base chunk if we write bytes past the top of + -- its contents + len' <- WEH.bvUMax sym (memChunkLen baseChunk) baseTop + return $ baseChunk { memChunkArr = arr', memChunkLen = len'} + + +-- | Create a 'MemChunk' with concrete contents. 
+concreteMemChunk :: + IsSymExprBuilder sym => + Integral x => + 1 <= ptrW => + sym -> + MC.Endianness -> + NatRepr ptrW -> + x {- ^ concrete contents (clamped to number of bytes ) -} -> + Natural {- ^ number of bytes -} -> + IO (MemChunk sym ptrW) +concreteMemChunk sym endianness ptrW contents bytesLen = do + Some bitsLen <- return $ mkNatRepr (bytesLen * 8) + let contentsBV = BV.unsignedClamp bitsLen (fromIntegral contents) + let mbytes = case endianness of + MC.BigEndian -> BV.asBytesBE bitsLen contentsBV + MC.LittleEndian -> BV.asBytesLE bitsLen contentsBV + bytes <- case mbytes of + Just bytes -> return bytes + Nothing -> fail "concreteMemChunk: impossible" + let arrTp = Ctx.singleton (BaseBVRepr ptrW) + let indices = map (Ctx.singleton . BVIndexLit ptrW . BV.mkBV ptrW . fromIntegral) [0 .. bytesLen] + bytesSym <- mapM (bvLit sym (knownNat @8) . BV.word8) bytes + let contentsBytes = AUM.fromAscList (BaseBVRepr (knownNat @8)) (zip indices bytesSym) + zeroByte <- bvLit sym (knownNat @8) (BV.zero (knownNat @8)) + arr <- arrayFromMap sym arrTp contentsBytes zeroByte + zeroIndex <- bvLit sym ptrW (BV.zero ptrW) + lenSymBV <- bvLit sym ptrW (BV.mkBV ptrW (fromIntegral bytesLen)) + return $ MemChunk arr zeroIndex lenSymBV + +muxMemChunk :: + IsSymExprBuilder sym => + sym -> + Pred sym -> + MemChunk sym ptrW -> + MemChunk sym ptrW -> + IO (MemChunk sym ptrW) +muxMemChunk sym p chunkT chunkF = do + arr <- baseTypeIte sym p (memChunkArr chunkT) (memChunkArr chunkF) + base <- baseTypeIte sym p (memChunkBase chunkT) (memChunkBase chunkF) + len <- baseTypeIte sym p (memChunkLen chunkT) (memChunkLen chunkF) + return $ MemChunk arr base len + compareTrees :: OrdF (SymExpr sym) => Ord tp => MuxTree sym tp -> MuxTree sym tp -> Ordering compareTrees mt1 mt2 = let diff --git a/src/Pate/Memory/MemTrace.hs b/src/Pate/Memory/MemTrace.hs index 051202ea..32a274b0 100644 --- a/src/Pate/Memory/MemTrace.hs +++ b/src/Pate/Memory/MemTrace.hs @@ -27,6 +27,7 @@ {-# LANGUAGE NoStarIsType #-} #endif {-# OPTIONS_GHC -fno-warn-orphans #-} +{-# LANGUAGE MultiWayIf #-} module Pate.Memory.MemTrace ( MemTraceImpl(..) @@ -74,13 +75,22 @@ module Pate.Memory.MemTrace , memInstrSeq , addRegEvent , readChunk +, writeChunk , module Pate.EventTrace +, segOffToPtr +, readFromChunk +, BytesBV(..) 
+, memWidthMemRepr +, writeMem +, muxPtr ) where import Unsafe.Coerce import Control.Applicative import Control.Lens ((%~), (&), (^.), (.~)) +import Control.Monad ( forM ) import Control.Monad.State +import Control.Monad.Trans.State ( modifyM ) import qualified Data.BitVector.Sized as BV import Data.Map (Map) import qualified Data.Map as Map @@ -157,6 +167,7 @@ import qualified Data.Parameterized.TraversableF as TF import Pate.EventTrace import qualified Data.Parameterized.TraversableFC as TFC import What4.SymSequence +import qualified What4.Concrete as W4C ------ -- * Undefined pointers @@ -748,6 +759,7 @@ memInstrSeq :: InstructionTrace sym arch memInstrSeq mem = getWrappedPtrW (memInstrSeq_ mem) +-- | TODO: document the implied invariants of this datatype data MemTraceImpl sym ptrW = MemTraceImpl { memSeq :: MemTraceSeq sym ptrW -- ^ The sequence of memory operations in reverse execution order; @@ -1281,6 +1293,19 @@ doWriteMem :: StateT (MemTraceImpl sym ptrW) IO () doWriteMem sym = doMemOpInternal sym Write Unconditional +writeMem :: + IsSymInterface sym => + MemWidth ptrW => + sym -> + LLVMPtr sym ptrW -> + RegValue sym (MS.ToCrucibleType ty) -> + MemRepr ty -> + MemTraceImpl sym ptrW -> + IO (MemTraceImpl sym ptrW) +writeMem sym ptr v repr mem = do + execStateT (doMemOpInternal sym Write Unconditional (addrWidthRepr ptr) ptr v repr) mem + + doCondWriteMem :: IsSymInterface sym => MemWidth ptrW => @@ -1557,6 +1582,16 @@ writeMemState sym cond memSt ptr repr val = do MemTraceImpl _ memSt' _ _ _ _ <- execStateT (doMemOpInternal sym Write (Conditional cond) (addrWidthRepr mem) ptr val repr) mem return memSt' +memWidthMemRepr :: + forall ptrW p. + MemWidth ptrW => + Endianness -> + p ptrW -> + MemRepr (MT.BVType ptrW) +memWidthMemRepr endianness proxy = case addrWidthRepr proxy of + Addr32 -> BVMemRepr (knownNat @4) endianness + Addr64 -> BVMemRepr (knownNat @8) endianness + -- | Write to the memory array and set the dirty bits on -- any written addresses writeMemBV :: forall sym ptrW w. @@ -1603,6 +1638,196 @@ writeMemBV sym mem_init ptr repr (LLVMPointer region val) = go 0 repr val mem_in mem1 <- go n reprHead hd mem go (n + 1) repr' tl mem1 +data BytesBV sym w where + BytesBV :: 1 <= bytes => NatRepr bytes -> SymBV sym (8 * bytes) -> BytesBV sym bytes + +concatBytes :: + IsExprBuilder sym => + sym -> + BytesBV sym w1 -> + BytesBV sym w2 -> + IO (BytesBV sym (w1 + w2)) +concatBytes sym (BytesBV bytes1 bv1) (BytesBV bytes2 bv2) + | LeqProof <- mulMono (knownNat @8) bytes1 + , LeqProof <- mulMono (knownNat @8) bytes2 + , LeqProof <- leqAddPos bytes1 bytes2 + , Refl <- addMulDistribRight bytes1 bytes2 (knownNat @8) + , Refl <- mulComm (knownNat @8) (addNat bytes1 bytes2) + , Refl <- mulComm (knownNat @8) bytes1 + , Refl <- mulComm (knownNat @8) bytes2 + = do + result <- bvConcat sym bv1 bv2 + return $ BytesBV (addNat bytes1 bytes2) result + + +-- | Read a given number of bytes from the given MemChunk. Note that this ignores the +-- actual length of the chunk and is therefore undefined if bytes past this length +-- are accessed. +readFromChunk :: + forall sym ptrW bytes. + IsSymExprBuilder sym => + 1 <= ptrW => + 1 <= bytes => + sym -> + Endianness -> + MemChunk sym ptrW -> + SymBV sym ptrW -> + NatRepr bytes -> + IO (BytesBV sym bytes) +readFromChunk sym endianness (MemChunk baseArray chunk_offset _) start_offset num_bytes = go num_bytes + where + go :: + forall bytes'. 
+ 1 <= bytes' => + NatRepr bytes' -> + IO (BytesBV sym bytes') + go num_bytes' = do + BaseBVRepr ptrW <- return $ exprType chunk_offset + offset_int <- intLit sym ((fromIntegral $ natValue num_bytes) - (fromIntegral $ natValue num_bytes')) + offset_bv <- integerToBV sym offset_int ptrW + + index <- bvAdd sym chunk_offset offset_bv >>= bvAdd sym start_offset + byte <- arrayLookup sym baseArray (Ctx.singleton index) + let rest_len = decNat num_bytes' + Refl <- return $ minusPlusCancel num_bytes' (knownNat @1) + Refl <- return $ plusComm (knownNat @1) rest_len + case isZeroOrGT1 rest_len of + Left Refl -> return $ BytesBV (knownNat @1) byte + Right LeqProof -> do + rest <- go rest_len + case endianness of + BigEndian -> concatBytes sym (BytesBV (knownNat @1) byte) rest + LittleEndian -> concatBytes sym rest (BytesBV (knownNat @1) byte) + + +-- | Fetch a byte from the given 'MemChunk', with a predicate +-- that determines if the byte is within the bounds of the chunk. +getChunkByte :: + forall sym ptrW. + IsSymExprBuilder sym => + 1 <= ptrW => + sym -> + MemChunk sym ptrW -> + SymBV sym ptrW -> + IO (SymBV sym 8, Pred sym) +getChunkByte sym (MemChunk baseArray offset len) idx = do + index <- bvAdd sym offset idx + byte <- arrayLookup sym baseArray (Ctx.singleton index) + valid <- bvUlt sym index len + return (byte, valid) + +{- +-- | Conditionally write a single byte to memory if it is within the +-- bounds of the given MemChunk and write length. +writeChunkByte :: + forall sym ptrW. + IsSymInterface sym => + 1 <= ptrW => + MemWidth ptrW => + sym -> + MemChunk sym ptrW -> + SymBV sym ptrW {- index into chunk -} -> + LLVMPtr sym ptrW -> + MemTraceImpl sym ptrW -> + IO (MemTraceImpl sym ptrW) +writeChunkByte sym chunk idx_bv (LLVMPointer region offset) mem = do + let ptrW = bvWidth offset + (byte, in_chunk) <- getChunkByte sym chunk idx_bv + in_write <- bvUlt sym idx_bv write_len + valid <- andPred sym in_chunk in_write + let memRepr = BVMemRepr (knownNat @1) LittleEndian + zero_nat <- intLit sym 0 >>= integerToNat sym + offset' <- bvAdd sym offset idx_bv + let ptr = (LLVMPointer region offset') + execStateT (doCondWriteMem sym valid (addrWidthRepr ptrW) ptr (LLVMPointer zero_nat byte) memRepr) mem + + +-- Write an array of bytes into memory. Returns the resulting memory state +-- as well as the number of symbolic bytes written (minimum of memchunk length +-- and requested length). +-- This updates both the "bytes" array values with the given contents, as +-- well as the "region" array to set the written bytes to have region zero +-- (i.e. consider the written values to be absolute/raw bytes). +-- FIXME: For symbolic-length writes +-- only the first and laste bytes are included in the event stream. +-- Additionally it is internally using 'arrayCopy' which is likely to +-- hang the solver if there isn't some bound on the write length +-- that can be inferred. +writeChunkState :: forall sym ptrW. 
+ MemWidth ptrW => + IsSymInterface sym => + MemWidth ptrW => + sym -> + MemChunk sym ptrW -> + LLVMPtr sym ptrW -> + MemTraceImpl sym ptrW -> + IO (MemTraceImpl sym ptrW) +writeChunkState sym chunk@(MemChunk writeBytes off_chunk len) ptr mem = do + let memSt = memState mem + let ptrW = bvWidth len + (_ Ctx.:> reg Ctx.:> off) <- arrayIdx sym ptr 0 + regArrayBytes <- arrayLookup sym (memArrBytes memSt) (Ctx.singleton reg) + regArrayBytes' <- arrayCopy sym regArrayBytes off writeBytes off_chunk len + memBytes' <- arrayUpdate sym (memArrBytes memSt) (Ctx.singleton reg) regArrayBytes' + zero_int <- intLit sym 0 + regArrayRegs <- arrayLookup sym (memArrRegions memSt) (Ctx.singleton reg) + regArrayRegs' <- arraySet sym regArrayRegs off zero_int len + memRegions' <- arrayUpdate sym (memArrRegions memSt) (Ctx.singleton reg) regArrayRegs' + let memSt' = memSt { memArrBytes = memBytes', memArrRegions = memRegions' } + zero <- bvLit sym ptrW (BV.mkBV ptrW 0) + -- we explicitly write the first and last bytes of the chunk so they are included + -- in the event stream + -- this is a workaround in lieu of being able to include symbolic-width chunks + -- in events + -- NB: if the chunk or write length is zero then these writes do nothing, as + -- they are conditional on the index being within bounds + mem' <- writeChunkByte sym chunk zero write_len ptr (mem { memState = memSt' }) + one <- bvLit sym ptrW (BV.mkBV ptrW 1) + -- NB: if len = 0 this will underflow. In this case the write will be a no-op since + -- end > len (out of bounds). + end <- bvSub sym len one + writeChunkByte sym chunk end write_len ptr mem' + -- below isn't quite right because it will then require all symbolic-length memory + -- writes to be exactly equal/considered observable + -- in general we need another memory event type to specifically handle symbolic-length + -- writes, which we then need to be able to extract a memory footprint from + {- + let chunk' = chunk { memChunkLen = len } + let event = ExternalCallEvent "writeChunkState" [ExternalCallDataChunk chunk'] + addMemEvent sym event mem'' + -} +-} + +writeChunk :: forall sym ptrW. 
+ MemWidth ptrW => + IsSymInterface sym => + MemWidth ptrW => + sym -> + Endianness -> -- FIXME: endianness actually doesn't matter here + MemChunk sym ptrW -> + LLVMPtr sym ptrW {- ^ address to write chunk into -} -> + Pred sym {- ^ symbolic condition for entire write -} -> + MemTraceImpl sym ptrW -> + IO (MemTraceImpl sym ptrW) +writeChunk sym endianness chunk ptr cond mem = do + let ptrW = memWidthNatRepr @ptrW + mnumBytes <- case asConcrete (memChunkLen chunk) of + Just (W4C.ConcreteBV _ chunkLenC) -> return $ Just $ BV.asUnsigned chunkLenC + Nothing -> return Nothing + case mnumBytes of + Just numBytes_int + | Just (Some numBytes) <- someNat numBytes_int -> case isZeroOrGT1 numBytes of + Left Refl -> return mem + Right LeqProof -> do + zero_nat <- natLit sym 0 + zero <- bvLit sym ptrW (BV.zero ptrW) + -- FIXME: we can actually just arbitrarily pick an endianness since it only matters that + -- the read and write are consistent + BytesBV _ chunk_bv <- readFromChunk sym endianness chunk zero numBytes + let memRepr = BVMemRepr numBytes endianness + execStateT (doCondWriteMem sym cond (addrWidthRepr ptrW) ptr (LLVMPointer zero_nat chunk_bv) memRepr) mem + _ -> fail "TODO: writeChunk: symbolic write lengths not supported" + ifCond :: IsSymInterface sym => sym -> diff --git a/src/Pate/SimulatorRegisters.hs b/src/Pate/SimulatorRegisters.hs index 1173fbf7..e3d729e0 100644 --- a/src/Pate/SimulatorRegisters.hs +++ b/src/Pate/SimulatorRegisters.hs @@ -9,13 +9,17 @@ {-# LANGUAGE TypeOperators #-} {-# LANGUAGE UndecidableInstances #-} {-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE ScopedTypeVariables #-} module Pate.SimulatorRegisters ( CrucBaseTypes, MacawRegEntry(..), MacawRegVar(..), macawRegEntry, - ptrToEntry + macawRegBVWidth, + ptrToEntry, + bvToEntry ) where import qualified Data.Macaw.Symbolic as MS @@ -54,6 +58,13 @@ data MacawRegEntry sym (tp :: MT.Type) where } -> MacawRegEntry sym tp + +macawRegBVWidth :: MacawRegEntry sym (MT.BVType ptrW) -> WI.NatRepr ptrW +macawRegBVWidth (MacawRegEntry repr _) = case repr of + CLM.LLVMPointerRepr ptrW -> ptrW + _ -> error $ "macawRegBVWidth: unexpected type:" ++ show repr + + instance W4S.SerializableExprs sym => W4S.W4Serializable sym (MacawRegEntry sym tp) where w4Serialize (MacawRegEntry r v) = case r of CLM.LLVMPointerRepr{} -> W4S.w4Serialize (Ptr.fromLLVMPointer v) @@ -104,6 +115,17 @@ ptrToEntry :: ptrToEntry ptr@(CLM.LLVMPointer _ bv) = case WI.exprType bv of WI.BaseBVRepr w -> MacawRegEntry (CLM.LLVMPointerRepr w) ptr +bvToEntry :: + WI.IsExprBuilder sym => + sym -> + WI.SymBV sym w -> + IO (MacawRegEntry sym (MT.BVType w)) +bvToEntry sym bv = do + zero <- WI.natLit sym 0 + WI.BaseBVRepr w <- return $ WI.exprType bv + return $ MacawRegEntry (CLM.LLVMPointerRepr w) (CLM.LLVMPointer zero bv) + + instance PEM.ExprMappable sym (MacawRegEntry sym tp) where mapExpr sym f entry = do case macawRegRepr entry of diff --git a/src/Pate/Verification/PairGraph.hs b/src/Pate/Verification/PairGraph.hs index 8de356d7..e8af885d 100644 --- a/src/Pate/Verification/PairGraph.hs +++ b/src/Pate/Verification/PairGraph.hs @@ -131,6 +131,7 @@ module Pate.Verification.PairGraph , addDomainRefinements , isSyncNode , queueExitMerges + , setPropagationKind ) where import Prettyprinter @@ -1001,6 +1002,16 @@ setCondition :: PairGraph sym arch setCondition nd condK propK cond pg = pg { pairGraphConditions = Map.insert (nd,condK) (propK, cond) (pairGraphConditions pg) } +setPropagationKind :: + GraphNode arch -> + ConditionKind -> + 
PropagateKind -> + PairGraph sym arch -> + PairGraph sym arch +setPropagationKind nd condK propK pg = case Map.lookup (nd,condK) (pairGraphConditions pg) of + Just (_, cond) -> pg { pairGraphConditions = Map.insert (nd,condK) (propK, cond) (pairGraphConditions pg) } + _ -> pg + dropCondition :: GraphNode arch -> ConditionKind -> diff --git a/src/Pate/Verification/Simplify.hs b/src/Pate/Verification/Simplify.hs index eec6fb10..62714b5a 100644 --- a/src/Pate/Verification/Simplify.hs +++ b/src/Pate/Verification/Simplify.hs @@ -21,6 +21,7 @@ module Pate.Verification.Simplify ( , prettySimplifier , coreStrategy , applySimpStrategy + , unfoldDefsStrategy ) where import Control.Monad (foldM) @@ -239,6 +240,9 @@ unfoldDefsStrategy = WEH.joinStrategy $ withValid $ return $ WEH.SimpStrategy $ prettySimplifier :: forall sym arch. SimpStrategy sym (EquivM_ sym arch) prettySimplifier = deepPredicateSimplifier <> base <> unfoldDefsStrategy <> deepPredicateSimplifier <> unfoldDefsStrategy <> base where + mem :: SimpStrategy sym (EquivM_ sym arch) + mem = WEH.joinStrategy $ withValid $ return WEH.memReadPrettySimplify + base :: SimpStrategy sym (EquivM_ sym arch) base = WEH.joinStrategy $ withValid $ return $ WEH.bvPrettySimplify <> WEH.memReadPrettySimplify <> WEH.collapseBVOps diff --git a/src/Pate/Verification/StrongestPosts.hs b/src/Pate/Verification/StrongestPosts.hs index 2442f6b6..daa3ef3a 100644 --- a/src/Pate/Verification/StrongestPosts.hs +++ b/src/Pate/Verification/StrongestPosts.hs @@ -971,13 +971,18 @@ orphanReturnBundle scope pPair = withSym $ \sym -> do return $ PS.SimInput (PS.simVarState vars') blk absSt wsolver <- getWrappedSolver + + -- FIXME: should we pass in the archInfo for each binary to each stub separately? + -- currently it's only used to get the architecture endianness, which realistically + -- shouldn't change between the patched and original binaries + origBinary <- PMC.binary <$> getBinCtx' PBi.OriginalRepr + let archInfo = PA.binArchInfo origBinary + simOut_ <- IO.withRunInIO $ \runInIO -> - PA.withStubOverride sym wsolver ov $ \f -> runInIO $ PPa.forBins $ \bin -> do - input <- PPa.get bin simIn_ - let inSt = PS.simInState input - outSt <- liftIO $ f inSt + PA.withStubOverride sym archInfo wsolver ov $ \f -> runInIO $ do + outSt <- liftIO $ f (TF.fmapF PS.simInState simIn_) blkend <- liftIO $ MCS.initBlockEnd (Proxy @arch) sym MCS.MacawBlockEndReturn - return $ PS.SimOutput outSt blkend + return $ TF.fmapF (\st' -> PS.SimOutput st' blkend) outSt return $ PS.SimBundle simIn_ simOut_ @@ -1691,22 +1696,33 @@ doCheckObservables scope ne bundle preD pg = case PS.simOut bundle of choice "Emit warning and continue" () $ return Nothing -- NB: the only difference between the two assertion types is the priority that the propagation step occurs at -- the verifier will finish any widening steps it has before processing any deferred propagation steps - choice "Assert difference is infeasible (defer proof)" () $ return $ Just (ConditionAsserted, PriorityDeferredPropagation) - choice "Assert difference is infeasible (prove immediately)" () $ return $ Just (ConditionAsserted, PriorityPropagation) - choice "Assume difference is infeasible" () $ return $ Just (ConditionAssumed, PriorityDeferredPropagation) - choice "Avoid difference with equivalence condition" () $ return $ Just (ConditionEquiv, PriorityDeferredPropagation) + choice "Assert difference is infeasible (defer proof)" () $ return $ Just (ConditionAsserted, RefineUsingExactEquality, PriorityDeferredPropagation) + choice "Assert 
difference is infeasible (prove immediately)" () $ return $ Just (ConditionAsserted, RefineUsingExactEquality, PriorityPropagation)
+            choice "Assume difference is infeasible" () $ return $ Just (ConditionAssumed, RefineUsingExactEquality, PriorityDeferredPropagation)
+            choice "Avoid difference with equivalence condition" () $ return $ Just (ConditionEquiv, RefineUsingExactEquality, PriorityDeferredPropagation)
+            choice "Avoid difference with path-sensitive equivalence condition" () $ return $ Just (ConditionEquiv, RefineUsingIntraBlockPaths, PriorityPropagation)
         False -> return Nothing
       case mcondK of
-        Just (condK, p) -> do
-          let do_propagate = shouldPropagate (getPropagationKind pg nd condK)
+        Just (condK, refineK, p) -> do
+          let do_propagate = case p of
+                PriorityPropagation -> True
+                _ -> shouldPropagate (getPropagationKind pg nd condK)
+
           simplifier <- PSi.mkSimplifier PSi.deepPredicateSimplifier
-          eqSeq_simp <- PSi.applySimplifier simplifier eqSeq
+          eqSeq_simp <- case refineK of
+            RefineUsingExactEquality -> PSi.applySimplifier simplifier eqSeq
+            RefineUsingIntraBlockPaths -> do
+              eqSeq_paths <- strengthenPredicate [Some eqSeq] eqSeq
+              PSi.applySimplifier simplifier eqSeq_paths
+
           withPG pg $ do
             liftEqM_ $ addToEquivCondition scope nd condK eqSeq_simp
             liftPG $ do
-              when do_propagate $
+              when do_propagate $ do
                 modify $ queueAncestors (priority p) nd
+                when ((condK, p) == (ConditionEquiv, PriorityPropagation)) $
+                  modify $ setPropagationKind nd ConditionEquiv PropagateOnce
               modify $ dropPostDomains nd (priority PriorityDomainRefresh)
               modify $ queueNode (raisePriority (priority PriorityNodeRecheck)) nd
             return Nothing
@@ -2598,9 +2614,9 @@ combineOverrides ::
   PPa.PatchPairC (PA.StubOverride arch) ->
   PA.StubOverride arch
 combineOverrides (PPa.PatchPairSingle _ (Const f)) = f
-combineOverrides (PPa.PatchPairC (PA.StubOverride f1) (PA.StubOverride f2)) = PA.StubOverride $ \sym wsolver -> do
-  f1' <- f1 sym wsolver
-  f2' <- f2 sym wsolver
+combineOverrides (PPa.PatchPairC (PA.StubOverride f1) (PA.StubOverride f2)) = PA.StubOverride $ \sym archInfo wsolver -> do
+  f1' <- f1 sym archInfo wsolver
+  f2' <- f2 sym archInfo wsolver
   let fnPair = PPa.PatchPairC f1' f2'
   return $ PA.StateTransformer $ \(st :: PS.SimState sym arch v bin) -> do
     let (bin :: PBi.WhichBinaryRepr bin) = knownRepr
@@ -2877,12 +2893,24 @@ handleStub scope bundle currBlock d gr0_ pPair mpRetPair stubPair = fnTrace "han
     False -> do
       ov <- mergeStubOverrides archData stubPair ovPair
       wsolver <- getWrappedSolver
+      -- FIXME: should we pass in the archInfo for each binary to each stub separately?
+      -- currently it's only used to get the architecture endianness, which realistically
+      -- shouldn't change between the patched and original binaries
+      origBinary <- PMC.binary <$> getBinCtx' PBi.OriginalRepr
+      let archInfo = PA.binArchInfo origBinary
+      unfold_simplifier <- PSi.mkSimplifier PSi.unfoldDefsStrategy
+      pretty_simplifier <- PSi.mkSimplifier PSi.prettySimplifier
 
       outputs <- IO.withRunInIO $ \runInIO ->
-        PA.withStubOverride sym wsolver ov $ \f -> runInIO $ PPa.forBins $ \bin -> do
-          output <- PPa.get bin $ PS.simOut bundle
-          nextSt <- liftIO $ f (PS.simOutState output)
-          return $ output { PS.simOutState = nextSt }
+        PA.withStubOverride sym archInfo wsolver ov $ \f -> runInIO $ do
+          {- out <- PSi.applySimplifier unfold_simplifier (TF.fmapF PS.simOutState (PS.simOut bundle))
+          nextStPair_ <- liftIO $ f out
+          nextStPair <- PSi.applySimplifier unfold_simplifier nextStPair_ -}
+          nextStPair <- liftIO $ f (TF.fmapF PS.simOutState (PS.simOut bundle))
+          PPa.forBins $ \bin -> do
+            nextSt <- PPa.get bin nextStPair
+            output <- PPa.get bin (PS.simOut bundle)
+            return $ output { PS.simOutState = nextSt }
       let bundle' = bundle { PS.simOut = outputs }
       gr1 <- checkObservables scope currBlock bundle' d gr0
       case mpRetPair of
diff --git a/src/Pate/Verification/Widening.hs b/src/Pate/Verification/Widening.hs
index a884ba4b..dc010544 100644
--- a/src/Pate/Verification/Widening.hs
+++ b/src/Pate/Verification/Widening.hs
@@ -32,6 +32,7 @@ module Pate.Verification.Widening
   , getSomeGroundTrace
   , getTraceFromModel
   , addToEquivCondition
+  , strengthenPredicate
   ) where
 
 import GHC.Stack
@@ -768,7 +769,9 @@ propagateCondition scope bundle from to gr0_ = fnTrace "propagateCondition" $ do
     Just{} -> do
       -- take the condition of the target edge and bind it to
      -- the output state of the bundle
-      cond <- getEquivPostCondition scope bundle to condK gr
+      cond_ <- getEquivPostCondition scope bundle to condK gr
+      simplifier <- PSi.mkSimplifier PSi.deepPredicateSimplifier
+      cond <- PSi.applySimplifier simplifier cond_
 
      {-
 
@@ -1171,8 +1174,6 @@ abstractOverVars scope_pre bundle _from _to postSpec postResult = do
   -- in a loss of soundness; dropping an entry means that the resulting domain
   -- is effectively now assuming equality on that entry.
-  -- simplifier <- PSi.getSimplifier
-  --domEq_simplified <- PSi.applySimplifier simplifier (PAD.absDomEq postResult)
 
   let domEq = PAD.absDomEq postResult
   eq_post <- subTree "equivalence" $ fmap PS.unWS $ PS.scopedLocWither @sym @arch sym (PS.WithScope @_ @pre domEq) $ \loc (se :: PS.ScopedExpr sym tp pre) ->
     subTrace @"loc" (PL.SomeLocation loc) $ do
@@ -1188,6 +1189,7 @@ abstractOverVars scope_pre bundle _from _to postSpec postResult = do
             se' <- liftIO $ PS.applyScopeCoercion sym pre_to_post se
             e'' <- liftIO $ PS.applyScopeCoercion sym post_to_pre se'
             curAsms <- currentAsm
+
             case x of
               PC.ThrowOnEqRescopeFailure -> do
                 void $ emitError $ PEE.InnerSymEquivalenceError $ PEE.RescopingFailure curAsms se e''
@@ -1195,8 +1197,7 @@ abstractOverVars scope_pre bundle _from _to postSpec postResult = do
                 void $ emitWarning $ PEE.InnerSymEquivalenceError $ PEE.RescopingFailure curAsms se e''
             return Nothing
 
-  let evSeq = PAD.absDomEvents postResult
-  --nextSeq <-
+  let evSeq = PAD.absDomEvents postResult
   evSeq_post <- subTree "events" $ fmap PS.unWS $ PS.scopedLocWither @sym @arch sym (PS.WithScope @_ @pre evSeq) $ \loc se ->
     subTrace @"loc" (PL.SomeLocation loc) $ do
       emitTraceLabel @"expr" "input" (Some (PS.unSE se))
@@ -1404,6 +1405,7 @@ widenPostcondition scope bundle preD postD0 = do
             _ -> panic Verifier "widenPostcondition" [ "Unexpected widening case"]
           Sat evalFn -> do
             emit PE.SolverFailure
+            emitTrace @"message" "equivalence failure"
             if i <= 0 then
               -- we ran out of gas
              do slice <- PP.simBundleToSlice scope bundle
@@ -1462,7 +1464,7 @@ widenPostcondition scope bundle preD postD0 = do
           liftIO $ PAD.absDomainValsToPostCond sym eqCtx st Nothing vals
         res2 <- case postVals of
-          PPa.PatchPairSingle bin (Const valPost) -> 
+          PPa.PatchPairSingle bin (Const valPost) ->
             PL.foldLocation @sym @arch sym valPost (Left postD) (widenOnce WidenValue (Gas i) (Just (Some bin)))
           PPa.PatchPairC valPostO valPostP -> do
             res1 <- PL.foldLocation @sym @arch sym valPostO (Left postD) (widenOnce WidenValue (Gas i) (Just (Some PBi.OriginalRepr)))
@@ -1543,6 +1545,7 @@ getInitalAbsDomainVals bundle preDom = withTracing @"debug" "getInitalAbsDomainV
       pre <- PPa.get bin (PAD.absDomVals preDom)
       PAD.initAbsDomainVals sym eqCtx getConcreteRange out pre
 
+
 widenUsingCounterexample ::
   sym ->
   PS.SimScope sym arch v ->
diff --git a/src/What4/ExprHelpers.hs b/src/What4/ExprHelpers.hs
index 81b73dc7..62921dbb 100644
--- a/src/What4/ExprHelpers.hs
+++ b/src/What4/ExprHelpers.hs
@@ -90,6 +90,8 @@ module What4.ExprHelpers (
   , W4SBV.bvPrettySimplify
   , W4SBV.memReadPrettySimplify
   , W4SBV.collapseBVOps
+  , bvUMax
+  , bvUMin
   ) where
 
 import GHC.TypeNats
@@ -148,6 +150,30 @@ import Data.Maybe (fromMaybe)
 import qualified What4.Simplify.Bitvector as W4SBV
 import What4.Simplify.Bitvector (asSimpleSum)
 
+bvUMax ::
+  W4.IsExprBuilder sym =>
+  IO.MonadIO m =>
+  1 <= w =>
+  sym ->
+  W4.SymBV sym w ->
+  W4.SymBV sym w ->
+  m (W4.SymBV sym w)
+bvUMax sym bv1 bv2 = IO.liftIO $ do
+  ult <- W4.bvUlt sym bv1 bv2
+  W4.bvIte sym ult bv2 bv1
+
+bvUMin ::
+  W4.IsExprBuilder sym =>
+  IO.MonadIO m =>
+  1 <= w =>
+  sym ->
+  W4.SymBV sym w ->
+  W4.SymBV sym w ->
+  m (W4.SymBV sym w)
+bvUMin sym bv1 bv2 = IO.liftIO $ do
+  ult <- W4.bvUlt sym bv1 bv2
+  W4.bvIte sym ult bv1 bv2
+
 -- | Sets the abstract domain of the given integer to assume
 -- that it is positive.
 assumePositiveInt ::
diff --git a/submodules/macaw b/submodules/macaw
index 8d7d7d78..2bb5a299 160000
--- a/submodules/macaw
+++ b/submodules/macaw
@@ -1 +1 @@
-Subproject commit 8d7d7d7882054262794013320d7dec616e3ceb69
+Subproject commit 2bb5a2990a460f4eef9a81521cc402b2d9827992
diff --git a/tests/.programtargets b/tests/.programtargets
new file mode 100644
index 00000000..443a51d2
--- /dev/null
+++ b/tests/.programtargets
@@ -0,0 +1 @@
+7814e7c93da16592cec4c4ad879a4008cf3f1e50
diff --git a/tests/aarch32/.programtargets b/tests/aarch32/.programtargets
deleted file mode 100644
index e9eaebc6..00000000
--- a/tests/aarch32/.programtargets
+++ /dev/null
@@ -1 +0,0 @@
-0075d8114d338708a24a9f6e2bb9d40c210d6e37
diff --git a/tests/aarch32/Makefile b/tests/aarch32/Makefile
index 9398a8d2..9dc594f7 100644
--- a/tests/aarch32/Makefile
+++ b/tests/aarch32/Makefile
@@ -15,15 +15,13 @@ scripted/challenge10.original.exe: ${CHALLENGE_10_BIN}
 
 endif
 
-TARGET_7_DIR=$(CHALLENGE_DIR)/tests/target7/
-ifeq (,$(wildcard ${TARGET_7_DIR}))
+ifeq (,$(wildcard $(CHALLENGE_DIR)))
 else
 
 EXTRA_TARGETS+=scripted/target7
 
-scripted/target7.%: .programtargets
-	(((cd $(CHALLENGE_DIR) && git show $(shell cat .programtargets):tests/target7/$(notdir $@)) > $@) || (rm -f $@ && exit 1))
-
+scripted/target7.%: ../.programtargets
+	$(call fetch,target7)
 endif
 
 .PHONY: scripted/target7
diff --git a/tests/ppc32/Makefile b/tests/ppc32/Makefile
index 84efa92b..cf550be5 100644
--- a/tests/ppc32/Makefile
+++ b/tests/ppc32/Makefile
@@ -2,4 +2,25 @@ CC=powerpc-linux-gnu-gcc
 OD=powerpc-linux-gnu-objdump
 EXTRA_TARGETS=
+ifeq (,$(wildcard $(CHALLENGE_DIR)))
+else
+
+EXTRA_TARGETS+=scripted/target1
+
+scripted/target1.%: ../.programtargets
+	$(call fetch,target1)
+
+endif
+
+.PHONY: scripted/target1
+
+scripted:
+	mkdir -p scripted
+
+scripted/target1: scripted scripted/target1.original.exe scripted/target1.args scripted/target1.pate
+
+scripted/target1.script_run: scripted/target1
+	cd scripted; \
+	../../../pate.sh $(shell cat scripted/target1.args)
+
 
 include ../template.mk
diff --git a/tests/ppc32/scripted/target1.expect b/tests/ppc32/scripted/target1.expect
new file mode 100644
index 00000000..e1eadd4d
--- /dev/null
+++ b/tests/ppc32/scripted/target1.expect
@@ -0,0 +1 @@
+ConditionallyEquivalent
\ No newline at end of file
diff --git a/tests/template.mk b/tests/template.mk
index 64611168..fddd41a1 100644
--- a/tests/template.mk
+++ b/tests/template.mk
@@ -1,5 +1,9 @@
 .PHONY: all extras
+define fetch
+(((cd $(CHALLENGE_DIR) && git show $(shell cat ../.programtargets):tests/$1/$(notdir $@)) > $@) || (rm -f $@ && exit 1))
+endef
+
 
 extras: ${EXTRA_TARGETS}
 
 all: $(notdir $(patsubst %original.c,%test,$(wildcard ../src/*.original.c))) $(addprefix ./build/,$(notdir $(patsubst %c,%i,$(wildcard ../src/*.c)))) extras
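
Note on the shared fetch helper added to tests/template.mk: a sketch of its expansion, using the ppc32 target1 rule above as the example. This assumes CHALLENGE_DIR points at a checkout of the external challenge-problem repository, as in the aarch32 Makefile; the target name scripted/target1.original.exe is one of the prerequisites listed for scripted/target1 and matches the scripted/target1.% pattern. For that target, $(call fetch,target1) expands to roughly:

  (((cd $(CHALLENGE_DIR) && git show 7814e7c93da16592cec4c4ad879a4008cf3f1e50:tests/target1/target1.original.exe) > scripted/target1.original.exe) || (rm -f scripted/target1.original.exe && exit 1))

That is, the pinned commit hash is read from the single tests/.programtargets file (referenced as ../.programtargets from each architecture subdirectory), the requested file is extracted from the challenge repository with git show, and a partially written output file is removed if the extraction fails.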