Tests.QuickCheckUtils: clean-up
Anton-Latukha committed Oct 5, 2021
1 parent 9238e7b commit 7a7b301
Showing 1 changed file with 31 additions and 25 deletions.
56 changes: 31 additions & 25 deletions tests/Tests/QuickCheckUtils.hs
@@ -32,12 +32,12 @@ module Tests.QuickCheckUtils

import Control.Arrow ((***))
import Control.DeepSeq (NFData (..), deepseq)
import Control.Exception (bracket)
import Data.Char (isSpace)
import Data.Coerce (coerce)
import Data.Text.Foreign (I8)
import Data.Text.Lazy.Builder.RealFloat (FPFormat(..))
import Data.Word (Word8, Word16)
import GHC.Num (integerLog2)
import Test.QuickCheck hiding (Fixed(..), Small (..), (.&.))
import Tests.Utils
import qualified Data.ByteString as B
@@ -50,6 +50,7 @@ import qualified Data.Text.Internal.Lazy as TL
import qualified Data.Text.Internal.Lazy.Fusion as TLF
import qualified Data.Text.Lazy as TL
import qualified System.IO as IO
import Control.Applicative (liftA2, liftA3)

genWord8 :: Gen Word8
genWord8 = chooseAny
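-- chooseAny draws from the full Word8 range, so ByteStrings built from it
-- are not restricted to well-formed UTF-8.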
@@ -59,7 +60,7 @@ instance Arbitrary I8 where
  shrink = shrinkIntegral

instance Arbitrary B.ByteString where
  arbitrary = B.pack `fmap` listOf genWord8
  arbitrary = B.pack <$> listOf genWord8
  shrink = map B.pack . shrink . B.unpack

instance Arbitrary BL.ByteString where
@@ -69,10 +70,13 @@ instance Arbitrary BL.ByteString where
    , BL.fromChunks . map B.singleton <$> listOf genWord8
    -- so that a code point with 4 byte long utf8 representation
    -- could appear split over 3 non-singleton chunks
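    -- (for instance U+1F600, whose UTF-8 encoding is the four bytes
    -- F0 9F 98 80, may land with F0 at the end of one chunk, 9F 98 in
    -- the two-byte middle chunk, and 80 at the start of the next)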
    , (\a b c -> BL.fromChunks [a, b, c])
      <$> arbitrary
      <*> ((\a b -> B.pack [a, b]) <$> genWord8 <*> genWord8)
      <*> arbitrary
    , liftA3 (\a b c -> BL.fromChunks [a, b, c])
        arbitrary
        (liftA2 (\a b -> B.pack [a, b])
          genWord8
          genWord8
        )
        arbitrary
    ]
  shrink xs = BL.fromChunks <$> shrink (BL.toChunks xs)

@@ -84,7 +88,7 @@ newtype Sqrt a = Sqrt { unSqrt :: a }
instance Arbitrary a => Arbitrary (Sqrt a) where
  arbitrary = coerce $ sized $ \n -> resize (smallish n) $ arbitrary @a
    where
      smallish = round . (sqrt :: Double -> Double) . fromIntegral . abs
      smallish = round . sqrt @Double . fromIntegral . abs
  shrink = coerce (shrink @a)
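-- (The generator above resizes to the square root of the size parameter,
-- presumably to keep Sqrt-wrapped inputs affordable for properties with
-- superlinear running time.)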

instance Arbitrary T.Text where
@@ -136,12 +140,12 @@ data DecodeErr = Lenient | Ignore | Strict | Replace
  deriving (Show, Eq, Bounded, Enum)

genDecodeErr :: DecodeErr -> Gen T.OnDecodeError
genDecodeErr Lenient = return T.lenientDecode
genDecodeErr Ignore = return T.ignore
genDecodeErr Strict = return T.strictDecode
genDecodeErr Lenient = pure T.lenientDecode
genDecodeErr Ignore = pure T.ignore
genDecodeErr Strict = pure T.strictDecode
genDecodeErr Replace = (\c _ _ -> c) <$>
  frequency
    [ (1, return Nothing)
    [ (1, pure Nothing)
    , (50, pure <$> arbitraryUnicodeChar)
    ]
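-- The Replace handler generated above ignores the error message and the
-- offending input, always returning the same result: either Nothing (drop
-- the invalid input) or a fixed replacement character.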

@@ -232,29 +236,31 @@ instance Arbitrary (Precision Double) where
  shrink = coerce (shrink @(Maybe Int))

instance Arbitrary IO.Newline where
  arbitrary = oneof [return IO.LF, return IO.CRLF]
  arbitrary = oneof [pure IO.LF, pure IO.CRLF]

instance Arbitrary IO.NewlineMode where
  arbitrary = IO.NewlineMode <$> arbitrary <*> arbitrary

instance Arbitrary IO.BufferMode where
  arbitrary = oneof [ return IO.NoBuffering,
                      return IO.LineBuffering,
                      return (IO.BlockBuffering Nothing),
                      (IO.BlockBuffering . Just . (+1) . fromIntegral) `fmap`
                      (arbitrary :: Gen Word16) ]
  arbitrary =
    oneof
      [ pure IO.NoBuffering
      , pure IO.LineBuffering
      , pure (IO.BlockBuffering Nothing)
      , IO.BlockBuffering . pure . succ . fromIntegral <$> arbitrary @Word16
      ]
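-- succ (like the (+1) it replaces) bumps the generated Word16 by one, so
-- the explicit size handed to IO.BlockBuffering is always at least 1.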

-- This test harness is complex! What property are we checking?
--
-- Reading after writing a multi-line file should give the same
-- results as were written.
--
-- What do we vary while checking this property?
-- * The lines themselves, scrubbed to contain neither CR nor LF. (By
-- working with a list of lines, we ensure that the data will
-- sometimes contain line endings.)
-- * Newline translation mode.
-- * Buffering.
-- * The lines themselves, scrubbed to contain neither CR nor LF. (By
-- working with a list of lines, we ensure that the data will
-- sometimes contain line endings.)
-- * Newline translation mode.
-- * Buffering.
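--
-- For intuition only, a caller could instantiate the harness roughly like
-- this (the name and combinators below are a sketch, not taken from this
-- module; TIO stands for a qualified import of Data.Text.IO):
--
-- > t_write_read = write_read T.unlines T.filter TIO.hPutStr TIO.hGetContents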
write_read :: (NFData a, Eq a, Show a)
  => ([b] -> a)
  -> ((Char -> Bool) -> a -> b)
@@ -268,18 +274,18 @@ write_read _ _ _ _ (IO.NewlineMode IO.LF IO.CRLF) _ _ = discard
write_read unline filt writer reader nl buf ts = ioProperty $
  (===t) <$> act
  where
    t = unline . map (filt (not . (`elem` "\r\n"))) $ ts
    t = unline . map (filt (`notElem` "\r\n")) $ ts

    act = withTempFile $ \path h -> do
      IO.hSetNewlineMode h nl
      IO.hSetBuffering h buf
      () <- writer h t
      IO.hClose h
      bracket (IO.openFile path IO.ReadMode) IO.hClose $ \h' -> do
      IO.withFile path IO.ReadMode $ \h' -> do
        IO.hSetNewlineMode h' nl
        IO.hSetBuffering h' buf
        r <- reader h'
        r `deepseq` return r
        r `deepseq` pure r

-- Generate various Unicode space characters with high probability
arbitrarySpacyChar :: Gen Char
