pyppmd provides classes and functions for compressing and decompressing text data, using the PPM (Prediction by Partial Matching) compression algorithm, variations H and I.2. It provides an API similar to Python's zlib/bz2/lzma modules.
Describe the bug
When running fuzzing test on Windows, hypothesis fails.
It seems to be a multi-threading problem.
It happens with the C implementation.
The CFFI implementation works fine.
Expected behavior
The fuzzing test should pass on Windows.
Environment:
OS: Windows 10
Python CPython 3.6-3.9
project version: v0.10.0
______________________________ test_ppmd7_fuzzer ______________________________
self = <hypothesis.core.StateForActualGivenExecution object at 0x00000225E5946D68>
data = ConjectureData(INTERESTING, 58 bytes, frozen)
def _execute_once_for_engine(self, data):
"""Wrapper around ``execute_once`` that intercepts test failure
exceptions and single-test control exceptions, and turns them into
appropriate method calls to `data` instead.
This allows the engine to assume that any exception other than
``StopTest`` must be a fatal error, and should stop the entire engine.
"""
try:
trace = frozenset()
if (
self.failed_normally
and Phase.explain in self.settings.phases
and sys.gettrace() is None
and not PYPY
): # pragma: no cover
# This is in fact covered by our *non-coverage* tests, but due to the
# settrace() contention *not* by our coverage tests. Ah well.
tracer = Tracer()
try:
sys.settrace(tracer.trace)
result = self.execute_once(data)
if data.status == Status.VALID:
self.explain_traces[None].add(frozenset(tracer.branches))
finally:
sys.settrace(None)
trace = frozenset(tracer.branches)
else:
> result = self.execute_once(data)
.tox\py36\lib\site-packages\hypothesis\core.py:683:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.core.StateForActualGivenExecution object at 0x00000225E5946D68>
data = ConjectureData(INTERESTING, 58 bytes, frozen), print_example = False
is_final = False, expected_failure = None
def execute_once(
self, data, print_example=False, is_final=False, expected_failure=None
):
"""Run the test function once, using ``data`` as input.
If the test raises an exception, it will propagate through to the
caller of this method. Depending on its type, this could represent
an ordinary test failure, or a fatal error, or a control exception.
If this method returns normally, the test might have passed, or
it might have placed ``data`` in an unsuccessful state and then
swallowed the corresponding control exception.
"""
data.is_find = self.is_find
text_repr = [None]
if self.settings.deadline is None:
test = self.test
else:
@proxies(self.test)
def test(*args, **kwargs):
self.__test_runtime = None
initial_draws = len(data.draw_times)
start = time.perf_counter()
result = self.test(*args, **kwargs)
finish = time.perf_counter()
internal_draw_time = sum(data.draw_times[initial_draws:])
runtime = datetime.timedelta(
seconds=finish - start - internal_draw_time
)
self.__test_runtime = runtime
current_deadline = self.settings.deadline
if not is_final:
current_deadline = (current_deadline // 4) * 5
if runtime >= current_deadline:
raise DeadlineExceeded(runtime, self.settings.deadline)
return result
def run(data):
# Set up dynamic context needed by a single test run.
with local_settings(self.settings):
with deterministic_PRNG():
with BuildContext(data, is_final=is_final):
# Generate all arguments to the test function.
args, kwargs = data.draw(self.search_strategy)
if expected_failure is not None:
text_repr[0] = arg_string(test, args, kwargs)
if print_example or current_verbosity() >= Verbosity.verbose:
output = StringIO()
printer = RepresentationPrinter(output)
if print_example:
printer.text("Falsifying example:")
else:
printer.text("Trying example:")
if self.print_given_args:
printer.text(" ")
printer.text(test.__name__)
with printer.group(indent=4, open="(", close=""):
printer.break_()
for v in args:
printer.pretty(v)
# We add a comma unconditionally because
# generated arguments will always be kwargs,
# so there will always be more to come.
printer.text(",")
printer.breakable()
for i, (k, v) in enumerate(kwargs.items()):
printer.text(k)
printer.text("=")
printer.pretty(v)
printer.text(",")
if i + 1 < len(kwargs):
printer.breakable()
printer.break_()
printer.text(")")
printer.flush()
report(output.getvalue())
return test(*args, **kwargs)
# Run the test function once, via the executor hook.
# In most cases this will delegate straight to `run(data)`.
> result = self.test_runner(data, run)
.tox\py36\lib\site-packages\hypothesis\core.py:619:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = ConjectureData(INTERESTING, 58 bytes, frozen)
function = <function StateForActualGivenExecution.execute_once.<locals>.run at 0x00000225E5A2DB70>
def default_new_style_executor(data, function):
> return function(data)
.tox\py36\lib\site-packages\hypothesis\executors.py:52:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = ConjectureData(INTERESTING, 58 bytes, frozen)
def run(data):
# Set up dynamic context needed by a single test run.
with local_settings(self.settings):
with deterministic_PRNG():
with BuildContext(data, is_final=is_final):
# Generate all arguments to the test function.
args, kwargs = data.draw(self.search_strategy)
if expected_failure is not None:
text_repr[0] = arg_string(test, args, kwargs)
if print_example or current_verbosity() >= Verbosity.verbose:
output = StringIO()
printer = RepresentationPrinter(output)
if print_example:
printer.text("Falsifying example:")
else:
printer.text("Trying example:")
if self.print_given_args:
printer.text(" ")
printer.text(test.__name__)
with printer.group(indent=4, open="(", close=""):
printer.break_()
for v in args:
printer.pretty(v)
# We add a comma unconditionally because
# generated arguments will always be kwargs,
# so there will always be more to come.
printer.text(",")
printer.breakable()
for i, (k, v) in enumerate(kwargs.items()):
printer.text(k)
printer.text("=")
printer.pretty(v)
printer.text(",")
if i + 1 < len(kwargs):
printer.breakable()
printer.break_()
printer.text(")")
printer.flush()
report(output.getvalue())
> return test(*args, **kwargs)
.tox\py36\lib\site-packages\hypothesis\core.py:615:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
obj = b'\x07\x15\xa5\x07\x99BR\xa4*\xbf):kc=\x0e\xb1L\xa1', max_order = 62
mem_size = 4140799450
@given(
> obj=st.binary(min_size=5),
max_order=st.integers(min_value=2, max_value=64),
mem_size=st.integers(min_value=1 << 11, max_value=MAX_SIZE),
)
def test_ppmd7_fuzzer(obj, max_order, mem_size):
tests\test_ppmd7_fuzzer.py:14:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = (b'\x07\x15\xa5\x07\x99BR\xa4*\xbf):kc=\x0e\xb1L\xa1', 62, 4140799450)
kwargs = {}, initial_draws = 1, start = 0.5331072
@proxies(self.test)
def test(*args, **kwargs):
self.__test_runtime = None
initial_draws = len(data.draw_times)
start = time.perf_counter()
> result = self.test(*args, **kwargs)
.tox\py36\lib\site-packages\hypothesis\core.py:557:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
obj = b'\x07\x15\xa5\x07\x99BR\xa4*\xbf):kc=\x0e\xb1L\xa1', max_order = 62
mem_size = 4140799450
@given(
obj=st.binary(min_size=5),
max_order=st.integers(min_value=2, max_value=64),
mem_size=st.integers(min_value=1 << 11, max_value=MAX_SIZE),
)
def test_ppmd7_fuzzer(obj, max_order, mem_size):
> enc = pyppmd.Ppmd7Encoder(max_order=max_order, mem_size=mem_size)
E SystemError: <class '_ppmd.Ppmd7Encoder'> returned NULL without setting an error
tests\test_ppmd7_fuzzer.py:19: SystemError
During handling of the above exception, another exception occurred:
@given(
> obj=st.binary(min_size=5),
max_order=st.integers(min_value=2, max_value=64),
mem_size=st.integers(min_value=1 << 11, max_value=MAX_SIZE),
)
def test_ppmd7_fuzzer(obj, max_order, mem_size):
tests\test_ppmd7_fuzzer.py:14:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
.tox\py36\lib\site-packages\hypothesis\core.py:735: in _execute_once_for_engine
data.mark_interesting(interesting_origin)
.tox\py36\lib\site-packages\hypothesis\internal\conjecture\data.py:1056: in mark_interesting
self.conclude_test(Status.INTERESTING, interesting_origin)
.tox\py36\lib\site-packages\hypothesis\internal\conjecture\data.py:1052: in conclude_test
self.freeze()
.tox\py36\lib\site-packages\hypothesis\internal\conjecture\data.py:980: in freeze
self.observer.conclude_test(self.status, self.interesting_origin)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.datatree.TreeRecordingObserver object at 0x00000225E59A8F98>
status = Status.INTERESTING
interesting_origin = (<class 'SystemError'>, 'D:\\a\\pyppmd\\pyppmd\\tests\\test_ppmd7_fuzzer.py', 19, ())
def conclude_test(self, status, interesting_origin):
"""Says that ``status`` occurred at node ``node``. This updates the
node if necessary and checks for consistency."""
if status == Status.OVERRUN:
return
i = self.__index_in_current_node
node = self.__current_node
if i < len(node.values) or isinstance(node.transition, Branch):
inconsistent_generation()
new_transition = Conclusion(status, interesting_origin)
if node.transition is not None and node.transition != new_transition:
# As an, I'm afraid, horrible bodge, we deliberately ignore flakiness
# where tests go from interesting to valid, because it's much easier
# to produce good error messages for these further up the stack.
if isinstance(node.transition, Conclusion) and (
node.transition.status != Status.INTERESTING
or new_transition.status != Status.VALID
):
raise Flaky(
"Inconsistent test results! Test case was %r on first run but %r on second"
> % (node.transition, new_transition)
)
E hypothesis.errors.Flaky: Inconsistent test results! Test case was Conclusion(status=Status.INTERESTING, interesting_origin=(<class 'SystemError'>, 'D:\\a\\pyppmd\\pyppmd\\tests\\test_ppmd7_fuzzer.py', 23, ())) on first run but Conclusion(status=Status.INTERESTING, interesting_origin=(<class 'SystemError'>, 'D:\\a\\pyppmd\\pyppmd\\tests\\test_ppmd7_fuzzer.py', 19, ())) on second
.tox\py36\lib\site-packages\hypothesis\internal\conjecture\datatree.py:413: Flaky
=========================== short test summary info ===========================
FAILED tests/test_ppmd7_fuzzer.py::test_ppmd7_fuzzer - hypothesis.errors.Flak...
========================= 1 failed, 5 passed in 0.74s =========================
ERROR: InvocationError for command 'D:\a\pyppmd\pyppmd\.tox\py36\Scripts\python.EXE' -m pytest -vv -s (exited with code 1)
Describe the bug When running the fuzzing test on Windows, hypothesis fails. It seems to be a multi-threading problem.
It happens with the C implementation. The CFFI implementation works fine.
Expected behavior The fuzzing test should pass on Windows.
Environment: