Closed by github-actions[bot] 2 years ago
=================================== FAILURES ===================================
__________________________ AttrDataTest.test_bytes_df __________________________
self = <hypothesis.core.StateForActualGivenExecution object at 0x118bc63e0>
data = ConjectureData(INTERESTING, 14 bytes, frozen)
def _execute_once_for_engine(self, data):
"""Wrapper around ``execute_once`` that intercepts test failure
exceptions and single-test control exceptions, and turns them into
appropriate method calls to `data` instead.
This allows the engine to assume that any exception other than
``StopTest`` must be a fatal error, and should stop the entire engine.
"""
try:
trace = frozenset()
if (
self.failed_normally
and not self.failed_due_to_deadline
and Phase.shrink in self.settings.phases
and Phase.explain in self.settings.phases
and sys.gettrace() is None
and not PYPY
): # pragma: no cover
# This is in fact covered by our *non-coverage* tests, but due to the
# settrace() contention *not* by our coverage tests. Ah well.
tracer = Tracer()
try:
sys.settrace(tracer.trace)
result = self.execute_once(data)
if data.status == Status.VALID:
self.explain_traces[None].add(frozenset(tracer.branches))
finally:
sys.settrace(None)
trace = frozenset(tracer.branches)
else:
> result = self.execute_once(data)
../../../hostedtoolcache/Python/3.10.6/x64/lib/python3.10/site-packages/hypothesis/core.py:749:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.core.StateForActualGivenExecution object at 0x118bc63e0>
data = ConjectureData(INTERESTING, 14 bytes, frozen), print_example = False
is_final = False, expected_failure = None
def execute_once(
self, data, print_example=False, is_final=False, expected_failure=None
):
"""Run the test function once, using ``data`` as input.
If the test raises an exception, it will propagate through to the
caller of this method. Depending on its type, this could represent
an ordinary test failure, or a fatal error, or a control exception.
If this method returns normally, the test might have passed, or
it might have placed ``data`` in an unsuccessful state and then
swallowed the corresponding control exception.
"""
self.ever_executed = True
data.is_find = self.is_find
text_repr = None
if self.settings.deadline is None:
test = self.test
else:
@proxies(self.test)
def test(*args, **kwargs):
self.__test_runtime = None
initial_draws = len(data.draw_times)
start = time.perf_counter()
result = self.test(*args, **kwargs)
finish = time.perf_counter()
internal_draw_time = sum(data.draw_times[initial_draws:])
runtime = datetime.timedelta(
seconds=finish - start - internal_draw_time
)
self.__test_runtime = runtime
current_deadline = self.settings.deadline
if not is_final:
current_deadline = (current_deadline // 4) * 5
if runtime >= current_deadline:
raise DeadlineExceeded(runtime, self.settings.deadline)
return result
def run(data):
# Set up dynamic context needed by a single test run.
with local_settings(self.settings):
with deterministic_PRNG():
with BuildContext(data, is_final=is_final):
# Generate all arguments to the test function.
args, kwargs = data.draw(self.search_strategy)
if expected_failure is not None:
nonlocal text_repr
text_repr = repr_call(test, args, kwargs)
if print_example or current_verbosity() >= Verbosity.verbose:
output = StringIO()
printer = RepresentationPrinter(output)
if print_example:
printer.text("Falsifying example:")
else:
printer.text("Trying example:")
if self.print_given_args:
printer.text(" ")
printer.text(test.__name__)
with printer.group(indent=4, open="(", close=""):
printer.break_()
for v in args:
printer.pretty(v)
# We add a comma unconditionally because
# generated arguments will always be kwargs,
# so there will always be more to come.
printer.text(",")
printer.breakable()
for i, (k, v) in enumerate(kwargs.items()):
printer.text(k)
printer.text("=")
printer.pretty(v)
printer.text(",")
if i + 1 < len(kwargs):
printer.breakable()
printer.break_()
printer.text(")")
printer.flush()
report(output.getvalue())
return test(*args, **kwargs)
# Run the test function once, via the executor hook.
# In most cases this will delegate straight to `run(data)`.
> result = self.test_runner(data, run)
../../../hostedtoolcache/Python/3.10.6/x64/lib/python3.10/site-packages/hypothesis/core.py:688:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = ConjectureData(INTERESTING, 14 bytes, frozen)
function = <function StateForActualGivenExecution.execute_once.<locals>.run at 0x1192e7a30>
def default_new_style_executor(data, function):
> return function(data)
../../../hostedtoolcache/Python/3.10.6/x64/lib/python3.10/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = ConjectureData(INTERESTING, 14 bytes, frozen)
def run(data):
# Set up dynamic context needed by a single test run.
with local_settings(self.settings):
with deterministic_PRNG():
with BuildContext(data, is_final=is_final):
# Generate all arguments to the test function.
args, kwargs = data.draw(self.search_strategy)
if expected_failure is not None:
nonlocal text_repr
text_repr = repr_call(test, args, kwargs)
if print_example or current_verbosity() >= Verbosity.verbose:
output = StringIO()
printer = RepresentationPrinter(output)
if print_example:
printer.text("Falsifying example:")
else:
printer.text("Trying example:")
if self.print_given_args:
printer.text(" ")
printer.text(test.__name__)
with printer.group(indent=4, open="(", close=""):
printer.break_()
for v in args:
printer.pretty(v)
# We add a comma unconditionally because
# generated arguments will always be kwargs,
# so there will always be more to come.
printer.text(",")
printer.breakable()
for i, (k, v) in enumerate(kwargs.items()):
printer.text(k)
printer.text("=")
printer.pretty(v)
printer.text(",")
if i + 1 < len(kwargs):
printer.breakable()
printer.break_()
printer.text(")")
printer.flush()
report(output.getvalue())
> return test(*args, **kwargs)
../../../hostedtoolcache/Python/3.10.6/x64/lib/python3.10/site-packages/hypothesis/core.py:684:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <tiledb.tests.test_hypothesis.AttrDataTest object at 0x1186b94e0>
data = b'\xa3"\xd1'
@pytest.mark.skipif(not has_pandas(), reason="pandas not installed")
@hypothesis.settings(deadline=None)
@given(st.binary())
def test_bytes_df(self, data):
start = time.time()
# TODO this test is slow. might be nice to run with in-memory
# VFS (if faster) but need to figure out correct setup
# uri = "mem://" + str(uri_int)
uri_df = self.path()
array = np.array([data], dtype="S0")
series = pd.Series(array)
df = pd.DataFrame({"": series})
start_fpd = time.time()
tiledb.from_pandas(uri_df, df, sparse=False)
fpd_time = time.time() - start_fpd
hypothesis.note(f"from_pandas time: {fpd_time}")
# DEBUG
tiledb.stats_enable()
tiledb.stats_reset()
# END DEBUG
with tiledb.open(uri_df) as A:
tm.assert_frame_equal(A.df[:], df)
hypothesis.note(tiledb.stats_dump(print_out=False))
# DEBUG
tiledb.stats_disable()
duration = time.time() - start
if duration > 2:
# Hypothesis setup is causing deadline exceeded errors
# https://github.com/TileDB-Inc/TileDB-Py/issues/1194
# Set deadline=None and use internal timing instead.
> pytest.fail("test_bytes_numpy exceeded 2s")
tiledb/tests/test_hypothesis.py:96:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
reason = 'test_bytes_numpy exceeded 2s', pytrace = True, msg = None
@_with_exception(Failed)
def fail(
reason: str = "", pytrace: bool = True, msg: Optional[str] = None
) -> "NoReturn":
"""Explicitly fail an executing test with the given message.
:param reason:
The message to show the user as reason for the failure.
:param pytrace:
If False, msg represents the full failure information and no
python traceback will be reported.
:param msg:
Same as ``reason``, but deprecated. Will be removed in a future version, use ``reason`` instead.
"""
__tracebackhide__ = True
reason = _resolve_msg_to_reason("fail", reason, msg)
> raise Failed(msg=reason, pytrace=pytrace)
E Failed: test_bytes_numpy exceeded 2s
../../../hostedtoolcache/Python/3.10.6/x64/lib/python3.10/site-packages/_pytest/outcomes.py:196: Failed
The above exception was the direct cause of the following exception:
self = <tiledb.tests.test_hypothesis.AttrDataTest object at 0x1186b94e0>
@pytest.mark.skipif(not has_pandas(), reason="pandas not installed")
> @hypothesis.settings(deadline=None)
tiledb/tests/test_hypothesis.py:57:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.core.StateForActualGivenExecution object at 0x118bc63e0>
data = ConjectureData(VALID, 14 bytes, frozen), print_example = True
is_final = True
expected_failure = (test_bytes_numpy exceeded 2s, 'self = <tiledb.tests.test_hypothesis.AttrDataTest object at 0x1186b94e0>\ndata = b\'\\...numpy exceeded 2s")\nE Failed: test_bytes_numpy exceeded 2s\n\ntiledb/tests/test_hypothesis.py:96: Failed\n')
def execute_once(
self, data, print_example=False, is_final=False, expected_failure=None
):
"""Run the test function once, using ``data`` as input.
If the test raises an exception, it will propagate through to the
caller of this method. Depending on its type, this could represent
an ordinary test failure, or a fatal error, or a control exception.
If this method returns normally, the test might have passed, or
it might have placed ``data`` in an unsuccessful state and then
swallowed the corresponding control exception.
"""
self.ever_executed = True
data.is_find = self.is_find
text_repr = None
if self.settings.deadline is None:
test = self.test
else:
@proxies(self.test)
def test(*args, **kwargs):
self.__test_runtime = None
initial_draws = len(data.draw_times)
start = time.perf_counter()
result = self.test(*args, **kwargs)
finish = time.perf_counter()
internal_draw_time = sum(data.draw_times[initial_draws:])
runtime = datetime.timedelta(
seconds=finish - start - internal_draw_time
)
self.__test_runtime = runtime
current_deadline = self.settings.deadline
if not is_final:
current_deadline = (current_deadline // 4) * 5
if runtime >= current_deadline:
raise DeadlineExceeded(runtime, self.settings.deadline)
return result
def run(data):
# Set up dynamic context needed by a single test run.
with local_settings(self.settings):
with deterministic_PRNG():
with BuildContext(data, is_final=is_final):
# Generate all arguments to the test function.
args, kwargs = data.draw(self.search_strategy)
if expected_failure is not None:
nonlocal text_repr
text_repr = repr_call(test, args, kwargs)
if print_example or current_verbosity() >= Verbosity.verbose:
output = StringIO()
printer = RepresentationPrinter(output)
if print_example:
printer.text("Falsifying example:")
else:
printer.text("Trying example:")
if self.print_given_args:
printer.text(" ")
printer.text(test.__name__)
with printer.group(indent=4, open="(", close=""):
printer.break_()
for v in args:
printer.pretty(v)
# We add a comma unconditionally because
# generated arguments will always be kwargs,
# so there will always be more to come.
printer.text(",")
printer.breakable()
for i, (k, v) in enumerate(kwargs.items()):
printer.text(k)
printer.text("=")
printer.pretty(v)
printer.text(",")
if i + 1 < len(kwargs):
printer.breakable()
printer.break_()
printer.text(")")
printer.flush()
report(output.getvalue())
return test(*args, **kwargs)
# Run the test function once, via the executor hook.
# In most cases this will delegate straight to `run(data)`.
result = self.test_runner(data, run)
# If a failure was expected, it should have been raised already, so
# instead raise an appropriate diagnostic error.
if expected_failure is not None:
exception, traceback = expected_failure
if (
isinstance(exception, DeadlineExceeded)
and self.__test_runtime is not None
):
report(
"Unreliable test timings! On an initial run, this "
"test took %.2fms, which exceeded the deadline of "
"%.2fms, but on a subsequent run it took %.2f ms, "
"which did not. If you expect this sort of "
"variability in your test timings, consider turning "
"deadlines off for this test by setting deadline=None."
% (
exception.runtime.total_seconds() * 1000,
self.settings.deadline.total_seconds() * 1000,
self.__test_runtime.total_seconds() * 1000,
)
)
else:
report("Failed to reproduce exception. Expected: \n" + traceback)
> raise Flaky(
f"Hypothesis {text_repr} produces unreliable results: "
"Falsified on the first call but did not on a subsequent one"
) from exception
E hypothesis.errors.Flaky: Hypothesis test_bytes_df(self=<tiledb.tests.test_hypothesis.AttrDataTest at 0x1186b94e0>, data=b'\xa3"\xd1') produces unreliable results: Falsified on the first call but did not on a subsequent one
E Falsifying example: test_bytes_df(
E data=b'\xa3"\xd1',
E self=<tiledb.tests.test_hypothesis.AttrDataTest at 0x1186b94e0>,
E )
E from_pandas time: 0.022504091262817383
E TileDB Embedded Version: (2, 11, 1)
E TileDB-Py Version: 0.1.dev1
E
E [
E {
E "timers": {
E "Context.StorageManager.sm_load_array_schemas_and_fragment_metadata.sum": 0.000590495,
E "Context.StorageManager.sm_load_array_schemas_and_fragment_metadata.avg": 0.000590495,
E "Context.StorageManager.sm_load_array_schema_from_uri.sum": 0.000312[548](https://github.com/TileDB-Inc/TileDB-Py/runs/8182439248?check_suite_focus=true#step:9:549),
E "Context.StorageManager.sm_load_array_schema_from_uri.avg": 0.000312548,
E "Context.StorageManager.sm_load_array_metadata.sum": 0.000300696,
E "Context.StorageManager.sm_load_array_metadata.avg": 0.000300696,
E "Context.StorageManager.sm_load_all_array_schemas.sum": 0.000352482,
E "Context.StorageManager.sm_load_all_array_schemas.avg": 0.000352482,
E "Context.StorageManager.load_fragment_metadata.sum": 0.000218695,
E "Context.StorageManager.load_fragment_metadata.avg": 0.000218695,
E "Context.StorageManager.array_open_for_reads.sum": 0.000596162,
E "Context.StorageManager.array_open_for_reads.avg": 0.000596162,
E "Context.StorageManager.Query.Subarray.read_load_relevant_rtrees.sum": 0.000282397,
E "Context.StorageManager.Query.Subarray.read_load_relevant_rtrees.avg": 0.000282397,
E "Context.StorageManager.Query.Subarray.read_compute_tile_overlap.sum": 0.000410435,
E "Context.StorageManager.Query.Subarray.read_compute_tile_overlap.avg": 0.000205218,
E "Context.StorageManager.Query.Subarray.read_compute_tile_coords.sum": 4.96e-06,
E "Context.StorageManager.Query.Subarray.read_compute_tile_coords.avg": 4.96e-06,
E "Context.StorageManager.Query.Subarray.read_compute_relevant_tile_overlap.sum": 4.6528e-05,
E "Context.StorageManager.Query.Subarray.read_compute_relevant_tile_overlap.avg": 4.6528e-05,
E "Context.StorageManager.Query.Subarray.read_compute_relevant_frags.sum": 5.2778e-05,
E "Context.StorageManager.Query.Subarray.read_compute_relevant_frags.avg": 5.2778e-05,
E "Context.StorageManager.Query.Subarray.read_compute_est_result_size.sum": 0.000733147,
E "Context.StorageManager.Query.Subarray.read_compute_est_result_size.avg": 0.000366573,
E "Context.StorageManager.Query.Reader.unfilter_attr_tiles.sum": 0.000268689,
E "Context.StorageManager.Query.Reader.unfilter_attr_tiles.avg": 0.000268689,
E "Context.StorageManager.Query.Reader.read_tiles.sum": 0.000258989,
E "Context.StorageManager.Query.Reader.read_tiles.avg": 0.000258989,
E "Context.StorageManager.Query.Reader.read_attribute_tiles.sum": 0.000262562,
E "Context.StorageManager.Query.Reader.read_attribute_tiles.avg": 0.000262562,
E "Context.StorageManager.Query.Reader.load_tile_offsets.sum": 0.000600481,
E "Context.StorageManager.Query.Reader.load_tile_offsets.avg": 0.000600481,
E "Context.StorageManager.Query.Reader.init_state.sum": 0.000296146,
E "Context.StorageManager.Query.Reader.init_state.avg": 0.000296146,
E "Context.StorageManager.Query.Reader.fix_offset_tiles.sum": 1.478e-06,
E "Context.StorageManager.Query.Reader.fix_offset_tiles.avg": 1.478e-06,
E "Context.StorageManager.Query.Reader.fill_dense_coords.sum": 1.2111e-05,
E "Context.StorageManager.Query.Reader.fill_dense_coords.avg": 1.2111e-05,
E "Context.StorageManager.Query.Reader.dowork.sum": 0.00174763,
E "Context.StorageManager.Query.Reader.dowork.avg": 0.00174763,
E "Context.StorageManager.Query.Reader.copy_var_tiles.sum": 0.000103695,
E "Context.StorageManager.Query.Reader.copy_var_tiles.avg": 0.000103695,
E "Context.StorageManager.Query.Reader.copy_offset_tiles.sum": 5.09e-05,
E "Context.StorageManager.Query.Reader.copy_offset_tiles.avg": 5.09e-05,
E "Context.StorageManager.Query.Reader.copy_attribute.sum": 0.00016[557](https://github.com/TileDB-Inc/TileDB-Py/runs/8182439248?check_suite_focus=true#step:9:558)8,
E "Context.StorageManager.Query.Reader.copy_attribute.avg": 0.000165578,
E "Context.StorageManager.Query.Reader.apply_query_condition.sum": 1.426e-06,
E "Context.StorageManager.Query.Reader.apply_query_condition.avg": 1.426e-06,
E "Context.StorageManager.Query.Reader.SubarrayPartitioner.read_next_partition.sum": 0.000175845,
E "Context.StorageManager.Query.Reader.SubarrayPartitioner.read_next_partition.avg": 0.000175845
E },
E "counters": {
E "Context.StorageManager.read_unfiltered_byte_num": 369,
E "Context.StorageManager.read_tile_var_sizes_size": 16,
E "Context.StorageManager.read_tile_var_offsets_size": 16,
E "Context.StorageManager.read_tile_offsets_size": 16,
E "Context.StorageManager.read_rtree_size": 8,
E "Context.StorageManager.read_frag_meta_size": 398,
E "Context.StorageManager.read_array_schema_size": 189,
E "Context.StorageManager.read_array_meta_size": 124,
E "Context.StorageManager.VFS.read_ops_num": 22,
E "Context.StorageManager.VFS.read_byte_num": 12129,
E "Context.StorageManager.VFS.ls_num": 5,
E "Context.StorageManager.VFS.file_size_num": 1,
E "Context.StorageManager.Query.Subarray.precompute_tile_overlap.tile_overlap_cache_hit": 1,
E "Context.StorageManager.Query.Subarray.precompute_tile_overlap.tile_overlap_byte_size": 64,
E "Context.StorageManager.Query.Subarray.precompute_tile_overlap.relevant_fragment_num": 1,
E "Context.StorageManager.Query.Subarray.precompute_tile_overlap.ranges_requested": 1,
E "Context.StorageManager.Query.Subarray.precompute_tile_overlap.ranges_computed": 1,
E "Context.StorageManager.Query.Subarray.precompute_tile_overlap.fragment_num": 1,
E "Context.StorageManager.Query.Reader.read_unfiltered_byte_num": 90002,
E "Context.StorageManager.Query.Reader.num_tiles_read": 1,
E "Context.StorageManager.Query.Reader.num_tiles": 1,
E "Context.StorageManager.Query.Reader.loop_num": 1,
E "Context.StorageManager.Query.Reader.dim_num": 1,
E "Context.StorageManager.Query.Reader.dim_fixed_num": 1,
E "Context.StorageManager.Query.Reader.attr_var_num": 1,
E "Context.StorageManager.Query.Reader.attr_num": 1,
E "Context.StorageManager.Query.Reader.SubarrayPartitioner.compute_current_start_end.ranges": 1,
E "Context.StorageManager.Query.Reader.SubarrayPartitioner.compute_current_start_end.found": 1,
E "Context.StorageManager.Query.Reader.SubarrayPartitioner.compute_current_start_end.adjusted_ranges": 1
E }
E }
E ]
E
E ==== Python Stats ====
E
E py.core_read_query_initial_submit_time : 0.00216036
E py.core_read_query_total_time : 0.00315187
E py.getitem_time : 0.00963712
E py.getitem_time.add_ranges : 3.21865e-05
E py.getitem_time.buffer_conversion_time : 0.000431061
E py.getitem_time.pandas_index_update_time : 0.00233102
E py.query_retries_count : 0
E
E test_bytes_df time: 0.04116201400756836
E Failed to reproduce exception. Expected:
E self = <tiledb.tests.test_hypothesis.AttrDataTest object at 0x1186b94e0>
E data = b'\xa3"\xd1'
E
E @pytest.mark.skipif(not has_pandas(), reason="pandas not installed")
E @hypothesis.settings(deadline=None)
E @given(st.binary())
E def test_bytes_df(self, data):
E start = time.time()
E
E # TODO this test is slow. might be nice to run with in-memory
E # VFS (if faster) but need to figure out correct setup
E # uri = "mem://" + str(uri_int)
E
E uri_df = self.path()
E
E array = np.array([data], dtype="S0")
E
E series = pd.Series(array)
E df = pd.DataFrame({"": series})
E
E start_fpd = time.time()
E tiledb.from_pandas(uri_df, df, sparse=False)
E fpd_time = time.time() - start_fpd
E hypothesis.note(f"from_pandas time: {fpd_time}")
E
E # DEBUG
E tiledb.stats_enable()
E tiledb.stats_reset()
E # END DEBUG
E
E with tiledb.open(uri_df) as A:
E tm.assert_frame_equal(A.df[:], df)
E
E hypothesis.note(tiledb.stats_dump(print_out=False))
E
E # DEBUG
E tiledb.stats_disable()
E
E duration = time.time() - start
E if duration > 2:
E # Hypothesis setup is causing deadline exceeded errors
E # https://github.com/TileDB-Inc/TileDB-Py/issues/1194
E # Set deadline=None and use internal timing instead.
E > pytest.fail("test_bytes_numpy exceeded 2s")
E E Failed: test_bytes_numpy exceeded 2s
E
E tiledb/tests/test_hypothesis.py:96: Failed
E
E
E You can reproduce this example by temporarily adding @reproduce_failure('6.54.5', b'AAELowcGAUsiAwE30QcA') as a decorator on your test case
../../../hostedtoolcache/Python/3.10.6/x64/lib/python3.10/site-packages/hypothesis/core.py:713: Flaky
=========================== short test summary info ============================
FAILED tiledb/tests/test_hypothesis.py::AttrDataTest::test_bytes_df - hypothe...
See run for more details: https://github.com/TileDB-Inc/TileDB-Py/actions/runs/2990950704
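
For context, the `Flaky` error above is Hypothesis reporting that the test failed on its first execution (the internal 2-second wall-clock guard in `test_bytes_df` tripped) but passed when the same shrunk example was replayed, so the failure could not be reproduced. Below is a minimal, self-contained sketch of that mechanism. It is not the TileDB-Py test itself; the names `slow_once` and `test_wall_clock_guard` are illustrative, but the 2-second guard and `deadline=None` setting mirror the test in the log.

```python
# Minimal sketch of the Flaky failure mode seen above: a test whose pass/fail
# outcome depends on wall-clock time falsifies on the first call, does not
# reproduce during Hypothesis's replay of the shrunk example, and is reported
# as hypothesis.errors.Flaky. Names here are illustrative, not from TileDB-Py.
import time

import hypothesis
import pytest
from hypothesis import given, strategies as st

_first_call = True


def slow_once() -> None:
    """Sleep on the first invocation only, standing in for a one-off setup
    cost (e.g. the first write/read round-trip paying initialization overhead)."""
    global _first_call
    if _first_call:
        _first_call = False
        time.sleep(2.5)


@hypothesis.settings(deadline=None)
@given(st.binary())
def test_wall_clock_guard(data):
    start = time.time()
    slow_once()
    if time.time() - start > 2:
        # Only the slow first run reaches this branch; when Hypothesis replays
        # the shrunk example the call is fast, the failure disappears, and
        # hypothesis.errors.Flaky is raised instead of a plain test failure.
        pytest.fail("exceeded 2s")
```

Because the failure depends on timing rather than on the generated input, adding the suggested `@reproduce_failure('6.54.5', b'AAELowcGAUsiAwE30QcA')` decorator to the original test would most likely replay the input without tripping the guard; the flakiness would need to be addressed in the timing guard itself rather than via input replay.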