mirror of https://github.com/facebook/rocksdb.git
Error out in case of std errors in blackbox test and export file in TARGETS
Summary:

- Right now the blackbox test does not exit on errors in stderr, as the whitebox crash test does. As a result those errors are swallowed; the test only errors out if the state is unexpected. One example noticed in a blackbox crash test:
```
stderr has error message: ***Error restoring historical expected values: Corruption: DB is older than any restorable expected state***
Running db_stress with pid=30454: /packages/rocksdb_db_stress_internal_repo/rocks_db_stress ....
```
- This diff also adds support for exporting files, specifically tools/db_crashtest.py, so it can be used by a different repo.

Reviewed By: ajkr

Differential Revision: D50564889

fbshipit-source-id: 7bafbbc6179dc79467ca2b680fe83afc7850616a
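For illustration, a minimal sketch of the behavioral change, extracted from the db_crashtest.py hunks below (the function name and the sample stderr text here are illustrative, not part of the diff):

```python
import sys

def fail_on_stderr_errors(errs: str) -> None:
    # Before this diff, the blackbox loop only printed stderr lines and
    # kept running; now, as in the whitebox test, real errors are fatal.
    stderrdata = errs.lower()
    errorcount = stderrdata.count("error") - stderrdata.count("got errors 0 times")
    if errorcount > 0:
        print("TEST FAILED. Output has 'error'!!!\n")
        sys.exit(2)
    if stderrdata.find("fail") >= 0:
        print("TEST FAILED. Output has 'fail'!!!\n")
        sys.exit(2)

# The corruption message from the summary above now ends the test with exit code 2:
fail_on_stderr_errors(
    "Error restoring historical expected values: Corruption: "
    "DB is older than any restorable expected state"
)
```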
parent 99b371b417
commit 917fd87513
TARGETS:

```diff
@@ -5620,3 +5620,5 @@ cpp_unittest_wrapper(name="write_unprepared_transaction_test",
                      deps=[":rocksdb_test_lib"],
                      extra_compiler_flags=[])
 
+
+export_file(name = "tools/db_crashtest.py")
```
buckifier/buckify_rocksdb.py:

```diff
@@ -303,6 +303,7 @@ def generate_targets(repo_path, deps_map):
             deps=json.dumps(deps["extra_deps"] + [":rocksdb_test_lib"]),
             extra_compiler_flags=json.dumps(deps["extra_compiler_flags"]),
         )
+    TARGETS.export_file("tools/db_crashtest.py")
 
     print(ColorString.info("Generated TARGETS Summary:"))
     print(ColorString.info("- %d libs" % TARGETS.total_lib))
```
buckifier/targets_builder.py:

```diff
@@ -148,3 +148,9 @@ add_c_test_wrapper()
                 ).encode("utf-8")
             )
         self.total_test = self.total_test + 1
+
+    def export_file(self, name):
+        with open(self.path, "a") as targets_file:
+            targets_file.write(
+                targets_cfg.export_file_template.format(name=name)
+            )
```
buckifier/targets_cfg.py:

```diff
@@ -37,3 +37,7 @@ fancy_bench_template = """
 fancy_bench_wrapper(suite_name="{name}", binary_to_bench_to_metric_list_map={bench_config}, slow={slow}, expected_runtime={expected_runtime}, sl_iterations={sl_iterations}, regression_threshold={regression_threshold})
 
 """
+
+export_file_template = """
+export_file(name = "{name}")
+"""
```
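Putting the three buckifier pieces together: generate_targets calls the new TARGETSBuilder.export_file, which renders export_file_template and appends the stanza seen in the TARGETS hunk above. A condensed, self-contained sketch (the real builder carries more state; only the relevant parts are reproduced):

```python
# Template string as added to buckifier/targets_cfg.py.
export_file_template = """
export_file(name = "{name}")
"""

class TARGETSBuilderSketch:
    """Stripped-down stand-in for buckifier's TARGETSBuilder."""

    def __init__(self, path: str):
        self.path = path

    def export_file(self, name: str) -> None:
        # Append a rendered export_file() stanza, mirroring the new
        # method in buckifier/targets_builder.py.
        with open(self.path, "a") as targets_file:
            targets_file.write(export_file_template.format(name=name))

# What generate_targets now does once, after emitting all test targets:
TARGETSBuilderSketch("TARGETS").export_file("tools/db_crashtest.py")
# TARGETS now ends with:  export_file(name = "tools/db_crashtest.py")
```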
tools/db_crashtest.py:

```diff
@@ -846,6 +846,17 @@ def blackbox_crash_main(args, unknown_args):
                 print("stderr has error message:")
                 print("***" + line + "***")
 
+        stderrdata = errs.lower()
+        errorcount = stderrdata.count("error") - stderrdata.count("got errors 0 times")
+        print("#times error occurred in output is " + str(errorcount) + "\n")
+
+        if errorcount > 0:
+            print("TEST FAILED. Output has 'error'!!!\n")
+            sys.exit(2)
+        if stderrdata.find("fail") >= 0:
+            print("TEST FAILED. Output has 'fail'!!!\n")
+            sys.exit(2)
+
         time.sleep(1)  # time to stabilize before the next run
 
     time.sleep(1)  # time to stabilize before the next run
```
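The subtraction guards against a false positive: the benign db_stress summary line `got errors 0 times` itself contains the substring `error`. A quick illustration (the stderr text is made up):

```python
stderrdata = "verification finished\ngot errors 0 times\n".lower()

# A naive count of "error" would be 1 because the benign line itself
# matches; subtracting its occurrences brings the count back to 0.
errorcount = stderrdata.count("error") - stderrdata.count("got errors 0 times")
assert errorcount == 0
```

The same check is added once more for the final run's stderr, after the main loop, in a second hunk of tools/db_crashtest.py: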
```diff
@@ -869,6 +880,17 @@ def blackbox_crash_main(args, unknown_args):
             print("stderr has error message:")
             print("***" + line + "***")
 
+    stderrdata = errs.lower()
+    errorcount = stderrdata.count("error") - stderrdata.count("got errors 0 times")
+    print("#times error occurred in output is " + str(errorcount) + "\n")
+
+    if errorcount > 0:
+        print("TEST FAILED. Output has 'error'!!!\n")
+        sys.exit(2)
+    if stderrdata.find("fail") >= 0:
+        print("TEST FAILED. Output has 'fail'!!!\n")
+        sys.exit(2)
+
     # we need to clean up after ourselves -- only do this on test success
     shutil.rmtree(dbname, True)
 
```
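Finally, a hedged end-to-end usage sketch: driving blackbox mode the usual way (the flag set here is minimal and illustrative) now surfaces stderr errors as a non-zero exit code:

```python
import subprocess
import sys

# Equivalent to: python3 tools/db_crashtest.py blackbox --simple
proc = subprocess.run(
    [sys.executable, "tools/db_crashtest.py", "blackbox", "--simple"]
)
# With this diff, a run whose db_stress stderr contains a real error
# exits with status 2 instead of the error being silently swallowed.
print("crash test exit code:", proc.returncode)
```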