fix a number of warnings found by Starlark analyzer

Christopher Parsons 2019-02-22 16:15:13 -05:00
parent a35b13a1c9
commit 3a7511ceb0
3 changed files with 24 additions and 2 deletions

View File

@@ -64,6 +64,12 @@ def _impl_function_name(impl):
     """Derives the name of the given rule implementation function.
 
     This can be used for better test feedback.
+
+    Args:
+      impl: the rule implementation function
+
+    Returns:
+      The name of the given function
     """
 
     # Starlark currently stringifies a function as "<function NAME>", so we use
@@ -123,6 +129,7 @@ def _make(impl, attrs = None):
 _ActionInfo = provider(fields = ["actions"])
 
 def _action_retrieving_aspect_impl(target, ctx):
+    _ignore = [ctx]
     return [_ActionInfo(actions = target.actions)]
 
 _action_retrieving_aspect = aspect(
@@ -282,6 +289,9 @@ def _end_analysis_test(env):
 
     Args:
       env: The test environment returned by `analysistest.begin`.
+
+    Returns:
+      A list of providers needed to automatically register the analysis test result.
     """
     return [AnalysisTestResultInfo(
         success = (len(env.failures) == 0),
@@ -296,6 +306,9 @@ def _end(env):
 
     Args:
       env: The test environment returned by `unittest.begin`.
+
+    Returns:
+      A list of providers needed to automatically register the test result.
     """
 
     tc = env.ctx.toolchains[TOOLCHAIN_TYPE].unittest_toolchain_info
@@ -419,7 +432,6 @@ def _expect_failure(env, expected_failure_msg = ""):
     """
     dep = _target_under_test(env)
     if AnalysisFailureInfo in dep:
-        dep_failure = dep[AnalysisFailureInfo]
         actual_errors = ""
         for cause in dep[AnalysisFailureInfo].causes.to_list():
             actual_errors += cause.message + "\n"
@@ -435,6 +447,9 @@ def _target_actions(env):
 
     Args:
       env: The test environment returned by `analysistest.begin`.
+
+    Returns:
+      A list of actions registered by the target under test
     """
 
     # Validate?
@@ -446,6 +461,9 @@ def _target_under_test(env):
 
     Args:
       env: The test environment returned by `analysistest.begin`.
+
+    Returns:
+      The target under test.
     """
     result = getattr(env.ctx.attr, "target_under_test")
    if types.is_list(result):

View File

@@ -16,6 +16,7 @@
 
 def _analysis_test_impl(ctx):
     """Implementation function for analysis_test. """
+    _ignore = [ctx]
     return [AnalysisTestResultInfo(
         success = True,
         message = "All targets succeeded analysis",

View File

@@ -69,6 +69,7 @@ def _failure_testing_test(ctx):
     return analysistest.end(env)
 
 def _failure_testing_fake_rule(ctx):
+    ignore = [ctx]
     fail("This rule should never work")
 
 failure_testing_fake_rule = rule(
@@ -92,6 +93,7 @@ def _fail_unexpected_passing_test(ctx):
     return analysistest.end(env)
 
 def _fail_unexpected_passing_fake_rule(ctx):
+    _ignore = [ctx]
     return []
 
 fail_unexpected_passing_fake_rule = rule(
@@ -168,7 +170,8 @@ def unittest_passing_tests_suite():
 
     Not all tests are included. Some unittest.bzl tests verify a test fails
     when assertions are not met. Such tests must be run in an e2e shell test.
-    This suite only includes tests which verify success tests."""
+    This suite only includes tests which verify success tests.
+    """
     unittest.suite(
         "unittest_tests",
         basic_passing_test,