Add analysis_test rule

Targets of this rule verify that other targets can be analyzed successfully.
This is similar to build_test, except that no actual execution of the
underlying targets' actions occurs. analysis_test essentially verifies that
`bazel build [targets] --nobuild` passes.
This commit is contained in:
c-parsons 2019-02-19 15:16:36 -05:00 committed by GitHub
parent 6bb8994a03
commit baaef76aaa
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 215 additions and 1 deletions

View File

@ -27,5 +27,6 @@ platforms:
test_targets:
- "--"
- "..."
# Shell test doesn't run on windows.
# Shell tests don't run on windows.
- "-//tests:analysis_test_e2e_test"
- "-//tests:unittest_e2e_test"

56
rules/analysis_test.bzl Normal file
View File

@ -0,0 +1,56 @@
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A test verifying other targets can be successfully analyzed as part of a `bazel test`"""
def _analysis_test_impl(ctx):
    """Implementation for the analysis_test rule.

    Reaching this function means every target listed in `targets` was
    analyzed without error (analysis of the test's dependencies happens
    before this runs), so the test unconditionally reports success.
    """
    result = AnalysisTestResultInfo(
        success = True,
        message = "All targets succeeded analysis",
    )
    return [result]
# A test rule that passes iff everything in `targets` analyzes cleanly.
# `analysis_test = True` asks Bazel to run only loading/analysis for the
# transitive deps (no action execution); `test = True` makes this usable
# anywhere a *_test target is expected.
analysis_test = rule(
    _analysis_test_impl,
    # The targets whose analysis is being verified; at least one required.
    attrs = {"targets": attr.label_list(mandatory = True)},
    test = True,
    analysis_test = True,
    doc = """Test rule checking that other targets can be successfully analyzed.
This rule essentially verifies that all targets under `targets` would
generate no errors when analyzed with `bazel build [targets] --nobuild`.
Action success/failure for the targets and their transitive dependencies
are not verified. An analysis test simply ensures that each target in the transitive
dependencies propagate providers appropriately and register actions for their outputs
appropriately.
NOTE: If the targets fail to analyze, instead of the analysis_test failing, the analysis_test
will fail to build. Ideally, it would instead result in a test failure. This is a current
infrastructure limitation that may be fixed in the future.
Typical usage:
load("@bazel_skylib//rules:analysis_test.bzl", "analysis_test")
analysis_test(
    name = "my_analysis_test",
    targets = [
        "//some/package:rule",
    ],
)
Args:
name: The name of the test rule.
targets: A list of targets to ensure build.
""",
)

View File

@ -62,3 +62,14 @@ sh_test(
],
tags = ["local"],
)
# End-to-end test for analysis_test.bzl: the script spins up a scratch
# workspace and runs `bazel test` inside it, so it is tagged "local"
# (presumably to keep it out of the sandbox/remote execution — confirm
# against the other e2e tests in this package).
sh_test(
    name = "analysis_test_e2e_test",
    srcs = ["analysis_test_test.sh"],
    data = [
        # Test-driver helpers (fail, expect_log, run_suite, ...).
        ":unittest.bash",
        # The rule under test, resolved at runtime via rlocation.
        "//rules:analysis_test.bzl",
        # Bash runfiles library sourced by the script's bootstrap block.
        "@bazel_tools//tools/bash/runfiles",
    ],
    tags = ["local"],
)

146
tests/analysis_test_test.sh Executable file
View File

@ -0,0 +1,146 @@
#!/bin/bash
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# End to end tests for analysis_test.bzl.
#
# End to end tests of analysis_test.bzl cover verification that
# analysis_test tests fail when their underlying test targets fail analysis.
# --- begin runfiles.bash initialization ---
set -euo pipefail
# Standard Bazel bash-runfiles bootstrap: locate either a runfiles directory
# or a runfiles manifest next to this test binary, then source runfiles.bash
# so rlocation() can resolve data dependencies on every platform.
if [[ ! -d "${RUNFILES_DIR:-/dev/null}" && ! -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then
  if [[ -f "$0.runfiles_manifest" ]]; then
    export RUNFILES_MANIFEST_FILE="$0.runfiles_manifest"
  elif [[ -f "$0.runfiles/MANIFEST" ]]; then
    export RUNFILES_MANIFEST_FILE="$0.runfiles/MANIFEST"
  elif [[ -f "$0.runfiles/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then
    export RUNFILES_DIR="$0.runfiles"
  fi
fi
if [[ -f "${RUNFILES_DIR:-/dev/null}/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then
  source "${RUNFILES_DIR}/bazel_tools/tools/bash/runfiles/runfiles.bash"
elif [[ -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then
  # Manifest entries look like "<workspace-relative path> <absolute path>";
  # pull the absolute path of runfiles.bash from the first matching line.
  source "$(grep -m1 "^bazel_tools/tools/bash/runfiles/runfiles.bash " \
            "$RUNFILES_MANIFEST_FILE" | cut -d ' ' -f 2-)"
else
  echo >&2 "ERROR: cannot find @bazel_tools//tools/bash/runfiles:runfiles.bash"
  exit 1
fi
# --- end runfiles.bash initialization ---

# Bring in the test-driver helpers used below (set_up/tear_down hooks,
# fail, expect_log, run_suite, and TEST_log).
source "$(rlocation bazel_skylib/tests/unittest.bash)" \
  || { echo "Could not source bazel_skylib/tests/unittest.bash" >&2; exit 1; }
# Builds the scratch Bazel workspace each test runs against.
# Fixes vs. the original: `mkdir -p rules` was missing, so `cat > rules/BUILD`
# aborted under `set -e`; the rule under test was never copied into the
# scratch workspace, so the load("//rules:analysis_test.bzl", ...) statements
# below had nothing to read; the `touch` before each truncating `cat >`
# redirect was redundant and has been dropped.
function set_up() {
  # Name the workspace bazel_skylib so //rules, //fakerules and //testdir
  # labels resolve locally.
  cat > WORKSPACE <<EOF
workspace(name = 'bazel_skylib')
EOF

  # Materialize the rule under test as //rules:analysis_test.bzl.
  mkdir -p rules
  cp "$(rlocation bazel_skylib/rules/analysis_test.bzl)" rules/ \
    || fail "Could not copy analysis_test.bzl from runfiles"
  cat > rules/BUILD <<EOF
exports_files(["*.bzl"])
EOF

  # fake_rule always fails analysis; fake_depending_rule analyzes fine on
  # its own but can pull failing targets in through deps.
  mkdir -p fakerules
  cat > fakerules/rules.bzl <<EOF
load("//rules:analysis_test.bzl", "analysis_test")
def _fake_rule_impl(ctx):
    fail("This rule should never work")
fake_rule = rule(
    implementation = _fake_rule_impl,
)
def _fake_depending_rule_impl(ctx):
    return []
fake_depending_rule = rule(
    implementation = _fake_depending_rule_impl,
    attrs = {"deps" : attr.label_list()},
)
EOF
  cat > fakerules/BUILD <<EOF
exports_files(["*.bzl"])
EOF

  # Three analysis_test targets: one whose target fails analysis directly,
  # one whose target fails via a transitive dep, and one that should pass.
  mkdir -p testdir
  cat > testdir/dummy.cc <<EOF
int dummy() { return 0; }
EOF
  cat > testdir/BUILD <<EOF
load("//rules:analysis_test.bzl", "analysis_test")
load("//fakerules:rules.bzl", "fake_rule", "fake_depending_rule")
fake_rule(name = "target_fails")
fake_depending_rule(name = "dep_fails",
deps = [":target_fails"])
analysis_test(
name = "direct_target_fails",
targets = [":target_fails"],
)
analysis_test(
name = "transitive_target_fails",
targets = [":dep_fails"],
)
# Use it in a non-test target
cc_library(
name = "dummy_cc_library",
srcs = ["dummy.cc"],
)
analysis_test(
name = "target_succeeds",
targets = [":dummy_cc_library"],
)
EOF
}
# Remove the scratch packages created by set_up so each test starts clean.
function tear_down() {
  rm -rf testdir fakerules
}
# A target that analyzes cleanly should make its analysis_test PASS.
function test_target_succeeds() {
  bazel test //testdir:target_succeeds >"$TEST_log" 2>&1 \
    || fail "Expected test to pass"
  expect_log "PASSED"
}
# Analysis failure of a directly-listed target must fail the run, and the
# rule implementation's fail() message must surface in the output.
function test_direct_target_fails() {
  if bazel test //testdir:direct_target_fails \
      --test_output=all --verbose_failures >"$TEST_log" 2>&1; then
    fail "Expected test to fail"
  fi
  expect_log "This rule should never work"
}
# Analysis failure anywhere in the transitive deps must also fail the run,
# again surfacing the failing rule's fail() message.
function test_transitive_target_fails() {
  if bazel test //testdir:transitive_target_fails \
      --test_output=all --verbose_failures >"$TEST_log" 2>&1; then
    fail "Expected test to fail"
  fi
  expect_log "This rule should never work"
}
# Hand control to unittest.bash, which discovers and runs the test_*
# functions defined above.
run_suite "analysis_test test suite"