mirror of https://github.com/bazelbuild/rules_pkg

commit ca16bbaaf0 (parent 61018b8581)
remove the obsolete deb_packages code. (#547)

* remove the obsolete deb_packages code. Having web search find this is a disservice to users.
@@ -1,10 +0,0 @@
# This is the official list of Bazel authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.

# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.

Google Inc.
mgIT GmbH
@@ -1,20 +0,0 @@
package(default_visibility = ["//visibility:public"])

# rules_go boilerplate
load("@bazel_gazelle//:def.bzl", "gazelle")

gazelle(
    name = "gazelle",
    prefix = "github.com/bazelbuild/rules_pkg",
)

# update_deb_packages boilerplate
load("@rules_pkg//tools/update_deb_packages:update_deb_packages.bzl", "update_deb_packages")

update_deb_packages(
    name = "update_deb_packages",
    pgp_keys = [
        "@jessie_archive_key//file",
        "@stretch_archive_key//file",
    ],
)
@@ -1,185 +0,0 @@
workspace(name = "rules_pkg")

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")

http_archive(
    name = "io_bazel_rules_go",
    url = "https://github.com/bazelbuild/rules_go/releases/download/0.18.5/rules_go-0.18.5.tar.gz",
    sha256 = "a82a352bffae6bee4e95f68a8d80a70e87f42c4741e6a448bec11998fcc82329",
)

http_archive(
    name = "bazel_gazelle",
    urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/0.17.0/bazel-gazelle-0.17.0.tar.gz"],
    sha256 = "3c681998538231a2d24d0c07ed5a7658cb72bfb5fd4bf9911157c0e9ac6a2687",
)

load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains")

go_rules_dependencies()

go_register_toolchains()

load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")

gazelle_dependencies()

# Go dependencies of the update_deb_packages helper tool
load("@bazel_gazelle//:deps.bzl", "go_repository")

# "golang.org/x/crypto/openpgp"
go_repository(
    name = "org_golang_x_crypto",
    commit = "d585fd2cc9195196078f516b69daff6744ef5e84",
    importpath = "golang.org/x/crypto",
)

# "github.com/knqyf263/go-deb-version"
go_repository(
    name = "com_github_knqyf263_go_deb_version",
    commit = "9865fe14d09b1c729188ac810466dde90f897ee3",
    importpath = "github.com/knqyf263/go-deb-version",
)

# "github.com/stapelberg/godebiancontrol"
go_repository(
    name = "com_github_stapelberg_godebiancontrol",
    commit = "4376b22fb2c4dfda546c972f686310af907819b2",
    importpath = "github.com/stapelberg/godebiancontrol",
)

# "github.com/bazelbuild/buildtools"
#go_repository(
#    name = "com_github_bazelbuild_buildtools",
#    commit = "9c928655df93b94eeb3dc7f6bd040cee71c7dc59",
#    importpath = "github.com/bazelbuild/buildtools",
#)

# Example for using the deb_packages ruleset
load("//:deb_packages.bzl", "deb_packages")

# Not all of the following keys are actually used...

# The Debian jessie archive signing key
# Source: https://ftp-master.debian.org/keys.html
# Full fingerprint: 126C 0D24 BD8A 2942 CC7D F8AC 7638 D044 2B90 D010
http_file(
    name = "jessie_archive_key",
    # It is highly recommended to use the sha256 hash of the key file to make sure it is untampered
    sha256 = "e42141a829b9fde8392ea2c0e329321bb29e5c0453b0b48e33c9f88bdc4873c5",
    urls = ["https://ftp-master.debian.org/keys/archive-key-8.asc"],
)

# The Debian jessie security archive signing key
# Source: https://ftp-master.debian.org/keys.html
# Full fingerprint: D211 6914 1CEC D440 F2EB 8DDA 9D6D 8F6B C857 C906
http_file(
    name = "jessie_security_archive_key",
    # It is highly recommended to use the sha256 hash of the key file to make sure it is untampered
    sha256 = "d05815c66deb71a595279b750aaf06370b6ad8c3b373651473c1c4b3d7da8f3c",
    urls = ["https://ftp-master.debian.org/keys/archive-key-8-security.asc"],
)

# The Debian stretch archive signing key
# Source: https://ftp-master.debian.org/keys.html
# Full fingerprint: E1CF 20DD FFE4 B89E 8026 58F1 E0B1 1894 F66A EC98
http_file(
    name = "stretch_archive_key",
    # It is highly recommended to use the sha256 hash of the key file to make sure it is untampered
    sha256 = "33b6a997460e177804cc44c7049a19350c11034719219390b22887471f0a2b5e",
    urls = ["https://ftp-master.debian.org/keys/archive-key-9.asc"],
)

# The Debian stretch security archive signing key
# Source: https://ftp-master.debian.org/keys.html
# Full fingerprint: 6ED6 F5CB 5FA6 FB2F 460A E88E EDA0 D238 8AE2 2BA9
http_file(
    name = "stretch_security_archive_key",
    # It is highly recommended to use the sha256 hash of the key file to make sure it is untampered
    sha256 = "4adecda0885f192b82c19fde129ca9d991f937437835a058da355b352a97e7dc",
    urls = ["https://ftp-master.debian.org/keys/archive-key-9-security.asc"],
)

deb_packages(
    name = "debian_jessie_amd64",
    arch = "amd64",
    distro = "jessie",
    distro_type = "debian",
    mirrors = [
        "http://deb.debian.org/debian",
        # This ensures old states of this repository will build as long as the snapshot mirror works:
        "http://snapshot.debian.org/archive/debian/20171219T131415Z",
    ],
    packages = {
        "ca-certificates": "pool/main/c/ca-certificates/ca-certificates_20141019+deb8u3_all.deb",
        "libc6": "pool/main/g/glibc/libc6_2.19-18+deb8u10_amd64.deb",
        "libssl1.0.0": "pool/main/o/openssl/libssl1.0.0_1.0.1t-1+deb8u7_amd64.deb",
        "netbase": "pool/main/n/netbase/netbase_5.3_all.deb",
        "openssl": "pool/main/o/openssl/openssl_1.0.1t-1+deb8u7_amd64.deb",
        "tzdata": "pool/main/t/tzdata/tzdata_2017c-0+deb8u1_all.deb",
    },
    packages_sha256 = {
        "ca-certificates": "bd799f47f5ae3260b6402b1fe19fe2c37f2f4125afcd19327bf69a9cf436aeff",
        "libc6": "0a95ee1c5bff7f73c1279b2b78f32d40da9025a76f93cb67c03f2867a7133e61",
        "libssl1.0.0": "d99de2cdca54484d23badc5683c7211b3a191977272d9e5281837af863dcdd56",
        "netbase": "3979bdd40c5666ef9bf71a5391ba01ad38e264f2ec96d289993f2a0805616dd3",
        "openssl": "d0e1464148bb2d682ccdb6f433b27a6848e4d012e8bb8a61ed9f6ad708017640",
        "tzdata": "f53b963b533100380127a20922b4265412ca4cf8f8b21c66e07c4645b7845002",
    },
    pgp_key = "jessie_archive_key",
)

deb_packages(
    name = "debian_stretch_amd64",
    arch = "amd64",
    distro = "stretch",
    distro_type = "debian",
    mirrors = [
        "http://deb.debian.org/debian",
        # This ensures old states of this repository will build as long as the snapshot mirror works:
        "http://snapshot.debian.org/archive/debian/20171219T131415Z",
    ],
    packages = {
        "ca-certificates": "pool/main/c/ca-certificates/ca-certificates_20161130+nmu1_all.deb",
        "libc6": "pool/main/g/glibc/libc6_2.24-11+deb9u1_amd64.deb",
        "libssl1.0.2": "pool/main/o/openssl1.0/libssl1.0.2_1.0.2l-2+deb9u1_amd64.deb",
        "netbase": "pool/main/n/netbase/netbase_5.4_all.deb",
        "openssl": "pool/main/o/openssl/openssl_1.1.0f-3+deb9u1_amd64.deb",
        "tzdata": "pool/main/t/tzdata/tzdata_2017c-0+deb9u1_all.deb",
    },
    packages_sha256 = {
        "ca-certificates": "25d6f749c4fb33ae0d7999c2c7c52b842a8b6e5487f3a5c1e61b3c21f90ac452",
        "libc6": "b3f7278d80d5d0dc428fe92309bbc0e0a1ed665548a9f660663c1e1151335ae9",
        "libssl1.0.2": "a5ed99bfdc44d3f7fed9d3e66ddf7218464d54f4277aeadea1124fc49392ba78",
        "netbase": "f226d06518081da79f72f408906a4142f8d5dce3bdc009009482512755e10a9c",
        "openssl": "268091ec6e08143f1612c3a30dd30e6f0212e3c2de95c0e93bd64cfdd2eb2954",
        "tzdata": "f3302284266843c6a3ab5250a338641bdba8970527d60dbcad0e030ed9569aa7",
    },
    pgp_key = "stretch_archive_key",
)

# For the debug image
http_file(
    name = "busybox",
    executable = True,
    sha256 = "b51b9328eb4e60748912e1c1867954a5cf7e9d5294781cae59ce225ed110523c",
    urls = ["https://busybox.net/downloads/binaries/1.27.1-i686/busybox"],
)

# Docker rules.
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

http_archive(
    name = "io_bazel_rules_docker",
    sha256 = "aed1c249d4ec8f703edddf35cbe9dfaca0b5f5ea6e4cd9e83e99f3b0d1136c3d",
    strip_prefix = "rules_docker-0.7.0",
    urls = ["https://github.com/bazelbuild/rules_docker/archive/v0.7.0.tar.gz"],
)

# used for testing the examples
load(
    "@io_bazel_rules_docker//go:image.bzl",
    _go_image_repos = "repositories",
)

_go_image_repos()

git_repository(
    name = "runtimes_common",
    remote = "https://github.com/GoogleCloudPlatform/runtimes-common.git",
    tag = "v0.1.0",
)
@@ -1,65 +0,0 @@
def _deb_packages_impl(repository_ctx):
    # check that keys in "packages" and "packages_sha256" are the same
    for package in repository_ctx.attr.packages:
        if package not in repository_ctx.attr.packages_sha256:
            fail("Package named \"%s\" was not found in packages_sha256 of rule %s" % (package, repository_ctx.name))

    # download each package
    package_rule_dict = {}
    for package in repository_ctx.attr.packages:
        urllist = []
        for mirror in repository_ctx.attr.mirrors:
            # allow mirror URLs that don't end in /
            if mirror.endswith("/"):
                urllist.append(mirror + repository_ctx.attr.packages[package])
            else:
                urllist.append(mirror + "/" + repository_ctx.attr.packages[package])
        repository_ctx.download(
            urllist,
            output = "debs/" + repository_ctx.attr.packages_sha256[package] + ".deb",
            sha256 = repository_ctx.attr.packages_sha256[package],
            executable = False,
        )
        package_rule_dict[package] = "@" + repository_ctx.name + "//debs:" + repository_ctx.attr.packages_sha256[package] + ".deb"

    # create the deb_packages.bzl file that contains the package name : filename mapping
    repository_ctx.file("debs/deb_packages.bzl", repository_ctx.name + " = " + struct(**package_rule_dict).to_json(), executable = False)

    # create the BUILD file that globs all the deb files
    repository_ctx.file("debs/BUILD", """
package(default_visibility = ["//visibility:public"])
deb_files = glob(["*.deb"])
exports_files(deb_files + ["deb_packages.bzl"])
""", executable = False)

_deb_packages = repository_rule(
    _deb_packages_impl,
    attrs = {
        "distro_type": attr.string(
            doc = "the name of the distribution type, required - e.g. debian or ubuntu",
        ),
        "distro": attr.string(
            doc = "the name of the distribution, required - e.g. wheezy or jessie-backports",
        ),
        "arch": attr.string(
            doc = "the target package architecture, required - e.g. arm64 or amd64",
        ),
        "packages": attr.string_dict(
            doc = "a dictionary mapping packagename to package_path, required - e.g. {\"foo\":\"pool/main/f/foo/foo_1.2.3-0_amd64.deb\"}",
        ),
        "packages_sha256": attr.string_dict(
            doc = "a dictionary mapping packagename to package_hash, required - e.g. {\"foo\":\"1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef\"}",
        ),
        "mirrors": attr.string_list(
            doc = "a list of full URLs of the package repository, required - e.g. http://deb.debian.org/debian",
        ),
        "components": attr.string_list(
            doc = "a list of accepted components - e.g. universe, multiverse",
        ),
        "pgp_key": attr.string(
            doc = "the name of the http_file rule that contains the pgp key that signed the Release file at <mirrorURL>/dists/<distro>/Release, required",
        ),
    },
)

def deb_packages(**kwargs):
    _deb_packages(**kwargs)
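
For illustration (this sketch is not part of the deleted file): given the `debian_jessie_amd64` rule from the WORKSPACE above, the generated `debs/deb_packages.bzl` mapping produced by `struct(**package_rule_dict).to_json()` would look roughly like this, with each value naming the downloaded `.deb` after its sha256 hash:

debian_jessie_amd64 = {
    "libc6": "@debian_jessie_amd64//debs:0a95ee1c5bff7f73c1279b2b78f32d40da9025a76f93cb67c03f2867a7133e61.deb",
    "netbase": "@debian_jessie_amd64//debs:3979bdd40c5666ef9bf71a5391ba01ad38e264f2ec96d289993f2a0805616dd3.deb",
}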
@@ -1,9 +0,0 @@
# Overview

This folder contains examples of how to use `deb_packages` rules in practice.

## `deb_packages_base`

Two docker images (`base`/`debug`) that contain the same packages as the distroless base image from https://github.com/GoogleCloudPlatform/distroless, built with the `deb_packages` rules.

It creates Jessie and Stretch versions of these containers.
@@ -1,174 +0,0 @@
package(default_visibility = ["//visibility:public"])

# Extracting the ca-certificates deb package
sh_binary(
    name = "extract_certs",
    srcs = ["extract.sh"],
)

# For convenience, rename busybox-x86_64 to busybox.
# TODO: https://github.com/GoogleCloudPlatform/distroless/pull/119
genrule(
    name = "busybox_tar",
    srcs = ["@busybox//file"],
    outs = ["busybox.tar"],
    cmd = "mkdir busybox; cp $(<) busybox/busybox; cd busybox; \
        for cmd in $$(./busybox --list); do \
        ln -s busybox $$cmd; \
        done; \
        cd ..; \
        tar -cf $@ busybox",
)

load("@io_bazel_rules_docker//docker:docker.bzl", "docker_build")
load("@io_bazel_rules_docker//contrib:passwd.bzl", "passwd_file")
load("@bazel_tools//tools/build_defs/pkg:tar.bzl", "pkg_tar")
load("@runtimes_common//structure_tests:tests.bzl", "structure_test")
load("@io_bazel_rules_go//go:def.bzl", "go_binary")

# Create a default passwd_file rule and put it in a tarball.

passwd_file(
    name = "passwd",
    gid = 0,
    info = "root",
    uid = 0,
    username = "root",
)

pkg_tar(
    name = "passwd_tar",
    srcs = [":passwd"],
    mode = "0644",
    package_dir = "etc",
)

load("//examples/deb_packages/deb_packages_base:cacerts.bzl", "cacerts")
load("@debian_jessie_amd64//debs:deb_packages.bzl", "debian_jessie_amd64")
load("@debian_stretch_amd64//debs:deb_packages.bzl", "debian_stretch_amd64")

cacerts(
    name = "cacerts_jessie",
    deb = debian_jessie_amd64["ca-certificates"],
)

cacerts(
    name = "cacerts_stretch",
    deb = debian_stretch_amd64["ca-certificates"],
)

# Create /tmp, too many things assume it exists.
# tmp.tar has a /tmp with the correct permissions 01777
# A tar is needed because at the moment there is no way to create an empty directory with specific permissions

docker_build(
    name = "base_jessie",
    debs = [
        debian_jessie_amd64["libc6"],
        debian_jessie_amd64["libssl1.0.0"],
        debian_jessie_amd64["openssl"],
        debian_jessie_amd64["netbase"],
        debian_jessie_amd64["tzdata"],
    ],
    env = {"PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
    tars = [
        ":passwd_tar",
        ":cacerts_jessie.tar",
        ":tmp.tar",
        # os-release files are not (yet) implemented in deb_packages
        #"@debian_jessie//file:os_release.tar",
    ],
)

docker_build(
    name = "base_stretch",
    debs = [
        debian_stretch_amd64["libc6"],
        debian_stretch_amd64["libssl1.0.2"],
        debian_stretch_amd64["openssl"],
        debian_stretch_amd64["netbase"],
        debian_stretch_amd64["tzdata"],
    ],
    env = {"PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
    tars = [
        ":passwd_tar",
        ":cacerts_stretch.tar",
        ":tmp.tar",
        # os-release files are not (yet) implemented in deb_packages
        #"@debian_stretch//file:os_release.tar",
    ],
)

# A debug image with busybox available.
docker_build(
    name = "debug_jessie",
    base = ":base_jessie",
    directory = "/",
    entrypoint = ["/busybox/sh"],
    env = {"PATH": "$PATH:/busybox"},
    tars = [":busybox_tar"],
)

docker_build(
    name = "debug_stretch",
    base = ":base_stretch",
    directory = "/",
    entrypoint = ["/busybox/sh"],
    env = {"PATH": "$PATH:/busybox"},
    tars = [":busybox_tar"],
)

structure_test(
    name = "debug_jessie_test",
    config = "testdata/debug.yaml",
    image = ":debug_jessie",
)

structure_test(
    name = "debug_stretch_test",
    config = "testdata/debug.yaml",
    image = ":debug_stretch",
)

structure_test(
    name = "base_jessie_test",
    config = "testdata/base.yaml",
    image = ":base_jessie",
)

structure_test(
    name = "base_stretch_test",
    config = "testdata/base.yaml",
    image = ":base_stretch",
)

go_binary(
    name = "check_certs",
    srcs = ["testdata/check_certs.go"],
)

docker_build(
    name = "check_certs_jessie_image",
    base = "//examples/deb_packages/deb_packages_base:base_jessie",
    files = [":check_certs"],
    visibility = ["//visibility:private"],
)

docker_build(
    name = "check_certs_stretch_image",
    base = "//examples/deb_packages/deb_packages_base:base_stretch",
    files = [":check_certs"],
    visibility = ["//visibility:private"],
)

structure_test(
    name = "certs_jessie_test",
    config = "testdata/certs.yaml",
    image = ":check_certs_jessie_image",
)

structure_test(
    name = "certs_stretch_test",
    config = "testdata/certs.yaml",
    image = ":check_certs_stretch_image",
)
@@ -1,29 +0,0 @@
"""A rule to unpack ca certificates from the debian package."""

def _impl(ctx):
    args = "%s %s %s" % (ctx.executable._extract.path, ctx.file.deb.path, ctx.outputs.out.path)
    ctx.action(
        command = args,
        inputs = [ctx.executable._extract, ctx.file.deb],
        outputs = [ctx.outputs.out],
    )

cacerts = rule(
    attrs = {
        "deb": attr.label(
            allow_files = [".deb"],
            single_file = True,
            mandatory = True,
        ),
        # Implicit dependencies.
        "_extract": attr.label(
            default = Label("//examples/deb_packages/deb_packages_base:extract_certs"),
            cfg = "host",
            executable = True,
            allow_files = True,
        ),
    },
    executable = False,
    outputs = {
        "out": "%{name}.tar",
    },
    implementation = _impl,
)
@@ -1,40 +0,0 @@
#!/bin/bash

# Copyright 2017 Google Inc. All rights reserved.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#    http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script extracts the CA certs from the typical debian ca-certificates debian package.
# It would be nicer to do this in Python or Go, but neither of those languages has a package
# in its stdlib that can extract .xz files.

DEB=$1
CERTS_PATH=$2

ar -x $DEB data.tar.xz
tar -xf data.tar.xz ./usr/share/ca-certificates
tar -xf data.tar.xz ./usr/share/doc/ca-certificates/copyright

# Concat all the certs.
CERT_FILE=./etc/ssl/certs/ca-certificates.crt
mkdir -p $(dirname $CERT_FILE)

CERTS=$(find usr/share/ca-certificates -type f | sort)
for cert in $CERTS; do
  cat $cert >> $CERT_FILE
done

tar -cf $CERTS_PATH etc/ssl/certs/ca-certificates.crt usr/share/doc/ca-certificates/copyright

rm data.tar.xz
rm -rf usr/share/ca-certificates
@@ -1,4 +0,0 @@
$RUNFILES_DIR/runtimes_common/structure_tests/ext_run.sh \
    -i bazel/base:cc \
    -t $RUNFILES_DIR/distroless/base/base.tar \
    -c $RUNFILES_DIR/distroless/base/testdata/base.yaml
@@ -1,30 +0,0 @@
schemaVersion: "1.0.0"
fileExistenceTests:
# Basic FS sanity checks.
- name: root
  path: '/'
  isDirectory: true
  shouldExist: true
- name: tmp
  path: '/tmp'
  isDirectory: true
  shouldExist: true
- name: passwd
  path: '/etc/passwd'
  shouldExist: true
# os-release files are not implemented in deb_packages
# - name: etc-os-release
#   path: '/etc/os-release'
#   shouldExist: true
- name: certs
  path: '/etc/ssl/certs/ca-certificates.crt'
  shouldExist: true
- name: certs_copyright
  path: '/usr/share/doc/ca-certificates/copyright'
  shouldExist: true
- name: services
  path: '/etc/services'
  shouldExist: true
- name: tzdata
  path: '/usr/sbin/tzconfig'
  shouldExist: true
@@ -1,6 +0,0 @@
schemaVersion: "1.0.0"
commandTests:
# Basic FS sanity checks.
- name: certs
  command: ["/check_certs"]
  exitCode: 0
@@ -1,34 +0,0 @@
// Copyright 2017 Google Inc. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

//    http://www.apache.org/licenses/LICENSE-2.0

// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
    "crypto/x509"
    "fmt"
    "os"
)

func main() {
    pool, err := x509.SystemCertPool()
    if err != nil {
        fmt.Printf("Error %s loading system certs.\n", err)
        os.Exit(1)
    }
    if pool == nil {
        fmt.Println("No cert pools.")
        os.Exit(1)
    }
    fmt.Println("Certs working!")
}
@@ -1,10 +0,0 @@
schemaVersion: "1.0.0"
fileExistenceTests:
# Basic FS sanity checks.
- name: busybox
  path: '/busybox'
  shouldExist: true
commandTests:
- name: busybox
  command: ["/busybox/busybox"]
  expectedOutput: ['BusyBox v1\.27\.1']

Binary file not shown.
@@ -1,8 +0,0 @@
# Tools to help with managing rules in this repository

## update_deb_packages

A tool to update the path and SHA256 hash of packages referred to by `deb_packages` rules in WORKSPACE files.
(Essentially `apt-get update && apt-get upgrade` for `deb_packages` rules in the current WORKSPACE.)

[(Documentation here)](https://github.com/bazelbuild/rules_pkg/tree/master/deb_packages/tools/update_deb_packages)
@@ -1,62 +0,0 @@
# Automatically update deb_package rules in the WORKSPACE file

Similar to the `gazelle` tool, which helps with managing golang bazel rules, it is possible to run this helper program with `bazel run update_deb_packages`.

Add the following to the `BUILD` or `BUILD.bazel` file in the root directory of your repository:

```bzl
load("@rules_pkg//deb_packages/tools/update_deb_packages:update_deb_packages.bzl", "update_deb_packages")

update_deb_packages(
    name = "update_deb_packages",
    pgp_keys = ["@rule_name_of_http_file_rule_of_pgp_key//file"],
)
```

The `pgp_keys` list must contain all `http_file` rules that are used in the `pgp_key` portion of the `deb_packages` rules in your `WORKSPACE` file.
Referring to them is necessary, since otherwise these files wouldn't actually be downloaded by Bazel before executing the tool.
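
For example, the root `BUILD` file deleted in this commit pairs its `pgp_keys` entries with `http_file` rules from the `WORKSPACE` (both reproduced earlier in this diff); the correspondence between rule name and label looks like this:

```bzl
# WORKSPACE: the archive key is fetched by an http_file rule.
http_file(
    name = "jessie_archive_key",
    sha256 = "e42141a829b9fde8392ea2c0e329321bb29e5c0453b0b48e33c9f88bdc4873c5",
    urls = ["https://ftp-master.debian.org/keys/archive-key-8.asc"],
)

# WORKSPACE: deb_packages rules refer to the key by rule name:
#     pgp_key = "jessie_archive_key",
# BUILD: update_deb_packages lists the rule's //file target:
#     pgp_keys = ["@jessie_archive_key//file"],
```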

This repository also contains the `gazelle` boilerplate in the root `BUILD` file, since the `update_deb_packages` tool is written in Go and gazelle helps with automatically generating `BUILD` files for the tool's dependencies.

Then you can run `bazel run update_deb_packages` and it will automatically add missing packages and update the hashes and paths of the new and existing ones in your `WORKSPACE` file.

## Dependencies

`update_deb_packages` uses the `buildifier` and `buildozer` tools from [https://github.com/bazelbuild/buildtools](https://github.com/bazelbuild/buildtools), which need to be available on your $PATH.

## Pinning versions of deb files

Version pinning (`foo=1.2.3-4` instead of just `foo` as the package name) is supported, to give finer-grained control over which package ends up in which container.
It is the user's responsibility to ensure that all versions are available at the mirror.
In case you always want to have access to the latest version (e.g. for canary style builds), `foo=latest` is also supported.
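
A hypothetical `packages` dict mixing both forms (package names, paths, and versions here are invented for illustration):

```bzl
packages = {
    # pinned: only exactly version 1.2.3-4 is accepted from the mirror
    "foo=1.2.3-4": "pool/main/f/foo/foo_1.2.3-4_amd64.deb",
    # floating: path and hash are rewritten to the newest version found
    "bar=latest": "pool/main/b/bar/bar_2.0.0-1_amd64.deb",
},
```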

## Add and update packages referred to in BUILD files to the WORKSPACE file

`buildozer` is able to parse all BUILD files in a repository for `docker_build` rules.
Using this capability, `update_deb_packages` will look through all these rules in the whole repository, check whether their `debs` section exists, and add all packages that didn't yet occur in the respective `deb_packages` rule, as in the sketch below.
After this, it'll automatically run the update routine and update all `deb_packages` rules to their configured version on the mirror.
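
For instance, a rule like the following (abridged from the example `BUILD` file deleted in this commit) is scanned, and every entry of `debs` is checked against the `debian_stretch_amd64` rule in the `WORKSPACE`:

```bzl
load("@debian_stretch_amd64//debs:deb_packages.bzl", "debian_stretch_amd64")

docker_build(
    name = "base_stretch",
    debs = [
        debian_stretch_amd64["libc6"],
        debian_stretch_amd64["openssl"],
    ],
)
```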

After running the command, the WORKSPACE file will be changed; it is highly recommended to keep this file under version control to have a good overview of which versions and files have changed.

This tool is intended as an external, independent helper script, not something that actually runs during your bazel builds (it wouldn't really be possible to change the workspace during builds anyway).

## Run with `bazel run`

Similar to the `gazelle` tool, which helps with managing golang bazel rules, it is possible (and recommended) to run this helper program with `bazel run update_deb_packages`.

## Ignoring rules

In case you want or need the tool to ignore some `deb_packages` rules in the WORKSPACE file, add a `"manual_update"` tag to the rule's `tags` attribute, as shown below. The tool will silently drop all rules that have this tag and just ignore whatever they contain.
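
A sketch of an ignored rule (attributes abridged from the `WORKSPACE` example above; only the `tags` line matters here):

```bzl
deb_packages(
    name = "debian_stretch_amd64",
    arch = "amd64",
    distro = "stretch",
    distro_type = "debian",
    mirrors = ["http://deb.debian.org/debian"],
    packages = {"netbase": "pool/main/n/netbase/netbase_5.4_all.deb"},
    packages_sha256 = {"netbase": "f226d06518081da79f72f408906a4142f8d5dce3bdc009009482512755e10a9c"},
    pgp_key = "stretch_archive_key",
    # update_deb_packages silently skips this rule:
    tags = ["manual_update"],
)
```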

## Behind the scenes

This rule works very similarly to the [gazelle](https://github.com/bazelbuild/rules_go/blob/master/go/private/tools/gazelle.bzl) rules ([stable link](https://github.com/bazelbuild/rules_go/blob/ee1fef7ec1379fcf36c002fd3ac0d00d940b147e/go/private/tools/gazelle.bzl)) that execute the `gazelle` tool using `bazel run`.

To escape the sandboxing and have direct access to the actual `WORKSPACE` and `BUILD` files, the small shell script resolves the softlink that Bazel creates into the build environment and operates at the root of the actual repository.

This still creates some challenges, as it is also necessary to have access to the PGP keys, which are back in the sandbox.
Moving them to the repository would be an option, but then it would need some reliable cleanup.

Instead, the tool itself uses the fact that the `bazel-<workspacename>` folder is also linked into the repository for convenience, and looks for the key in there instead of in the sandbox it came from.

As Bazel's sandboxing gets more sophisticated, it might be necessary to reevaluate this approach.
For now it works.
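
The wrapper in question is the small template from `update_deb_packages.bzl` (also deleted in this commit and reproduced further down):

```bzl
_script_content = """
BASE=$(pwd)
WORKSPACE=$(dirname $(readlink WORKSPACE))
cd "$WORKSPACE"
$BASE/{update_deb_packages} {args} $@
"""
```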
@@ -1,29 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "update_deb_packages.go",
    ],
    importpath = "github.com/bazelbuild/rules_pkg/deb_packages/tools/update_deb_packages/src",
    deps = [
        #"@com_github_bazelbuild_buildtools//:go_default_library",
        "@com_github_knqyf263_go_deb_version//:go_default_library",
        "@com_github_stapelberg_godebiancontrol//:go_default_library",
        "@org_golang_x_crypto//openpgp:go_default_library",
    ],
)

go_binary(
    name = "update_deb_packages",
    srcs = ["update_deb_packages.go"],
    importpath = "github.com/bazelbuild/rules_pkg/deb_packages/tools/update_deb_packages/src",
    visibility = ["//visibility:public"],
    deps = [
        ":go_default_library",
        #"@com_github_bazelbuild_buildtools//:go_default_library",
        "@com_github_knqyf263_go_deb_version//:go_default_library",
        "@com_github_stapelberg_godebiancontrol//:go_default_library",
        "@org_golang_x_crypto//openpgp:go_default_library",
    ],
)
@@ -1,29 +0,0 @@
# **Using the `update_deb_packages` helper program**

As you might have noticed, it is a lot of work to get the exact path and SHA256 hash of a `.deb` package.
All this information is already available online at the mirrors you defined in your WORKSPACE file: as soon as you know the exact distribution, package name, and version, you should be able to just generate this data on the fly.
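
Concretely, the tool maintains the path and hash pair for each package name; these two entries for `netbase` are taken from the `WORKSPACE` file deleted in this commit:

```bzl
packages = {
    "netbase": "pool/main/n/netbase/netbase_5.4_all.deb",
},
packages_sha256 = {
    "netbase": "f226d06518081da79f72f408906a4142f8d5dce3bdc009009482512755e10a9c",
},
```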

`update_deb_packages` is exactly such a helper program.
It uses the `buildifier` and `buildozer` tools from [https://github.com/bazelbuild/buildtools](https://github.com/bazelbuild/buildtools), which need to be available on your $PATH.

Version pinning (`foo=1.2.3-4` instead of just `foo` as the package name) is supported, to give finer-grained control over which package ends up in which container.
It is the user's responsibility to ensure that all versions are available at the mirror.
In case you always want the latest version (e.g. for canary style builds), `foo=latest` is also supported.

## Add and update packages referred to in BUILD files to the WORKSPACE file

`buildozer` is able to parse all BUILD files in a repository for `docker_build` rules.
Using this capability, `update_deb_packages` will look through all these rules in the whole repository, check whether their `debs` section exists, and add all packages that didn't yet occur in the respective `deb_packages` rule.
After this, it'll automatically run the update routine and update all `deb_packages` rules to their configured version on the mirror.

After running the command, the WORKSPACE file will be changed; it is highly recommended to keep this file under version control to have a good overview of which versions and files have changed.

This tool is intended as an external, independent helper script, not something that actually runs during your bazel builds (it wouldn't really be possible to change the workspace during builds anyway).

## Run with `bazel run`

Similar to the `gazelle` tool, which helps with managing golang bazel rules, it is possible (and recommended) to run this helper program with `bazel run update_deb_packages`.

## Ignoring rules

In case you want or need the tool to ignore some `deb_packages` rules in the WORKSPACE file, add a `"manual_update"` tag to the rule's `tags` attribute. The tool will silently drop all rules that have this tag and just ignore whatever they contain.
@@ -1,633 +0,0 @@
package main

import (
    "bytes"
    "compress/gzip"
    "crypto/sha256"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "net/http"
    "net/url"
    "os"
    "os/exec"
    "path"
    "path/filepath"
    "reflect"
    "sort"
    "strings"

    "golang.org/x/crypto/openpgp"

    "github.com/knqyf263/go-deb-version"
    "github.com/stapelberg/godebiancontrol"
)

var FORCE_PACKAGE_IDENT = `{
    "IsListArg": {
        "packages": false
    }
}
`

func appendUniq(slice []string, v string) []string {
    for _, x := range slice {
        if x == v {
            return slice
        }
    }
    return append(slice, v)
}

func logFatalErr(e error) {
    if e != nil {
        log.Fatal(e)
    }
}

// https://stackoverflow.com/a/33853856/5441396
func downloadFile(filepath string, url string) (err error) {
    // Create the file
    out, err := os.Create(filepath)
    if err != nil {
        return err
    }
    defer out.Close()

    // Get the data
    resp, err := http.Get(url)
    if err != nil {
        return err
    }
    if resp.StatusCode != 200 {
        return fmt.Errorf("Download from %s failed with statuscode %d", url, resp.StatusCode)
    }
    defer resp.Body.Close()

    // Write the body to the file
    _, err = io.Copy(out, resp.Body)
    if err != nil {
        return err
    }

    return nil
}

func getFileFromURLList(filepath string, filename string, urls []string) {
    // no chunked downloads, just tries URLs one by one until it succeeds or fails because no URLs are left
    success := false

    for _, u := range urls {
        parsed, err := url.Parse(u)
        logFatalErr(err)
        err = downloadFile(filepath, parsed.String())
        if err != nil {
            log.Print(err)
        } else {
            success = true
            // log.Printf("Successfully fetched %s\n", parsed.String())
            break
        }
    }
    if success == false {
        log.Fatalf("No mirror had the file %s available.\n URLS: %s", filename, urls)
    }
}

func getFileFromMirror(filepath string, filename string, distro string, mirrors []string) {
    urls := make([]string, 0)
    for _, mirror := range mirrors {
        baseURL, err := url.Parse(mirror)
        logFatalErr(err)
        ref, err := url.Parse(path.Join(baseURL.Path, "dists", distro, filename))
        logFatalErr(err)
        urls = append(urls, baseURL.ResolveReference(ref).String())
    }
    getFileFromURLList(filepath, filename, urls)
}

func compareFileWithHash(filepath string, sha256Hash string) bool {
    target, err := hex.DecodeString(sha256Hash)
    logFatalErr(err)

    f, err := os.Open(filepath)
    logFatalErr(err)
    defer f.Close()

    h := sha256.New()
    _, err = io.Copy(h, f)
    logFatalErr(err)

    actual := h.Sum(nil)

    if bytes.Equal(actual, target) != true {
        log.Printf("Hash mismatch: Expected %x, got %x", target, actual)
    }

    return bytes.Equal(actual, target)
}

func checkPgpSignature(keyfile string, checkfile string, sigfile string) {
    key, err := os.Open(keyfile)
    logFatalErr(err)

    sig, err := os.Open(sigfile)
    logFatalErr(err)

    check, err := os.Open(checkfile)
    logFatalErr(err)

    keyring, err := openpgp.ReadArmoredKeyRing(key)
    logFatalErr(err)

    _, err = openpgp.CheckArmoredDetachedSignature(keyring, check, sig)
    logFatalErr(err)
}

func getPackages(arch string, distroType string, distro string, mirrors []string, components []string, pgpKeyFile string) (packages []godebiancontrol.Paragraph) {
    releasefile, err := ioutil.TempFile("", "Release")
    logFatalErr(err)

    releasegpgfile, err := ioutil.TempFile("", "Releasegpg")
    logFatalErr(err)

    // download Release + Release.gpg
    getFileFromMirror(releasefile.Name(), "Release", distro, mirrors)
    getFileFromMirror(releasegpgfile.Name(), "Release.gpg", distro, mirrors)

    // check signature
    checkPgpSignature(pgpKeyFile, releasefile.Name(), releasegpgfile.Name())

    os.Remove(releasegpgfile.Name())

    // read/parse Release file
    release, err := godebiancontrol.Parse(releasefile)
    logFatalErr(err)
    os.Remove(releasefile.Name())

    // this will be the merged Packages file
    packagesfile, err := ioutil.TempFile("", "Packages")
    logFatalErr(err)

    // download all binary-<arch> Packages.gz files
    for _, line := range strings.Split(release[0]["SHA256"], "\n") {
        fields := strings.Fields(line)
        if len(fields) == 0 {
            // last line is an empty line
            continue
        }
        hash := fields[0]
        path := fields[2]
        isAcceptedComponent := true
        if len(components) > 0 {
            isAcceptedComponent = false
            for _, component := range components {
                if strings.HasPrefix(path, component+"/") {
                    isAcceptedComponent = true
                    break
                }
            }
        }
        if isAcceptedComponent && strings.HasSuffix(path, "/binary-"+arch+"/Packages.gz") {
            tmpPackagesfile, err := ioutil.TempFile("", "Packages")
            logFatalErr(err)
            getFileFromMirror(tmpPackagesfile.Name(), path, distro, mirrors)
            // check hash of Packages.gz files
            if compareFileWithHash(tmpPackagesfile.Name(), hash) != true {
                log.Fatalf("Downloaded file %s corrupt", path)
            }

            // unzip Packages.gz files
            handle, err := os.Open(tmpPackagesfile.Name())
            logFatalErr(err)
            defer handle.Close()

            zipReader, err := gzip.NewReader(handle)
            logFatalErr(err)
            defer zipReader.Close()

            content, err := ioutil.ReadAll(zipReader)
            logFatalErr(err)
            os.Remove(tmpPackagesfile.Name())

            // append content to merged Packages file
            f, err := os.OpenFile(packagesfile.Name(), os.O_APPEND|os.O_WRONLY, 0600)
            logFatalErr(err)
            defer f.Close()

            _, err = f.Write(content)
            logFatalErr(err)
        }
    }

    // read/parse merged Packages file
    parsed, err := godebiancontrol.Parse(packagesfile)
    logFatalErr(err)
    os.Remove(packagesfile.Name())

    return parsed
}

func getStringField(fieldName string, fileName string, ruleName string, workspaceContents []byte) string {
    // buildozer 'print FIELDNAME_GOES_HERE' FILENAME_GOES_HERE:RULENAME_GOES_HERE <WORKSPACE
    cmd := exec.Command("buildozer", "print "+fieldName, fileName+":"+ruleName)
    wsreader := bytes.NewReader(workspaceContents)
    if fileName == "-" {
        // see edit.stdinPackageName why this is a "-"
        cmd.Stdin = wsreader
    }
    var out bytes.Buffer
    cmd.Stdout = &out
    err := cmd.Run()
    if err != nil {
        exiterr, ok := err.(*exec.ExitError)
        if ok == true {
            // not every platform might have exit codes
            // see https://groups.google.com/forum/#!topic/golang-nuts/MI4TyIkQqqg
            exitCode := exiterr.Sys().(interface {
                ExitStatus() int
            }).ExitStatus()
            // Return code 3 is the intended behaviour for buildozer when using "print" commands
            if exitCode != 3 {
                log.Print("Error in getStringField, command: ", cmd)
                logFatalErr(err)
            }
        } else {
            logFatalErr(err)
        }
    }

    // remove trailing newline
    return strings.TrimSpace(out.String())
}

func getListField(fieldName string, fileName string, ruleName string, workspaceContents []byte) []string {
    // buildozer 'print FIELDNAME_GOES_HERE' FILENAME_GOES_HERE:RULENAME_GOES_HERE <WORKSPACE
    // TODO: better failure message if buildozer is not in PATH
    cmd := exec.Command("buildozer", "print "+fieldName, fileName+":"+ruleName)
    wsreader := bytes.NewReader(workspaceContents)
    if fileName == "-" {
        // see edit.stdinPackageName why this is a "-"
        cmd.Stdin = wsreader
    }
    var out bytes.Buffer
    cmd.Stdout = &out
    err := cmd.Run()
    if err != nil {
        exiterr, ok := err.(*exec.ExitError)
        if ok == true {
            // not every platform might have exit codes
            // see https://groups.google.com/forum/#!topic/golang-nuts/MI4TyIkQqqg
            exitCode := exiterr.Sys().(interface {
                ExitStatus() int
            }).ExitStatus()
            // Return code 3 is the intended behaviour for buildozer when using "print" commands
            if exitCode != 3 {
                log.Print("Error in getListField, command: ", cmd)
                logFatalErr(err)
            }
        } else {
            logFatalErr(err)
        }
    }

    trimmedOut := strings.TrimSpace(out.String())
    if trimmedOut == "(missing)" {
        return []string{}
    }
    var resultlist []string
    // remove trailing newline, remove [] and split at spaces
    returnlist := strings.Split(strings.Replace(strings.Trim(trimmedOut, "[]"), "\n", ",", -1), " ")
    // also split at commas
    for _, result := range returnlist {
        resultlist = append(resultlist, strings.Split(result, ",")...)
    }

    // Example output for querying a missing field:
    // rule "//-:foo" has no attribute "bar"
    // (missing)
    if len(resultlist) == 2 {
        if resultlist[1] == "(missing)" {
            return []string{}
        }
    }

    return resultlist
}

func getMapField(fieldName string, fileName string, ruleName string, workspaceContents []byte) map[string]string {
    // buildozer 'print FIELDNAME_GOES_HERE' FILENAME_GOES_HERE:RULENAME_GOES_HERE <WORKSPACE
    cmd := exec.Command("buildozer", "print "+fieldName, fileName+":"+ruleName)
    wsreader := bytes.NewReader(workspaceContents)
    if fileName == "-" {
        // see edit.stdinPackageName why this is a "-"
        cmd.Stdin = wsreader
    }
    var out bytes.Buffer
    cmd.Stdout = &out
    err := cmd.Run()
    if err != nil {
        exiterr, ok := err.(*exec.ExitError)
        if ok == true {
            // not every platform might have exit codes
            // see https://groups.google.com/forum/#!topic/golang-nuts/MI4TyIkQqqg
            exitCode := exiterr.Sys().(interface {
                ExitStatus() int
            }).ExitStatus()
            // Return code 3 is the intended behaviour for buildozer when using "print" commands
            if exitCode != 3 {
                log.Print("Error in getMapField, command: ", cmd)
                logFatalErr(err)
            }
        } else {
            logFatalErr(err)
        }
    }
    m := make(map[string]string)

    for _, line := range strings.Split(out.String(), "\n") {
        var key string
        for i, token := range strings.Split(strings.Trim(strings.TrimSpace(line), ",{}"), ":") {
            if i%2 == 0 {
                // new key
                key = strings.Trim(token, " \"")
            } else {
                // value (new key was set in previous iteration)
                m[key] = strings.Trim(token, " \"")
            }
        }
    }
    return m
}

func getAllLabels(labelName string, fileName string, ruleName string, workspaceContents []byte) map[string][]string {
    // buildozer 'print label LABELNAME_GOES_HERE' FILENAME_GOES_HERE:RULENAME_GOES_HERE <WORKSPACE
    cmd := exec.Command("buildozer", "print label "+labelName, fileName+":"+ruleName)
    wsreader := bytes.NewReader(workspaceContents)
    if fileName == "-" {
        // see edit.stdinPackageName why this is a "-"
        cmd.Stdin = wsreader
    }
    var out bytes.Buffer
    cmd.Stdout = &out
    err := cmd.Run()
    if err != nil {
        exiterr, ok := err.(*exec.ExitError)
        if ok == true {
            // not every platform might have exit codes
            // see https://groups.google.com/forum/#!topic/golang-nuts/MI4TyIkQqqg
            exitCode := exiterr.Sys().(interface {
                ExitStatus() int
            }).ExitStatus()
            // Return code 3 is the intended behaviour for buildozer when using "print" commands
            if exitCode != 3 {
                log.Print("Error in getAllLabels, command: ", cmd)
                logFatalErr(err)
            }
        } else {
            logFatalErr(err)
        }
    }

    // output is quite messed up... best indication for useful lines is that a line ending in "," contains stuff we look for.
    pkgs := make(map[string][]string)

    for _, line := range strings.Split(out.String(), "\n") {
        if strings.HasSuffix(line, ",") {
            name := strings.TrimSpace(strings.Split(line, "[")[0])
            pkgs[name] = appendUniq(pkgs[name], strings.Trim(strings.TrimSpace(strings.Split(line, "[")[1]), "\",]"))
        }
    }
    return pkgs
}

func setStringField(fieldName string, fieldContents string, fileName string, ruleName string, workspaceContents []byte, forceTable *string) string {
    // buildozer 'set FIELDNAME_GOES_HERE FIELDCONTENTS_GO_HERE' FILENAME_GOES_HERE:RULENAME_GOES_HERE <WORKSPACE
    // log.Printf("(setStringField) buildozer 'set %s %s' %s:%s\n", fieldName, fieldContents, fileName, ruleName)
    var cmd *exec.Cmd
    if forceTable != nil {
        dir, err := ioutil.TempDir("", "table_hack")
        defer os.RemoveAll(dir)
        if err != nil {
            logFatalErr(err)
        }
        tableFile := filepath.Join(dir, "force_table.json")
        if err := ioutil.WriteFile(tableFile, []byte(*forceTable), 0666); err != nil {
            logFatalErr(err)
        }
        cmd = exec.Command("buildozer", "-add_tables="+tableFile, "set "+fieldName+" "+fieldContents, fileName+":"+ruleName)
    } else {
        cmd = exec.Command("buildozer", "set "+fieldName+" "+fieldContents, fileName+":"+ruleName)
    }
    wsreader := bytes.NewReader(workspaceContents)
    if fileName == "-" {
        // see edit.stdinPackageName why this is a "-"
        cmd.Stdin = wsreader
    }
    var out bytes.Buffer
    cmd.Stdout = &out
    err := cmd.Run()
    if err != nil {
        exiterr, ok := err.(*exec.ExitError)
        if ok == true {
            // not every platform might have exit codes
            // see https://groups.google.com/forum/#!topic/golang-nuts/MI4TyIkQqqg
            exitCode := exiterr.Sys().(interface {
                ExitStatus() int
            }).ExitStatus()
            // Return code 3 is the intended behaviour for buildozer when using "set" commands that don't change anything
            if exitCode != 3 {
                log.Print("Error in setStringField, command: ", cmd)
                logFatalErr(err)
            }
        } else {
            logFatalErr(err)
        }
    }

    return out.String()
}

func updateWorkspaceRule(workspaceContents []byte, rule string) string {
    arch := getStringField("arch", "-", rule, workspaceContents)
    distroType := getStringField("distro_type", "-", rule, workspaceContents)
    distro := getStringField("distro", "-", rule, workspaceContents)
    mirrors := getListField("mirrors", "-", rule, workspaceContents)
    components := getListField("components", "-", rule, workspaceContents)
    packages := getMapField("packages", "-", rule, workspaceContents)
    packagesSha256 := getMapField("packages_sha256", "-", rule, workspaceContents)
    pgpKeyRuleName := getStringField("pgp_key", "-", rule, workspaceContents)

    packageNames := make([]string, 0, len(packages))
    for p := range packages {
        packageNames = append(packageNames, p)
    }
    sort.Strings(packageNames)

    packageShaNames := make([]string, 0, len(packagesSha256))
    for p := range packagesSha256 {
        packageShaNames = append(packageShaNames, p)
    }
    sort.Strings(packageShaNames)
    if reflect.DeepEqual(packageNames, packageShaNames) == false {
        log.Fatalf("Mismatch between package names in packages and packages_sha256 in rule %s.\npackages: %s\npackages_sha256: %s", rule, packageNames, packageShaNames)
    }

    wd, err := os.Getwd()
    logFatalErr(err)
    projectName := path.Base(wd)
    pgpKeyname := path.Join("bazel-"+projectName, "external", pgpKeyRuleName, "file", "downloaded")

    allPackages := getPackages(arch, distroType, distro, mirrors, components, pgpKeyname)

    newPackages := make(map[string]string)
    newPackagesSha256 := make(map[string]string)

    for _, pack := range packageNames {
        packlist := strings.Split(pack, "=")
        var packname string
        var packversion string
        var targetVersion version.Version
        if len(packlist) > 1 && packlist[1] != "latest" {
            packname = packlist[0]
            packversion = packlist[1]
            var err error
            targetVersion, err = version.NewVersion(packlist[1])
            logFatalErr(err)
        } else {
            packname = packlist[0]
            packversion = "latest"
            var err error
            targetVersion, err = version.NewVersion("0")
            logFatalErr(err)
        }

        done := false
        for _, pkg := range allPackages {
            if pkg["Package"] == packname {
                currentVersion, err := version.NewVersion(pkg["Version"])
                logFatalErr(err)
                if packversion == "latest" {
                    // iterate over all packages and keep the highest version
                    if targetVersion.LessThan(currentVersion) {
                        newPackages[pack] = pkg["Filename"]
                        newPackagesSha256[pack] = pkg["SHA256"]
                        targetVersion = currentVersion
                        done = true
                    }
                } else {
                    // version is fixed, break once found
                    if targetVersion.Equal(currentVersion) {
                        newPackages[pack] = pkg["Filename"]
                        newPackagesSha256[pack] = pkg["SHA256"]
                        done = true
                        break
                    }
                }
            }
        }
        if done == false {
            log.Fatalf("Package %s isn't available in %s (rule: %s)", pack, distro, rule)
        }
    }

    pkgstring, err := json.Marshal(newPackages)
    logFatalErr(err)
    pkgshastring, err := json.Marshal(newPackagesSha256)
    logFatalErr(err)

    // set packages
    workspaceContents = []byte(setStringField("packages", string(pkgstring), "-", rule, workspaceContents, &FORCE_PACKAGE_IDENT))
    // set packages_sha256
    workspaceContents = []byte(setStringField("packages_sha256", string(pkgshastring), "-", rule, workspaceContents, nil))
    // final run that just replaces a known value with itself to make sure the output is prettified
    workspaceContents = []byte(setStringField("distro", "\""+distro+"\"", "-", rule, workspaceContents, nil))

    return string(workspaceContents)
}

func updateWorkspace(workspaceContents []byte) string {
    rules := getListField("name", "-", "%deb_packages", workspaceContents)
    cleanedRules := make([]string, len(rules))
    copy(cleanedRules, rules)

    for i, rule := range rules {
        tags := getListField("tags", "-", rule, workspaceContents)
        for _, tag := range tags {
            // drop rules with the "manual_update" tag
            if tag == "manual_update" {
                cleanedRules = append(cleanedRules[:i], cleanedRules[i+1:]...)
            }
        }
    }

    for _, rule := range cleanedRules {
        workspaceContents = []byte(updateWorkspaceRule(workspaceContents, rule))
    }
    return string(workspaceContents)
}

// add new package names to WORKSPACE rule
func addNewPackagesToWorkspace(workspaceContents []byte) string {
    // TODO: add more rule types here if necessary
    // e.g. cacerts()
    allDebs := make(map[string][]string)
    for _, ruleType := range []string{"container_layer", "container_image"} {
        tmp := getAllLabels("debs", "//...", "%"+ruleType, workspaceContents)
        for k := range tmp {
            if _, ok := allDebs[k]; !ok {
                allDebs[k] = make([]string, 0)
            }
            for _, pack := range tmp[k] {
                allDebs[k] = appendUniq(allDebs[k], pack)
            }
        }
    }

    for rule := range allDebs {
        tags := getListField("tags", "-", rule, workspaceContents)
        for _, tag := range tags {
            // drop rules with the "manual_update" tag
            if tag == "manual_update" {
                delete(allDebs, rule)
            }
        }
    }

    for rule, debs := range allDebs {
        packages := getMapField("packages", "-", rule, workspaceContents)
        packagesSha256 := getMapField("packages_sha256", "-", rule, workspaceContents)
        for _, deb := range debs {
            packages[deb] = "placeholder"
            packagesSha256[deb] = "placeholder"
        }

        pkgstring, err := json.Marshal(packages)
        logFatalErr(err)
        pkgshastring, err := json.Marshal(packagesSha256)
        logFatalErr(err)

        // set packages
        workspaceContents = []byte(setStringField("packages", string(pkgstring), "-", rule, workspaceContents, &FORCE_PACKAGE_IDENT))
        // set packages_sha256
        workspaceContents = []byte(setStringField("packages_sha256", string(pkgshastring), "-", rule, workspaceContents, nil))
    }

    return string(workspaceContents)
}

// update WORKSPACE rule with new paths/hashes from mirrors
func main() {
    workspacefile, err := os.Open("WORKSPACE")
    logFatalErr(err)
    wscontent, err := ioutil.ReadAll(workspacefile)
    logFatalErr(err)
    workspacefile.Close()

    err = ioutil.WriteFile("WORKSPACE", []byte(updateWorkspace([]byte(addNewPackagesToWorkspace(wscontent)))), 0664)
    logFatalErr(err)
}
@@ -1,58 +0,0 @@
# Copyright 2017 mgIT GmbH All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

_script_content = """
BASE=$(pwd)
WORKSPACE=$(dirname $(readlink WORKSPACE))
cd "$WORKSPACE"
$BASE/{update_deb_packages} {args} $@
"""

def _update_deb_packages_script_impl(ctx):
    args = ctx.attr.args
    script_content = _script_content.format(update_deb_packages = ctx.file._update_deb_packages.short_path, args = " ".join(args))
    script_file = ctx.new_file(ctx.label.name + ".bash")
    ctx.file_action(output = script_file, executable = True, content = script_content)
    return struct(
        files = depset([script_file]),
        runfiles = ctx.runfiles([ctx.file._update_deb_packages]),
    )

_update_deb_packages_script = rule(
    _update_deb_packages_script_impl,
    attrs = {
        "args": attr.string_list(),
        "pgp_keys": attr.label_list(),
        "_update_deb_packages": attr.label(
            default = Label("@rules_pkg//tools/update_deb_packages/src:update_deb_packages"),
            allow_single_file = True,
            executable = True,
            cfg = "host",
        ),
    },
)

def update_deb_packages(name, pgp_keys, **kwargs):
    script_name = name + "_script"
    _update_deb_packages_script(
        name = script_name,
        tags = ["manual"],
        **kwargs
    )
    native.sh_binary(
        name = name,
        srcs = [script_name],
        data = ["//:WORKSPACE"] + pgp_keys,
        tags = ["manual"],
    )
@@ -71,9 +71,6 @@ organization will commit to maintaining that feature and responding to issues.
- <root> contains shims for the .bzl files in pkg/*.bzl. They add backwards
  compatibility to older releases.

Additionally:
- deb_packages/... is defunct and unsupported.

### Starlark style

- We are moving towards fully generated docs. If you touch an attribute, you