Vendor sync

Shengjing Zhu 2020-08-19 23:57:52 +08:00
parent 7a4f48795d
commit 311fe43b3c
243 changed files with 12541 additions and 5421 deletions

File diff suppressed because it is too large.


@ -0,0 +1,5 @@
test/test
test/piggie
test/phaul
image
rpc/rpc.proto


@ -0,0 +1,28 @@
language: go
dist: bionic
os:
- linux
go:
- "1.14.x"
- "1.13.x"
- tip
env:
# Run the tests with CRIU master and criu-dev
- CRIU_BRANCH="master"
- CRIU_BRANCH="criu-dev"
install:
- sudo apt-get update
- sudo apt-get install -y libprotobuf-dev libprotobuf-c0-dev protobuf-c-compiler protobuf-compiler python-protobuf libnl-3-dev libnet-dev libcap-dev
- make install.tools
- go get github.com/checkpoint-restore/go-criu
- git clone --single-branch -b ${CRIU_BRANCH} https://github.com/checkpoint-restore/criu.git
- cd criu; make
- sudo install -D -m 755 criu/criu /usr/sbin/
- cd ..
script:
# This builds the code without running the tests.
- make lint build phaul test/test test/phaul test/piggie
# Run actual test as root as it uses CRIU.
- sudo make test phaul-test
# This builds crit-go
- make -C crit-go/magic-gen lint build magicgen test


@ -0,0 +1,60 @@
GO ?= go
CC ?= gcc
ifeq ($(GOPATH),)
export GOPATH := $(shell $(GO) env GOPATH)
endif
FIRST_GOPATH := $(firstword $(subst :, ,$(GOPATH)))
GOBIN := $(shell $(GO) env GOBIN)
ifeq ($(GOBIN),)
GOBIN := $(FIRST_GOPATH)/bin
endif
all: build test phaul phaul-test
lint:
@golint -set_exit_status . test phaul
build:
@$(GO) build -v
test/piggie: test/piggie.c
@$(CC) $^ -o $@
test/test: test/main.go
@$(GO) build -v -o test/test test/main.go
test: test/test test/piggie
mkdir -p image
test/piggie
test/test dump `pidof piggie` image
test/test restore image
pkill -9 piggie || :
phaul:
@cd phaul; go build -v
test/phaul: test/phaul-main.go
@$(GO) build -v -o test/phaul test/phaul-main.go
phaul-test: test/phaul test/piggie
rm -rf image
test/piggie
test/phaul `pidof piggie`
pkill -9 piggie || :
clean:
@rm -f test/test test/piggie test/phaul
@rm -rf image
@rm -f rpc/rpc.proto
install.tools:
if [ ! -x "$(GOBIN)/golint" ]; then \
$(GO) get -u golang.org/x/lint/golint; \
fi
rpc/rpc.proto:
curl -s https://raw.githubusercontent.com/checkpoint-restore/criu/master/images/rpc.proto -o $@
rpc/rpc.pb.go: rpc/rpc.proto
protoc --go_out=. $^
.PHONY: build test clean lint phaul


@ -0,0 +1,75 @@
[![master](https://travis-ci.org/checkpoint-restore/go-criu.svg?branch=master)](https://travis-ci.org/checkpoint-restore/go-criu)
## go-criu -- Go bindings for [CRIU](https://criu.org/)
This repository provides Go bindings for CRIU. The code is based on the Go-based PHaul
implementation from the CRIU repository. For easier inclusion into other Go projects, the
CRIU Go bindings have been moved to this repository.
The Go bindings provide an easy way to use the CRIU RPC calls from Go without the need
to set up all the infrastructure to make the actual RPC connection to CRIU.
The following example would print the version of CRIU:
```
c := criu.MakeCriu()
version, err := c.GetCriuVersion()
fmt.Println(version)
```
or to just check if at least a certain CRIU version is installed:
```
c := criu.MakeCriu()
result, err := c.IsCriuAtLeast(31100)
```
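For orientation, the two snippets above can be combined into a complete program. The sketch below is an editor's illustration rather than part of the vendored README; it uses only calls shown in this repository (`MakeCriu`, `GetCriuVersion`, `IsCriuAtLeast`) and assumes a CRIU binary is available on the PATH. The returned integer encodes the version as major*10000 + minor*100 + sublevel, so 31100 corresponds to CRIU 3.11.0.
```
package main

import (
	"fmt"
	"log"

	criu "github.com/checkpoint-restore/go-criu/v4"
)

func main() {
	c := criu.MakeCriu()

	// GetCriuVersion returns major*10000 + minor*100 + sublevel,
	// e.g. CRIU 3.14.0 is reported as 31400.
	version, err := c.GetCriuVersion()
	if err != nil {
		log.Fatalf("querying CRIU version: %v", err)
	}
	fmt.Println("CRIU version:", version)

	// 31100 corresponds to CRIU 3.11.0 in this encoding.
	ok, err := c.IsCriuAtLeast(31100)
	if err != nil {
		log.Fatalf("checking CRIU version: %v", err)
	}
	if !ok {
		log.Fatal("CRIU >= 3.11.0 is required")
	}
}
```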
## Releases
The first go-criu release was 3.11 based on CRIU 3.11. The initial plan
was to follow CRIU so that go-criu would carry the same version number as
CRIU.
As go-criu is imported in other projects and as Go modules are expected
to follow Semantic Versioning go-criu will also follow Semantic Versioning
starting with the 4.0.0 release.
4.0.0 is based on CRIU 3.14
## How to contribute
While bug fixes can first be identified via an "issue", that is not required.
It's ok to just open up a PR with the fix, but make sure you include the same
information you would have included in an issue - like how to reproduce it.
PRs for new features should include some background on what use cases the
new code is trying to address. When possible and when it makes sense, try to
break-up larger PRs into smaller ones - it's easier to review smaller
code changes. But only if those smaller ones make sense as stand-alone PRs.
Regardless of the type of PR, all PRs should include:
* well documented code changes
* additional testcases. Ideally, they should fail w/o your code change applied
* documentation changes
Squash your commits into logical pieces of work that might want to be reviewed
separate from the rest of the PRs. Ideally, each commit should implement a
single idea, and the PR branch should pass the tests at every commit. GitHub
makes it easy to review the cumulative effect of many commits; so, when in
doubt, use smaller commits.
PRs that fix issues should include a reference like `Closes #XXXX` in the
commit message so that github will automatically close the referenced issue
when the PR is merged.
Contributors must assert that they are in compliance with the [Developer
Certificate of Origin 1.1](http://developercertificate.org/). This is achieved
by adding a "Signed-off-by" line containing the contributor's name and e-mail
to every commit message. Your signature certifies that you wrote the patch or
otherwise have the right to pass it on as an open-source patch.
### License and copyright
Unless mentioned otherwise in a specific file's header, all code in
this project is released under the Apache 2.0 license.
The author of a change remains the copyright holder of their code
(no copyright assignment). The list of authors and contributors can be
retrieved from the git commit history and in some cases, the file headers.


@ -0,0 +1,5 @@
module github.com/checkpoint-restore/go-criu/v4
go 1.13
require github.com/golang/protobuf v1.3.5


@ -0,0 +1,2 @@
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=

vendor/github.com/checkpoint-restore/go-criu/v4/main.go (generated, vendored, new file, 259 lines)

@ -0,0 +1,259 @@
package criu
import (
"errors"
"fmt"
"os"
"os/exec"
"strconv"
"syscall"
"github.com/checkpoint-restore/go-criu/v4/rpc"
"github.com/golang/protobuf/proto"
)
// Criu struct
type Criu struct {
swrkCmd *exec.Cmd
swrkSk *os.File
swrkPath string
}
// MakeCriu returns the Criu object required for most operations
func MakeCriu() *Criu {
return &Criu{
swrkPath: "criu",
}
}
// SetCriuPath allows setting the path to the CRIU binary
// if it is in a non standard location
func (c *Criu) SetCriuPath(path string) {
c.swrkPath = path
}
// Prepare sets up everything for the RPC communication to CRIU
func (c *Criu) Prepare() error {
fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_SEQPACKET, 0)
if err != nil {
return err
}
cln := os.NewFile(uintptr(fds[0]), "criu-xprt-cln")
syscall.CloseOnExec(fds[0])
srv := os.NewFile(uintptr(fds[1]), "criu-xprt-srv")
defer srv.Close()
args := []string{"swrk", strconv.Itoa(fds[1])}
cmd := exec.Command(c.swrkPath, args...)
err = cmd.Start()
if err != nil {
cln.Close()
return err
}
c.swrkCmd = cmd
c.swrkSk = cln
return nil
}
// Cleanup cleans up
func (c *Criu) Cleanup() {
if c.swrkCmd != nil {
c.swrkSk.Close()
c.swrkSk = nil
c.swrkCmd.Wait()
c.swrkCmd = nil
}
}
func (c *Criu) sendAndRecv(reqB []byte) ([]byte, int, error) {
cln := c.swrkSk
_, err := cln.Write(reqB)
if err != nil {
return nil, 0, err
}
respB := make([]byte, 2*4096)
n, err := cln.Read(respB)
if err != nil {
return nil, 0, err
}
return respB, n, nil
}
func (c *Criu) doSwrk(reqType rpc.CriuReqType, opts *rpc.CriuOpts, nfy Notify) error {
resp, err := c.doSwrkWithResp(reqType, opts, nfy)
if err != nil {
return err
}
respType := resp.GetType()
if respType != reqType {
return errors.New("unexpected response")
}
return nil
}
func (c *Criu) doSwrkWithResp(reqType rpc.CriuReqType, opts *rpc.CriuOpts, nfy Notify) (*rpc.CriuResp, error) {
var resp *rpc.CriuResp
req := rpc.CriuReq{
Type: &reqType,
Opts: opts,
}
if nfy != nil {
opts.NotifyScripts = proto.Bool(true)
}
if c.swrkCmd == nil {
err := c.Prepare()
if err != nil {
return nil, err
}
defer c.Cleanup()
}
for {
reqB, err := proto.Marshal(&req)
if err != nil {
return nil, err
}
respB, respS, err := c.sendAndRecv(reqB)
if err != nil {
return nil, err
}
resp = &rpc.CriuResp{}
err = proto.Unmarshal(respB[:respS], resp)
if err != nil {
return nil, err
}
if !resp.GetSuccess() {
return resp, fmt.Errorf("operation failed (msg:%s err:%d)",
resp.GetCrErrmsg(), resp.GetCrErrno())
}
respType := resp.GetType()
if respType != rpc.CriuReqType_NOTIFY {
break
}
if nfy == nil {
return resp, errors.New("unexpected notify")
}
notify := resp.GetNotify()
switch notify.GetScript() {
case "pre-dump":
err = nfy.PreDump()
case "post-dump":
err = nfy.PostDump()
case "pre-restore":
err = nfy.PreRestore()
case "post-restore":
err = nfy.PostRestore(notify.GetPid())
case "network-lock":
err = nfy.NetworkLock()
case "network-unlock":
err = nfy.NetworkUnlock()
case "setup-namespaces":
err = nfy.SetupNamespaces(notify.GetPid())
case "post-setup-namespaces":
err = nfy.PostSetupNamespaces()
case "post-resume":
err = nfy.PostResume()
default:
err = nil
}
if err != nil {
return resp, err
}
req = rpc.CriuReq{
Type: &respType,
NotifySuccess: proto.Bool(true),
}
}
return resp, nil
}
// Dump dumps a process
func (c *Criu) Dump(opts rpc.CriuOpts, nfy Notify) error {
return c.doSwrk(rpc.CriuReqType_DUMP, &opts, nfy)
}
// Restore restores a process
func (c *Criu) Restore(opts rpc.CriuOpts, nfy Notify) error {
return c.doSwrk(rpc.CriuReqType_RESTORE, &opts, nfy)
}
// PreDump does a pre-dump
func (c *Criu) PreDump(opts rpc.CriuOpts, nfy Notify) error {
return c.doSwrk(rpc.CriuReqType_PRE_DUMP, &opts, nfy)
}
// StartPageServer starts the page server
func (c *Criu) StartPageServer(opts rpc.CriuOpts) error {
return c.doSwrk(rpc.CriuReqType_PAGE_SERVER, &opts, nil)
}
// StartPageServerChld starts the page server and returns PID and port
func (c *Criu) StartPageServerChld(opts rpc.CriuOpts) (int, int, error) {
resp, err := c.doSwrkWithResp(rpc.CriuReqType_PAGE_SERVER_CHLD, &opts, nil)
if err != nil {
return 0, 0, err
}
return int(resp.Ps.GetPid()), int(resp.Ps.GetPort()), nil
}
// GetCriuVersion executes the VERSION RPC call and returns the version
// as an integer. Major * 10000 + Minor * 100 + SubLevel
func (c *Criu) GetCriuVersion() (int, error) {
resp, err := c.doSwrkWithResp(rpc.CriuReqType_VERSION, nil, nil)
if err != nil {
return 0, err
}
if resp.GetType() != rpc.CriuReqType_VERSION {
return 0, fmt.Errorf("Unexpected CRIU RPC response")
}
version := int(*resp.GetVersion().MajorNumber) * 10000
version += int(*resp.GetVersion().MinorNumber) * 100
if resp.GetVersion().Sublevel != nil {
version += int(*resp.GetVersion().Sublevel)
}
if resp.GetVersion().Gitid != nil {
// taken from runc: if it is a git release -> increase minor by 1
version -= (version % 100)
version += 100
}
return version, nil
}
// IsCriuAtLeast checks if the version is at least the same
// as the parameter version
func (c *Criu) IsCriuAtLeast(version int) (bool, error) {
criuVersion, err := c.GetCriuVersion()
if err != nil {
return false, err
}
if criuVersion >= version {
return true, nil
}
return false, nil
}


@ -0,0 +1,63 @@
package criu
//Notify interface
type Notify interface {
PreDump() error
PostDump() error
PreRestore() error
PostRestore(pid int32) error
NetworkLock() error
NetworkUnlock() error
SetupNamespaces(pid int32) error
PostSetupNamespaces() error
PostResume() error
}
// NoNotify struct
type NoNotify struct {
}
// PreDump NoNotify
func (c NoNotify) PreDump() error {
return nil
}
// PostDump NoNotify
func (c NoNotify) PostDump() error {
return nil
}
// PreRestore NoNotify
func (c NoNotify) PreRestore() error {
return nil
}
// PostRestore NoNotify
func (c NoNotify) PostRestore(pid int32) error {
return nil
}
// NetworkLock NoNotify
func (c NoNotify) NetworkLock() error {
return nil
}
// NetworkUnlock NoNotify
func (c NoNotify) NetworkUnlock() error {
return nil
}
// SetupNamespaces NoNotify
func (c NoNotify) SetupNamespaces(pid int32) error {
return nil
}
// PostSetupNamespaces NoNotify
func (c NoNotify) PostSetupNamespaces() error {
return nil
}
// PostResume NoNotify
func (c NoNotify) PostResume() error {
return nil
}
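NoNotify exists so callers can embed it and override only the hooks they care about; every other callback stays a no-op, and new hooks added upstream do not break existing callers. The following is an editor's sketch rather than part of the vendored code; the `rpc.CriuOpts` fields it sets (`ImagesDirFd`, `ShellJob`) are assumptions about the bindings generated from CRIU's rpc.proto.
```
package main

import (
	"log"
	"os"

	criu "github.com/checkpoint-restore/go-criu/v4"
	"github.com/checkpoint-restore/go-criu/v4/rpc"
	"github.com/golang/protobuf/proto"
)

// restoreLogger embeds NoNotify, so every notification callback is a
// no-op except the one overridden below.
type restoreLogger struct {
	criu.NoNotify
}

// PostRestore is invoked once CRIU reports the PID of the restored process.
func (restoreLogger) PostRestore(pid int32) error {
	log.Printf("restored process has pid %d", pid)
	return nil
}

func main() {
	// Directory holding the checkpoint images (the Makefile's test
	// targets use a directory named "image").
	img, err := os.Open("image")
	if err != nil {
		log.Fatal(err)
	}
	defer img.Close()

	// ImagesDirFd and ShellJob are assumed to correspond to the
	// images_dir_fd and shell_job fields of rpc.proto.
	opts := rpc.CriuOpts{
		ImagesDirFd: proto.Int32(int32(img.Fd())),
		ShellJob:    proto.Bool(true),
	}

	c := criu.MakeCriu()
	if err := c.Restore(opts, restoreLogger{}); err != nil {
		log.Fatalf("restore failed: %v", err)
	}
}
```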

File diff suppressed because it is too large.

vendor/github.com/cilium/ebpf/abi.go (generated, vendored; 15 changed lines)

@ -3,14 +3,13 @@ package ebpf
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"os"
"syscall"
"github.com/cilium/ebpf/internal"
"golang.org/x/xerrors"
)
// MapABI are the attributes of a Map which are available across all supported kernels.
@ -35,7 +34,7 @@ func newMapABIFromSpec(spec *MapSpec) *MapABI {
func newMapABIFromFd(fd *internal.FD) (string, *MapABI, error) {
info, err := bpfGetMapInfoByFD(fd)
if err != nil {
if xerrors.Is(err, syscall.EINVAL) {
if errors.Is(err, syscall.EINVAL) {
abi, err := newMapABIFromProc(fd)
return "", abi, err
}
@ -98,7 +97,7 @@ func newProgramABIFromSpec(spec *ProgramSpec) *ProgramABI {
func newProgramABIFromFd(fd *internal.FD) (string, *ProgramABI, error) {
info, err := bpfGetProgInfoByFD(fd)
if err != nil {
if xerrors.Is(err, syscall.EINVAL) {
if errors.Is(err, syscall.EINVAL) {
return newProgramABIFromProc(fd)
}
@ -127,7 +126,7 @@ func newProgramABIFromProc(fd *internal.FD) (string, *ProgramABI, error) {
"prog_type": &abi.Type,
"prog_tag": &name,
})
if xerrors.Is(err, errMissingFields) {
if errors.Is(err, errMissingFields) {
return "", nil, &internal.UnsupportedFeatureError{
Name: "reading ABI from /proc/self/fdinfo",
MinimumVersion: internal.Version{4, 11, 0},
@ -153,12 +152,12 @@ func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error {
defer fh.Close()
if err := scanFdInfoReader(fh, fields); err != nil {
return xerrors.Errorf("%s: %w", fh.Name(), err)
return fmt.Errorf("%s: %w", fh.Name(), err)
}
return nil
}
var errMissingFields = xerrors.New("missing fields")
var errMissingFields = errors.New("missing fields")
func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error {
var (
@ -179,7 +178,7 @@ func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error {
}
if n, err := fmt.Fscanln(bytes.NewReader(parts[1]), field); err != nil || n != 1 {
return xerrors.Errorf("can't parse field %s: %v", name, err)
return fmt.Errorf("can't parse field %s: %v", name, err)
}
scanned++
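The change above, repeated mechanically across the cilium/ebpf files that follow, migrates from golang.org/x/xerrors to the standard library. This is possible because the module now targets Go 1.13 (see its go.mod diff below), where fmt.Errorf understands the %w wrapping verb and the errors package provides Is and As. A stand-alone sketch of the equivalence, written for illustration rather than taken from the repository:
```
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	// Before Go 1.13, wrapping with %w required xerrors.Errorf.
	wrapped := fmt.Errorf("opening config: %w", os.ErrNotExist)

	// errors.Is walks the wrap chain, replacing xerrors.Is.
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true

	// errors.New replaces xerrors.New for plain sentinel errors,
	// such as errMissingFields above.
	errMissingFields := errors.New("missing fields")
	fmt.Println(errors.Is(fmt.Errorf("scan: %w", errMissingFields), errMissingFields)) // true
}
```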


@ -2,12 +2,11 @@ package asm
import (
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"strings"
"golang.org/x/xerrors"
)
// InstructionSize is the size of a BPF instruction in bytes
@ -43,7 +42,7 @@ func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, err
ins.Constant = int64(bi.Constant)
ins.Dst, ins.Src, err = bi.Registers.Unmarshal(bo)
if err != nil {
return 0, xerrors.Errorf("can't unmarshal registers: %s", err)
return 0, fmt.Errorf("can't unmarshal registers: %s", err)
}
if !bi.OpCode.isDWordLoad() {
@ -53,10 +52,10 @@ func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, err
var bi2 bpfInstruction
if err := binary.Read(r, bo, &bi2); err != nil {
// No Wrap, to avoid io.EOF clash
return 0, xerrors.New("64bit immediate is missing second half")
return 0, errors.New("64bit immediate is missing second half")
}
if bi2.OpCode != 0 || bi2.Offset != 0 || bi2.Registers != 0 {
return 0, xerrors.New("64bit immediate has non-zero fields")
return 0, errors.New("64bit immediate has non-zero fields")
}
ins.Constant = int64(uint64(uint32(bi2.Constant))<<32 | uint64(uint32(bi.Constant)))
@ -66,7 +65,7 @@ func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, err
// Marshal encodes a BPF instruction.
func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) {
if ins.OpCode == InvalidOpCode {
return 0, xerrors.New("invalid opcode")
return 0, errors.New("invalid opcode")
}
isDWordLoad := ins.OpCode.isDWordLoad()
@ -79,7 +78,7 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error)
regs, err := newBPFRegisters(ins.Dst, ins.Src, bo)
if err != nil {
return 0, xerrors.Errorf("can't marshal registers: %s", err)
return 0, fmt.Errorf("can't marshal registers: %s", err)
}
bpfi := bpfInstruction{
@ -113,11 +112,11 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error)
// Returns an error if the instruction doesn't load a map.
func (ins *Instruction) RewriteMapPtr(fd int) error {
if !ins.OpCode.isDWordLoad() {
return xerrors.Errorf("%s is not a 64 bit load", ins.OpCode)
return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
}
if ins.Src != PseudoMapFD && ins.Src != PseudoMapValue {
return xerrors.New("not a load from a map")
return errors.New("not a load from a map")
}
// Preserve the offset value for direct map loads.
@ -136,11 +135,11 @@ func (ins *Instruction) mapPtr() uint32 {
// Returns an error if the instruction is not a direct load.
func (ins *Instruction) RewriteMapOffset(offset uint32) error {
if !ins.OpCode.isDWordLoad() {
return xerrors.Errorf("%s is not a 64 bit load", ins.OpCode)
return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
}
if ins.Src != PseudoMapValue {
return xerrors.New("not a direct load from a map")
return errors.New("not a direct load from a map")
}
fd := uint64(ins.Constant) & math.MaxUint32
@ -251,7 +250,7 @@ func (insns Instructions) String() string {
// Returns an error if the symbol isn't used, see IsUnreferencedSymbol.
func (insns Instructions) RewriteMapPtr(symbol string, fd int) error {
if symbol == "" {
return xerrors.New("empty symbol")
return errors.New("empty symbol")
}
found := false
@ -286,7 +285,7 @@ func (insns Instructions) SymbolOffsets() (map[string]int, error) {
}
if _, ok := offsets[ins.Symbol]; ok {
return nil, xerrors.Errorf("duplicate symbol %s", ins.Symbol)
return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol)
}
offsets[ins.Symbol] = i
@ -324,7 +323,7 @@ func (insns Instructions) marshalledOffsets() (map[string]int, error) {
}
if _, ok := symbols[ins.Symbol]; ok {
return nil, xerrors.Errorf("duplicate symbol %s", ins.Symbol)
return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol)
}
symbols[ins.Symbol] = currentPos
@ -405,7 +404,7 @@ func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
// Rewrite bpf to bpf call
offset, ok := absoluteOffsets[ins.Reference]
if !ok {
return xerrors.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
return fmt.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
}
ins.Constant = int64(offset - num - 1)
@ -414,7 +413,7 @@ func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
// Rewrite jump to label
offset, ok := absoluteOffsets[ins.Reference]
if !ok {
return xerrors.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
return fmt.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
}
ins.Offset = int16(offset - num - 1)
@ -422,7 +421,7 @@ func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
n, err := ins.Marshal(w, bo)
if err != nil {
return xerrors.Errorf("instruction %d: %w", i, err)
return fmt.Errorf("instruction %d: %w", i, err)
}
num += int(n / InstructionSize)
@ -446,7 +445,7 @@ func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, erro
case binary.BigEndian:
return bpfRegisters((dst << 4) | (src & 0xF)), nil
default:
return 0, xerrors.Errorf("unrecognized ByteOrder %T", bo)
return 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
}
}
@ -457,7 +456,7 @@ func (r bpfRegisters) Unmarshal(bo binary.ByteOrder) (dst, src Register, err err
case binary.BigEndian:
return Register(r >> 4), Register(r & 0xf), nil
default:
return 0, 0, xerrors.Errorf("unrecognized ByteOrder %T", bo)
return 0, 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
}
}


@ -1,12 +1,13 @@
package ebpf
import (
"errors"
"fmt"
"math"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"golang.org/x/xerrors"
)
// CollectionOptions control loading a collection into the kernel.
@ -64,12 +65,12 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
// Not all programs need to use the map
default:
return xerrors.Errorf("program %s: %w", progName, err)
return fmt.Errorf("program %s: %w", progName, err)
}
}
if !seen {
return xerrors.Errorf("map %s not referenced by any programs", symbol)
return fmt.Errorf("map %s not referenced by any programs", symbol)
}
// Prevent NewCollection from creating rewritten maps
@ -96,21 +97,21 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error {
rodata := cs.Maps[".rodata"]
if rodata == nil {
return xerrors.New("missing .rodata section")
return errors.New("missing .rodata section")
}
if rodata.BTF == nil {
return xerrors.New(".rodata section has no BTF")
return errors.New(".rodata section has no BTF")
}
if n := len(rodata.Contents); n != 1 {
return xerrors.Errorf("expected one key in .rodata, found %d", n)
return fmt.Errorf("expected one key in .rodata, found %d", n)
}
kv := rodata.Contents[0]
value, ok := kv.Value.([]byte)
if !ok {
return xerrors.Errorf("first value in .rodata is %T not []byte", kv.Value)
return fmt.Errorf("first value in .rodata is %T not []byte", kv.Value)
}
buf := make([]byte, len(value))
@ -185,14 +186,14 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (col
var handle *btf.Handle
if mapSpec.BTF != nil {
handle, err = loadBTF(btf.MapSpec(mapSpec.BTF))
if err != nil && !xerrors.Is(err, btf.ErrNotSupported) {
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
return nil, err
}
}
m, err := newMapWithBTF(mapSpec, handle)
if err != nil {
return nil, xerrors.Errorf("map %s: %w", mapName, err)
return nil, fmt.Errorf("map %s: %w", mapName, err)
}
maps[mapName] = m
}
@ -216,29 +217,29 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (col
m := maps[ins.Reference]
if m == nil {
return nil, xerrors.Errorf("program %s: missing map %s", progName, ins.Reference)
return nil, fmt.Errorf("program %s: missing map %s", progName, ins.Reference)
}
fd := m.FD()
if fd < 0 {
return nil, xerrors.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd)
return nil, fmt.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd)
}
if err := ins.RewriteMapPtr(m.FD()); err != nil {
return nil, xerrors.Errorf("progam %s: map %s: %w", progName, ins.Reference, err)
return nil, fmt.Errorf("progam %s: map %s: %w", progName, ins.Reference, err)
}
}
var handle *btf.Handle
if progSpec.BTF != nil {
handle, err = loadBTF(btf.ProgramSpec(progSpec.BTF))
if err != nil && !xerrors.Is(err, btf.ErrNotSupported) {
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
return nil, err
}
}
prog, err := newProgramWithBTF(progSpec, handle, opts.Programs)
if err != nil {
return nil, xerrors.Errorf("program %s: %w", progName, err)
return nil, fmt.Errorf("program %s: %w", progName, err)
}
progs[progName] = prog
}


@ -4,6 +4,8 @@ import (
"bytes"
"debug/elf"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"os"
@ -13,8 +15,6 @@ import (
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/unix"
"golang.org/x/xerrors"
)
type elfCode struct {
@ -35,7 +35,7 @@ func LoadCollectionSpec(file string) (*CollectionSpec, error) {
spec, err := LoadCollectionSpecFromReader(f)
if err != nil {
return nil, xerrors.Errorf("file %s: %w", file, err)
return nil, fmt.Errorf("file %s: %w", file, err)
}
return spec, nil
}
@ -50,7 +50,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
symbols, err := f.Symbols()
if err != nil {
return nil, xerrors.Errorf("load symbols: %v", err)
return nil, fmt.Errorf("load symbols: %v", err)
}
ec := &elfCode{f, symbols, symbolsPerSection(symbols), "", 0}
@ -79,13 +79,13 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
dataSections[elf.SectionIndex(i)] = sec
case sec.Type == elf.SHT_REL:
if int(sec.Info) >= len(ec.Sections) {
return nil, xerrors.Errorf("found relocation section %v for missing section %v", i, sec.Info)
return nil, fmt.Errorf("found relocation section %v for missing section %v", i, sec.Info)
}
// Store relocations under the section index of the target
idx := elf.SectionIndex(sec.Info)
if relSections[idx] != nil {
return nil, xerrors.Errorf("section %d has multiple relocation sections", sec.Info)
return nil, fmt.Errorf("section %d has multiple relocation sections", sec.Info)
}
relSections[idx] = sec
case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0:
@ -95,44 +95,52 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
ec.license, err = loadLicense(licenseSection)
if err != nil {
return nil, xerrors.Errorf("load license: %w", err)
return nil, fmt.Errorf("load license: %w", err)
}
ec.version, err = loadVersion(versionSection, ec.ByteOrder)
if err != nil {
return nil, xerrors.Errorf("load version: %w", err)
return nil, fmt.Errorf("load version: %w", err)
}
btfSpec, err := btf.LoadSpecFromReader(rd)
if err != nil {
return nil, xerrors.Errorf("load BTF: %w", err)
return nil, fmt.Errorf("load BTF: %w", err)
}
relocations, referencedSections, err := ec.loadRelocations(relSections)
if err != nil {
return nil, fmt.Errorf("load relocations: %w", err)
}
maps := make(map[string]*MapSpec)
if err := ec.loadMaps(maps, mapSections); err != nil {
return nil, xerrors.Errorf("load maps: %w", err)
return nil, fmt.Errorf("load maps: %w", err)
}
if len(btfMaps) > 0 {
if err := ec.loadBTFMaps(maps, btfMaps, btfSpec); err != nil {
return nil, xerrors.Errorf("load BTF maps: %w", err)
return nil, fmt.Errorf("load BTF maps: %w", err)
}
}
if len(dataSections) > 0 {
if err := ec.loadDataSections(maps, dataSections, btfSpec); err != nil {
return nil, xerrors.Errorf("load data sections: %w", err)
for idx := range dataSections {
if !referencedSections[idx] {
// Prune data sections which are not referenced by any
// instructions.
delete(dataSections, idx)
}
}
}
relocations, err := ec.loadRelocations(relSections)
if err != nil {
return nil, xerrors.Errorf("load relocations: %w", err)
if err := ec.loadDataSections(maps, dataSections, btfSpec); err != nil {
return nil, fmt.Errorf("load data sections: %w", err)
}
}
progs, err := ec.loadPrograms(progSections, relocations, btfSpec)
if err != nil {
return nil, xerrors.Errorf("load programs: %w", err)
return nil, fmt.Errorf("load programs: %w", err)
}
return &CollectionSpec{maps, progs}, nil
@ -140,11 +148,12 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
func loadLicense(sec *elf.Section) (string, error) {
if sec == nil {
return "", xerrors.New("missing license section")
return "", nil
}
data, err := sec.Data()
if err != nil {
return "", xerrors.Errorf("section %s: %v", sec.Name, err)
return "", fmt.Errorf("section %s: %v", sec.Name, err)
}
return string(bytes.TrimRight(data, "\000")), nil
}
@ -156,12 +165,12 @@ func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) {
var version uint32
if err := binary.Read(sec.Open(), bo, &version); err != nil {
return 0, xerrors.Errorf("section %s: %v", sec.Name, err)
return 0, fmt.Errorf("section %s: %v", sec.Name, err)
}
return version, nil
}
func (ec *elfCode) loadPrograms(progSections map[elf.SectionIndex]*elf.Section, relocations map[elf.SectionIndex]map[uint64]elf.Symbol, btf *btf.Spec) (map[string]*ProgramSpec, error) {
func (ec *elfCode) loadPrograms(progSections map[elf.SectionIndex]*elf.Section, relocations map[elf.SectionIndex]map[uint64]elf.Symbol, btfSpec *btf.Spec) (map[string]*ProgramSpec, error) {
var (
progs []*ProgramSpec
libs []*ProgramSpec
@ -170,17 +179,17 @@ func (ec *elfCode) loadPrograms(progSections map[elf.SectionIndex]*elf.Section,
for idx, sec := range progSections {
syms := ec.symbolsPerSection[idx]
if len(syms) == 0 {
return nil, xerrors.Errorf("section %v: missing symbols", sec.Name)
return nil, fmt.Errorf("section %v: missing symbols", sec.Name)
}
funcSym, ok := syms[0]
if !ok {
return nil, xerrors.Errorf("section %v: no label at start", sec.Name)
return nil, fmt.Errorf("section %v: no label at start", sec.Name)
}
insns, length, err := ec.loadInstructions(sec, syms, relocations[idx])
if err != nil {
return nil, xerrors.Errorf("program %s: can't unmarshal instructions: %w", funcSym.Name, err)
return nil, fmt.Errorf("program %s: can't unmarshal instructions: %w", funcSym.Name, err)
}
progType, attachType, attachTo := getProgType(sec.Name)
@ -196,10 +205,10 @@ func (ec *elfCode) loadPrograms(progSections map[elf.SectionIndex]*elf.Section,
ByteOrder: ec.ByteOrder,
}
if btf != nil {
spec.BTF, err = btf.Program(sec.Name, length)
if err != nil {
return nil, xerrors.Errorf("BTF for section %s (program %s): %w", sec.Name, funcSym.Name, err)
if btfSpec != nil {
spec.BTF, err = btfSpec.Program(sec.Name, length)
if err != nil && !errors.Is(err, btf.ErrNoExtendedInfo) {
return nil, fmt.Errorf("program %s: %w", funcSym.Name, err)
}
}
@ -217,7 +226,7 @@ func (ec *elfCode) loadPrograms(progSections map[elf.SectionIndex]*elf.Section,
for _, prog := range progs {
err := link(prog, libs)
if err != nil {
return nil, xerrors.Errorf("program %s: %w", prog.Name, err)
return nil, fmt.Errorf("program %s: %w", prog.Name, err)
}
res[prog.Name] = prog
}
@ -238,14 +247,14 @@ func (ec *elfCode) loadInstructions(section *elf.Section, symbols, relocations m
return insns, offset, nil
}
if err != nil {
return nil, 0, xerrors.Errorf("offset %d: %w", offset, err)
return nil, 0, fmt.Errorf("offset %d: %w", offset, err)
}
ins.Symbol = symbols[offset].Name
if rel, ok := relocations[offset]; ok {
if err = ec.relocateInstruction(&ins, rel); err != nil {
return nil, 0, xerrors.Errorf("offset %d: can't relocate instruction: %w", offset, err)
return nil, 0, fmt.Errorf("offset %d: can't relocate instruction: %w", offset, err)
}
}
@ -266,7 +275,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
// from the section itself.
idx := int(rel.Section)
if idx > len(ec.Sections) {
return xerrors.New("out-of-bounds section index")
return errors.New("out-of-bounds section index")
}
name = ec.Sections[idx].Name
@ -286,7 +295,7 @@ outer:
// section. Weirdly, the offset of the real symbol in the
// section is encoded in the instruction stream.
if bind != elf.STB_LOCAL {
return xerrors.Errorf("direct load: %s: unsupported relocation %s", name, bind)
return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
}
// For some reason, clang encodes the offset of the symbol its
@ -308,13 +317,13 @@ outer:
case elf.STT_OBJECT:
if bind != elf.STB_GLOBAL {
return xerrors.Errorf("load: %s: unsupported binding: %s", name, bind)
return fmt.Errorf("load: %s: unsupported binding: %s", name, bind)
}
ins.Src = asm.PseudoMapFD
default:
return xerrors.Errorf("load: %s: unsupported relocation: %s", name, typ)
return fmt.Errorf("load: %s: unsupported relocation: %s", name, typ)
}
// Mark the instruction as needing an update when creating the
@ -325,18 +334,18 @@ outer:
case ins.OpCode.JumpOp() == asm.Call:
if ins.Src != asm.PseudoCall {
return xerrors.Errorf("call: %s: incorrect source register", name)
return fmt.Errorf("call: %s: incorrect source register", name)
}
switch typ {
case elf.STT_NOTYPE, elf.STT_FUNC:
if bind != elf.STB_GLOBAL {
return xerrors.Errorf("call: %s: unsupported binding: %s", name, bind)
return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
}
case elf.STT_SECTION:
if bind != elf.STB_LOCAL {
return xerrors.Errorf("call: %s: unsupported binding: %s", name, bind)
return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
}
// The function we want to call is in the indicated section,
@ -345,23 +354,23 @@ outer:
// A value of -1 references the first instruction in the section.
offset := int64(int32(ins.Constant)+1) * asm.InstructionSize
if offset < 0 {
return xerrors.Errorf("call: %s: invalid offset %d", name, offset)
return fmt.Errorf("call: %s: invalid offset %d", name, offset)
}
sym, ok := ec.symbolsPerSection[rel.Section][uint64(offset)]
if !ok {
return xerrors.Errorf("call: %s: no symbol at offset %d", name, offset)
return fmt.Errorf("call: %s: no symbol at offset %d", name, offset)
}
ins.Constant = -1
name = sym.Name
default:
return xerrors.Errorf("call: %s: invalid symbol type %s", name, typ)
return fmt.Errorf("call: %s: invalid symbol type %s", name, typ)
}
default:
return xerrors.Errorf("relocation for unsupported instruction: %s", ins.OpCode)
return fmt.Errorf("relocation for unsupported instruction: %s", ins.OpCode)
}
ins.Reference = name
@ -372,11 +381,11 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec, mapSections map[elf.Sectio
for idx, sec := range mapSections {
syms := ec.symbolsPerSection[idx]
if len(syms) == 0 {
return xerrors.Errorf("section %v: no symbols", sec.Name)
return fmt.Errorf("section %v: no symbols", sec.Name)
}
if sec.Size%uint64(len(syms)) != 0 {
return xerrors.Errorf("section %v: map descriptors are not of equal size", sec.Name)
return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name)
}
var (
@ -386,11 +395,11 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec, mapSections map[elf.Sectio
for i, offset := 0, uint64(0); i < len(syms); i, offset = i+1, offset+size {
mapSym, ok := syms[offset]
if !ok {
return xerrors.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
}
if maps[mapSym.Name] != nil {
return xerrors.Errorf("section %v: map %v already exists", sec.Name, mapSym)
return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym)
}
lr := io.LimitReader(r, int64(size))
@ -400,19 +409,19 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec, mapSections map[elf.Sectio
}
switch {
case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil:
return xerrors.Errorf("map %v: missing type", mapSym)
return fmt.Errorf("map %v: missing type", mapSym)
case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil:
return xerrors.Errorf("map %v: missing key size", mapSym)
return fmt.Errorf("map %v: missing key size", mapSym)
case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil:
return xerrors.Errorf("map %v: missing value size", mapSym)
return fmt.Errorf("map %v: missing value size", mapSym)
case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil:
return xerrors.Errorf("map %v: missing max entries", mapSym)
return fmt.Errorf("map %v: missing max entries", mapSym)
case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil:
return xerrors.Errorf("map %v: missing flags", mapSym)
return fmt.Errorf("map %v: missing flags", mapSym)
}
if _, err := io.Copy(internal.DiscardZeroes{}, lr); err != nil {
return xerrors.Errorf("map %v: unknown and non-zero fields in definition", mapSym)
return fmt.Errorf("map %v: unknown and non-zero fields in definition", mapSym)
}
maps[mapSym.Name] = &spec
@ -424,84 +433,116 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec, mapSections map[elf.Sectio
func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec, mapSections map[elf.SectionIndex]*elf.Section, spec *btf.Spec) error {
if spec == nil {
return xerrors.Errorf("missing BTF")
return fmt.Errorf("missing BTF")
}
for idx, sec := range mapSections {
syms := ec.symbolsPerSection[idx]
if len(syms) == 0 {
return xerrors.Errorf("section %v: no symbols", sec.Name)
return fmt.Errorf("section %v: no symbols", sec.Name)
}
for _, sym := range syms {
name := sym.Name
if maps[name] != nil {
return xerrors.Errorf("section %v: map %v already exists", sec.Name, sym)
return fmt.Errorf("section %v: map %v already exists", sec.Name, sym)
}
btfMap, btfMapMembers, err := spec.Map(name)
mapSpec, err := mapSpecFromBTF(spec, name)
if err != nil {
return xerrors.Errorf("map %v: can't get BTF: %w", name, err)
return fmt.Errorf("map %v: %w", name, err)
}
spec, err := mapSpecFromBTF(btfMap, btfMapMembers)
if err != nil {
return xerrors.Errorf("map %v: %w", name, err)
}
maps[name] = spec
maps[name] = mapSpec
}
}
return nil
}
func mapSpecFromBTF(btfMap *btf.Map, btfMapMembers []btf.Member) (*MapSpec, error) {
func mapSpecFromBTF(spec *btf.Spec, name string) (*MapSpec, error) {
btfMap, btfMapMembers, err := spec.Map(name)
if err != nil {
return nil, fmt.Errorf("can't get BTF: %w", err)
}
keyType := btf.MapKey(btfMap)
size, err := btf.Sizeof(keyType)
if err != nil {
return nil, fmt.Errorf("can't get size of BTF key: %w", err)
}
keySize := uint32(size)
valueType := btf.MapValue(btfMap)
size, err = btf.Sizeof(valueType)
if err != nil {
return nil, fmt.Errorf("can't get size of BTF value: %w", err)
}
valueSize := uint32(size)
var (
mapType, flags, maxEntries uint32
err error
)
for _, member := range btfMapMembers {
switch member.Name {
case "type":
mapType, err = uintFromBTF(member.Type)
if err != nil {
return nil, xerrors.Errorf("can't get type: %w", err)
return nil, fmt.Errorf("can't get type: %w", err)
}
case "map_flags":
flags, err = uintFromBTF(member.Type)
if err != nil {
return nil, xerrors.Errorf("can't get BTF map flags: %w", err)
return nil, fmt.Errorf("can't get BTF map flags: %w", err)
}
case "max_entries":
maxEntries, err = uintFromBTF(member.Type)
if err != nil {
return nil, xerrors.Errorf("can't get BTF map max entries: %w", err)
return nil, fmt.Errorf("can't get BTF map max entries: %w", err)
}
case "key":
case "value":
case "key_size":
if _, isVoid := keyType.(*btf.Void); !isVoid {
return nil, errors.New("both key and key_size given")
}
keySize, err = uintFromBTF(member.Type)
if err != nil {
return nil, fmt.Errorf("can't get BTF key size: %w", err)
}
case "value_size":
if _, isVoid := valueType.(*btf.Void); !isVoid {
return nil, errors.New("both value and value_size given")
}
valueSize, err = uintFromBTF(member.Type)
if err != nil {
return nil, fmt.Errorf("can't get BTF value size: %w", err)
}
case "pinning":
pinning, err := uintFromBTF(member.Type)
if err != nil {
return nil, fmt.Errorf("can't get pinning: %w", err)
}
if pinning != 0 {
return nil, fmt.Errorf("'pinning' attribute not supported: %w", ErrNotSupported)
}
case "key", "value":
default:
return nil, xerrors.Errorf("unrecognized field %s in BTF map definition", member.Name)
return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name)
}
}
keySize, err := btf.Sizeof(btf.MapKey(btfMap))
if err != nil {
return nil, xerrors.Errorf("can't get size of BTF key: %w", err)
}
valueSize, err := btf.Sizeof(btf.MapValue(btfMap))
if err != nil {
return nil, xerrors.Errorf("can't get size of BTF value: %w", err)
}
return &MapSpec{
Type: MapType(mapType),
KeySize: uint32(keySize),
ValueSize: uint32(valueSize),
KeySize: keySize,
ValueSize: valueSize,
MaxEntries: maxEntries,
Flags: flags,
BTF: btfMap,
@ -513,12 +554,12 @@ func mapSpecFromBTF(btfMap *btf.Map, btfMapMembers []btf.Member) (*MapSpec, erro
func uintFromBTF(typ btf.Type) (uint32, error) {
ptr, ok := typ.(*btf.Pointer)
if !ok {
return 0, xerrors.Errorf("not a pointer: %v", typ)
return 0, fmt.Errorf("not a pointer: %v", typ)
}
arr, ok := ptr.Target.(*btf.Array)
if !ok {
return 0, xerrors.Errorf("not a pointer to array: %v", typ)
return 0, fmt.Errorf("not a pointer to array: %v", typ)
}
return arr.Nelems, nil
@ -526,7 +567,7 @@ func uintFromBTF(typ btf.Type) (uint32, error) {
func (ec *elfCode) loadDataSections(maps map[string]*MapSpec, dataSections map[elf.SectionIndex]*elf.Section, spec *btf.Spec) error {
if spec == nil {
return xerrors.New("data sections require BTF, make sure all consts are marked as static")
return errors.New("data sections require BTF, make sure all consts are marked as static")
}
for _, sec := range dataSections {
@ -537,11 +578,11 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec, dataSections map[e
data, err := sec.Data()
if err != nil {
return xerrors.Errorf("data section %s: can't get contents: %w", sec.Name, err)
return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err)
}
if uint64(len(data)) > math.MaxUint32 {
return xerrors.Errorf("data section %s: contents exceed maximum size", sec.Name)
return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name)
}
mapSpec := &MapSpec{
@ -633,13 +674,14 @@ func getProgType(sectionName string) (ProgramType, AttachType, string) {
return UnspecifiedProgram, AttachNone, ""
}
func (ec *elfCode) loadRelocations(sections map[elf.SectionIndex]*elf.Section) (map[elf.SectionIndex]map[uint64]elf.Symbol, error) {
func (ec *elfCode) loadRelocations(sections map[elf.SectionIndex]*elf.Section) (map[elf.SectionIndex]map[uint64]elf.Symbol, map[elf.SectionIndex]bool, error) {
result := make(map[elf.SectionIndex]map[uint64]elf.Symbol)
targets := make(map[elf.SectionIndex]bool)
for idx, sec := range sections {
rels := make(map[uint64]elf.Symbol)
if sec.Entsize < 16 {
return nil, xerrors.Errorf("section %s: relocations are less than 16 bytes", sec.Name)
return nil, nil, fmt.Errorf("section %s: relocations are less than 16 bytes", sec.Name)
}
r := sec.Open()
@ -648,20 +690,22 @@ func (ec *elfCode) loadRelocations(sections map[elf.SectionIndex]*elf.Section) (
var rel elf.Rel64
if binary.Read(ent, ec.ByteOrder, &rel) != nil {
return nil, xerrors.Errorf("can't parse relocation at offset %v", off)
return nil, nil, fmt.Errorf("can't parse relocation at offset %v", off)
}
symNo := int(elf.R_SYM64(rel.Info) - 1)
if symNo >= len(ec.symbols) {
return nil, xerrors.Errorf("relocation at offset %d: symbol %v doesnt exist", off, symNo)
return nil, nil, fmt.Errorf("relocation at offset %d: symbol %v doesnt exist", off, symNo)
}
symbol := ec.symbols[symNo]
targets[symbol.Section] = true
rels[rel.Off] = ec.symbols[symNo]
}
result[idx] = rels
}
return result, nil
return result, targets, nil
}
func symbolsPerSection(symbols []elf.Symbol) map[elf.SectionIndex]map[uint64]elf.Symbol {


@ -1,8 +1,5 @@
module github.com/cilium/ebpf
go 1.12
go 1.13
require (
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
)
require golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9


@ -1,6 +1,2 @@
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=


@ -4,6 +4,8 @@ import (
"bytes"
"debug/elf"
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
@ -14,16 +16,15 @@ import (
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/unix"
"golang.org/x/xerrors"
)
const btfMagic = 0xeB9F
// Errors returned by BTF functions.
var (
ErrNotSupported = internal.ErrNotSupported
ErrNotFound = xerrors.New("not found")
ErrNotSupported = internal.ErrNotSupported
ErrNotFound = errors.New("not found")
ErrNoExtendedInfo = errors.New("no extended info")
)
// Spec represents decoded BTF.
@ -76,7 +77,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
}
if sec.Size > math.MaxUint32 {
return nil, xerrors.Errorf("section %s exceeds maximum size", sec.Name)
return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
}
sectionSizes[sec.Name] = uint32(sec.Size)
@ -89,7 +90,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
symbols, err := file.Symbols()
if err != nil {
return nil, xerrors.Errorf("can't read symbols: %v", err)
return nil, fmt.Errorf("can't read symbols: %v", err)
}
variableOffsets := make(map[variable]uint32)
@ -105,7 +106,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
}
if symbol.Value > math.MaxUint32 {
return nil, xerrors.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name)
return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name)
}
variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
@ -122,7 +123,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
spec.funcInfos, spec.lineInfos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings)
if err != nil {
return nil, xerrors.Errorf("can't read ext info: %w", err)
return nil, fmt.Errorf("can't read ext info: %w", err)
}
return spec, nil
@ -177,10 +178,10 @@ func LoadKernelSpec() (*Spec, error) {
func loadKernelSpec() (*Spec, error) {
fh, err := os.Open("/sys/kernel/btf/vmlinux")
if os.IsNotExist(err) {
return nil, xerrors.Errorf("can't open kernel BTF at /sys/kernel/btf/vmlinux: %w", ErrNotFound)
return nil, fmt.Errorf("can't open kernel BTF at /sys/kernel/btf/vmlinux: %w", ErrNotFound)
}
if err != nil {
return nil, xerrors.Errorf("can't read kernel BTF: %s", err)
return nil, fmt.Errorf("can't read kernel BTF: %s", err)
}
defer fh.Close()
@ -190,53 +191,53 @@ func loadKernelSpec() (*Spec, error) {
func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) ([]rawType, stringTable, error) {
rawBTF, err := ioutil.ReadAll(btf)
if err != nil {
return nil, nil, xerrors.Errorf("can't read BTF: %v", err)
return nil, nil, fmt.Errorf("can't read BTF: %v", err)
}
rd := bytes.NewReader(rawBTF)
var header btfHeader
if err := binary.Read(rd, bo, &header); err != nil {
return nil, nil, xerrors.Errorf("can't read header: %v", err)
return nil, nil, fmt.Errorf("can't read header: %v", err)
}
if header.Magic != btfMagic {
return nil, nil, xerrors.Errorf("incorrect magic value %v", header.Magic)
return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
}
if header.Version != 1 {
return nil, nil, xerrors.Errorf("unexpected version %v", header.Version)
return nil, nil, fmt.Errorf("unexpected version %v", header.Version)
}
if header.Flags != 0 {
return nil, nil, xerrors.Errorf("unsupported flags %v", header.Flags)
return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
}
remainder := int64(header.HdrLen) - int64(binary.Size(&header))
if remainder < 0 {
return nil, nil, xerrors.New("header is too short")
return nil, nil, errors.New("header is too short")
}
if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil {
return nil, nil, xerrors.Errorf("header padding: %v", err)
return nil, nil, fmt.Errorf("header padding: %v", err)
}
if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil {
return nil, nil, xerrors.Errorf("can't seek to start of string section: %v", err)
return nil, nil, fmt.Errorf("can't seek to start of string section: %v", err)
}
rawStrings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen)))
if err != nil {
return nil, nil, xerrors.Errorf("can't read type names: %w", err)
return nil, nil, fmt.Errorf("can't read type names: %w", err)
}
if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil {
return nil, nil, xerrors.Errorf("can't seek to start of type section: %v", err)
return nil, nil, fmt.Errorf("can't seek to start of type section: %v", err)
}
rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo)
if err != nil {
return nil, nil, xerrors.Errorf("can't read types: %w", err)
return nil, nil, fmt.Errorf("can't read types: %w", err)
}
return rawTypes, rawStrings, nil
@ -258,9 +259,13 @@ func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[s
return err
}
if name == ".kconfig" || name == ".ksym" {
return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
}
size, ok := sectionSizes[name]
if !ok {
return xerrors.Errorf("data section %s: missing size", name)
return fmt.Errorf("data section %s: missing size", name)
}
rawTypes[i].SizeType = size
@ -269,17 +274,17 @@ func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[s
for j, secInfo := range secinfos {
id := int(secInfo.Type - 1)
if id >= len(rawTypes) {
return xerrors.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
}
varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
if err != nil {
return xerrors.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
}
offset, ok := variableOffsets[variable{name, varName}]
if !ok {
return xerrors.Errorf("data section %s: missing offset for variable %s", name, varName)
return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
}
secinfos[j].Offset = offset
@ -289,7 +294,12 @@ func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[s
return nil
}
func (s *Spec) marshal(bo binary.ByteOrder) ([]byte, error) {
type marshalOpts struct {
ByteOrder binary.ByteOrder
StripFuncLinkage bool
}
func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
var (
buf bytes.Buffer
header = new(btfHeader)
@ -301,9 +311,14 @@ func (s *Spec) marshal(bo binary.ByteOrder) ([]byte, error) {
_, _ = buf.Write(make([]byte, headerLen))
// Write type section, just after the header.
for _, typ := range s.rawTypes {
if err := typ.Marshal(&buf, bo); err != nil {
return nil, xerrors.Errorf("can't marshal BTF: %w", err)
for _, raw := range s.rawTypes {
switch {
case opts.StripFuncLinkage && raw.Kind() == kindFunc:
raw.SetLinkage(linkageStatic)
}
if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
return nil, fmt.Errorf("can't marshal BTF: %w", err)
}
}
@ -325,9 +340,9 @@ func (s *Spec) marshal(bo binary.ByteOrder) ([]byte, error) {
}
raw := buf.Bytes()
err := binary.Write(sliceWriter(raw[:headerLen]), bo, header)
err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
if err != nil {
return nil, xerrors.Errorf("can't write header: %v", err)
return nil, fmt.Errorf("can't write header: %v", err)
}
return raw, nil
@ -337,7 +352,7 @@ type sliceWriter []byte
func (sw sliceWriter) Write(p []byte) (int, error) {
if len(p) != len(sw) {
return 0, xerrors.New("size doesn't match")
return 0, errors.New("size doesn't match")
}
return copy(sw, p), nil
@ -347,17 +362,22 @@ func (sw sliceWriter) Write(p []byte) (int, error) {
//
// Length is the number of bytes in the raw BPF instruction stream.
//
// Returns an error if there is no BTF.
// Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't
// contain extended BTF info.
func (s *Spec) Program(name string, length uint64) (*Program, error) {
if length == 0 {
return nil, xerrors.New("length musn't be zero")
return nil, errors.New("length musn't be zero")
}
if s.funcInfos == nil && s.lineInfos == nil {
return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo)
}
funcInfos, funcOK := s.funcInfos[name]
lineInfos, lineOK := s.lineInfos[name]
if !funcOK && !lineOK {
return nil, xerrors.Errorf("no BTF for program %s", name)
return nil, fmt.Errorf("no extended BTF info for section %s", name)
}
return &Program{s, length, funcInfos, lineInfos}, nil
@ -374,7 +394,7 @@ func (s *Spec) Map(name string) (*Map, []Member, error) {
mapStruct, ok := mapVar.Type.(*Struct)
if !ok {
return nil, nil, xerrors.Errorf("expected struct, have %s", mapVar.Type)
return nil, nil, fmt.Errorf("expected struct, have %s", mapVar.Type)
}
var key, value Type
@ -389,11 +409,11 @@ func (s *Spec) Map(name string) (*Map, []Member, error) {
}
if key == nil {
return nil, nil, xerrors.Errorf("map %s: missing 'key' in type", name)
key = (*Void)(nil)
}
if value == nil {
return nil, nil, xerrors.Errorf("map %s: missing 'value' in type", name)
value = (*Void)(nil)
}
return &Map{s, key, value}, mapStruct.Members, nil
@ -403,7 +423,7 @@ func (s *Spec) Map(name string) (*Map, []Member, error) {
func (s *Spec) Datasec(name string) (*Map, error) {
var datasec Datasec
if err := s.FindType(name, &datasec); err != nil {
return nil, xerrors.Errorf("data section %s: can't get BTF: %w", name, err)
return nil, fmt.Errorf("data section %s: can't get BTF: %w", name, err)
}
return &Map{s, &Void{}, &datasec}, nil
@ -427,14 +447,14 @@ func (s *Spec) FindType(name string, typ Type) error {
}
if candidate != nil {
return xerrors.Errorf("type %s: multiple candidates for %T", name, typ)
return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
}
candidate = typ
}
if candidate == nil {
return xerrors.Errorf("type %s: %w", name, ErrNotFound)
return fmt.Errorf("type %s: %w", name, ErrNotFound)
}
value := reflect.Indirect(reflect.ValueOf(copyType(candidate)))
@ -456,16 +476,19 @@ func NewHandle(spec *Spec) (*Handle, error) {
}
if spec.byteOrder != internal.NativeEndian {
return nil, xerrors.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
}
btf, err := spec.marshal(internal.NativeEndian)
btf, err := spec.marshal(marshalOpts{
ByteOrder: internal.NativeEndian,
StripFuncLinkage: haveFuncLinkage() != nil,
})
if err != nil {
return nil, xerrors.Errorf("can't marshal BTF: %w", err)
return nil, fmt.Errorf("can't marshal BTF: %w", err)
}
if uint64(len(btf)) > math.MaxUint32 {
return nil, xerrors.New("BTF exceeds the maximum size")
return nil, errors.New("BTF exceeds the maximum size")
}
attr := &bpfLoadBTFAttr{
@ -549,12 +572,12 @@ func ProgramSpec(s *Program) *Spec {
func ProgramAppend(s, other *Program) error {
funcInfos, err := s.funcInfos.append(other.funcInfos, s.length)
if err != nil {
return xerrors.Errorf("func infos: %w", err)
return fmt.Errorf("func infos: %w", err)
}
lineInfos, err := s.lineInfos.append(other.lineInfos, s.length)
if err != nil {
return xerrors.Errorf("line infos: %w", err)
return fmt.Errorf("line infos: %w", err)
}
s.length += other.length
@ -608,26 +631,36 @@ func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) {
return internal.NewFD(uint32(fd)), nil
}
func minimalBTF(bo binary.ByteOrder) []byte {
func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
const minHeaderLength = 24
typesLen := uint32(binary.Size(types))
header := btfHeader{
Magic: btfMagic,
Version: 1,
HdrLen: minHeaderLength,
TypeOff: 0,
TypeLen: typesLen,
StringOff: typesLen,
StringLen: uint32(len(strings)),
}
buf := new(bytes.Buffer)
_ = binary.Write(buf, bo, &header)
_ = binary.Write(buf, bo, types)
buf.Write(strings)
return buf.Bytes()
}
var haveBTF = internal.FeatureTest("BTF", "5.1", func() (bool, error) {
var (
types struct {
Integer btfType
Var btfType
btfVar struct{ Linkage uint32 }
}
typLen = uint32(binary.Size(&types))
strings = []byte{0, 'a', 0}
header = btfHeader{
Magic: btfMagic,
Version: 1,
HdrLen: minHeaderLength,
TypeOff: 0,
TypeLen: typLen,
StringOff: typLen,
StringLen: uint32(len(strings)),
}
)
// We use a BTF_KIND_VAR here, to make sure that
@ -638,16 +671,8 @@ func minimalBTF(bo binary.ByteOrder) []byte {
types.Var.SetKind(kindVar)
types.Var.SizeType = 1
buf := new(bytes.Buffer)
_ = binary.Write(buf, bo, &header)
_ = binary.Write(buf, bo, &types)
buf.Write(strings)
btf := marshalBTF(&types, strings, internal.NativeEndian)
return buf.Bytes()
}
var haveBTF = internal.FeatureTest("BTF", "5.1", func() (bool, error) {
btf := minimalBTF(internal.NativeEndian)
fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
btf: internal.NewSlicePointer(btf),
btfSize: uint32(len(btf)),
@ -657,5 +682,35 @@ var haveBTF = internal.FeatureTest("BTF", "5.1", func() (bool, error) {
}
// Check for EINVAL specifically, rather than err != nil since we
// otherwise misdetect due to insufficient permissions.
return !xerrors.Is(err, unix.EINVAL), nil
return !errors.Is(err, unix.EINVAL), nil
})
var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() (bool, error) {
var (
types struct {
FuncProto btfType
Func btfType
}
strings = []byte{0, 'a', 0}
)
types.FuncProto.SetKind(kindFuncProto)
types.Func.SetKind(kindFunc)
types.Func.SizeType = 1 // aka FuncProto
types.Func.NameOff = 1
types.Func.SetLinkage(linkageGlobal)
btf := marshalBTF(&types, strings, internal.NativeEndian)
fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
btf: internal.NewSlicePointer(btf),
btfSize: uint32(len(btf)),
})
if err == nil {
fd.Close()
}
// Check for EINVAL specifically, rather than err != nil since we
// otherwise misdetect due to insufficient permissions.
return !errors.Is(err, unix.EINVAL), nil
})


@ -4,8 +4,6 @@ import (
"encoding/binary"
"fmt"
"io"
"golang.org/x/xerrors"
)
// btfKind describes a Type.
@ -33,6 +31,14 @@ const (
kindDatasec
)
type btfFuncLinkage uint8
const (
linkageStatic btfFuncLinkage = iota
linkageGlobal
linkageExtern
)
const (
btfTypeKindShift = 24
btfTypeKindLen = 4
@ -44,7 +50,7 @@ const (
type btfType struct {
NameOff uint32
/* "info" bits arrangement
* bits 0-15: vlen (e.g. # of struct's members)
* bits 0-15: vlen (e.g. # of struct's members), linkage
* bits 16-23: unused
* bits 24-27: kind (e.g. int, ptr, array...etc)
* bits 28-30: unused
@ -130,6 +136,14 @@ func (bt *btfType) SetVlen(vlen int) {
bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
}
func (bt *btfType) Linkage() btfFuncLinkage {
return btfFuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}
func (bt *btfType) SetLinkage(linkage btfFuncLinkage) {
bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift)
}
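As a small worked illustration of the "info" layout above, assuming the standard BTF numbering where FUNC is kind 12: a global function packs its kind into bits 24-27 and its linkage into the vlen bits, so the packed info word comes out as 12<<24 | 1.

```
// Illustrative only: how kind and linkage share one info word.
var fn btfType
fn.SetKind(kindFunc)         // bits 24-27: 12 << 24
fn.SetLinkage(linkageGlobal) // bits 0-15 (shared with vlen): 1
// packed info word: 12<<24 | 1 == 0x0C000001
```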
func (bt *btfType) Type() TypeID {
// TODO: Panic here if wrong kind?
return TypeID(bt.SizeType)
@ -199,7 +213,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
if err := binary.Read(r, bo, &header); err == io.EOF {
return types, nil
} else if err != nil {
return nil, xerrors.Errorf("can't read type info for id %v: %v", id, err)
return nil, fmt.Errorf("can't read type info for id %v: %v", id, err)
}
var data interface{}
@ -228,7 +242,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
case kindDatasec:
data = make([]btfVarSecinfo, header.Vlen())
default:
return nil, xerrors.Errorf("type id %v: unknown kind: %v", id, header.Kind())
return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind())
}
if data == nil {
@ -237,7 +251,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
}
if err := binary.Read(r, bo, data); err != nil {
return nil, xerrors.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err)
return nil, fmt.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err)
}
types = append(types, rawType{header, data})

View File

@ -3,13 +3,13 @@ package btf
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
"golang.org/x/xerrors"
)
type btfExtHeader struct {
@ -27,49 +27,49 @@ type btfExtHeader struct {
func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, err error) {
var header btfExtHeader
if err := binary.Read(r, bo, &header); err != nil {
return nil, nil, xerrors.Errorf("can't read header: %v", err)
return nil, nil, fmt.Errorf("can't read header: %v", err)
}
if header.Magic != btfMagic {
return nil, nil, xerrors.Errorf("incorrect magic value %v", header.Magic)
return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
}
if header.Version != 1 {
return nil, nil, xerrors.Errorf("unexpected version %v", header.Version)
return nil, nil, fmt.Errorf("unexpected version %v", header.Version)
}
if header.Flags != 0 {
return nil, nil, xerrors.Errorf("unsupported flags %v", header.Flags)
return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
}
remainder := int64(header.HdrLen) - int64(binary.Size(&header))
if remainder < 0 {
return nil, nil, xerrors.New("header is too short")
return nil, nil, errors.New("header is too short")
}
// Of course, the .BTF.ext header has different semantics than the
// .BTF ext header. We need to ignore non-null values.
_, err = io.CopyN(ioutil.Discard, r, remainder)
if err != nil {
return nil, nil, xerrors.Errorf("header padding: %v", err)
return nil, nil, fmt.Errorf("header padding: %v", err)
}
if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil {
return nil, nil, xerrors.Errorf("can't seek to function info section: %v", err)
return nil, nil, fmt.Errorf("can't seek to function info section: %v", err)
}
funcInfo, err = parseExtInfo(io.LimitReader(r, int64(header.FuncInfoLen)), bo, strings)
if err != nil {
return nil, nil, xerrors.Errorf("function info: %w", err)
return nil, nil, fmt.Errorf("function info: %w", err)
}
if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil {
return nil, nil, xerrors.Errorf("can't seek to line info section: %v", err)
return nil, nil, fmt.Errorf("can't seek to line info section: %v", err)
}
lineInfo, err = parseExtInfo(io.LimitReader(r, int64(header.LineInfoLen)), bo, strings)
if err != nil {
return nil, nil, xerrors.Errorf("line info: %w", err)
return nil, nil, fmt.Errorf("line info: %w", err)
}
return funcInfo, lineInfo, nil
@ -92,7 +92,7 @@ type extInfo struct {
func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) {
if other.recordSize != ei.recordSize {
return extInfo{}, xerrors.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize)
return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize)
}
records := make([]extInfoRecord, 0, len(ei.records)+len(other.records))
@ -117,7 +117,7 @@ func (ei extInfo) MarshalBinary() ([]byte, error) {
// while the ELF tracks it in bytes.
insnOff := uint32(info.InsnOff / asm.InstructionSize)
if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil {
return nil, xerrors.Errorf("can't write instruction offset: %v", err)
return nil, fmt.Errorf("can't write instruction offset: %v", err)
}
buf.Write(info.Opaque)
@ -129,12 +129,12 @@ func (ei extInfo) MarshalBinary() ([]byte, error) {
func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) {
var recordSize uint32
if err := binary.Read(r, bo, &recordSize); err != nil {
return nil, xerrors.Errorf("can't read record size: %v", err)
return nil, fmt.Errorf("can't read record size: %v", err)
}
if recordSize < 4 {
// Need at least insnOff
return nil, xerrors.New("record size too short")
return nil, errors.New("record size too short")
}
result := make(map[string]extInfo)
@ -143,32 +143,32 @@ func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[st
if err := binary.Read(r, bo, &infoHeader); err == io.EOF {
return result, nil
} else if err != nil {
return nil, xerrors.Errorf("can't read ext info header: %v", err)
return nil, fmt.Errorf("can't read ext info header: %v", err)
}
secName, err := strings.Lookup(infoHeader.SecNameOff)
if err != nil {
return nil, xerrors.Errorf("can't get section name: %w", err)
return nil, fmt.Errorf("can't get section name: %w", err)
}
if infoHeader.NumInfo == 0 {
return nil, xerrors.Errorf("section %s has invalid number of records", secName)
return nil, fmt.Errorf("section %s has invalid number of records", secName)
}
var records []extInfoRecord
for i := uint32(0); i < infoHeader.NumInfo; i++ {
var byteOff uint32
if err := binary.Read(r, bo, &byteOff); err != nil {
return nil, xerrors.Errorf("section %v: can't read extended info offset: %v", secName, err)
return nil, fmt.Errorf("section %v: can't read extended info offset: %v", secName, err)
}
buf := make([]byte, int(recordSize-4))
if _, err := io.ReadFull(r, buf); err != nil {
return nil, xerrors.Errorf("section %v: can't read record: %v", secName, err)
return nil, fmt.Errorf("section %v: can't read record: %v", secName, err)
}
if byteOff%asm.InstructionSize != 0 {
return nil, xerrors.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff)
return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff)
}
records = append(records, extInfoRecord{uint64(byteOff), buf})

View File

@ -2,10 +2,10 @@ package btf
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"golang.org/x/xerrors"
)
type stringTable []byte
@ -13,19 +13,19 @@ type stringTable []byte
func readStringTable(r io.Reader) (stringTable, error) {
contents, err := ioutil.ReadAll(r)
if err != nil {
return nil, xerrors.Errorf("can't read string table: %v", err)
return nil, fmt.Errorf("can't read string table: %v", err)
}
if len(contents) < 1 {
return nil, xerrors.New("string table is empty")
return nil, errors.New("string table is empty")
}
if contents[0] != '\x00' {
return nil, xerrors.New("first item in string table is non-empty")
return nil, errors.New("first item in string table is non-empty")
}
if contents[len(contents)-1] != '\x00' {
return nil, xerrors.New("string table isn't null terminated")
return nil, errors.New("string table isn't null terminated")
}
return stringTable(contents), nil
@ -33,22 +33,22 @@ func readStringTable(r io.Reader) (stringTable, error) {
func (st stringTable) Lookup(offset uint32) (string, error) {
if int64(offset) > int64(^uint(0)>>1) {
return "", xerrors.Errorf("offset %d overflows int", offset)
return "", fmt.Errorf("offset %d overflows int", offset)
}
pos := int(offset)
if pos >= len(st) {
return "", xerrors.Errorf("offset %d is out of bounds", offset)
return "", fmt.Errorf("offset %d is out of bounds", offset)
}
if pos > 0 && st[pos-1] != '\x00' {
return "", xerrors.Errorf("offset %d isn't start of a string", offset)
return "", fmt.Errorf("offset %d isn't start of a string", offset)
}
str := st[pos:]
end := bytes.IndexByte(str, '\x00')
if end == -1 {
return "", xerrors.Errorf("offset %d isn't null terminated", offset)
return "", fmt.Errorf("offset %d isn't null terminated", offset)
}
return string(str[:end]), nil

View File

@ -1,9 +1,9 @@
package btf
import (
"errors"
"fmt"
"math"
"golang.org/x/xerrors"
)
const maxTypeDepth = 32
@ -38,9 +38,10 @@ func (n Name) name() string {
// Void is the unit type of BTF.
type Void struct{}
func (v Void) ID() TypeID { return 0 }
func (v Void) copy() Type { return Void{} }
func (v Void) walk(*copyStack) {}
func (v *Void) ID() TypeID { return 0 }
func (v *Void) size() uint32 { return 0 }
func (v *Void) copy() Type { return (*Void)(nil) }
func (v *Void) walk(*copyStack) {}
// Int is an integer of a given length.
type Int struct {
@ -310,7 +311,7 @@ func Sizeof(typ Type) (int, error) {
switch v := typ.(type) {
case *Array:
if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
return 0, xerrors.New("overflow")
return 0, errors.New("overflow")
}
// Arrays may be of zero length, which allows
@ -336,22 +337,22 @@ func Sizeof(typ Type) (int, error) {
continue
default:
return 0, xerrors.Errorf("unrecognized type %T", typ)
return 0, fmt.Errorf("unrecognized type %T", typ)
}
if n > 0 && elem > math.MaxInt64/n {
return 0, xerrors.New("overflow")
return 0, errors.New("overflow")
}
size := n * elem
if int64(int(size)) != size {
return 0, xerrors.New("overflow")
return 0, errors.New("overflow")
}
return int(size), nil
}
return 0, xerrors.New("exceeded type depth")
return 0, errors.New("exceeded type depth")
}
// copy a Type recursively.
@ -433,7 +434,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
for i, btfMember := range raw {
name, err := rawStrings.LookupName(btfMember.NameOff)
if err != nil {
return nil, xerrors.Errorf("can't get name for member %d: %w", i, err)
return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
}
members = append(members, Member{
Name: name,
@ -447,7 +448,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
}
types := make([]Type, 0, len(rawTypes))
types = append(types, Void{})
types = append(types, (*Void)(nil))
namedTypes = make(map[string][]Type)
for i, raw := range rawTypes {
@ -460,7 +461,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
name, err := rawStrings.LookupName(raw.NameOff)
if err != nil {
return nil, xerrors.Errorf("can't get name for type id %d: %w", id, err)
return nil, fmt.Errorf("can't get name for type id %d: %w", id, err)
}
switch raw.Kind() {
@ -484,14 +485,14 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
case kindStruct:
members, err := convertMembers(raw.data.([]btfMember))
if err != nil {
return nil, xerrors.Errorf("struct %s (id %d): %w", name, id, err)
return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
}
typ = &Struct{id, name, raw.Size(), members}
case kindUnion:
members, err := convertMembers(raw.data.([]btfMember))
if err != nil {
return nil, xerrors.Errorf("union %s (id %d): %w", name, id, err)
return nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
}
typ = &Union{id, name, raw.Size(), members}
@ -551,7 +552,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
typ = &Datasec{id, name, raw.SizeType, vars}
default:
return nil, xerrors.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
return nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
}
types = append(types, typ)
@ -566,7 +567,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
for _, fixup := range fixups {
i := int(fixup.id)
if i >= len(types) {
return nil, xerrors.Errorf("reference to invalid type id: %d", fixup.id)
return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
}
// Default void (id 0) to unknown
@ -576,7 +577,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
}
if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected {
return nil, xerrors.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind)
return nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind)
}
*fixup.typ = types[i]

View File

@ -2,11 +2,11 @@ package internal
import (
"bytes"
"errors"
"fmt"
"strings"
"github.com/cilium/ebpf/internal/unix"
"golang.org/x/xerrors"
)
// ErrorWithLog returns an error that includes logs from the
@ -16,7 +16,7 @@ import (
// the log. It is used to check for truncation of the output.
func ErrorWithLog(err error, log []byte, logErr error) error {
logStr := strings.Trim(CString(log), "\t\r\n ")
if xerrors.Is(logErr, unix.ENOSPC) {
if errors.Is(logErr, unix.ENOSPC) {
logStr += " (truncated...)"
}

View File

@ -1,16 +1,16 @@
package internal
import (
"errors"
"fmt"
"os"
"runtime"
"strconv"
"github.com/cilium/ebpf/internal/unix"
"golang.org/x/xerrors"
)
var ErrClosedFd = xerrors.New("use of closed file descriptor")
var ErrClosedFd = errors.New("use of closed file descriptor")
type FD struct {
raw int64
@ -57,7 +57,7 @@ func (fd *FD) Dup() (*FD, error) {
dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0)
if err != nil {
return nil, xerrors.Errorf("can't dup fd: %v", err)
return nil, fmt.Errorf("can't dup fd: %v", err)
}
return NewFD(uint32(dup)), nil

View File

@ -1,14 +1,13 @@
package internal
import (
"errors"
"fmt"
"sync"
"golang.org/x/xerrors"
)
// ErrNotSupported indicates that a feature is not supported by the current kernel.
var ErrNotSupported = xerrors.New("not supported")
var ErrNotSupported = errors.New("not supported")
// UnsupportedFeatureError is returned by FeatureTest() functions.
type UnsupportedFeatureError struct {
@ -67,7 +66,7 @@ func FeatureTest(name, version string, fn FeatureTestFn) func() error {
}
available, err := fn()
if xerrors.Is(err, ErrNotSupported) {
if errors.Is(err, ErrNotSupported) {
// The feature test aborted because a dependent feature
// is missing, which we should cache.
available = false
@ -75,7 +74,7 @@ func FeatureTest(name, version string, fn FeatureTestFn) func() error {
// We couldn't execute the feature test to a point
// where it could make a determination.
// Don't cache the result, just return it.
return xerrors.Errorf("can't detect support for %s: %w", name, err)
return fmt.Errorf("can't detect support for %s: %w", name, err)
}
ft.successful = true
@ -99,7 +98,7 @@ func NewVersion(ver string) (Version, error) {
var major, minor, patch uint16
n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch)
if n < 2 {
return Version{}, xerrors.Errorf("invalid version: %s", ver)
return Version{}, fmt.Errorf("invalid version: %s", ver)
}
return Version{major, minor, patch}, nil
}
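Packages elsewhere in the module gate on the error returned by such a probe, which wraps ErrNotSupported when the kernel is too old. A hypothetical consumer (the feature name and probe body are invented) might look like:

```
var haveSomeFeature = internal.FeatureTest("some feature", "5.4", func() (bool, error) {
	// Probe the kernel here; returning an error that wraps
	// internal.ErrNotSupported also marks the feature as missing.
	return false, nil
})

func requireSomeFeature() error {
	if err := haveSomeFeature(); err != nil {
		// The error can be matched with errors.Is(err, internal.ErrNotSupported).
		return fmt.Errorf("some feature: %w", err)
	}
	return nil
}
```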

View File

@ -1,6 +1,6 @@
package internal
import "golang.org/x/xerrors"
import "errors"
// DiscardZeroes makes sure that all written bytes are zero
// before discarding them.
@ -9,7 +9,7 @@ type DiscardZeroes struct{}
func (DiscardZeroes) Write(p []byte) (int, error) {
for _, b := range p {
if b != 0 {
return 0, xerrors.New("encountered non-zero byte")
return 0, errors.New("encountered non-zero byte")
}
}
return len(p), nil

View File

@ -1,12 +1,12 @@
package internal
import (
"fmt"
"path/filepath"
"runtime"
"unsafe"
"github.com/cilium/ebpf/internal/unix"
"golang.org/x/xerrors"
)
//go:generate stringer -output syscall_string.go -type=BPFCmd
@ -107,7 +107,7 @@ func BPFObjPin(fileName string, fd *FD) error {
return err
}
if uint64(statfs.Type) != bpfFSType {
return xerrors.Errorf("%s is not on a bpf filesystem", fileName)
return fmt.Errorf("%s is not on a bpf filesystem", fileName)
}
value, err := fd.Value()
@ -121,7 +121,7 @@ func BPFObjPin(fileName string, fd *FD) error {
}
_, err = BPF(BPF_OBJ_PIN, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
if err != nil {
return xerrors.Errorf("pin object %s: %w", fileName, err)
return fmt.Errorf("pin object %s: %w", fileName, err)
}
return nil
}
@ -133,7 +133,7 @@ func BPFObjGet(fileName string) (*FD, error) {
}
ptr, err := BPF(BPF_OBJ_GET, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
if err != nil {
return nil, xerrors.Errorf("get object %s: %w", fileName, err)
return nil, fmt.Errorf("get object %s: %w", fileName, err)
}
return NewFD(uint32(ptr)), nil
}

View File

@ -1,10 +1,10 @@
package ebpf
import (
"fmt"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal/btf"
"golang.org/x/xerrors"
)
// link resolves bpf-to-bpf calls.
@ -28,7 +28,7 @@ func link(prog *ProgramSpec, libs []*ProgramSpec) error {
needed, err := needSection(insns, lib.Instructions)
if err != nil {
return xerrors.Errorf("linking %s: %w", lib.Name, err)
return fmt.Errorf("linking %s: %w", lib.Name, err)
}
if !needed {
@ -41,7 +41,7 @@ func link(prog *ProgramSpec, libs []*ProgramSpec) error {
if prog.BTF != nil && lib.BTF != nil {
if err := btf.ProgramAppend(prog.BTF, lib.BTF); err != nil {
return xerrors.Errorf("linking BTF of %s: %w", lib.Name, err)
return fmt.Errorf("linking BTF of %s: %w", lib.Name, err)
}
}
}

95
vendor/github.com/cilium/ebpf/map.go generated vendored
View File

@ -1,21 +1,20 @@
package ebpf
import (
"errors"
"fmt"
"strings"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/unix"
"golang.org/x/xerrors"
)
// Errors returned by Map and MapIterator methods.
var (
ErrKeyNotExist = xerrors.New("key does not exist")
ErrKeyExist = xerrors.New("key already exists")
ErrIterationAborted = xerrors.New("iteration aborted")
ErrKeyNotExist = errors.New("key does not exist")
ErrKeyExist = errors.New("key already exists")
ErrIterationAborted = errors.New("iteration aborted")
)
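Now that errors are wrapped with %w throughout the package, these sentinels are matched with errors.Is. A sketch of a caller distinguishing a missing key from a real failure (the map, the key type and the usual imports are assumed):

```
func lookupCounter(m *ebpf.Map, key uint32) (uint64, error) {
	var value uint64
	if err := m.Lookup(key, &value); err != nil {
		if errors.Is(err, ebpf.ErrKeyNotExist) {
			// Absent key: report zero instead of failing.
			return 0, nil
		}
		return 0, fmt.Errorf("lookup %d: %w", key, err)
	}
	return value, nil
}
```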
// MapID represents the unique ID of an eBPF map
@ -92,7 +91,7 @@ type Map struct {
// You should not use fd after calling this function.
func NewMapFromFD(fd int) (*Map, error) {
if fd < 0 {
return nil, xerrors.New("invalid fd")
return nil, errors.New("invalid fd")
}
bpfFd := internal.NewFD(uint32(fd))
@ -118,8 +117,8 @@ func NewMap(spec *MapSpec) (*Map, error) {
}
handle, err := btf.NewHandle(btf.MapSpec(spec.BTF))
if err != nil && !xerrors.Is(err, btf.ErrNotSupported) {
return nil, xerrors.Errorf("can't load BTF: %w", err)
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
return nil, fmt.Errorf("can't load BTF: %w", err)
}
return newMapWithBTF(spec, handle)
@ -131,7 +130,7 @@ func newMapWithBTF(spec *MapSpec, handle *btf.Handle) (*Map, error) {
}
if spec.InnerMap == nil {
return nil, xerrors.Errorf("%s requires InnerMap", spec.Type)
return nil, fmt.Errorf("%s requires InnerMap", spec.Type)
}
template, err := createMap(spec.InnerMap, nil, handle)
@ -155,25 +154,25 @@ func createMap(spec *MapSpec, inner *internal.FD, handle *btf.Handle) (*Map, err
}
if abi.ValueSize != 0 && abi.ValueSize != 4 {
return nil, xerrors.New("ValueSize must be zero or four for map of map")
return nil, errors.New("ValueSize must be zero or four for map of map")
}
abi.ValueSize = 4
case PerfEventArray:
if abi.KeySize != 0 && abi.KeySize != 4 {
return nil, xerrors.New("KeySize must be zero or four for perf event array")
return nil, errors.New("KeySize must be zero or four for perf event array")
}
abi.KeySize = 4
if abi.ValueSize != 0 && abi.ValueSize != 4 {
return nil, xerrors.New("ValueSize must be zero or four for perf event array")
return nil, errors.New("ValueSize must be zero or four for perf event array")
}
abi.ValueSize = 4
if abi.MaxEntries == 0 {
n, err := internal.PossibleCPUs()
if err != nil {
return nil, xerrors.Errorf("perf event array: %w", err)
return nil, fmt.Errorf("perf event array: %w", err)
}
abi.MaxEntries = uint32(n)
}
@ -181,7 +180,7 @@ func createMap(spec *MapSpec, inner *internal.FD, handle *btf.Handle) (*Map, err
if abi.Flags&(unix.BPF_F_RDONLY_PROG|unix.BPF_F_WRONLY_PROG) > 0 || spec.Freeze {
if err := haveMapMutabilityModifiers(); err != nil {
return nil, xerrors.Errorf("map create: %w", err)
return nil, fmt.Errorf("map create: %w", err)
}
}
@ -197,7 +196,7 @@ func createMap(spec *MapSpec, inner *internal.FD, handle *btf.Handle) (*Map, err
var err error
attr.innerMapFd, err = inner.Value()
if err != nil {
return nil, xerrors.Errorf("map create: %w", err)
return nil, fmt.Errorf("map create: %w", err)
}
}
@ -213,7 +212,7 @@ func createMap(spec *MapSpec, inner *internal.FD, handle *btf.Handle) (*Map, err
fd, err := bpfMapCreate(&attr)
if err != nil {
return nil, xerrors.Errorf("map create: %w", err)
return nil, fmt.Errorf("map create: %w", err)
}
m, err := newMap(fd, spec.Name, abi)
@ -223,13 +222,13 @@ func createMap(spec *MapSpec, inner *internal.FD, handle *btf.Handle) (*Map, err
if err := m.populate(spec.Contents); err != nil {
m.Close()
return nil, xerrors.Errorf("map create: can't set initial contents: %w", err)
return nil, fmt.Errorf("map create: can't set initial contents: %w", err)
}
if spec.Freeze {
if err := m.Freeze(); err != nil {
m.Close()
return nil, xerrors.Errorf("can't freeze map: %w", err)
return nil, fmt.Errorf("can't freeze map: %w", err)
}
}
@ -301,9 +300,9 @@ func (m *Map) Lookup(key, valueOut interface{}) error {
*value = m
return nil
case *Map:
return xerrors.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil))
return fmt.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil))
case Map:
return xerrors.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil))
return fmt.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil))
case **Program:
p, err := unmarshalProgram(valueBytes)
@ -315,9 +314,9 @@ func (m *Map) Lookup(key, valueOut interface{}) error {
*value = p
return nil
case *Program:
return xerrors.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil))
return fmt.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil))
case Program:
return xerrors.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil))
return fmt.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil))
default:
return unmarshalBytes(valueOut, valueBytes)
@ -332,11 +331,11 @@ func (m *Map) LookupAndDelete(key, valueOut interface{}) error {
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
if err != nil {
return xerrors.Errorf("can't marshal key: %w", err)
return fmt.Errorf("can't marshal key: %w", err)
}
if err := bpfMapLookupAndDelete(m.fd, keyPtr, valuePtr); err != nil {
return xerrors.Errorf("lookup and delete failed: %w", err)
return fmt.Errorf("lookup and delete failed: %w", err)
}
return unmarshalBytes(valueOut, valueBytes)
@ -350,7 +349,7 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
valuePtr := internal.NewSlicePointer(valueBytes)
err := m.lookup(key, valuePtr)
if xerrors.Is(err, ErrKeyNotExist) {
if errors.Is(err, ErrKeyNotExist) {
return nil, nil
}
@ -360,11 +359,11 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
func (m *Map) lookup(key interface{}, valueOut internal.Pointer) error {
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
if err != nil {
return xerrors.Errorf("can't marshal key: %w", err)
return fmt.Errorf("can't marshal key: %w", err)
}
if err = bpfMapLookupElem(m.fd, keyPtr, valueOut); err != nil {
return xerrors.Errorf("lookup failed: %w", err)
return fmt.Errorf("lookup failed: %w", err)
}
return nil
}
@ -394,7 +393,7 @@ func (m *Map) Put(key, value interface{}) error {
func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error {
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
if err != nil {
return xerrors.Errorf("can't marshal key: %w", err)
return fmt.Errorf("can't marshal key: %w", err)
}
var valuePtr internal.Pointer
@ -404,11 +403,11 @@ func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error {
valuePtr, err = marshalPtr(value, int(m.abi.ValueSize))
}
if err != nil {
return xerrors.Errorf("can't marshal value: %w", err)
return fmt.Errorf("can't marshal value: %w", err)
}
if err = bpfMapUpdateElem(m.fd, keyPtr, valuePtr, uint64(flags)); err != nil {
return xerrors.Errorf("update failed: %w", err)
return fmt.Errorf("update failed: %w", err)
}
return nil
@ -420,11 +419,11 @@ func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error {
func (m *Map) Delete(key interface{}) error {
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
if err != nil {
return xerrors.Errorf("can't marshal key: %w", err)
return fmt.Errorf("can't marshal key: %w", err)
}
if err = bpfMapDeleteElem(m.fd, keyPtr); err != nil {
return xerrors.Errorf("delete failed: %w", err)
return fmt.Errorf("delete failed: %w", err)
}
return nil
}
@ -446,7 +445,7 @@ func (m *Map) NextKey(key, nextKeyOut interface{}) error {
}
if err := unmarshalBytes(nextKeyOut, nextKeyBytes); err != nil {
return xerrors.Errorf("can't unmarshal next key: %w", err)
return fmt.Errorf("can't unmarshal next key: %w", err)
}
return nil
}
@ -463,7 +462,7 @@ func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) {
nextKeyPtr := internal.NewSlicePointer(nextKey)
err := m.nextKey(key, nextKeyPtr)
if xerrors.Is(err, ErrKeyNotExist) {
if errors.Is(err, ErrKeyNotExist) {
return nil, nil
}
@ -479,12 +478,12 @@ func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error {
if key != nil {
keyPtr, err = marshalPtr(key, int(m.abi.KeySize))
if err != nil {
return xerrors.Errorf("can't marshal key: %w", err)
return fmt.Errorf("can't marshal key: %w", err)
}
}
if err = bpfMapGetNextKey(m.fd, keyPtr, nextKeyOut); err != nil {
return xerrors.Errorf("next key failed: %w", err)
return fmt.Errorf("next key failed: %w", err)
}
return nil
}
@ -537,7 +536,7 @@ func (m *Map) Clone() (*Map, error) {
dup, err := m.fd.Dup()
if err != nil {
return nil, xerrors.Errorf("can't clone map: %w", err)
return nil, fmt.Errorf("can't clone map: %w", err)
}
return newMap(dup, m.name, &m.abi)
@ -555,11 +554,11 @@ func (m *Map) Pin(fileName string) error {
// It makes no changes to kernel-side restrictions.
func (m *Map) Freeze() error {
if err := haveMapMutabilityModifiers(); err != nil {
return xerrors.Errorf("can't freeze map: %w", err)
return fmt.Errorf("can't freeze map: %w", err)
}
if err := bpfMapFreeze(m.fd); err != nil {
return xerrors.Errorf("can't freeze map: %w", err)
return fmt.Errorf("can't freeze map: %w", err)
}
return nil
}
@ -567,7 +566,7 @@ func (m *Map) Freeze() error {
func (m *Map) populate(contents []MapKV) error {
for _, kv := range contents {
if err := m.Put(kv.Key, kv.Value); err != nil {
return xerrors.Errorf("key %v: %w", kv.Key, err)
return fmt.Errorf("key %v: %w", kv.Key, err)
}
}
return nil
@ -601,7 +600,7 @@ func LoadPinnedMapExplicit(fileName string, abi *MapABI) (*Map, error) {
func unmarshalMap(buf []byte) (*Map, error) {
if len(buf) != 4 {
return nil, xerrors.New("map id requires 4 byte value")
return nil, errors.New("map id requires 4 byte value")
}
// Looking up an entry in a nested map or prog array returns an id,
@ -626,12 +625,12 @@ func patchValue(value []byte, typ btf.Type, replacements map[string]interface{})
replaced := make(map[string]bool)
replace := func(name string, offset, size int, replacement interface{}) error {
if offset+size > len(value) {
return xerrors.Errorf("%s: offset %d(+%d) is out of bounds", name, offset, size)
return fmt.Errorf("%s: offset %d(+%d) is out of bounds", name, offset, size)
}
buf, err := marshalBytes(replacement, size)
if err != nil {
return xerrors.Errorf("marshal %s: %w", name, err)
return fmt.Errorf("marshal %s: %w", name, err)
}
copy(value[offset:offset+size], buf)
@ -655,7 +654,7 @@ func patchValue(value []byte, typ btf.Type, replacements map[string]interface{})
}
default:
return xerrors.Errorf("patching %T is not supported", typ)
return fmt.Errorf("patching %T is not supported", typ)
}
if len(replaced) == len(replacements) {
@ -670,10 +669,10 @@ func patchValue(value []byte, typ btf.Type, replacements map[string]interface{})
}
if len(missing) == 1 {
return xerrors.Errorf("unknown field: %s", missing[0])
return fmt.Errorf("unknown field: %s", missing[0])
}
return xerrors.Errorf("unknown fields: %s", strings.Join(missing, ","))
return fmt.Errorf("unknown fields: %s", strings.Join(missing, ","))
}
// MapIterator iterates a Map.
@ -731,7 +730,7 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
mi.prevKey = mi.prevBytes
mi.err = mi.target.Lookup(nextBytes, valueOut)
if xerrors.Is(mi.err, ErrKeyNotExist) {
if errors.Is(mi.err, ErrKeyNotExist) {
// Even though the key should be valid, we couldn't look up
// its value. If we're iterating a hash map this is probably
// because a concurrent delete removed the value before we
@ -750,7 +749,7 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
return mi.err == nil
}
mi.err = xerrors.Errorf("%w", ErrIterationAborted)
mi.err = fmt.Errorf("%w", ErrIterationAborted)
return false
}
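The caller-side loop usually looks like the following sketch (map and key/value types are illustrative, usual imports assumed):

```
func dumpMap(m *ebpf.Map) error {
	var (
		key   uint32
		value uint64
	)
	iter := m.Iterate()
	for iter.Next(&key, &value) {
		// key and value are overwritten on each successful Next.
		fmt.Printf("%d -> %d\n", key, value)
	}
	// Err reports lookup failures, including ErrIterationAborted when
	// too many keys changed while walking a hash map.
	return iter.Err()
}
```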

View File

@ -4,13 +4,13 @@ import (
"bytes"
"encoding"
"encoding/binary"
"errors"
"fmt"
"reflect"
"runtime"
"unsafe"
"github.com/cilium/ebpf/internal"
"golang.org/x/xerrors"
)
func marshalPtr(data interface{}, length int) (internal.Pointer, error) {
@ -18,7 +18,7 @@ func marshalPtr(data interface{}, length int) (internal.Pointer, error) {
if length == 0 {
return internal.NewPointer(nil), nil
}
return internal.Pointer{}, xerrors.New("can't use nil as key of map")
return internal.Pointer{}, errors.New("can't use nil as key of map")
}
if ptr, ok := data.(unsafe.Pointer); ok {
@ -42,12 +42,12 @@ func marshalBytes(data interface{}, length int) (buf []byte, err error) {
case []byte:
buf = value
case unsafe.Pointer:
err = xerrors.New("can't marshal from unsafe.Pointer")
err = errors.New("can't marshal from unsafe.Pointer")
default:
var wr bytes.Buffer
err = binary.Write(&wr, internal.NativeEndian, value)
if err != nil {
err = xerrors.Errorf("encoding %T: %v", value, err)
err = fmt.Errorf("encoding %T: %v", value, err)
}
buf = wr.Bytes()
}
@ -56,7 +56,7 @@ func marshalBytes(data interface{}, length int) (buf []byte, err error) {
}
if len(buf) != length {
return nil, xerrors.Errorf("%T doesn't marshal to %d bytes", data, length)
return nil, fmt.Errorf("%T doesn't marshal to %d bytes", data, length)
}
return buf, nil
}
@ -92,13 +92,13 @@ func unmarshalBytes(data interface{}, buf []byte) error {
*value = buf
return nil
case string:
return xerrors.New("require pointer to string")
return errors.New("require pointer to string")
case []byte:
return xerrors.New("require pointer to []byte")
return errors.New("require pointer to []byte")
default:
rd := bytes.NewReader(buf)
if err := binary.Read(rd, internal.NativeEndian, value); err != nil {
return xerrors.Errorf("decoding %T: %v", value, err)
return fmt.Errorf("decoding %T: %v", value, err)
}
return nil
}
@ -113,7 +113,7 @@ func unmarshalBytes(data interface{}, buf []byte) error {
func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, error) {
sliceType := reflect.TypeOf(slice)
if sliceType.Kind() != reflect.Slice {
return internal.Pointer{}, xerrors.New("per-CPU value requires slice")
return internal.Pointer{}, errors.New("per-CPU value requires slice")
}
possibleCPUs, err := internal.PossibleCPUs()
@ -124,7 +124,7 @@ func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, er
sliceValue := reflect.ValueOf(slice)
sliceLen := sliceValue.Len()
if sliceLen > possibleCPUs {
return internal.Pointer{}, xerrors.Errorf("per-CPU value exceeds number of CPUs")
return internal.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs")
}
alignedElemLength := align(elemLength, 8)
@ -151,7 +151,7 @@ func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, er
func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) error {
slicePtrType := reflect.TypeOf(slicePtr)
if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice {
return xerrors.Errorf("per-cpu value requires pointer to slice")
return fmt.Errorf("per-cpu value requires pointer to slice")
}
possibleCPUs, err := internal.PossibleCPUs()
@ -170,7 +170,7 @@ func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) erro
step := len(buf) / possibleCPUs
if step < elemLength {
return xerrors.Errorf("per-cpu element length is larger than available data")
return fmt.Errorf("per-cpu element length is larger than available data")
}
for i := 0; i < possibleCPUs; i++ {
var elem interface{}
@ -188,7 +188,7 @@ func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) erro
err := unmarshalBytes(elem, elemBytes)
if err != nil {
return xerrors.Errorf("cpu %d: %w", i, err)
return fmt.Errorf("cpu %d: %w", i, err)
}
buf = buf[step:]

View File

@ -3,6 +3,7 @@ package ebpf
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"math"
"strings"
@ -12,8 +13,6 @@ import (
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/unix"
"golang.org/x/xerrors"
)
// ErrNotSupported is returned whenever the kernel doesn't support a feature.
@ -120,8 +119,8 @@ func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er
}
handle, err := btf.NewHandle(btf.ProgramSpec(spec.BTF))
if err != nil && !xerrors.Is(err, btf.ErrNotSupported) {
return nil, xerrors.Errorf("can't load BTF: %w", err)
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
return nil, fmt.Errorf("can't load BTF: %w", err)
}
return newProgramWithBTF(spec, handle, opts)
@ -165,7 +164,7 @@ func newProgramWithBTF(spec *ProgramSpec, btf *btf.Handle, opts ProgramOptions)
}
err = internal.ErrorWithLog(err, logBuf, logErr)
return nil, xerrors.Errorf("can't load program: %w", err)
return nil, fmt.Errorf("can't load program: %w", err)
}
// NewProgramFromFD creates a program from a raw fd.
@ -175,7 +174,7 @@ func newProgramWithBTF(spec *ProgramSpec, btf *btf.Handle, opts ProgramOptions)
// Requires at least Linux 4.11.
func NewProgramFromFD(fd int) (*Program, error) {
if fd < 0 {
return nil, xerrors.New("invalid fd")
return nil, errors.New("invalid fd")
}
bpfFd := internal.NewFD(uint32(fd))
@ -198,15 +197,15 @@ func newProgram(fd *internal.FD, name string, abi *ProgramABI) *Program {
func convertProgramSpec(spec *ProgramSpec, handle *btf.Handle) (*bpfProgLoadAttr, error) {
if len(spec.Instructions) == 0 {
return nil, xerrors.New("Instructions cannot be empty")
return nil, errors.New("Instructions cannot be empty")
}
if len(spec.License) == 0 {
return nil, xerrors.New("License cannot be empty")
return nil, errors.New("License cannot be empty")
}
if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian {
return nil, xerrors.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian)
return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian)
}
buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize))
@ -235,7 +234,7 @@ func convertProgramSpec(spec *ProgramSpec, handle *btf.Handle) (*bpfProgLoadAttr
recSize, bytes, err := btf.ProgramLineInfos(spec.BTF)
if err != nil {
return nil, xerrors.Errorf("can't get BTF line infos: %w", err)
return nil, fmt.Errorf("can't get BTF line infos: %w", err)
}
attr.lineInfoRecSize = recSize
attr.lineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
@ -243,7 +242,7 @@ func convertProgramSpec(spec *ProgramSpec, handle *btf.Handle) (*bpfProgLoadAttr
recSize, bytes, err = btf.ProgramFuncInfos(spec.BTF)
if err != nil {
return nil, xerrors.Errorf("can't get BTF function infos: %w", err)
return nil, fmt.Errorf("can't get BTF function infos: %w", err)
}
attr.funcInfoRecSize = recSize
attr.funcInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
@ -301,7 +300,7 @@ func (p *Program) Clone() (*Program, error) {
dup, err := p.fd.Dup()
if err != nil {
return nil, xerrors.Errorf("can't clone program: %w", err)
return nil, fmt.Errorf("can't clone program: %w", err)
}
return newProgram(dup, p.name, &p.abi), nil
@ -312,7 +311,7 @@ func (p *Program) Clone() (*Program, error) {
// This requires bpffs to be mounted above fileName. See http://cilium.readthedocs.io/en/doc-1.0/kubernetes/install/#mounting-the-bpf-fs-optional
func (p *Program) Pin(fileName string) error {
if err := internal.BPFObjPin(fileName, p.fd); err != nil {
return xerrors.Errorf("can't pin program: %w", err)
return fmt.Errorf("can't pin program: %w", err)
}
return nil
}
@ -336,7 +335,7 @@ func (p *Program) Close() error {
func (p *Program) Test(in []byte) (uint32, []byte, error) {
ret, out, _, err := p.testRun(in, 1, nil)
if err != nil {
return ret, nil, xerrors.Errorf("can't test program: %w", err)
return ret, nil, fmt.Errorf("can't test program: %w", err)
}
return ret, out, nil
}
@ -355,7 +354,7 @@ func (p *Program) Test(in []byte) (uint32, []byte, error) {
func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) {
ret, _, total, err := p.testRun(in, repeat, reset)
if err != nil {
return ret, total, xerrors.Errorf("can't benchmark program: %w", err)
return ret, total, fmt.Errorf("can't benchmark program: %w", err)
}
return ret, total, nil
}
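A minimal usage sketch, assuming an already loaded program whose type accepts a 14-byte (Ethernet-header sized) input:

```
func smokeTest(prog *ebpf.Program) error {
	in := make([]byte, 14)

	ret, out, err := prog.Test(in)
	if err != nil {
		return fmt.Errorf("test run: %w", err)
	}
	fmt.Printf("return value %d, %d bytes of output\n", ret, len(out))
	return nil
}
```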
@ -387,7 +386,7 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() (
// Check for EINVAL specifically, rather than err != nil since we
// otherwise misdetect due to insufficient permissions.
return !xerrors.Is(err, unix.EINVAL), nil
return !errors.Is(err, unix.EINVAL), nil
})
func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte, time.Duration, error) {
@ -434,14 +433,14 @@ func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte,
break
}
if xerrors.Is(err, unix.EINTR) {
if errors.Is(err, unix.EINTR) {
if reset != nil {
reset()
}
continue
}
return 0, nil, 0, xerrors.Errorf("can't run test: %w", err)
return 0, nil, 0, fmt.Errorf("can't run test: %w", err)
}
if int(attr.dataSizeOut) > cap(out) {
@ -457,7 +456,7 @@ func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte,
func unmarshalProgram(buf []byte) (*Program, error) {
if len(buf) != 4 {
return nil, xerrors.New("program id requires 4 byte value")
return nil, errors.New("program id requires 4 byte value")
}
// Looking up an entry in a nested map or prog array returns an id,
@ -483,7 +482,7 @@ func (p *Program) MarshalBinary() ([]byte, error) {
// Deprecated: use link.RawAttachProgram instead.
func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error {
if fd < 0 {
return xerrors.New("invalid fd")
return errors.New("invalid fd")
}
pfd, err := p.fd.Value()
@ -506,11 +505,11 @@ func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error {
// Deprecated: use link.RawDetachProgram instead.
func (p *Program) Detach(fd int, typ AttachType, flags AttachFlags) error {
if fd < 0 {
return xerrors.New("invalid fd")
return errors.New("invalid fd")
}
if flags != 0 {
return xerrors.New("flags must be zero")
return errors.New("flags must be zero")
}
pfd, err := p.fd.Value()
@ -539,7 +538,7 @@ func LoadPinnedProgram(fileName string) (*Program, error) {
name, abi, err := newProgramABIFromFd(fd)
if err != nil {
_ = fd.Close()
return nil, xerrors.Errorf("can't get ABI for %s: %w", fileName, err)
return nil, fmt.Errorf("can't get ABI for %s: %w", fileName, err)
}
return newProgram(fd, name, abi), nil
@ -599,7 +598,7 @@ func (p *Program) ID() (ProgramID, error) {
func resolveBTFType(name string, progType ProgramType, attachType AttachType) (btf.Type, error) {
kernel, err := btf.LoadKernelSpec()
if err != nil {
return nil, xerrors.Errorf("can't resolve BTF type %s: %w", name, err)
return nil, fmt.Errorf("can't resolve BTF type %s: %w", name, err)
}
type match struct {
@ -612,7 +611,7 @@ func resolveBTFType(name string, progType ProgramType, attachType AttachType) (b
case match{Tracing, AttachTraceIter}:
var target btf.Func
if err := kernel.FindType("bpf_iter_"+name, &target); err != nil {
return nil, xerrors.Errorf("can't resolve BTF for iterator %s: %w", name, err)
return nil, fmt.Errorf("can't resolve BTF for iterator %s: %w", name, err)
}
return &target, nil

View File

@ -15,8 +15,13 @@ if [[ "${1:-}" = "--in-vm" ]]; then
export GOPROXY=file:///run/go-root/pkg/mod/cache/download
export GOCACHE=/run/go-cache
elfs=""
if [[ -d "/run/input/bpf" ]]; then
elfs="/run/input/bpf"
fi
echo Running tests...
/usr/local/bin/go test -coverprofile="$1/coverage.txt" -covermode=atomic -v ./...
/usr/local/bin/go test -coverprofile="$1/coverage.txt" -covermode=atomic -v -elfs "$elfs" ./...
touch "$1/success"
exit 0
fi
@ -39,20 +44,34 @@ if [[ -z "${kernel_version}" ]]; then
fi
readonly kernel="linux-${kernel_version}.bz"
readonly selftests="linux-${kernel_version}-selftests-bpf.bz"
readonly input="$(mktemp -d)"
readonly output="$(mktemp -d)"
readonly tmp_dir="${TMPDIR:-$(mktemp -d)}"
readonly tmp_dir="${TMPDIR:-/tmp}"
readonly branch="${BRANCH:-master}"
test -e "${tmp_dir}/${kernel}" || {
echo Fetching "${kernel}"
curl --fail -L "https://github.com/cilium/ci-kernels/blob/master/${kernel}?raw=true" -o "${tmp_dir}/${kernel}"
fetch() {
echo Fetching "${1}"
wget -nv -N -P "${tmp_dir}" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}"
}
fetch "${kernel}"
if fetch "${selftests}"; then
mkdir "${input}/bpf"
tar --strip-components=4 -xjf "${tmp_dir}/${selftests}" -C "${input}/bpf"
else
echo "No selftests found, disabling"
fi
echo Testing on "${kernel_version}"
$sudo virtme-run --kimg "${tmp_dir}/${kernel}" --memory 512M --pwd \
--rwdir=/run/input="${input}" \
--rwdir=/run/output="${output}" \
--rodir=/run/go-path="$(go env GOPATH)" \
--rwdir=/run/go-cache="$(go env GOCACHE)" \
--script-sh "$(realpath "$0") --in-vm /run/output"
--script-sh "$(realpath "$0") --in-vm /run/output" \
--qemu-opts -smp 2 # need at least two CPUs for some tests
if [[ ! -e "${output}/success" ]]; then
echo "Test failed on ${kernel_version}"
@ -66,4 +85,5 @@ else
fi
fi
$sudo rm -r "${input}"
$sudo rm -r "${output}"

View File

@ -1,19 +1,19 @@
package ebpf
import (
"errors"
"fmt"
"os"
"unsafe"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/unix"
"golang.org/x/xerrors"
)
// Generic errors returned by BPF syscalls.
var (
ErrNotExist = xerrors.New("requested object does not exist")
ErrNotExist = errors.New("requested object does not exist")
)
// bpfObjName is a null-terminated string made up of
@ -174,8 +174,8 @@ func bpfProgTestRun(attr *bpfProgTestRunAttr) error {
func bpfMapCreate(attr *bpfMapCreateAttr) (*internal.FD, error) {
fd, err := internal.BPF(internal.BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if xerrors.Is(err, os.ErrPermission) {
return nil, xerrors.New("permission denied or insufficient rlimit to lock memory for map")
if errors.Is(err, os.ErrPermission) {
return nil, errors.New("permission denied or insufficient rlimit to lock memory for map")
}
if err != nil {
@ -317,11 +317,11 @@ func wrapObjError(err error) error {
if err == nil {
return nil
}
if xerrors.Is(err, unix.ENOENT) {
return xerrors.Errorf("%w", ErrNotExist)
if errors.Is(err, unix.ENOENT) {
return fmt.Errorf("%w", ErrNotExist)
}
return xerrors.New(err.Error())
return errors.New(err.Error())
}
func wrapMapError(err error) error {
@ -329,15 +329,15 @@ func wrapMapError(err error) error {
return nil
}
if xerrors.Is(err, unix.ENOENT) {
if errors.Is(err, unix.ENOENT) {
return ErrKeyNotExist
}
if xerrors.Is(err, unix.EEXIST) {
if errors.Is(err, unix.EEXIST) {
return ErrKeyExist
}
return xerrors.New(err.Error())
return errors.New(err.Error())
}
func bpfMapFreeze(m *internal.FD) error {
@ -367,7 +367,7 @@ func bpfGetObjectInfoByFD(fd *internal.FD, info unsafe.Pointer, size uintptr) er
}
_, err = internal.BPF(internal.BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
if err != nil {
return xerrors.Errorf("fd %d: %w", fd, err)
return fmt.Errorf("fd %d: %w", fd, err)
}
return nil
}
@ -375,7 +375,7 @@ func bpfGetObjectInfoByFD(fd *internal.FD, info unsafe.Pointer, size uintptr) er
func bpfGetProgInfoByFD(fd *internal.FD) (*bpfProgInfo, error) {
var info bpfProgInfo
if err := bpfGetObjectInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil {
return nil, xerrors.Errorf("can't get program info: %w", err)
return nil, fmt.Errorf("can't get program info: %w", err)
}
return &info, nil
}
@ -384,7 +384,7 @@ func bpfGetMapInfoByFD(fd *internal.FD) (*bpfMapInfo, error) {
var info bpfMapInfo
err := bpfGetObjectInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info))
if err != nil {
return nil, xerrors.Errorf("can't get map info: %w", err)
return nil, fmt.Errorf("can't get map info: %w", err)
}
return &info, nil
}

View File

@ -23,7 +23,7 @@ import (
"strings"
"sync"
"github.com/godbus/dbus"
"github.com/godbus/dbus/v5"
)
const (

View File

@ -20,7 +20,7 @@ import (
"path"
"strconv"
"github.com/godbus/dbus"
"github.com/godbus/dbus/v5"
)
func (c *Conn) jobComplete(signal *dbus.Signal) {

View File

@ -15,7 +15,7 @@
package dbus
import (
"github.com/godbus/dbus"
"github.com/godbus/dbus/v5"
)
// From the systemd docs:
@ -56,7 +56,7 @@ type execStart struct {
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
func PropExecStart(command []string, uncleanIsFailure bool) Property {
execStarts := []execStart{
execStart{
{
Path: command[0],
Args: command,
UncleanIsFailure: uncleanIsFailure,

View File

@ -19,7 +19,7 @@ import (
"log"
"time"
"github.com/godbus/dbus"
"github.com/godbus/dbus/v5"
)
const (

View File

@ -1,46 +0,0 @@
dist: precise
language: go
go_import_path: github.com/godbus/dbus
sudo: true
go:
- 1.7.3
- 1.8.7
- 1.9.5
- 1.10.1
- tip
env:
global:
matrix:
- TARGET=amd64
- TARGET=arm64
- TARGET=arm
- TARGET=386
- TARGET=ppc64le
matrix:
fast_finish: true
allow_failures:
- go: tip
exclude:
- go: tip
env: TARGET=arm
- go: tip
env: TARGET=arm64
- go: tip
env: TARGET=386
- go: tip
env: TARGET=ppc64le
addons:
apt:
packages:
- dbus
- dbus-x11
before_install:
script:
- go test -v -race ./... # Run all the tests with the race detector enabled
- go vet ./... # go vet is the official Go static analyzer

View File

@ -1 +0,0 @@
module github.com/godbus/dbus

50
vendor/github.com/godbus/dbus/v5/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,50 @@
dist: bionic
language: go
go_import_path: github.com/godbus/dbus
go:
- 1.11.x
- 1.12.x
- 1.13.x
- tip
matrix:
fast_finish: true
allow_failures:
- go: tip
addons:
apt:
packages:
- dbus
- dbus-x11
before_install:
- export GO111MODULE=on
script:
- go test -v -race -mod=readonly ./... # Run all the tests with the race detector enabled
- go vet ./... # go vet is the official Go static analyzer
jobs:
include:
# The build matrix doesn't cover build stages, so manually expand
# the jobs with anchors
- &multiarch
stage: "Multiarch Test"
go: 1.11.x
env: TARGETS="386 arm arm64 ppc64le"
before_install:
- docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
script:
- |
set -e
for target in $TARGETS; do
printf "\e[1mRunning test suite under ${target}.\e[0m\n"
GOARCH="$target" go test -v ./...
printf "\n\n"
done
- <<: *multiarch
go: 1.12.x
- <<: *multiarch
go: 1.13.x

View File

@ -77,7 +77,7 @@ func (conn *Conn) Auth(methods []Auth) error {
for _, m := range methods {
if name, data, status := m.FirstData(); bytes.Equal(v, name) {
var ok bool
err = authWriteLine(conn.transport, []byte("AUTH"), []byte(v), data)
err = authWriteLine(conn.transport, []byte("AUTH"), v, data)
if err != nil {
return err
}
@ -127,7 +127,7 @@ func (conn *Conn) Auth(methods []Auth) error {
// tryAuth tries to authenticate with m as the mechanism, using state as the
// initial authState and in for reading input. It returns (nil, true) on
// success, (nil, false) on a REJECTED and (someErr, false) if some other
// error occured.
// error occurred.
func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) {
for {
s, err := authReadLine(in)

View File

@ -60,7 +60,7 @@ func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) {
// getCookie searches for the cookie identified by id in context and returns
// the cookie content or nil. (Since HandleData can't return a specific error,
// but only whether an error occured, this function also doesn't bother to
// but only whether an error occurred, this function also doesn't bother to
// return an error.)
func (a authCookieSha1) getCookie(context, id []byte) []byte {
file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context))

View File

@ -5,7 +5,6 @@ import (
"errors"
"io"
"os"
"reflect"
"strings"
"sync"
)
@ -31,6 +30,12 @@ var ErrClosed = errors.New("dbus: connection closed by user")
type Conn struct {
transport
ctx context.Context
cancelCtx context.CancelFunc
closeOnce sync.Once
closeErr error
busObj BusObject
unixFD bool
uuid string
@ -38,6 +43,8 @@ type Conn struct {
handler Handler
signalHandler SignalHandler
serialGen SerialGenerator
inInt Interceptor
outInt Interceptor
names *nameTracker
calls *callTracker
@ -134,6 +141,8 @@ func SystemBus() (conn *Conn, err error) {
}
// SystemBusPrivate returns a new private connection to the system bus.
// Note: this connection is not ready to use. One must perform Auth and Hello
// on the connection before it is usable.
func SystemBusPrivate(opts ...ConnOption) (*Conn, error) {
return Dial(getSystemBusPlatformAddress(), opts...)
}
@ -188,6 +197,33 @@ func WithSerialGenerator(gen SerialGenerator) ConnOption {
}
}
// Interceptor intercepts incoming and outgoing messages.
type Interceptor func(msg *Message)
// WithIncomingInterceptor sets the given interceptor for incoming messages.
func WithIncomingInterceptor(interceptor Interceptor) ConnOption {
return func(conn *Conn) error {
conn.inInt = interceptor
return nil
}
}
// WithOutgoingInterceptor sets the given interceptor for outgoing messages.
func WithOutgoingInterceptor(interceptor Interceptor) ConnOption {
return func(conn *Conn) error {
conn.outInt = interceptor
return nil
}
}
// WithContext overrides the default context for the connection.
func WithContext(ctx context.Context) ConnOption {
return func(conn *Conn) error {
conn.ctx = ctx
return nil
}
}
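A sketch of wiring these options into a private connection; as the comments above note, Auth and Hello have to succeed before the connection is usable (the interceptor body is illustrative, usual imports assumed):

```
func connectPrivate(ctx context.Context) (*dbus.Conn, error) {
	conn, err := dbus.SystemBusPrivate(
		dbus.WithContext(ctx),
		dbus.WithIncomingInterceptor(func(msg *dbus.Message) {
			log.Printf("incoming: %v", msg.Type)
		}),
	)
	if err != nil {
		return nil, err
	}
	// A private connection is not ready to use until Auth and Hello
	// have been performed.
	if err := conn.Auth(nil); err != nil {
		conn.Close()
		return nil, err
	}
	if err := conn.Hello(); err != nil {
		conn.Close()
		return nil, err
	}
	return conn, nil
}
```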
// NewConn creates a new private *Conn from an already established connection.
func NewConn(conn io.ReadWriteCloser, opts ...ConnOption) (*Conn, error) {
return newConn(genericTransport{conn}, opts...)
@ -209,6 +245,15 @@ func newConn(tr transport, opts ...ConnOption) (*Conn, error) {
return nil, err
}
}
if conn.ctx == nil {
conn.ctx = context.Background()
}
conn.ctx, conn.cancelCtx = context.WithCancel(conn.ctx)
go func() {
<-conn.ctx.Done()
conn.Close()
}()
conn.calls = newCallTracker()
if conn.handler == nil {
conn.handler = NewDefaultHandler()
@ -235,27 +280,38 @@ func (conn *Conn) BusObject() BusObject {
// and the channels passed to Eavesdrop and Signal are closed. This method must
// not be called on shared connections.
func (conn *Conn) Close() error {
conn.outHandler.close()
if term, ok := conn.signalHandler.(Terminator); ok {
term.Terminate()
}
conn.closeOnce.Do(func() {
conn.outHandler.close()
if term, ok := conn.signalHandler.(Terminator); ok {
term.Terminate()
}
if term, ok := conn.handler.(Terminator); ok {
term.Terminate()
}
if term, ok := conn.handler.(Terminator); ok {
term.Terminate()
}
conn.eavesdroppedLck.Lock()
if conn.eavesdropped != nil {
close(conn.eavesdropped)
}
conn.eavesdroppedLck.Unlock()
conn.eavesdroppedLck.Lock()
if conn.eavesdropped != nil {
close(conn.eavesdropped)
}
conn.eavesdroppedLck.Unlock()
return conn.transport.Close()
conn.cancelCtx()
conn.closeErr = conn.transport.Close()
})
return conn.closeErr
}
// Context returns the context associated with the connection. The
// context will be cancelled when the connection is closed.
func (conn *Conn) Context() context.Context {
return conn.ctx
}
// Eavesdrop causes conn to send all incoming messages to the given channel
// without further processing. Method replies, errors and signals will not be
// sent to the appropiate channels and method calls will not be handled. If nil
// sent to the appropriate channels and method calls will not be handled. If nil
// is passed, the normal behaviour is restored.
//
// The caller has to make sure that ch is sufficiently buffered;
@ -267,7 +323,7 @@ func (conn *Conn) Eavesdrop(ch chan<- *Message) {
conn.eavesdroppedLck.Unlock()
}
// GetSerial returns an unused serial.
// getSerial returns an unused serial.
func (conn *Conn) getSerial() uint32 {
return conn.serialGen.GetSerial()
}
@ -292,7 +348,7 @@ func (conn *Conn) inWorker() {
msg, err := conn.ReadMessage()
if err != nil {
if _, ok := err.(InvalidMessageError); !ok {
// Some read error occured (usually EOF); we can't really do
// Some read error occurred (usually EOF); we can't really do
// anything but to shut down all stuff and returns errors to all
// pending replies.
conn.Close()
@ -321,6 +377,10 @@ func (conn *Conn) inWorker() {
// Ignore it.
continue
}
if conn.inInt != nil {
conn.inInt(msg)
}
switch msg.Type {
case TypeError:
conn.serialGen.RetireSerial(conn.calls.handleDBusError(msg))
@ -381,13 +441,10 @@ func (conn *Conn) Object(dest string, path ObjectPath) BusObject {
return &Object{conn, dest, path}
}
// outWorker runs in an own goroutine, encoding and sending messages that are
// sent to conn.out.
func (conn *Conn) sendMessage(msg *Message) {
conn.sendMessageAndIfClosed(msg, func() {})
}
func (conn *Conn) sendMessageAndIfClosed(msg *Message, ifClosed func()) {
if conn.outInt != nil {
conn.outInt(msg)
}
err := conn.outHandler.sendAndIfClosed(msg, ifClosed)
conn.calls.handleSendError(msg, err)
if err != nil {
@ -483,7 +540,7 @@ func (conn *Conn) sendError(err error, dest string, serial uint32) {
if len(e.Body) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...))
}
conn.sendMessage(msg)
conn.sendMessageAndIfClosed(msg, nil)
}
// sendReply creates a method reply message corresponding to the parameters and
@ -501,33 +558,54 @@ func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
if len(values) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
}
conn.sendMessage(msg)
conn.sendMessageAndIfClosed(msg, nil)
}
func (conn *Conn) defaultSignalAction(fn func(h *defaultSignalHandler, ch chan<- *Signal), ch chan<- *Signal) {
if !isDefaultSignalHandler(conn.signalHandler) {
return
}
handler := conn.signalHandler.(*defaultSignalHandler)
fn(handler, ch)
// AddMatchSignal registers the given match rule to receive broadcast
// signals based on their contents.
func (conn *Conn) AddMatchSignal(options ...MatchOption) error {
options = append([]MatchOption{withMatchType("signal")}, options...)
return conn.busObj.Call(
"org.freedesktop.DBus.AddMatch", 0,
formatMatchOptions(options),
).Store()
}
// RemoveMatchSignal removes the first rule that matches previously registered with AddMatchSignal.
func (conn *Conn) RemoveMatchSignal(options ...MatchOption) error {
options = append([]MatchOption{withMatchType("signal")}, options...)
return conn.busObj.Call(
"org.freedesktop.DBus.RemoveMatch", 0,
formatMatchOptions(options),
).Store()
}
// Signal registers the given channel to be passed all received signal messages.
// The caller has to make sure that ch is sufficiently buffered; if a message
// arrives when a write to ch is not possible, it is discarded.
//
// Multiple of these channels can be registered at the same time.
//
// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a
// channel for eavesdropped messages, this channel receives all signals, and
// none of the channels passed to Signal will receive any signals.
//
// Panics if the signal handler is not a `SignalRegistrar`.
func (conn *Conn) Signal(ch chan<- *Signal) {
conn.defaultSignalAction((*defaultSignalHandler).addSignal, ch)
handler, ok := conn.signalHandler.(SignalRegistrar)
if !ok {
panic("cannot use this method with a non SignalRegistrar handler")
}
handler.AddSignal(ch)
}
// RemoveSignal removes the given channel from the list of the registered channels.
//
// Panics if the signal handler is not a `SignalRegistrar`.
func (conn *Conn) RemoveSignal(ch chan<- *Signal) {
conn.defaultSignalAction((*defaultSignalHandler).removeSignal, ch)
handler, ok := conn.signalHandler.(SignalRegistrar)
if !ok {
panic("cannot use this method with a non SignalRegistrar handler")
}
handler.RemoveSignal(ch)
}
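
For signal delivery itself, a sketch along these lines should work with the default handler (which implements SignalRegistrar); the buffer size is arbitrary:

```
package main

import (
	"fmt"
	"log"

	"github.com/godbus/dbus/v5"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		log.Fatal(err)
	}
	if err := conn.AddMatchSignal(dbus.WithMatchInterface("org.freedesktop.DBus")); err != nil {
		log.Fatal(err)
	}

	// Keep the channel buffered: the default handler falls back to one
	// goroutine per blocked send, but other handlers may drop signals.
	ch := make(chan *dbus.Signal, 16)
	conn.Signal(ch)
	defer conn.RemoveSignal(ch)

	for sig := range ch {
		fmt.Println(sig.Name, sig.Body)
	}
}
```
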
// SupportsUnixFDs returns whether the underlying transport supports passing of
@ -614,18 +692,6 @@ func getTransport(address string) (transport, error) {
return nil, err
}
// dereferenceAll returns a slice that, assuming that vs is a slice of pointers
// of arbitrary types, contains the values that are obtained from dereferencing
// all elements in vs.
func dereferenceAll(vs []interface{}) []interface{} {
for i := range vs {
v := reflect.ValueOf(vs[i])
v = v.Elem()
vs[i] = v.Interface()
}
return vs
}
// getKey gets a key from the list of keys. Returns "" on error / not found...
func getKey(s, key string) string {
for _, keyEqualsValue := range strings.Split(s, ",") {
@ -650,7 +716,9 @@ func (h *outputHandler) sendAndIfClosed(msg *Message, ifClosed func()) error {
h.closed.lck.RLock()
defer h.closed.lck.RUnlock()
if h.closed.isClosed {
ifClosed()
if ifClosed != nil {
ifClosed()
}
return nil
}
h.sendLck.Lock()
@ -801,7 +869,6 @@ func (tracker *callTracker) finalize(sn uint32) {
delete(tracker.calls, sn)
c.ContextCancel()
}
return
}
func (tracker *callTracker) finalizeWithBody(sn uint32, body []interface{}) {
@ -815,7 +882,6 @@ func (tracker *callTracker) finalizeWithBody(sn uint32, body []interface{}) {
c.Body = body
c.done()
}
return
}
func (tracker *callTracker) finalizeWithError(sn uint32, err error) {
@ -829,7 +895,6 @@ func (tracker *callTracker) finalizeWithError(sn uint32, err error) {
c.Err = err
c.done()
}
return
}
func (tracker *callTracker) finalizeAllWithError(err error) {


@ -14,8 +14,10 @@ import (
"strings"
)
var execCommand = exec.Command
func getSessionBusPlatformAddress() (string, error) {
cmd := exec.Command("dbus-launch")
cmd := execCommand("dbus-launch")
b, err := cmd.CombinedOutput()
if err != nil {
@ -25,7 +27,7 @@ func getSessionBusPlatformAddress() (string, error) {
i := bytes.IndexByte(b, '=')
j := bytes.IndexByte(b, '\n')
if i == -1 || j == -1 {
if i == -1 || j == -1 || i > j {
return "", errors.New("dbus: couldn't determine address of session bus")
}


@ -4,7 +4,6 @@ package dbus
import (
"os"
"fmt"
)
const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
@ -12,7 +11,7 @@ const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
func getSystemBusPlatformAddress() string {
address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
if address != "" {
return fmt.Sprintf("unix:path=%s", address)
return address
}
return defaultSystemBusAddress
}


@ -87,6 +87,7 @@ func setDest(dest, src reflect.Value) error {
}
if isVariant(src.Type()) && !isVariant(dest.Type()) {
src = getVariantValue(src)
return store(dest, src)
}
if !src.Type().ConvertibleTo(dest.Type()) {
return fmt.Errorf(


@ -188,8 +188,14 @@ func (dec *decoder) decode(s string, depth int) interface{} {
if depth >= 64 {
panic(FormatError("input exceeds container depth limit"))
}
sig := s[1:]
length := dec.decode("u", depth).(uint32)
v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length))
// capacity can be determined only for fixed-size element types
var capacity int
if s := sigByteSize(sig); s != 0 {
capacity = int(length) / s
}
v := reflect.MakeSlice(reflect.SliceOf(typeFor(sig)), 0, capacity)
// Even for empty arrays, the correct padding must be included
align := alignment(typeFor(s[1:]))
if len(s) > 1 && s[1] == '(' {
@ -227,6 +233,51 @@ func (dec *decoder) decode(s string, depth int) interface{} {
}
}
// sigByteSize tries to calculate the size, in bytes, of the given signature.
//
// It returns zero when it can't, for example when the signature contains
// non-fixed-size types such as strings, maps, and arrays; sizing those would
// require reading the transmitted data, which would need an unread method on
// the decoder first.
func sigByteSize(sig string) int {
var total int
for offset := 0; offset < len(sig); {
switch sig[offset] {
case 'y':
total += 1
offset += 1
case 'n', 'q':
total += 2
offset += 1
case 'b', 'i', 'u', 'h':
total += 4
offset += 1
case 'x', 't', 'd':
total += 8
offset += 1
case '(':
i := 1
depth := 1
for i < len(sig[offset:]) && depth != 0 {
if sig[offset+i] == '(' {
depth++
} else if sig[offset+i] == ')' {
depth--
}
i++
}
s := sigByteSize(sig[offset+1 : offset+i-1])
if s == 0 {
return 0
}
total += s
offset += i
default:
return 0
}
}
return total
}
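
A quick sanity check of the arithmetic above can live in a table test; this is only a sketch, and because sigByteSize is unexported it has to sit inside package dbus (padding is deliberately not counted, the result is just a capacity hint):

```
package dbus

import "testing"

func TestSigByteSizeSketch(t *testing.T) {
	cases := map[string]int{
		"ii":    8,  // two int32 values
		"(yq)x": 11, // struct{byte, uint16} plus an int64, no padding counted
		"as":    0,  // arrays are not fixed-size, so no capacity hint
	}
	for sig, want := range cases {
		if got := sigByteSize(sig); got != want {
			t.Errorf("sigByteSize(%q) = %d, want %d", sig, got, want)
		}
	}
}
```
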
// A FormatError is an error in the wire format.
type FormatError string


@ -47,7 +47,7 @@ func (h *defaultHandler) introspectPath(path ObjectPath) string {
subpath := make(map[string]struct{})
var xml bytes.Buffer
xml.WriteString("<node>")
for obj, _ := range h.objects {
for obj := range h.objects {
p := string(path)
if p != "/" {
p += "/"
@ -57,7 +57,7 @@ func (h *defaultHandler) introspectPath(path ObjectPath) string {
subpath[node_name] = struct{}{}
}
}
for s, _ := range subpath {
for s := range subpath {
xml.WriteString("\n\t<node name=\"" + s + "\"/>")
}
xml.WriteString("\n</node>")
@ -234,88 +234,95 @@ func (obj *exportedIntf) isFallbackInterface() bool {
//
// Deprecated: this is the default value, don't use it, it will be unexported.
func NewDefaultSignalHandler() *defaultSignalHandler {
return &defaultSignalHandler{
closeChan: make(chan struct{}),
}
}
func isDefaultSignalHandler(handler SignalHandler) bool {
_, ok := handler.(*defaultSignalHandler)
return ok
return &defaultSignalHandler{}
}
type defaultSignalHandler struct {
sync.RWMutex
closed bool
signals []chan<- *Signal
closeChan chan struct{}
mu sync.RWMutex
closed bool
signals []*signalChannelData
}
func (sh *defaultSignalHandler) DeliverSignal(intf, name string, signal *Signal) {
sh.RLock()
defer sh.RUnlock()
sh.mu.RLock()
defer sh.mu.RUnlock()
if sh.closed {
return
}
for _, ch := range sh.signals {
select {
case ch <- signal:
case <-sh.closeChan:
return
default:
go func() {
select {
case ch <- signal:
case <-sh.closeChan:
return
}
}()
}
for _, scd := range sh.signals {
scd.deliver(signal)
}
}
func (sh *defaultSignalHandler) Init() error {
sh.Lock()
sh.signals = make([]chan<- *Signal, 0)
sh.closeChan = make(chan struct{})
sh.Unlock()
return nil
}
func (sh *defaultSignalHandler) Terminate() {
sh.Lock()
if !sh.closed {
close(sh.closeChan)
}
sh.closed = true
for _, ch := range sh.signals {
close(ch)
}
sh.signals = nil
sh.Unlock()
}
func (sh *defaultSignalHandler) addSignal(ch chan<- *Signal) {
sh.Lock()
defer sh.Unlock()
sh.mu.Lock()
defer sh.mu.Unlock()
if sh.closed {
return
}
sh.signals = append(sh.signals, ch)
for _, scd := range sh.signals {
scd.close()
close(scd.ch)
}
sh.closed = true
sh.signals = nil
}
func (sh *defaultSignalHandler) removeSignal(ch chan<- *Signal) {
sh.Lock()
defer sh.Unlock()
func (sh *defaultSignalHandler) AddSignal(ch chan<- *Signal) {
sh.mu.Lock()
defer sh.mu.Unlock()
if sh.closed {
return
}
sh.signals = append(sh.signals, &signalChannelData{
ch: ch,
done: make(chan struct{}),
})
}
func (sh *defaultSignalHandler) RemoveSignal(ch chan<- *Signal) {
sh.mu.Lock()
defer sh.mu.Unlock()
if sh.closed {
return
}
for i := len(sh.signals) - 1; i >= 0; i-- {
if ch == sh.signals[i] {
if ch == sh.signals[i].ch {
sh.signals[i].close()
copy(sh.signals[i:], sh.signals[i+1:])
sh.signals[len(sh.signals)-1] = nil
sh.signals = sh.signals[:len(sh.signals)-1]
}
}
}
type signalChannelData struct {
wg sync.WaitGroup
ch chan<- *Signal
done chan struct{}
}
func (scd *signalChannelData) deliver(signal *Signal) {
select {
case scd.ch <- signal:
case <-scd.done:
return
default:
scd.wg.Add(1)
go scd.deferredDeliver(signal)
}
}
func (scd *signalChannelData) deferredDeliver(signal *Signal) {
select {
case scd.ch <- signal:
case <-scd.done:
}
scd.wg.Done()
}
func (scd *signalChannelData) close() {
close(scd.done)
scd.wg.Wait() // wait until all spawned goroutines return
}


@ -61,7 +61,7 @@ Handling Unix file descriptors deserves special mention. To use them, you should
first check that they are supported on a connection by calling SupportsUnixFDs.
If it returns true, all methods of Connection will translate messages containing
UnixFD's to messages that are accompanied by the given file descriptors with the
UnixFD values being substituted by the correct indices. Similarily, the indices
UnixFD values being substituted by the correct indices. Similarly, the indices
of incoming messages are automatically resolved. It shouldn't be necessary to use
UnixFDIndex.


@ -60,7 +60,7 @@ func (enc *encoder) binwrite(v interface{}) {
}
}
// Encode encodes the given values to the underyling reader. All written values
// Encode encodes the given values to the underlying reader. All written values
// are aligned properly as required by the D-Bus spec.
func (enc *encoder) Encode(vs ...interface{}) (err error) {
defer func() {


@ -171,7 +171,7 @@ func (conn *Conn) handleCall(msg *Message) {
}
reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
conn.sendMessage(reply)
conn.sendMessageAndIfClosed(reply, nil)
}
}

vendor/github.com/godbus/dbus/v5/go.mod generated vendored Normal file

@ -0,0 +1,3 @@
module github.com/godbus/dbus/v5
go 1.12

vendor/github.com/godbus/dbus/v5/go.sum generated vendored Normal file

vendor/github.com/godbus/dbus/v5/match.go generated vendored Normal file

@ -0,0 +1,62 @@
package dbus
import (
"strings"
)
// MatchOption specifies option for dbus routing match rule. Options can be constructed with WithMatch* helpers.
// For full list of available options consult
// https://dbus.freedesktop.org/doc/dbus-specification.html#message-bus-routing-match-rules
type MatchOption struct {
key string
value string
}
func formatMatchOptions(options []MatchOption) string {
items := make([]string, 0, len(options))
for _, option := range options {
items = append(items, option.key+"='"+option.value+"'")
}
return strings.Join(items, ",")
}
// WithMatchOption creates match option with given key and value
func WithMatchOption(key, value string) MatchOption {
return MatchOption{key, value}
}
// doesn't make sense to export this option because clients can only
// subscribe to messages with signal type.
func withMatchType(typ string) MatchOption {
return WithMatchOption("type", typ)
}
// WithMatchSender sets sender match option.
func WithMatchSender(sender string) MatchOption {
return WithMatchOption("sender", sender)
}
// WithMatchInterface sets interface match option.
func WithMatchInterface(iface string) MatchOption {
return WithMatchOption("interface", iface)
}
// WithMatchMember sets member match option.
func WithMatchMember(member string) MatchOption {
return WithMatchOption("member", member)
}
// WithMatchObjectPath creates match option that filters events based on given path
func WithMatchObjectPath(path ObjectPath) MatchOption {
return WithMatchOption("path", string(path))
}
// WithMatchPathNamespace sets path_namespace match option.
func WithMatchPathNamespace(namespace ObjectPath) MatchOption {
return WithMatchOption("path_namespace", string(namespace))
}
// WithMatchDestination sets destination match option.
func WithMatchDestination(destination string) MatchOption {
return WithMatchOption("destination", destination)
}


@ -16,6 +16,7 @@ type BusObject interface {
AddMatchSignal(iface, member string, options ...MatchOption) *Call
RemoveMatchSignal(iface, member string, options ...MatchOption) *Call
GetProperty(p string) (Variant, error)
SetProperty(p string, v interface{}) error
Destination() string
Path() ObjectPath
}
@ -37,41 +38,16 @@ func (o *Object) CallWithContext(ctx context.Context, method string, flags Flags
return <-o.createCall(ctx, method, flags, make(chan *Call, 1), args...).Done
}
// MatchOption specifies option for dbus routing match rule. Options can be constructed with WithMatch* helpers.
// For full list of available options consult
// https://dbus.freedesktop.org/doc/dbus-specification.html#message-bus-routing-match-rules
type MatchOption struct {
key string
value string
}
// WithMatchOption creates match option with given key and value
func WithMatchOption(key, value string) MatchOption {
return MatchOption{key, value}
}
// WithMatchObjectPath creates match option that filters events based on given path
func WithMatchObjectPath(path ObjectPath) MatchOption {
return MatchOption{"path", string(path)}
}
func formatMatchOptions(options []MatchOption) string {
items := make([]string, 0, len(options))
for _, option := range options {
items = append(items, option.key+"='"+option.value+"'")
}
return strings.Join(items, ",")
}
// AddMatchSignal subscribes BusObject to signals from specified interface,
// method (member). Additional filter rules can be added via WithMatch* option constructors.
// Note: To filter events by object path you have to specify this path via an option.
//
// Deprecated: use (*Conn) AddMatchSignal instead.
func (o *Object) AddMatchSignal(iface, member string, options ...MatchOption) *Call {
base := []MatchOption{
{"type", "signal"},
{"interface", iface},
{"member", member},
withMatchType("signal"),
WithMatchInterface(iface),
WithMatchMember(member),
}
options = append(base, options...)
@ -84,11 +60,13 @@ func (o *Object) AddMatchSignal(iface, member string, options ...MatchOption) *C
// RemoveMatchSignal unsubscribes BusObject from signals from specified interface,
// method (member). Additional filter rules can be added via WithMatch* option constructors
//
// Deprecated: use (*Conn) RemoveMatchSignal instead.
func (o *Object) RemoveMatchSignal(iface, member string, options ...MatchOption) *Call {
base := []MatchOption{
{"type", "signal"},
{"interface", iface},
{"member", member},
withMatchType("signal"),
WithMatchInterface(iface),
WithMatchMember(member),
}
options = append(base, options...)
@ -146,7 +124,7 @@ func (o *Object) createCall(ctx context.Context, method string, flags Flags, ch
}
if msg.Flags&FlagNoReplyExpected == 0 {
if ch == nil {
ch = make(chan *Call, 10)
ch = make(chan *Call, 1)
} else if cap(ch) == 0 {
panic("dbus: unbuffered channel passed to (*Object).Go")
}
@ -187,7 +165,7 @@ func (o *Object) createCall(ctx context.Context, method string, flags Flags, ch
return call
}
// GetProperty calls org.freedesktop.DBus.Properties.GetProperty on the given
// GetProperty calls org.freedesktop.DBus.Properties.Get on the given
// object. The property name must be given in interface.member notation.
func (o *Object) GetProperty(p string) (Variant, error) {
idx := strings.LastIndex(p, ".")
@ -208,6 +186,20 @@ func (o *Object) GetProperty(p string) (Variant, error) {
return result, nil
}
// SetProperty calls org.freedesktop.DBus.Properties.Set on the given
// object. The property name must be given in interface.member notation.
func (o *Object) SetProperty(p string, v interface{}) error {
idx := strings.LastIndex(p, ".")
if idx == -1 || idx+1 == len(p) {
return errors.New("dbus: invalid property " + p)
}
iface := p[:idx]
prop := p[idx+1:]
return o.Call("org.freedesktop.DBus.Properties.Set", 0, iface, prop, v).Err
}
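
A usage sketch for the property helpers; the bus name, object path and property names below are placeholders for an interface you control, and since org.freedesktop.DBus.Properties.Set expects a variant ("ssv"), the new value is wrapped with MakeVariant:

```
package main

import (
	"fmt"
	"log"

	"github.com/godbus/dbus/v5"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		log.Fatal(err)
	}

	obj := conn.Object("org.example.App", "/org/example/App") // placeholders

	// Property names use interface.member notation.
	v, err := obj.GetProperty("org.example.App.Volume")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("current value:", v.Value())

	// Wrap the new value in a variant, as Properties.Set expects "ssv".
	if err := obj.SetProperty("org.example.App.Volume", dbus.MakeVariant(uint32(50))); err != nil {
		log.Fatal(err)
	}
}
```
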
// Destination returns the destination that calls on (o *Object) are sent to.
func (o *Object) Destination() string {
return o.dest


@ -77,6 +77,14 @@ type SignalHandler interface {
DeliverSignal(iface, name string, signal *Signal)
}
// SignalRegistrar manages signal delivery channels.
//
// This is an optional set of methods for `SignalHandler`.
type SignalRegistrar interface {
AddSignal(ch chan<- *Signal)
RemoveSignal(ch chan<- *Signal)
}
// A DBusError is used to convert a generic object to a D-Bus error.
//
// Any custom error mechanism may implement this interface to provide


@ -1,5 +1,3 @@
//+build !windows
package dbus
import (


@ -203,7 +203,7 @@ func (t *unixTransport) SendMessage(msg *Message) error {
}
} else {
if err := msg.EncodeTo(t, nativeEndian); err != nil {
return nil
return err
}
}
return nil


@ -26,7 +26,7 @@ func MakeVariantWithSignature(v interface{}, s Signature) Variant {
}
// ParseVariant parses the given string as a variant as described at
// https://developer.gnome.org/glib/unstable/gvariant-text.html. If sig is not
// https://developer.gnome.org/glib/stable/gvariant-text.html. If sig is not
// empty, it is taken to be the expected signature for the variant.
func ParseVariant(s string, sig Signature) (Variant, error) {
tokens := varLex(s)
@ -129,7 +129,7 @@ func (v Variant) Signature() Signature {
}
// String returns the string representation of the underlying value of v as
// described at https://developer.gnome.org/glib/unstable/gvariant-text.html.
// described at https://developer.gnome.org/glib/stable/gvariant-text.html.
func (v Variant) String() string {
s, unamb := v.format()
if !unamb {


@ -51,7 +51,7 @@ func varLex(s string) []varToken {
}
func (l *varLexer) accept(valid string) bool {
if strings.IndexRune(valid, l.next()) >= 0 {
if strings.ContainsRune(valid, l.next()) {
return true
}
l.backup()
@ -214,17 +214,17 @@ func varLexNumber(l *varLexer) lexState {
digits = "01234567"
}
}
for strings.IndexRune(digits, l.next()) >= 0 {
for strings.ContainsRune(digits, l.next()) {
}
l.backup()
if l.accept(".") {
for strings.IndexRune(digits, l.next()) >= 0 {
for strings.ContainsRune(digits, l.next()) {
}
l.backup()
}
if l.accept("eE") {
l.accept("+-")
for strings.IndexRune("0123456789", l.next()) >= 0 {
for strings.ContainsRune("0123456789", l.next()) {
}
l.backup()
}


@ -14,7 +14,7 @@ const (
NodeStatusDown = "down"
// NodeSchedulingEligible and Ineligible marks the node as eligible or not,
// respectively, for receiving allocations. This is orthoginal to the node
// respectively, for receiving allocations. This is orthogonal to the node
// status being ready.
NodeSchedulingEligible = "eligible"
NodeSchedulingIneligible = "ineligible"
@ -435,6 +435,25 @@ func (n *Nodes) GcAlloc(allocID string, q *QueryOptions) error {
return err
}
// Purge removes a node from the system. Nodes can still re-join the cluster if
// they are alive.
func (n *Nodes) Purge(nodeID string, q *QueryOptions) (*NodePurgeResponse, *QueryMeta, error) {
var resp NodePurgeResponse
path := fmt.Sprintf("/v1/node/%s/purge", nodeID)
qm, err := n.client.putQuery(path, nil, &resp, q)
if err != nil {
return nil, nil, err
}
return &resp, qm, nil
}
// NodePurgeResponse is used to deserialize a Purge response.
type NodePurgeResponse struct {
EvalIDs []string
EvalCreateIndex uint64
NodeModifyIndex uint64
}
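
A minimal sketch of calling the new endpoint through the Nomad API client; the import path and client construction follow the usual api package conventions (assumed here, not shown in this diff), and the node ID is a placeholder:

```
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Purge drops the node from server state; a node that is still alive
	// can simply re-register afterwards.
	resp, _, err := client.Nodes().Purge("node-id-placeholder", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("evaluations created:", resp.EvalIDs)
}
```
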
// DriverInfo is used to deserialize a DriverInfo entry
type DriverInfo struct {
Attributes map[string]string

vendor/github.com/moby/sys/mountinfo/LICENSE generated vendored Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/moby/sys/mountinfo/go.mod generated vendored Normal file

@ -0,0 +1,3 @@
module github.com/moby/sys/mountinfo
go 1.14


@ -1,4 +1,31 @@
package mount
package mountinfo
import "io"
// GetMounts retrieves a list of mounts for the current running process,
// with an optional filter applied (use nil for no filter).
func GetMounts(f FilterFunc) ([]*Info, error) {
return parseMountTable(f)
}
// GetMountsFromReader retrieves a list of mounts from the
// reader provided, with an optional filter applied (use nil
// for no filter). This can be useful in tests or benchmarks
// that provide fake mountinfo data.
func GetMountsFromReader(reader io.Reader, f FilterFunc) ([]*Info, error) {
return parseInfoFile(reader, f)
}
// Mounted determines if a specified mountpoint has been mounted.
// On Linux it looks at /proc/self/mountinfo.
func Mounted(mountpoint string) (bool, error) {
entries, err := GetMounts(SingleEntryFilter(mountpoint))
if err != nil {
return false, err
}
return len(entries) > 0, nil
}
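
A sketch of the exported entry points on Linux, using the module path from the go.mod above and the sample mountinfo line from the parser comment further down:

```
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/moby/sys/mountinfo"
)

func main() {
	// Ask the running system (reads /proc/self/mountinfo on Linux).
	mounted, err := mountinfo.Mounted("/proc")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("/proc mounted:", mounted)

	// Parse fake data instead, e.g. in a test.
	const fake = "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue\n"
	infos, err := mountinfo.GetMountsFromReader(strings.NewReader(fake), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(infos[0].Mountpoint, infos[0].Fstype) // "/mnt2 ext3"
}
```
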
// Info reveals information about a particular mounted filesystem. This
// struct is populated from the content in the /proc/<pid>/mountinfo file.


@ -0,0 +1,58 @@
package mountinfo
import "strings"
// FilterFunc is a type defining a callback function for GetMount(),
// used to filter out mountinfo entries we're not interested in,
// and/or stop further processing if we found what we wanted.
//
// It takes a pointer to the Info struct (not fully populated,
// currently only Mountpoint, Fstype, Source, and (on Linux)
// VfsOpts are filled in), and returns two booleans:
//
// - skip: true if the entry should be skipped
// - stop: true if parsing should be stopped after the entry
type FilterFunc func(*Info) (skip, stop bool)
// PrefixFilter discards all entries whose mount points
// do not start with a specific prefix
func PrefixFilter(prefix string) FilterFunc {
return func(m *Info) (bool, bool) {
skip := !strings.HasPrefix(m.Mountpoint, prefix)
return skip, false
}
}
// SingleEntryFilter looks for a specific entry
func SingleEntryFilter(mp string) FilterFunc {
return func(m *Info) (bool, bool) {
if m.Mountpoint == mp {
return false, true // don't skip, stop now
}
return true, false // skip, keep going
}
}
// ParentsFilter returns all entries whose mount points
// can be parents of a path specified, discarding others.
//
// For example, given `/var/lib/docker/something`, entries
// like `/var/lib/docker`, `/var` and `/` are returned.
func ParentsFilter(path string) FilterFunc {
return func(m *Info) (bool, bool) {
skip := !strings.HasPrefix(path, m.Mountpoint)
return skip, false
}
}
// FstypeFilter returns all entries that match provided fstype(s).
func FstypeFilter(fstype ...string) FilterFunc {
return func(m *Info) (bool, bool) {
for _, t := range fstype {
if m.Fstype == t {
return false, false // don't skip, keep going
}
}
return true, false // skip, keep going
}
}
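
Filters compose with GetMounts; a sketch (paths and fstypes are illustrative):

```
package main

import (
	"fmt"
	"log"

	"github.com/moby/sys/mountinfo"
)

func main() {
	// Only mounts of the given filesystem types.
	byType, err := mountinfo.GetMounts(mountinfo.FstypeFilter("tmpfs", "cgroup2"))
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range byType {
		fmt.Println(m.Mountpoint, m.Fstype)
	}

	// Mount points that can be parents of a path, e.g. "/var" and "/".
	parents, err := mountinfo.GetMounts(mountinfo.ParentsFilter("/var/lib/docker"))
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range parents {
		fmt.Println("parent:", m.Mountpoint)
	}
}
```
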


@ -0,0 +1,53 @@
package mountinfo
/*
#include <sys/param.h>
#include <sys/ucred.h>
#include <sys/mount.h>
*/
import "C"
import (
"fmt"
"reflect"
"unsafe"
)
// parseMountTable returns information about mounted filesystems
func parseMountTable(filter FilterFunc) ([]*Info, error) {
var rawEntries *C.struct_statfs
count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
if count == 0 {
return nil, fmt.Errorf("Failed to call getmntinfo")
}
var entries []C.struct_statfs
header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
header.Cap = count
header.Len = count
header.Data = uintptr(unsafe.Pointer(rawEntries))
var out []*Info
for _, entry := range entries {
var mountinfo Info
var skip, stop bool
mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
if filter != nil {
// filter out entries we're not interested in
skip, stop = filter(&mountinfo)
if skip {
continue
}
}
out = append(out, &mountinfo)
if stop {
break
}
}
return out, nil
}

vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go generated vendored Normal file

@ -0,0 +1,152 @@
// +build go1.13
package mountinfo
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) {
s := bufio.NewScanner(r)
out := []*Info{}
var err error
for s.Scan() {
if err = s.Err(); err != nil {
return nil, err
}
/*
See http://man7.org/linux/man-pages/man5/proc.5.html
36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
(1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
(1) mount ID: unique identifier of the mount (may be reused after umount)
(2) parent ID: ID of parent (or of self for the top of the mount tree)
(3) major:minor: value of st_dev for files on filesystem
(4) root: root of the mount within the filesystem
(5) mount point: mount point relative to the process's root
(6) mount options: per mount options
(7) optional fields: zero or more fields of the form "tag[:value]"
(8) separator: marks the end of the optional fields
(9) filesystem type: name of filesystem of the form "type[.subtype]"
(10) mount source: filesystem specific information or "none"
(11) super options: per super block options
In other words, we have:
* 6 mandatory fields (1)..(6)
* 0 or more optional fields (7)
* a separator field (8)
* 3 mandatory fields (9)..(11)
*/
text := s.Text()
fields := strings.Split(text, " ")
numFields := len(fields)
if numFields < 10 {
// should be at least 10 fields
return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields)
}
// separator field
sepIdx := numFields - 4
// In Linux <= 3.9 mounting a cifs with spaces in a share
// name (like "//srv/My Docs") _may_ end up having a space
// in the last field of mountinfo (like "unc=//serv/My Docs").
// Since kernel 3.10-rc1, cifs option "unc=" is ignored,
// so spaces should not appear.
//
// Check for a separator, and work around the spaces bug
for fields[sepIdx] != "-" {
sepIdx--
if sepIdx == 5 {
return nil, fmt.Errorf("Parsing '%s' failed: missing - separator", text)
}
}
p := &Info{}
// Fill in the fields that a filter might check
p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`)
if err != nil {
return nil, fmt.Errorf("Parsing '%s' failed: unable to unquote mount point field: %w", fields[4], err)
}
p.Fstype = fields[sepIdx+1]
p.Source = fields[sepIdx+2]
p.VfsOpts = fields[sepIdx+3]
// Run a filter soon so we can skip parsing/adding entries
// the caller is not interested in
var skip, stop bool
if filter != nil {
skip, stop = filter(p)
if skip {
continue
}
}
// Fill in the rest of the fields
// ignore any numbers parsing errors, as there should not be any
p.ID, _ = strconv.Atoi(fields[0])
p.Parent, _ = strconv.Atoi(fields[1])
mm := strings.Split(fields[2], ":")
if len(mm) != 2 {
return nil, fmt.Errorf("Parsing '%s' failed: unexpected minor:major pair %s", text, mm)
}
p.Major, _ = strconv.Atoi(mm[0])
p.Minor, _ = strconv.Atoi(mm[1])
p.Root, err = strconv.Unquote(`"` + fields[3] + `"`)
if err != nil {
return nil, fmt.Errorf("Parsing '%s' failed: unable to unquote root field: %w", fields[3], err)
}
p.Opts = fields[5]
// zero or more optional fields
switch {
case sepIdx == 6:
// zero, do nothing
case sepIdx == 7:
p.Optional = fields[6]
default:
p.Optional = strings.Join(fields[6:sepIdx-1], " ")
}
out = append(out, p)
if stop {
break
}
}
return out, nil
}
// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
// bind mounts
func parseMountTable(filter FilterFunc) ([]*Info, error) {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return nil, err
}
defer f.Close()
return parseInfoFile(f, filter)
}
// PidMountInfo collects the mounts for a specific process ID. If the process
// ID is unknown, it is better to use `GetMounts` which will inspect
// "/proc/self/mountinfo" instead.
func PidMountInfo(pid int) ([]*Info, error) {
f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
if err != nil {
return nil, err
}
defer f.Close()
return parseInfoFile(f, nil)
}


@ -0,0 +1,17 @@
// +build !windows,!linux,!freebsd freebsd,!cgo
package mountinfo
import (
"fmt"
"io"
"runtime"
)
func parseMountTable(_ FilterFunc) ([]*Info, error) {
return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
func parseInfoFile(_ io.Reader, f FilterFunc) ([]*Info, error) {
return parseMountTable(f)
}


@ -0,0 +1,12 @@
package mountinfo
import "io"
func parseMountTable(_ FilterFunc) ([]*Info, error) {
// Do NOT return an error!
return nil, nil
}
func parseInfoFile(_ io.Reader, f FilterFunc) ([]*Info, error) {
return parseMountTable(f)
}


@ -22,9 +22,10 @@ func CopyFile(source string, dest string) error {
uid := int(st.Uid)
gid := int(st.Gid)
modeType := si.Mode()&os.ModeType
// Handle symlinks
if si.Mode()&os.ModeSymlink != 0 {
if modeType == os.ModeSymlink {
target, err := os.Readlink(source)
if err != nil {
return err
@ -35,15 +36,14 @@ func CopyFile(source string, dest string) error {
}
// Handle device files
if st.Mode&syscall.S_IFMT == syscall.S_IFBLK || st.Mode&syscall.S_IFMT == syscall.S_IFCHR {
if modeType == os.ModeDevice {
devMajor := int64(major(uint64(st.Rdev)))
devMinor := int64(minor(uint64(st.Rdev)))
mode := uint32(si.Mode() & 07777)
if st.Mode&syscall.S_IFMT == syscall.S_IFBLK {
mode |= syscall.S_IFBLK
}
if st.Mode&syscall.S_IFMT == syscall.S_IFCHR {
mode := uint32(si.Mode() & os.ModePerm)
if si.Mode()&os.ModeCharDevice != 0 {
mode |= syscall.S_IFCHR
} else {
mode |= syscall.S_IFBLK
}
if err := syscall.Mknod(dest, mode, int(mkdev(devMajor, devMinor))); err != nil {
return err
@ -76,7 +76,7 @@ func CopyFile(source string, dest string) error {
}
// Chmod the file
if !(si.Mode()&os.ModeSymlink == os.ModeSymlink) {
if !(modeType == os.ModeSymlink) {
if err := os.Chmod(dest, si.Mode()); err != nil {
return err
}


@ -3,6 +3,7 @@ package fileutils
import (
"os"
"path/filepath"
"syscall"
)
// MkdirAllNewAs creates a directory (include any along the path) and then modifies
@ -14,9 +15,13 @@ func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
// chown the full directory path if it exists
var paths []string
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
st, err := os.Stat(path)
if err != nil && os.IsNotExist(err) {
paths = []string{path}
} else if err == nil {
if !st.IsDir() {
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
}
// nothing to do; directory path fully exists already
return nil
}
@ -34,7 +39,7 @@ func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error
}
}
if err := os.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
if err := os.MkdirAll(path, mode); err != nil {
return err
}


@ -155,8 +155,7 @@ config := &configs.Config{
Parent: "system",
Resources: &configs.Resources{
MemorySwappiness: nil,
AllowAllDevices: nil,
AllowedDevices: configs.DefaultAllowedDevices,
Devices: specconv.AllowedDevices,
},
},
MaskPaths: []string{
@ -166,7 +165,7 @@ config := &configs.Config{
ReadonlyPaths: []string{
"/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
},
Devices: configs.DefaultAutoCreatedDevices,
Devices: specconv.AllowedDevices,
Hostname: "testing",
Mounts: []*configs.Mount{
{


@ -3,8 +3,6 @@
package cgroups
import (
"fmt"
"github.com/opencontainers/runc/libcontainer/configs"
)
@ -27,48 +25,27 @@ type Manager interface {
// Destroys the cgroup set
Destroy() error
// The option func SystemdCgroups() and Cgroupfs() require following attributes:
// Paths map[string]string
// Cgroups *configs.Cgroup
// Paths maps cgroup subsystem to path at which it is mounted.
// Cgroups specifies specific cgroup settings for the various subsystems
// Returns cgroup paths to save in a state file and to be able to
// restore the object later.
GetPaths() map[string]string
// GetUnifiedPath returns the unified path when running in unified mode.
// The value corresponds to the all values of GetPaths() map.
//
// GetUnifiedPath returns error when running in hybrid mode as well as
// in legacy mode.
GetUnifiedPath() (string, error)
// Path returns a cgroup path to the specified controller/subsystem.
// For cgroupv2, the argument is unused and can be empty.
Path(string) string
// Sets the cgroup as configured.
Set(container *configs.Config) error
// Gets the cgroup as configured.
// GetPaths returns cgroup path(s) to save in a state file in order to restore later.
//
// For cgroup v1, a key is cgroup subsystem name, and the value is the path
// to the cgroup for this subsystem.
//
// For cgroup v2 unified hierarchy, a key is "", and the value is the unified path.
GetPaths() map[string]string
// GetCgroups returns the cgroup data as configured.
GetCgroups() (*configs.Cgroup, error)
}
type NotFoundError struct {
Subsystem string
}
// GetFreezerState retrieves the current FreezerState of the cgroup.
GetFreezerState() (configs.FreezerState, error)
func (e *NotFoundError) Error() string {
return fmt.Sprintf("mountpoint for %s not found", e.Subsystem)
}
func NewNotFoundError(sub string) error {
return &NotFoundError{
Subsystem: sub,
}
}
func IsNotFound(err error) bool {
if err == nil {
return false
}
_, ok := err.(*NotFoundError)
return ok
// Whether the cgroup path exists or not
Exists() bool
}


@ -0,0 +1,373 @@
// +build linux
// SPDX-License-Identifier: Apache-2.0
/*
* Copyright (C) 2020 Aleksa Sarai <cyphar@cyphar.com>
* Copyright (C) 2020 SUSE LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package devices
import (
"bufio"
"io"
"regexp"
"sort"
"strconv"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/pkg/errors"
)
// deviceMeta is a DeviceRule without the Allow or Permissions fields, and no
// wildcard-type support. It's effectively the "match" portion of a metadata
// rule, for the purposes of our emulation.
type deviceMeta struct {
node configs.DeviceType
major int64
minor int64
}
// deviceRule is effectively the tuple (deviceMeta, DevicePermissions).
type deviceRule struct {
meta deviceMeta
perms configs.DevicePermissions
}
// deviceRules is a mapping of device metadata rules to the associated
// permissions in the ruleset.
type deviceRules map[deviceMeta]configs.DevicePermissions
func (r deviceRules) orderedEntries() []deviceRule {
var rules []deviceRule
for meta, perms := range r {
rules = append(rules, deviceRule{meta: meta, perms: perms})
}
sort.Slice(rules, func(i, j int) bool {
// Sort by (major, minor, type).
a, b := rules[i].meta, rules[j].meta
return a.major < b.major ||
(a.major == b.major && a.minor < b.minor) ||
(a.major == b.major && a.minor == b.minor && a.node < b.node)
})
return rules
}
type Emulator struct {
defaultAllow bool
rules deviceRules
}
func (e *Emulator) IsBlacklist() bool {
return e.defaultAllow
}
func (e *Emulator) IsAllowAll() bool {
return e.IsBlacklist() && len(e.rules) == 0
}
var devicesListRegexp = regexp.MustCompile(`^([abc])\s+(\d+|\*):(\d+|\*)\s+([rwm]+)$`)
func parseLine(line string) (*deviceRule, error) {
matches := devicesListRegexp.FindStringSubmatch(line)
if matches == nil {
return nil, errors.Errorf("line doesn't match devices.list format")
}
var (
rule deviceRule
node = matches[1]
major = matches[2]
minor = matches[3]
perms = matches[4]
)
// Parse the node type.
switch node {
case "a":
// Super-special case -- "a" always means every device with every
// access mode. In fact, for devices.list this actually indicates that
// the cgroup is in black-list mode.
// TODO: Double-check that the entire file is "a *:* rwm".
return nil, nil
case "b":
rule.meta.node = configs.BlockDevice
case "c":
rule.meta.node = configs.CharDevice
default:
// Should never happen!
return nil, errors.Errorf("unknown device type %q", node)
}
// Parse the major number.
if major == "*" {
rule.meta.major = configs.Wildcard
} else {
val, err := strconv.ParseUint(major, 10, 32)
if err != nil {
return nil, errors.Wrap(err, "parse major number")
}
rule.meta.major = int64(val)
}
// Parse the minor number.
if minor == "*" {
rule.meta.minor = configs.Wildcard
} else {
val, err := strconv.ParseUint(minor, 10, 32)
if err != nil {
return nil, errors.Wrap(err, "parse minor number")
}
rule.meta.minor = int64(val)
}
// Parse the access permissions.
rule.perms = configs.DevicePermissions(perms)
if !rule.perms.IsValid() || rule.perms.IsEmpty() {
// Should never happen!
return nil, errors.Errorf("parse access mode: contained unknown modes or is empty: %q", perms)
}
return &rule, nil
}
func (e *Emulator) addRule(rule deviceRule) error {
if e.rules == nil {
e.rules = make(map[deviceMeta]configs.DevicePermissions)
}
// Merge with any pre-existing permissions.
oldPerms := e.rules[rule.meta]
newPerms := rule.perms.Union(oldPerms)
e.rules[rule.meta] = newPerms
return nil
}
func (e *Emulator) rmRule(rule deviceRule) error {
// Give an error if any of the permissions requested to be removed are
// present in a partially-matching wildcard rule, because such rules will
// be ignored by cgroupv1.
//
// This is a diversion from cgroupv1, but is necessary to avoid leading
// users into a false sense of security. cgroupv1 will silently(!) ignore
// requests to remove partial exceptions, but we really shouldn't do that.
//
// It may seem like we could just "split" wildcard rules which hit this
// issue, but unfortunately there are 2^32 possible major and minor
// numbers, which would exhaust kernel memory quickly if we did this. Not
// to mention it'd be really slow (the kernel side is implemented as a
// linked-list of exceptions).
for _, partialMeta := range []deviceMeta{
{node: rule.meta.node, major: configs.Wildcard, minor: rule.meta.minor},
{node: rule.meta.node, major: rule.meta.major, minor: configs.Wildcard},
{node: rule.meta.node, major: configs.Wildcard, minor: configs.Wildcard},
} {
// This wildcard rule is equivalent to the requested rule, so skip it.
if rule.meta == partialMeta {
continue
}
// Only give an error if the set of permissions overlap.
partialPerms := e.rules[partialMeta]
if !partialPerms.Intersection(rule.perms).IsEmpty() {
return errors.Errorf("requested rule [%v %v] not supported by devices cgroupv1 (cannot punch hole in existing wildcard rule [%v %v])", rule.meta, rule.perms, partialMeta, partialPerms)
}
}
// Subtract all of the permissions listed from the full match rule. If the
// rule didn't exist, all of this is a no-op.
newPerms := e.rules[rule.meta].Difference(rule.perms)
if newPerms.IsEmpty() {
delete(e.rules, rule.meta)
} else {
e.rules[rule.meta] = newPerms
}
// TODO: The actual cgroup code doesn't care if an exception didn't exist
// during removal, so not erroring out here is /accurate/ but quite
// worrying. Maybe we should do additional validation, but again we
// have to worry about backwards-compatibility.
return nil
}
func (e *Emulator) allow(rule *deviceRule) error {
// This cgroup is configured as a black-list. Reset the entire emulator,
// and put it into black-list mode.
if rule == nil || rule.meta.node == configs.WildcardDevice {
*e = Emulator{
defaultAllow: true,
rules: nil,
}
return nil
}
var err error
if e.defaultAllow {
err = errors.Wrap(e.rmRule(*rule), "remove 'deny' exception")
} else {
err = errors.Wrap(e.addRule(*rule), "add 'allow' exception")
}
return err
}
func (e *Emulator) deny(rule *deviceRule) error {
// This cgroup is configured as a white-list. Reset the entire emulator,
// and put it into white-list mode.
if rule == nil || rule.meta.node == configs.WildcardDevice {
*e = Emulator{
defaultAllow: false,
rules: nil,
}
return nil
}
var err error
if e.defaultAllow {
err = errors.Wrap(e.addRule(*rule), "add 'deny' exception")
} else {
err = errors.Wrap(e.rmRule(*rule), "remove 'allow' exception")
}
return err
}
func (e *Emulator) Apply(rule configs.DeviceRule) error {
if !rule.Type.CanCgroup() {
return errors.Errorf("cannot add rule [%#v] with non-cgroup type %q", rule, rule.Type)
}
innerRule := &deviceRule{
meta: deviceMeta{
node: rule.Type,
major: rule.Major,
minor: rule.Minor,
},
perms: rule.Permissions,
}
if innerRule.meta.node == configs.WildcardDevice {
innerRule = nil
}
if rule.Allow {
return e.allow(innerRule)
} else {
return e.deny(innerRule)
}
}
// EmulatorFromList takes a reader to a "devices.list"-like source, and returns
// a new Emulator that represents the state of the devices cgroup. Note that
// black-list devices cgroups cannot be fully reconstructed, due to limitations
// in the devices cgroup API. Instead, such cgroups are always treated as
// "allow all" cgroups.
func EmulatorFromList(list io.Reader) (*Emulator, error) {
// Normally cgroups are in black-list mode by default, but the way we
// figure out the current mode is whether or not devices.list has an
// allow-all rule. So we default to a white-list, and the existence of an
// "a *:* rwm" entry will tell us otherwise.
e := &Emulator{
defaultAllow: false,
}
// Parse the "devices.list".
s := bufio.NewScanner(list)
for s.Scan() {
line := s.Text()
deviceRule, err := parseLine(line)
if err != nil {
return nil, errors.Wrapf(err, "parsing line %q", line)
}
// "devices.list" is an allow list. Note that this means that in
// black-list mode, we have no idea what rules are in play. As a
// result, we need to be very careful in Transition().
if err := e.allow(deviceRule); err != nil {
return nil, errors.Wrapf(err, "adding devices.list rule")
}
}
if err := s.Err(); err != nil {
return nil, errors.Wrap(err, "reading devices.list lines")
}
return e, nil
}
// Transition calculates the minimally disruptive set of rules that need to
// be applied to a devices cgroup in order to transition to the given target.
// This means that any already-existing rules will not be applied, and
// disruptive rules (like denying all device access) will only be applied if
// necessary.
//
// This function is the sole reason for all of Emulator -- to allow us
// to figure out how to update a container's cgroups without causing spurious
// device errors (if possible).
func (source *Emulator) Transition(target *Emulator) ([]*configs.DeviceRule, error) {
var transitionRules []*configs.DeviceRule
oldRules := source.rules
// If the default policy doesn't match, we need to include a "disruptive"
// rule (either allow-all or deny-all) in order to switch the cgroup to the
// correct default policy.
//
// However, due to a limitation in "devices.list" we cannot be sure what
// deny rules are in place in a black-list cgroup. Thus if the source is a
// black-list we also have to include a disruptive rule.
if source.IsBlacklist() || source.defaultAllow != target.defaultAllow {
transitionRules = append(transitionRules, &configs.DeviceRule{
Type: 'a',
Major: -1,
Minor: -1,
Permissions: configs.DevicePermissions("rwm"),
Allow: target.defaultAllow,
})
// The old rules are only relevant if we aren't starting out with a
// disruptive rule.
oldRules = nil
}
// NOTE: We traverse through the rules in a sorted order so we always write
// the same set of rules (this is to aid testing).
// First, we create inverse rules for any old rules not in the new set.
// This includes partial-inverse rules for specific permissions. This is a
// no-op if we added a disruptive rule, since oldRules will be empty.
for _, rule := range oldRules.orderedEntries() {
meta, oldPerms := rule.meta, rule.perms
newPerms := target.rules[meta]
droppedPerms := oldPerms.Difference(newPerms)
if !droppedPerms.IsEmpty() {
transitionRules = append(transitionRules, &configs.DeviceRule{
Type: meta.node,
Major: meta.major,
Minor: meta.minor,
Permissions: droppedPerms,
Allow: target.defaultAllow,
})
}
}
// Add any additional rules which weren't in the old set. We happen to
// filter out rules which are present in both sets, though this isn't
// strictly necessary.
for _, rule := range target.rules.orderedEntries() {
meta, newPerms := rule.meta, rule.perms
oldPerms := oldRules[meta]
gainedPerms := newPerms.Difference(oldPerms)
if !gainedPerms.IsEmpty() {
transitionRules = append(transitionRules, &configs.DeviceRule{
Type: meta.node,
Major: meta.major,
Minor: meta.minor,
Permissions: gainedPerms,
Allow: !target.defaultAllow,
})
}
}
return transitionRules, nil
}
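
To see what Transition produces, here is a sketch that would sit next to the emulator in the same devices package (so it can reuse the configs import shown above); the devices.list content and the target rules are purely illustrative:

```
package devices

import (
	"fmt"
	"strings"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func demoTransition() error {
	// Current kernel state: the cgroup allows rwm on the char device 1:3.
	current, err := EmulatorFromList(strings.NewReader("c 1:3 rwm\n"))
	if err != nil {
		return err
	}

	// Desired state: read/write (no mknod) on 1:3, plus read on 1:5.
	target := &Emulator{}
	for _, r := range []configs.DeviceRule{
		{Type: configs.CharDevice, Major: 1, Minor: 3, Permissions: "rw", Allow: true},
		{Type: configs.CharDevice, Major: 1, Minor: 5, Permissions: "r", Allow: true},
	} {
		if err := target.Apply(r); err != nil {
			return err
		}
	}

	// Only the minimal rules are emitted: deny "m" on 1:3, allow "r" on 1:5.
	rules, err := current.Transition(target)
	if err != nil {
		return err
	}
	for _, rule := range rules {
		fmt.Printf("allow=%v type=%c %d:%d perms=%s\n",
			rule.Allow, rule.Type, rule.Major, rule.Minor, rule.Permissions)
	}
	return nil
}
```
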

Some files were not shown because too many files have changed in this diff.