remove all opencontainers/runc

Signed-off-by: Yoan Blanc <yoan@dosimple.ch>
Yoan Blanc 2020-03-21 15:33:30 +01:00
parent bdf8820bce
commit 7c2859ec95
GPG Key ID: 6058CF4574298812 (no known key found for this signature in database)
206 changed files with 0 additions and 33302 deletions


@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,213 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/
package dbus
import (
"fmt"
"os"
"strconv"
"strings"
"sync"
"github.com/godbus/dbus"
)
const (
alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`
num = `0123456789`
alphanum = alpha + num
signalBuffer = 100
)
// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped
func needsEscape(i int, b byte) bool {
// Escape everything that is not a-z, A-Z, or 0-9.
// Also escape 0-9 if it's the first character.
return strings.IndexByte(alphanum, b) == -1 ||
(i == 0 && strings.IndexByte(num, b) != -1)
}
// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the
// rules that systemd uses for serializing special characters.
func PathBusEscape(path string) string {
// Special case the empty string
if len(path) == 0 {
return "_"
}
n := []byte{}
for i := 0; i < len(path); i++ {
c := path[i]
if needsEscape(i, c) {
e := fmt.Sprintf("_%x", c)
n = append(n, []byte(e)...)
} else {
n = append(n, c)
}
}
return string(n)
}
// Conn is a connection to systemd's dbus endpoint.
type Conn struct {
// sysconn/sysobj are only used to call dbus methods
sysconn *dbus.Conn
sysobj dbus.BusObject
// sigconn/sigobj are only used to receive dbus signals
sigconn *dbus.Conn
sigobj dbus.BusObject
jobListener struct {
jobs map[dbus.ObjectPath]chan<- string
sync.Mutex
}
subscriber struct {
updateCh chan<- *SubStateUpdate
errCh chan<- error
sync.Mutex
ignore map[dbus.ObjectPath]int64
cleanIgnore int64
}
}
// New establishes a connection to any available bus and authenticates.
// Callers should call Close() when done with the connection.
func New() (*Conn, error) {
conn, err := NewSystemConnection()
if err != nil && os.Geteuid() == 0 {
return NewSystemdConnection()
}
return conn, err
}
// NewSystemConnection establishes a connection to the system bus and authenticates.
// Callers should call Close() when done with the connection
func NewSystemConnection() (*Conn, error) {
return NewConnection(func() (*dbus.Conn, error) {
return dbusAuthHelloConnection(dbus.SystemBusPrivate)
})
}
// NewUserConnection establishes a connection to the session bus and
// authenticates. This can be used to connect to systemd user instances.
// Callers should call Close() when done with the connection.
func NewUserConnection() (*Conn, error) {
return NewConnection(func() (*dbus.Conn, error) {
return dbusAuthHelloConnection(dbus.SessionBusPrivate)
})
}
// NewSystemdConnection establishes a private, direct connection to systemd.
// This can be used for communicating with systemd without a dbus daemon.
// Callers should call Close() when done with the connection.
func NewSystemdConnection() (*Conn, error) {
return NewConnection(func() (*dbus.Conn, error) {
// We skip Hello when talking directly to systemd.
return dbusAuthConnection(func() (*dbus.Conn, error) {
return dbus.Dial("unix:path=/run/systemd/private")
})
})
}
// Close closes an established connection
func (c *Conn) Close() {
c.sysconn.Close()
c.sigconn.Close()
}
// NewConnection establishes a connection to a bus using a caller-supplied function.
// This allows connecting to remote buses through a user-supplied mechanism.
// The supplied function may be called multiple times, and should return independent connections.
// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded,
// and any authentication should be handled by the function.
func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) {
sysconn, err := dialBus()
if err != nil {
return nil, err
}
sigconn, err := dialBus()
if err != nil {
sysconn.Close()
return nil, err
}
c := &Conn{
sysconn: sysconn,
sysobj: systemdObject(sysconn),
sigconn: sigconn,
sigobj: systemdObject(sigconn),
}
c.subscriber.ignore = make(map[dbus.ObjectPath]int64)
c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string)
// Setup the listeners on jobs so that we can get completions
c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
"type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'")
c.dispatch()
return c, nil
}
// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager
// interface. The value is returned in its string representation, as defined at
// https://developer.gnome.org/glib/unstable/gvariant-text.html
func (c *Conn) GetManagerProperty(prop string) (string, error) {
variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop)
if err != nil {
return "", err
}
return variant.String(), nil
}
func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
conn, err := createBus()
if err != nil {
return nil, err
}
// Only use EXTERNAL method, and hardcode the uid (not username)
// to avoid a username lookup (which requires a dynamically linked
// libc)
methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
err = conn.Auth(methods)
if err != nil {
conn.Close()
return nil, err
}
return conn, nil
}
func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
conn, err := dbusAuthConnection(createBus)
if err != nil {
return nil, err
}
if err = conn.Hello(); err != nil {
conn.Close()
return nil, err
}
return conn, nil
}
func systemdObject(conn *dbus.Conn) dbus.BusObject {
return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1"))
}
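
For reference, a minimal sketch (not taken from the removed file itself) of how this connection API was typically consumed, assuming the upstream import path github.com/coreos/go-systemd/dbus:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus" // assumed import path
)

func main() {
	// New tries the system bus first and, when running as root, falls back
	// to the private systemd socket.
	conn, err := dbus.New()
	if err != nil {
		log.Fatalf("connect to systemd: %v", err)
	}
	defer conn.Close()

	// GetManagerProperty returns the GVariant text representation of a
	// property on org.freedesktop.systemd1.Manager.
	version, err := conn.GetManagerProperty("Version")
	if err != nil {
		log.Fatalf("get Version: %v", err)
	}
	fmt.Println("systemd version:", version)

	// PathBusEscape applies systemd's object-path escaping rules,
	// e.g. "foo.service" becomes "foo_2eservice".
	fmt.Println(dbus.PathBusEscape("foo.service"))
}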


@@ -1,565 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbus
import (
"errors"
"path"
"strconv"
"github.com/godbus/dbus"
)
func (c *Conn) jobComplete(signal *dbus.Signal) {
var id uint32
var job dbus.ObjectPath
var unit string
var result string
dbus.Store(signal.Body, &id, &job, &unit, &result)
c.jobListener.Lock()
out, ok := c.jobListener.jobs[job]
if ok {
out <- result
delete(c.jobListener.jobs, job)
}
c.jobListener.Unlock()
}
func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
if ch != nil {
c.jobListener.Lock()
defer c.jobListener.Unlock()
}
var p dbus.ObjectPath
err := c.sysobj.Call(job, 0, args...).Store(&p)
if err != nil {
return 0, err
}
if ch != nil {
c.jobListener.jobs[p] = ch
}
// ignore error since 0 is fine if conversion fails
jobID, _ := strconv.Atoi(path.Base(string(p)))
return jobID, nil
}
// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
// specified by the mode string).
//
// Takes the unit to activate, plus a mode string. The mode needs to be one of
// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
// "replace" the call will start the unit and its dependencies, possibly
// replacing already queued jobs that conflict with this. If "fail" the call
// will start the unit and its dependencies, but will fail if this would change
// an already queued job. If "isolate" the call will start the unit in question
// and terminate all units that aren't dependencies of it. If
// "ignore-dependencies" it will start a unit but ignore all its dependencies.
// If "ignore-requirements" it will start a unit but only ignore the
// requirement dependencies. It is not recommended to make use of the latter
// two options.
//
// If the provided channel is non-nil, a result string will be sent to it upon
// job completion: one of done, canceled, timeout, failed, dependency, skipped.
// done indicates successful execution of a job. canceled indicates that a job
// has been canceled before it finished execution. timeout indicates that the
// job timeout was reached. failed indicates that the job failed. dependency
// indicates that a job this job has been depending on failed and the job hence
// has been removed too. skipped indicates that a job was skipped because it
didn't apply to the unit's current state.
//
// If no error occurs, the ID of the underlying systemd job will be returned. There
// does exist the possibility for no error to be returned, but for the returned job
// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint
// should not be considered authoritative.
//
// If an error does occur, it will be returned to the user alongside a job ID of 0.
func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
}
// StopUnit is similar to StartUnit but stops the specified unit rather
// than starting it.
func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
}
// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise.
func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
}
// RestartUnit restarts a service. If a service is restarted that isn't
// running it will be started.
func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
}
// TryRestartUnit is like RestartUnit, except that a service that isn't running
// is not affected by the restart.
func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
}
// ReloadOrRestartUnit attempts a reload if the unit supports it, and uses a
// restart otherwise.
func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
}
// ReloadOrTryRestartUnit attempts a reload if the unit supports it, and uses a
// "Try"-flavored restart otherwise.
func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
}
// StartTransientUnit() may be used to create and start a transient unit, which
// will be released as soon as it is not running or referenced anymore or the
// system is rebooted. name is the unit name including suffix, and must be
// unique. mode is the same as in StartUnit(), properties contains properties
// of the unit.
func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
}
// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
// processes are killed.
func (c *Conn) KillUnit(name string, signal int32) {
c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
}
// ResetFailedUnit resets the "failed" state of a specific unit.
func (c *Conn) ResetFailedUnit(name string) error {
return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
}
// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface
func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) {
var err error
var props map[string]dbus.Variant
path := unitPath(unit)
if !path.IsValid() {
return nil, errors.New("invalid unit name: " + unit)
}
obj := c.sysconn.Object("org.freedesktop.systemd1", path)
err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
if err != nil {
return nil, err
}
out := make(map[string]interface{}, len(props))
for k, v := range props {
out[k] = v.Value()
}
return out, nil
}
// GetUnitProperties takes the unit name and returns all of its dbus object properties.
func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
return c.getProperties(unit, "org.freedesktop.systemd1.Unit")
}
func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
var err error
var prop dbus.Variant
path := unitPath(unit)
if !path.IsValid() {
return nil, errors.New("invalid unit name: " + unit)
}
obj := c.sysconn.Object("org.freedesktop.systemd1", path)
err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
if err != nil {
return nil, err
}
return &Property{Name: propertyName, Value: prop}, nil
}
func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
}
// GetServiceProperty returns the property for the given service name and property name.
func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) {
return c.getProperty(service, "org.freedesktop.systemd1.Service", propertyName)
}
// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
return c.getProperties(unit, "org.freedesktop.systemd1."+unitType)
}
// SetUnitProperties() may be used to modify certain unit properties at runtime.
// Not all properties may be changed at runtime, but many resource management
// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
// instantly, and stored on disk for future boots, unless runtime is true, in which
// case the settings only apply until the next reboot. name is the name of the unit
// to modify. properties are the settings to set, encoded as an array of property
// name and value pairs.
func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
}
func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
}
type UnitStatus struct {
Name string // The primary unit name as string
Description string // The human readable description string
LoadState string // The load state (i.e. whether the unit file has been loaded successfully)
ActiveState string // The active state (i.e. whether the unit is currently started or not)
SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
Path dbus.ObjectPath // The unit object path
JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise
JobType string // The job type as string
JobPath dbus.ObjectPath // The job object path
}
type storeFunc func(retvalues ...interface{}) error
func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) {
result := make([][]interface{}, 0)
err := f(&result)
if err != nil {
return nil, err
}
resultInterface := make([]interface{}, len(result))
for i := range result {
resultInterface[i] = result[i]
}
status := make([]UnitStatus, len(result))
statusInterface := make([]interface{}, len(status))
for i := range status {
statusInterface[i] = &status[i]
}
err = dbus.Store(resultInterface, statusInterface...)
if err != nil {
return nil, err
}
return status, nil
}
// ListUnits returns an array with all currently loaded units. Note that
// units may be known by multiple names at the same time, and hence there might
// be more unit names loaded than actual units behind them.
func (c *Conn) ListUnits() ([]UnitStatus, error) {
return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store)
}
// ListUnitsFiltered returns an array with units filtered by state.
// It takes a list of units' statuses to filter.
func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) {
return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store)
}
// ListUnitsByPatterns returns an array with units.
// It takes a list of units' statuses and names to filter.
// Note that units may be known by multiple names at the same time,
// and hence there might be more unit names loaded than actual units behind them.
func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) {
return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store)
}
// ListUnitsByNames returns an array with units. It takes a list of units'
// names and returns a UnitStatus array. Compared to the ListUnitsByPatterns
// method, this method returns statuses even for inactive or non-existing
// units. The input array should contain exact unit names, not patterns.
func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) {
return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store)
}
type UnitFile struct {
Path string
Type string
}
func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) {
result := make([][]interface{}, 0)
err := f(&result)
if err != nil {
return nil, err
}
resultInterface := make([]interface{}, len(result))
for i := range result {
resultInterface[i] = result[i]
}
files := make([]UnitFile, len(result))
fileInterface := make([]interface{}, len(files))
for i := range files {
fileInterface[i] = &files[i]
}
err = dbus.Store(resultInterface, fileInterface...)
if err != nil {
return nil, err
}
return files, nil
}
// ListUnitFiles returns an array of all available units on disk.
func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store)
}
// ListUnitFilesByPatterns returns an array of all available units on disk matched the patterns.
func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) {
return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store)
}
type LinkUnitFileChange EnableUnitFileChange
// LinkUnitFiles() links unit files (that are located outside of the
// usual unit search paths) into the unit search path.
//
// It takes a list of absolute paths to unit files to link and two
// booleans. The first boolean controls whether the unit shall be
// enabled for runtime only (true, /run), or persistently (false,
// /etc).
// The second controls whether symlinks pointing to other units shall
// be replaced if necessary.
//
// This call returns a list of the changes made. The list consists of
// structures with three strings: the type of the change (one of symlink
// or unlink), the file name of the symlink and the destination of the
// symlink.
func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
result := make([][]interface{}, 0)
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
if err != nil {
return nil, err
}
resultInterface := make([]interface{}, len(result))
for i := range result {
resultInterface[i] = result[i]
}
changes := make([]LinkUnitFileChange, len(result))
changesInterface := make([]interface{}, len(changes))
for i := range changes {
changesInterface[i] = &changes[i]
}
err = dbus.Store(resultInterface, changesInterface...)
if err != nil {
return nil, err
}
return changes, nil
}
// EnableUnitFiles() may be used to enable one or more units in the system (by
// creating symlinks to them in /etc or /run).
//
// It takes a list of unit files to enable (either just file names or full
// absolute paths if the unit files are residing outside the usual unit
// search paths), and two booleans: the first controls whether the unit shall
// be enabled for runtime only (true, /run), or persistently (false, /etc).
// The second one controls whether symlinks pointing to other units shall
// be replaced if necessary.
//
// This call returns one boolean and an array with the changes made. The
// boolean signals whether the unit files contained any enablement
// information (i.e. an [Install] section). The changes list consists of
// structures with three strings: the type of the change (one of symlink
// or unlink), the file name of the symlink and the destination of the
// symlink.
func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
var carries_install_info bool
result := make([][]interface{}, 0)
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
if err != nil {
return false, nil, err
}
resultInterface := make([]interface{}, len(result))
for i := range result {
resultInterface[i] = result[i]
}
changes := make([]EnableUnitFileChange, len(result))
changesInterface := make([]interface{}, len(changes))
for i := range changes {
changesInterface[i] = &changes[i]
}
err = dbus.Store(resultInterface, changesInterface...)
if err != nil {
return false, nil, err
}
return carries_install_info, changes, nil
}
type EnableUnitFileChange struct {
Type string // Type of the change (one of symlink or unlink)
Filename string // File name of the symlink
Destination string // Destination of the symlink
}
// DisableUnitFiles() may be used to disable one or more units in the system (by
// removing symlinks to them from /etc or /run).
//
// It takes a list of unit files to disable (either just file names or full
// absolute paths if the unit files are residing outside the usual unit
// search paths), and one boolean: whether the unit was enabled for runtime
// only (true, /run), or persistently (false, /etc).
//
// This call returns an array with the changes made. The changes list
// consists of structures with three strings: the type of the change (one of
// symlink or unlink), the file name of the symlink and the destination of the
// symlink.
func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
result := make([][]interface{}, 0)
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
if err != nil {
return nil, err
}
resultInterface := make([]interface{}, len(result))
for i := range result {
resultInterface[i] = result[i]
}
changes := make([]DisableUnitFileChange, len(result))
changesInterface := make([]interface{}, len(changes))
for i := range changes {
changesInterface[i] = &changes[i]
}
err = dbus.Store(resultInterface, changesInterface...)
if err != nil {
return nil, err
}
return changes, nil
}
type DisableUnitFileChange struct {
Type string // Type of the change (one of symlink or unlink)
Filename string // File name of the symlink
Destination string // Destination of the symlink
}
// MaskUnitFiles masks one or more units in the system
//
// It takes three arguments:
// * list of units to mask (either just file names or full
// absolute paths if the unit files are residing outside
// the usual unit search paths)
// * runtime to specify whether the unit was enabled for runtime
// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
// * force flag
func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
result := make([][]interface{}, 0)
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result)
if err != nil {
return nil, err
}
resultInterface := make([]interface{}, len(result))
for i := range result {
resultInterface[i] = result[i]
}
changes := make([]MaskUnitFileChange, len(result))
changesInterface := make([]interface{}, len(changes))
for i := range changes {
changesInterface[i] = &changes[i]
}
err = dbus.Store(resultInterface, changesInterface...)
if err != nil {
return nil, err
}
return changes, nil
}
type MaskUnitFileChange struct {
Type string // Type of the change (one of symlink or unlink)
Filename string // File name of the symlink
Destination string // Destination of the symlink
}
// UnmaskUnitFiles unmasks one or more units in the system
//
// It takes two arguments:
// * list of unit files to unmask (either just file names or full
// absolute paths if the unit files are residing outside
// the usual unit search paths)
// * runtime to specify whether the unit was enabled for runtime
// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
result := make([][]interface{}, 0)
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result)
if err != nil {
return nil, err
}
resultInterface := make([]interface{}, len(result))
for i := range result {
resultInterface[i] = result[i]
}
changes := make([]UnmaskUnitFileChange, len(result))
changesInterface := make([]interface{}, len(changes))
for i := range changes {
changesInterface[i] = &changes[i]
}
err = dbus.Store(resultInterface, changesInterface...)
if err != nil {
return nil, err
}
return changes, nil
}
type UnmaskUnitFileChange struct {
Type string // Type of the change (one of symlink or unlink)
Filename string // File name of the symlink
Destination string // Destination of the symlink
}
// Reload instructs systemd to scan for and reload unit files. This is
// equivalent to a 'systemctl daemon-reload'.
func (c *Conn) Reload() error {
return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store()
}
func unitPath(name string) dbus.ObjectPath {
return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
}
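
A rough sketch of the job API documented above (StartUnit modes, the completion channel, and EnableUnitFiles); the unit name is illustrative and the import path is assumed:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus" // assumed import path
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Buffered so the signal dispatcher never blocks when delivering the result.
	result := make(chan string, 1)
	if _, err := conn.StartUnit("foo.service", "replace", result); err != nil {
		log.Fatalf("start: %v", err)
	}
	// One of: done, canceled, timeout, failed, dependency, skipped.
	fmt.Println("job result:", <-result)

	// EnableUnitFiles reports whether the files carried [Install] information
	// and which symlinks were created or removed.
	hasInstall, changes, err := conn.EnableUnitFiles([]string{"foo.service"}, false, true)
	if err != nil {
		log.Fatalf("enable: %v", err)
	}
	fmt.Println("had [Install] section:", hasInstall)
	for _, ch := range changes {
		fmt.Printf("%s %s -> %s\n", ch.Type, ch.Filename, ch.Destination)
	}
}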


@@ -1,237 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbus
import (
"github.com/godbus/dbus"
)
// From the systemd docs:
//
// The properties array of StartTransientUnit() may take many of the settings
// that may also be configured in unit files. Not all parameters are currently
// accepted though, but we plan to cover more properties with future release.
// Currently you may set the Description, Slice and all dependency types of
// units, as well as RemainAfterExit, ExecStart for service units,
// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
// directly to their counterparts in unit files and as normal D-Bus object
// properties. The exception here is the PIDs field of scope units which is
// used for construction of the scope only and specifies the initial PIDs to
// add to the scope object.
type Property struct {
Name string
Value dbus.Variant
}
type PropertyCollection struct {
Name string
Properties []Property
}
type execStart struct {
Path string // the binary path to execute
Args []string // an array with all arguments to pass to the executed command, starting with argument 0
UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly
}
// PropExecStart sets the ExecStart service property. The first argument is a
// slice with the binary path to execute followed by the arguments to pass to
// the executed command. See
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
func PropExecStart(command []string, uncleanIsFailure bool) Property {
execStarts := []execStart{
execStart{
Path: command[0],
Args: command,
UncleanIsFailure: uncleanIsFailure,
},
}
return Property{
Name: "ExecStart",
Value: dbus.MakeVariant(execStarts),
}
}
// PropRemainAfterExit sets the RemainAfterExit service property. See
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
func PropRemainAfterExit(b bool) Property {
return Property{
Name: "RemainAfterExit",
Value: dbus.MakeVariant(b),
}
}
// PropType sets the Type service property. See
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=
func PropType(t string) Property {
return Property{
Name: "Type",
Value: dbus.MakeVariant(t),
}
}
// PropDescription sets the Description unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Description=
func PropDescription(desc string) Property {
return Property{
Name: "Description",
Value: dbus.MakeVariant(desc),
}
}
func propDependency(name string, units []string) Property {
return Property{
Name: name,
Value: dbus.MakeVariant(units),
}
}
// PropRequires sets the Requires unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
func PropRequires(units ...string) Property {
return propDependency("Requires", units)
}
// PropRequiresOverridable sets the RequiresOverridable unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
func PropRequiresOverridable(units ...string) Property {
return propDependency("RequiresOverridable", units)
}
// PropRequisite sets the Requisite unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
func PropRequisite(units ...string) Property {
return propDependency("Requisite", units)
}
// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
func PropRequisiteOverridable(units ...string) Property {
return propDependency("RequisiteOverridable", units)
}
// PropWants sets the Wants unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
func PropWants(units ...string) Property {
return propDependency("Wants", units)
}
// PropBindsTo sets the BindsTo unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
func PropBindsTo(units ...string) Property {
return propDependency("BindsTo", units)
}
// PropRequiredBy sets the RequiredBy unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
func PropRequiredBy(units ...string) Property {
return propDependency("RequiredBy", units)
}
// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
func PropRequiredByOverridable(units ...string) Property {
return propDependency("RequiredByOverridable", units)
}
// PropWantedBy sets the WantedBy unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
func PropWantedBy(units ...string) Property {
return propDependency("WantedBy", units)
}
// PropBoundBy sets the BoundBy unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
func PropBoundBy(units ...string) Property {
return propDependency("BoundBy", units)
}
// PropConflicts sets the Conflicts unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
func PropConflicts(units ...string) Property {
return propDependency("Conflicts", units)
}
// PropConflictedBy sets the ConflictedBy unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
func PropConflictedBy(units ...string) Property {
return propDependency("ConflictedBy", units)
}
// PropBefore sets the Before unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
func PropBefore(units ...string) Property {
return propDependency("Before", units)
}
// PropAfter sets the After unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
func PropAfter(units ...string) Property {
return propDependency("After", units)
}
// PropOnFailure sets the OnFailure unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
func PropOnFailure(units ...string) Property {
return propDependency("OnFailure", units)
}
// PropTriggers sets the Triggers unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
func PropTriggers(units ...string) Property {
return propDependency("Triggers", units)
}
// PropTriggeredBy sets the TriggeredBy unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
func PropTriggeredBy(units ...string) Property {
return propDependency("TriggeredBy", units)
}
// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
func PropPropagatesReloadTo(units ...string) Property {
return propDependency("PropagatesReloadTo", units)
}
// PropRequiresMountsFor sets the RequiresMountsFor unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
func PropRequiresMountsFor(units ...string) Property {
return propDependency("RequiresMountsFor", units)
}
// PropSlice sets the Slice unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
func PropSlice(slice string) Property {
return Property{
Name: "Slice",
Value: dbus.MakeVariant(slice),
}
}
// PropPids sets the PIDs field of scope units used in the initial construction
// of the scope only and specifies the initial PIDs to add to the scope object.
// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties
func PropPids(pids ...uint32) Property {
return Property{
Name: "PIDs",
Value: dbus.MakeVariant(pids),
}
}
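
A sketch combining these property helpers with StartTransientUnit from the previous file; the unit name and command are illustrative, and the import path is assumed:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus" // assumed import path
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	props := []dbus.Property{
		dbus.PropDescription("illustrative transient unit"),
		// The first element doubles as the binary path (argument 0);
		// uncleanIsFailure=false tolerates a non-zero exit status.
		dbus.PropExecStart([]string{"/bin/sleep", "10"}, false),
	}

	done := make(chan string, 1)
	if _, err := conn.StartTransientUnit("example-transient.service", "replace", props, done); err != nil {
		log.Fatalf("start transient unit: %v", err)
	}
	fmt.Println("job result:", <-done)
}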


@@ -1,47 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbus
type set struct {
data map[string]bool
}
func (s *set) Add(value string) {
s.data[value] = true
}
func (s *set) Remove(value string) {
delete(s.data, value)
}
func (s *set) Contains(value string) (exists bool) {
_, exists = s.data[value]
return
}
func (s *set) Length() int {
return len(s.data)
}
func (s *set) Values() (values []string) {
for val := range s.data {
values = append(values, val)
}
return
}
func newSet() *set {
return &set{make(map[string]bool)}
}


@@ -1,250 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbus
import (
"errors"
"time"
"github.com/godbus/dbus"
)
const (
cleanIgnoreInterval = int64(10 * time.Second)
ignoreInterval = int64(30 * time.Millisecond)
)
// Subscribe sets up this connection to subscribe to all systemd dbus events.
// This is required before calling SubscribeUnits. When the connection closes
// systemd will automatically stop sending signals so there is no need to
// explicitly call Unsubscribe().
func (c *Conn) Subscribe() error {
c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
"type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'")
c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
"type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'")
err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store()
if err != nil {
return err
}
return nil
}
// Unsubscribe this connection from systemd dbus events.
func (c *Conn) Unsubscribe() error {
err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store()
if err != nil {
return err
}
return nil
}
func (c *Conn) dispatch() {
ch := make(chan *dbus.Signal, signalBuffer)
c.sigconn.Signal(ch)
go func() {
for {
signal, ok := <-ch
if !ok {
return
}
if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" {
c.jobComplete(signal)
}
if c.subscriber.updateCh == nil {
continue
}
var unitPath dbus.ObjectPath
switch signal.Name {
case "org.freedesktop.systemd1.Manager.JobRemoved":
unitName := signal.Body[2].(string)
c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath)
case "org.freedesktop.systemd1.Manager.UnitNew":
unitPath = signal.Body[1].(dbus.ObjectPath)
case "org.freedesktop.DBus.Properties.PropertiesChanged":
if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" {
unitPath = signal.Path
}
}
if unitPath == dbus.ObjectPath("") {
continue
}
c.sendSubStateUpdate(unitPath)
}
}()
}
// Returns two unbuffered channels which will receive all changed units every
// interval. Deleted units are sent as nil.
func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
}
// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
// size of the channels, the comparison function for detecting changes and a filter
// function for cutting down on the noise that your channel receives.
func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
old := make(map[string]*UnitStatus)
statusChan := make(chan map[string]*UnitStatus, buffer)
errChan := make(chan error, buffer)
go func() {
for {
timerChan := time.After(interval)
units, err := c.ListUnits()
if err == nil {
cur := make(map[string]*UnitStatus)
for i := range units {
if filterUnit != nil && filterUnit(units[i].Name) {
continue
}
cur[units[i].Name] = &units[i]
}
// add all new or changed units
changed := make(map[string]*UnitStatus)
for n, u := range cur {
if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
changed[n] = u
}
delete(old, n)
}
// add all deleted units
for oldN := range old {
changed[oldN] = nil
}
old = cur
if len(changed) != 0 {
statusChan <- changed
}
} else {
errChan <- err
}
<-timerChan
}
}()
return statusChan, errChan
}
type SubStateUpdate struct {
UnitName string
SubState string
}
// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
// Although this writes to updateCh on every state change, the reported state
// may be more recent than the change that generated it (due to an unavoidable
// race in the systemd dbus interface). That is, this method provides a good
// way to keep a current view of all units' states, but is not guaranteed to
// show every state transition they go through. Furthermore, state changes
// will only be written to the channel with non-blocking writes. If updateCh
// is full, it attempts to write an error to errCh; if errCh is full, the error
// passes silently.
func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
c.subscriber.Lock()
defer c.subscriber.Unlock()
c.subscriber.updateCh = updateCh
c.subscriber.errCh = errCh
}
func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) {
c.subscriber.Lock()
defer c.subscriber.Unlock()
if c.shouldIgnore(path) {
return
}
info, err := c.GetUnitProperties(string(path))
if err != nil {
select {
case c.subscriber.errCh <- err:
default:
}
}
name := info["Id"].(string)
substate := info["SubState"].(string)
update := &SubStateUpdate{name, substate}
select {
case c.subscriber.updateCh <- update:
default:
select {
case c.subscriber.errCh <- errors.New("update channel full!"):
default:
}
}
c.updateIgnore(path, info)
}
// The ignore functions work around a wart in the systemd dbus interface.
// Requesting the properties of an unloaded unit will cause systemd to send a
// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
// properties on UnitNew (as that's the only indication of a new unit coming up
// for the first time), we would enter an infinite loop if we did not attempt
// to detect and ignore these spurious signals. The signals themselves are
// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
// unloaded unit's signals for a short time after requesting its properties.
// This means that we will miss e.g. a transient unit being restarted
// *immediately* upon failure and also a transient unit being started
// immediately after requesting its status (with systemctl status, for example,
// because this causes a UnitNew signal to be sent which then causes us to fetch
// the properties).
func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
t, ok := c.subscriber.ignore[path]
return ok && t >= time.Now().UnixNano()
}
func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
c.cleanIgnore()
// unit is unloaded - it will trigger bad systemd dbus behavior
if info["LoadState"].(string) == "not-found" {
c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
}
}
// without this, ignore would grow unboundedly over time
func (c *Conn) cleanIgnore() {
now := time.Now().UnixNano()
if c.subscriber.cleanIgnore < now {
c.subscriber.cleanIgnore = now + cleanIgnoreInterval
for p, t := range c.subscriber.ignore {
if t < now {
delete(c.subscriber.ignore, p)
}
}
}
}
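
A sketch of the two subscription styles described above, the polling SubscribeUnits channels and the signal-driven SetSubStateSubscriber, under the same assumed import path:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/go-systemd/dbus" // assumed import path
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Required before the subscription helpers deliver anything.
	if err := conn.Subscribe(); err != nil {
		log.Fatal(err)
	}

	// Substate updates are written non-blockingly, so keep the channel
	// generously buffered.
	updateCh := make(chan *dbus.SubStateUpdate, 256)
	errCh := make(chan error, 8)
	conn.SetSubStateSubscriber(updateCh, errCh)

	// Polling view of changed units every interval; deleted units arrive as nil.
	statusCh, statusErrCh := conn.SubscribeUnits(5 * time.Second)

	for {
		select {
		case u := <-updateCh:
			fmt.Printf("%s is now %s\n", u.UnitName, u.SubState)
		case changed := <-statusCh:
			fmt.Println(len(changed), "unit(s) changed")
		case err := <-errCh:
			log.Println("subscriber error:", err)
		case err := <-statusErrCh:
			log.Println("poll error:", err)
		}
	}
}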


@@ -1,57 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbus
import (
"time"
)
// SubscriptionSet returns a subscription set which is like conn.Subscribe but
// can filter to only return events for a set of units.
type SubscriptionSet struct {
*set
conn *Conn
}
func (s *SubscriptionSet) filter(unit string) bool {
return !s.Contains(unit)
}
// Subscribe starts listening for dbus events for all of the units in the set.
// Returns channels identical to conn.SubscribeUnits.
func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
// TODO: Make fully evented by using systemd 209 with properties changed values
return s.conn.SubscribeUnitsCustom(time.Second, 0,
mismatchUnitStatus,
func(unit string) bool { return s.filter(unit) },
)
}
// NewSubscriptionSet returns a new subscription set.
func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
return &SubscriptionSet{newSet(), conn}
}
// mismatchUnitStatus returns true if the provided UnitStatus objects
// are not equivalent. false is returned if the objects are equivalent.
// Only the Name, Description and state-related fields are used in
// the comparison.
func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
return u1.Name != u2.Name ||
u1.Description != u2.Description ||
u1.LoadState != u2.LoadState ||
u1.ActiveState != u2.ActiveState ||
u1.SubState != u2.SubState
}
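
A sketch of filtering events to a fixed set of units with NewSubscriptionSet; the unit name is illustrative and the import path is assumed:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus" // assumed import path
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Subscribe(); err != nil {
		log.Fatal(err)
	}

	// Only units added to the set produce events on the returned channels.
	subs := conn.NewSubscriptionSet()
	subs.Add("foo.service")
	statusCh, errCh := subs.Subscribe()

	for {
		select {
		case changed := <-statusCh:
			for name, status := range changed {
				if status == nil {
					fmt.Println(name, "was removed")
					continue
				}
				fmt.Println(name, status.ActiveState, status.SubState)
			}
		case err := <-errCh:
			log.Println(err)
		}
	}
}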


@@ -1,90 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package util contains utility functions related to systemd that applications
// can use to check things like whether systemd is running. Note that some of
// these functions attempt to manually load systemd libraries at runtime rather
// than linking against them.
package util
import (
"fmt"
"io/ioutil"
"os"
"strings"
)
var (
ErrNoCGO = fmt.Errorf("go-systemd built with CGO disabled")
)
// GetRunningSlice attempts to retrieve the name of the systemd slice in which
// the current process is running.
// This function is a wrapper around the libsystemd C library; if it cannot be
// opened, an error is returned.
func GetRunningSlice() (string, error) {
return getRunningSlice()
}
// RunningFromSystemService tries to detect whether the current process has
// been invoked from a system service. The condition for this is whether the
// process is _not_ a user process. User processes are those running in session
// scopes or under per-user `systemd --user` instances.
//
// To avoid false positives on systems without `pam_systemd` (which is
// responsible for creating user sessions), this function also uses a heuristic
// to detect whether it's being invoked from a session leader process. This is
// the case if the current process is executed directly from a service file
// (e.g. with `ExecStart=/this/cmd`). Note that this heuristic will fail if the
// command is instead launched in a subshell or similar so that it is not
// session leader (e.g. `ExecStart=/bin/bash -c "/this/cmd"`)
//
// This function is a wrapper around the libsystemd C library; if this is
// unable to successfully open a handle to the library for any reason (e.g. it
// cannot be found), an error will be returned.
func RunningFromSystemService() (bool, error) {
return runningFromSystemService()
}
// CurrentUnitName attempts to retrieve the name of the systemd system unit
// from which the calling process has been invoked. It wraps the systemd
// `sd_pid_get_unit` call, with the same caveat: for processes not part of a
// systemd system unit, this function will return an error.
func CurrentUnitName() (string, error) {
return currentUnitName()
}
// IsRunningSystemd checks whether the host was booted with systemd as its init
// system. This functions similarly to systemd's `sd_booted(3)`: internally, it
// checks whether /run/systemd/system/ exists and is a directory.
// http://www.freedesktop.org/software/systemd/man/sd_booted.html
func IsRunningSystemd() bool {
fi, err := os.Lstat("/run/systemd/system")
if err != nil {
return false
}
return fi.IsDir()
}
// GetMachineID returns a host's 128-bit machine ID as a string. This functions
// similarly to systemd's `sd_id128_get_machine`: internally, it simply reads
// the contents of /etc/machine-id
// http://www.freedesktop.org/software/systemd/man/sd_id128_get_machine.html
func GetMachineID() (string, error) {
machineID, err := ioutil.ReadFile("/etc/machine-id")
if err != nil {
return "", fmt.Errorf("failed to read /etc/machine-id: %v", err)
}
return strings.TrimSpace(string(machineID)), nil
}
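A short usage sketch for this package, assuming it is importable as `github.com/coreos/go-systemd/util` (the dlopen-backed calls only succeed when built with cgo and when libsystemd can be found):

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/util"
)

func main() {
	if !util.IsRunningSystemd() {
		log.Fatal("this host was not booted with systemd")
	}

	if id, err := util.GetMachineID(); err == nil {
		fmt.Println("machine-id:", id)
	}

	// Wraps libsystemd via dlopen; returns an error when built without cgo,
	// when the library is missing, or when we are not part of a system unit.
	if unit, err := util.CurrentUnitName(); err == nil {
		fmt.Println("running from unit:", unit)
	} else {
		fmt.Println("no unit name available:", err)
	}
}
```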


@ -1,174 +0,0 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build cgo
package util
// #include <stdlib.h>
// #include <sys/types.h>
// #include <unistd.h>
//
// int
// my_sd_pid_get_owner_uid(void *f, pid_t pid, uid_t *uid)
// {
// int (*sd_pid_get_owner_uid)(pid_t, uid_t *);
//
// sd_pid_get_owner_uid = (int (*)(pid_t, uid_t *))f;
// return sd_pid_get_owner_uid(pid, uid);
// }
//
// int
// my_sd_pid_get_unit(void *f, pid_t pid, char **unit)
// {
// int (*sd_pid_get_unit)(pid_t, char **);
//
// sd_pid_get_unit = (int (*)(pid_t, char **))f;
// return sd_pid_get_unit(pid, unit);
// }
//
// int
// my_sd_pid_get_slice(void *f, pid_t pid, char **slice)
// {
// int (*sd_pid_get_slice)(pid_t, char **);
//
// sd_pid_get_slice = (int (*)(pid_t, char **))f;
// return sd_pid_get_slice(pid, slice);
// }
//
// int
// am_session_leader()
// {
// return (getsid(0) == getpid());
// }
import "C"
import (
"fmt"
"syscall"
"unsafe"
"github.com/coreos/pkg/dlopen"
)
var libsystemdNames = []string{
// systemd < 209
"libsystemd-login.so.0",
"libsystemd-login.so",
// systemd >= 209 merged libsystemd-login into libsystemd proper
"libsystemd.so.0",
"libsystemd.so",
}
func getRunningSlice() (slice string, err error) {
var h *dlopen.LibHandle
h, err = dlopen.GetHandle(libsystemdNames)
if err != nil {
return
}
defer func() {
if err1 := h.Close(); err1 != nil {
err = err1
}
}()
sd_pid_get_slice, err := h.GetSymbolPointer("sd_pid_get_slice")
if err != nil {
return
}
var s string
sl := C.CString(s)
defer C.free(unsafe.Pointer(sl))
ret := C.my_sd_pid_get_slice(sd_pid_get_slice, 0, &sl)
if ret < 0 {
err = fmt.Errorf("error calling sd_pid_get_slice: %v", syscall.Errno(-ret))
return
}
return C.GoString(sl), nil
}
func runningFromSystemService() (ret bool, err error) {
var h *dlopen.LibHandle
h, err = dlopen.GetHandle(libsystemdNames)
if err != nil {
return
}
defer func() {
if err1 := h.Close(); err1 != nil {
err = err1
}
}()
sd_pid_get_owner_uid, err := h.GetSymbolPointer("sd_pid_get_owner_uid")
if err != nil {
return
}
var uid C.uid_t
errno := C.my_sd_pid_get_owner_uid(sd_pid_get_owner_uid, 0, &uid)
serrno := syscall.Errno(-errno)
// when we're running from a unit file, sd_pid_get_owner_uid returns
// ENOENT (systemd <220) or ENXIO (systemd >=220)
switch {
case errno >= 0:
ret = false
case serrno == syscall.ENOENT, serrno == syscall.ENXIO:
// Since the implementation of sessions in systemd relies on
// the `pam_systemd` module, using the sd_pid_get_owner_uid
// heuristic alone can result in false positives if that module
// (or PAM itself) is not present or properly configured on the
// system. As such, we also check if we're the session leader,
// which should be the case if we're invoked from a unit file,
// but not if e.g. we're invoked from the command line from a
// user's login session
ret = C.am_session_leader() == 1
default:
err = fmt.Errorf("error calling sd_pid_get_owner_uid: %v", syscall.Errno(-errno))
}
return
}
func currentUnitName() (unit string, err error) {
var h *dlopen.LibHandle
h, err = dlopen.GetHandle(libsystemdNames)
if err != nil {
return
}
defer func() {
if err1 := h.Close(); err1 != nil {
err = err1
}
}()
sd_pid_get_unit, err := h.GetSymbolPointer("sd_pid_get_unit")
if err != nil {
return
}
var s string
u := C.CString(s)
defer C.free(unsafe.Pointer(u))
ret := C.my_sd_pid_get_unit(sd_pid_get_unit, 0, &u)
if ret < 0 {
err = fmt.Errorf("error calling sd_pid_get_unit: %v", syscall.Errno(-ret))
return
}
unit = C.GoString(u)
return
}


@ -1,23 +0,0 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !cgo
package util
func getRunningSlice() (string, error) { return "", ErrNoCGO }
func runningFromSystemService() (bool, error) { return false, ErrNoCGO }
func currentUnitName() (string, error) { return "", ErrNoCGO }

vendor/github.com/coreos/pkg/LICENSE (201 lines, generated, vendored)

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,5 +0,0 @@
CoreOS Project
Copyright 2014 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).


@ -1,82 +0,0 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package dlopen provides some convenience functions to dlopen a library and
// get its symbols.
package dlopen
// #cgo LDFLAGS: -ldl
// #include <stdlib.h>
// #include <dlfcn.h>
import "C"
import (
"errors"
"fmt"
"unsafe"
)
var ErrSoNotFound = errors.New("unable to open a handle to the library")
// LibHandle represents an open handle to a library (.so)
type LibHandle struct {
Handle unsafe.Pointer
Libname string
}
// GetHandle tries to get a handle to a library (.so), attempting to access it
// by the names specified in libs and returning the first that is successfully
// opened. Callers are responsible for closing the handler. If no library can
// be successfully opened, an error is returned.
func GetHandle(libs []string) (*LibHandle, error) {
for _, name := range libs {
libname := C.CString(name)
defer C.free(unsafe.Pointer(libname))
handle := C.dlopen(libname, C.RTLD_LAZY)
if handle != nil {
h := &LibHandle{
Handle: handle,
Libname: name,
}
return h, nil
}
}
return nil, ErrSoNotFound
}
// GetSymbolPointer takes a symbol name and returns a pointer to the symbol.
func (l *LibHandle) GetSymbolPointer(symbol string) (unsafe.Pointer, error) {
sym := C.CString(symbol)
defer C.free(unsafe.Pointer(sym))
C.dlerror()
p := C.dlsym(l.Handle, sym)
e := C.dlerror()
if e != nil {
return nil, fmt.Errorf("error resolving symbol %q: %v", symbol, errors.New(C.GoString(e)))
}
return p, nil
}
// Close closes a LibHandle.
func (l *LibHandle) Close() error {
C.dlerror()
C.dlclose(l.Handle)
e := C.dlerror()
if e != nil {
return fmt.Errorf("error closing %v: %v", l.Libname, errors.New(C.GoString(e)))
}
return nil
}
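A small usage sketch for this package, assuming it is importable as `github.com/coreos/pkg/dlopen` and that a libc soname such as `libc.so.6` exists on the host; invoking the resolved symbol still needs a cgo shim, as the strlen example in the next file shows:

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/pkg/dlopen"
)

func main() {
	// Try a couple of common sonames; the first one that opens wins.
	h, err := dlopen.GetHandle([]string{"libc.so.6", "libc.so"})
	if err != nil {
		log.Fatalf("dlopen failed: %v", err)
	}
	defer h.Close()

	// Resolve a symbol by name. Calling it requires a cgo wrapper.
	sym, err := h.GetSymbolPointer("strlen")
	if err != nil {
		log.Fatalf("dlsym failed: %v", err)
	}
	fmt.Printf("opened %s, strlen is at %p\n", h.Libname, sym)
}
```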


@ -1,56 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build linux
package dlopen
// #include <string.h>
// #include <stdlib.h>
//
// int
// my_strlen(void *f, const char *s)
// {
// size_t (*strlen)(const char *);
//
// strlen = (size_t (*)(const char *))f;
// return strlen(s);
// }
import "C"
import (
"fmt"
"unsafe"
)
func strlen(libs []string, s string) (int, error) {
h, err := GetHandle(libs)
if err != nil {
return -1, fmt.Errorf(`couldn't get a handle to the library: %v`, err)
}
defer h.Close()
f := "strlen"
cs := C.CString(s)
defer C.free(unsafe.Pointer(cs))
strlen, err := h.GetSymbolPointer(f)
if err != nil {
return -1, fmt.Errorf(`couldn't get symbol %q: %v`, f, err)
}
len := C.my_strlen(strlen, cs)
return int(len), nil
}


@ -1,28 +0,0 @@
Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
Copyright (C) 2017 SUSE LLC. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,65 +0,0 @@
## `filepath-securejoin` ##
[![Build Status](https://travis-ci.org/cyphar/filepath-securejoin.svg?branch=master)](https://travis-ci.org/cyphar/filepath-securejoin)
An implementation of `SecureJoin`, a [candidate for inclusion in the Go
standard library][go#20126]. The purpose of this function is to be a "secure"
alternative to `filepath.Join`, and in particular it provides certain
guarantees that are not provided by `filepath.Join`.
This is the function prototype:
```go
func SecureJoin(root, unsafePath string) (string, error)
```
This library **guarantees** the following:
* If no error is set, the resulting string **must** be a child path of
  `root` and will not contain any symlink path components (they will all
  be expanded).
* When expanding symlinks, all symlink path components **must** be resolved
relative to the provided root. In particular, this can be considered a
userspace implementation of how `chroot(2)` operates on file paths. Note that
these symlinks will **not** be expanded lexically (`filepath.Clean` is not
called on the input before processing).
* Non-existent path components are unaffected by `SecureJoin` (similar to
`filepath.EvalSymlinks`'s semantics).
* The returned path will always be `filepath.Clean`ed and thus not contain any
`..` components.
A (trivial) implementation of this function on GNU/Linux systems could be done
with the following (note that this requires root privileges and is far more
opaque than the implementation in this library, and also requires that
`readlink` is inside the `root` path):
```go
package securejoin
import (
"os/exec"
"path/filepath"
)
func SecureJoin(root, unsafePath string) (string, error) {
unsafePath = string(filepath.Separator) + unsafePath
cmd := exec.Command("chroot", root,
"readlink", "--canonicalize-missing", "--no-newline", unsafePath)
output, err := cmd.CombinedOutput()
if err != nil {
return "", err
}
expanded := string(output)
return filepath.Join(root, expanded), nil
}
```
[go#20126]: https://github.com/golang/go/issues/20126
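As a usage sketch (the root directory below is hypothetical), joining an untrusted, attacker-controlled path under a container rootfs:

```go
package main

import (
	"fmt"
	"log"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	root := "/var/lib/containers/rootfs" // hypothetical container root
	// Even if "etc" were a symlink to "/", the result stays inside root,
	// because symlinks are resolved relative to root.
	path, err := securejoin.SecureJoin(root, "../../etc/passwd")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(path) // e.g. /var/lib/containers/rootfs/etc/passwd
}
```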
### License ###
The license of this project is the same as Go, which is a BSD 3-clause license
available in the `LICENSE` file.


@ -1,135 +0,0 @@
// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
// Copyright (C) 2017 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package securejoin is an implementation of the hopefully-soon-to-be-included
// SecureJoin helper that is meant to be part of the "path/filepath" package.
// The purpose of this project is to provide a PoC implementation to make the
// SecureJoin proposal (https://github.com/golang/go/issues/20126) more
// tangible.
package securejoin
import (
"bytes"
"fmt"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/pkg/errors"
)
// ErrSymlinkLoop is returned by SecureJoinVFS when too many symlinks have been
// evaluated in attempting to securely join the two given paths.
var ErrSymlinkLoop = fmt.Errorf("SecureJoin: too many links")
// IsNotExist tells you if err is an error that implies that the path accessed
// does not exist (or that one of its path components does not exist). This is
// effectively a broader version of os.IsNotExist.
func IsNotExist(err error) bool {
// If it's a bona fide ENOENT, just bail.
if os.IsNotExist(errors.Cause(err)) {
return true
}
// Check that it's not actually an ENOTDIR, which in some cases is a more
// convoluted case of ENOENT (usually involving weird paths).
var errno error
switch err := errors.Cause(err).(type) {
case *os.PathError:
errno = err.Err
case *os.LinkError:
errno = err.Err
case *os.SyscallError:
errno = err.Err
}
return errno == syscall.ENOTDIR || errno == syscall.ENOENT
}
// SecureJoinVFS joins the two given path components (similar to Join) except
// that the returned path is guaranteed to be scoped inside the provided root
// path (when evaluated). Any symbolic links in the path are evaluated with the
// given root treated as the root of the filesystem, similar to a chroot. The
// filesystem state is evaluated through the given VFS interface (if nil, the
// standard os.* family of functions are used).
//
// Note that the guarantees provided by this function only apply if the path
// components in the returned string are not modified (in other words are not
// replaced with symlinks on the filesystem) after this function has returned.
// Such a symlink race is necessarily out-of-scope of SecureJoin.
func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
// Use the os.* VFS implementation if none was specified.
if vfs == nil {
vfs = osVFS{}
}
var path bytes.Buffer
n := 0
for unsafePath != "" {
if n > 255 {
return "", ErrSymlinkLoop
}
// Next path component, p.
i := strings.IndexRune(unsafePath, filepath.Separator)
var p string
if i == -1 {
p, unsafePath = unsafePath, ""
} else {
p, unsafePath = unsafePath[:i], unsafePath[i+1:]
}
// Create a cleaned path, using the lexical semantics of /../a, to
// create a "scoped" path component which can safely be joined to fullP
// for evaluation. At this point, path.String() doesn't contain any
// symlink components.
cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p)
if cleanP == string(filepath.Separator) {
path.Reset()
continue
}
fullP := filepath.Clean(root + cleanP)
// Figure out whether the path is a symlink.
fi, err := vfs.Lstat(fullP)
if err != nil && !IsNotExist(err) {
return "", err
}
// Treat non-existent path components the same as non-symlinks (we
// can't do any better here).
if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 {
path.WriteString(p)
path.WriteRune(filepath.Separator)
continue
}
// Only increment when we actually dereference a link.
n++
// It's a symlink, expand it by prepending it to the yet-unparsed path.
dest, err := vfs.Readlink(fullP)
if err != nil {
return "", err
}
// Absolute symlinks reset any work we've already done.
if filepath.IsAbs(dest) {
path.Reset()
}
unsafePath = dest + string(filepath.Separator) + unsafePath
}
// Clean path.String(), which may still contain purely lexical '..'
// components that would otherwise be misleading, and then apply one final
// clean so that the result joined with root is lexically clean as well.
fullP := filepath.Clean(string(filepath.Separator) + path.String())
return filepath.Clean(root + fullP), nil
}
// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library
// of functions as the VFS. If in doubt, use this function over SecureJoinVFS.
func SecureJoin(root, unsafePath string) (string, error) {
return SecureJoinVFS(root, unsafePath, nil)
}


@ -1 +0,0 @@
github.com/pkg/errors v0.8.0


@ -1,41 +0,0 @@
// Copyright (C) 2017 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package securejoin
import "os"
// In future this should be moved into a separate package, because now there
// are several projects (umoci and go-mtree) that are using this sort of
// interface.
// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is
// equivalent to using the standard os.* family of functions. This is mainly
// used for mock testing, but it also allows SecureJoin to be used with any
// VFS-like system.
type VFS interface {
// Lstat returns a FileInfo describing the named file. If the file is a
// symbolic link, the returned FileInfo describes the symbolic link. Lstat
// makes no attempt to follow the link. These semantics are identical to
// os.Lstat.
Lstat(name string) (os.FileInfo, error)
// Readlink returns the destination of the named symbolic link. These
// semantics are identical to os.Readlink.
Readlink(name string) (string, error)
}
// osVFS is the "nil" VFS, in that it just passes everything through to the os
// module.
type osVFS struct{}
// Lstat returns a FileInfo describing the named file. If the file is a
// symbolic link, the returned FileInfo describes the symbolic link. Lstat
// makes no attempt to follow the link. These semantics are identical to
// os.Lstat.
func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }
// Readlink returns the destination of the named symbolic link. These
// semantics are identical to os.Readlink.
func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) }
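A sketch of a custom VFS implementation (a hypothetical logging wrapper around the os package), mainly useful in tests to observe how SecureJoinVFS walks and resolves a path:

```go
package main

import (
	"fmt"
	"log"
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
)

// loggingVFS delegates to the os package but records every filesystem call,
// which is handy when testing how symlinks are resolved.
type loggingVFS struct{ calls []string }

func (v *loggingVFS) Lstat(name string) (os.FileInfo, error) {
	v.calls = append(v.calls, "lstat "+name)
	return os.Lstat(name)
}

func (v *loggingVFS) Readlink(name string) (string, error) {
	v.calls = append(v.calls, "readlink "+name)
	return os.Readlink(name)
}

func main() {
	vfs := &loggingVFS{}
	path, err := securejoin.SecureJoinVFS("/tmp", "a/../b/c", vfs)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(path)
	fmt.Println("filesystem calls:", vfs.calls)
}
```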


@ -1,25 +0,0 @@
Copyright (c) 2013, Georg Reinke (<guelfey at gmail dot com>), Google
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,41 +0,0 @@
dbus
----
dbus is a simple library that implements native Go client bindings for the
D-Bus message bus system.
### Features
* Complete native implementation of the D-Bus message protocol
* Go-like API (channels for signals / asynchronous method calls, Goroutine-safe connections)
* Subpackages that help with the introspection / property interfaces
### Installation
This package requires Go 1.1. If you have installed it and set up your GOPATH, just run:
```
go get github.com/godbus/dbus
```
If you want to use the subpackages, you can install them the same way.
### Usage
The complete package documentation and some simple examples are available at
[godoc.org](http://godoc.org/github.com/godbus/dbus). Also, the
[_examples](https://github.com/godbus/dbus/tree/master/_examples) directory
gives a short overview of the basic usage.
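For instance, a minimal sketch that lists the names currently registered on the session bus (assuming a running session bus and the import path shown above):

```go
package main

import (
	"fmt"
	"log"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		log.Fatal(err)
	}

	var names []string
	err = conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&names)
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}
```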
#### Projects using godbus
- [notify](https://github.com/esiqveland/notify) provides a library for desktop notifications over dbus.
Please note that the API is considered unstable for now and may change without
further notice.
### License
go.dbus is available under the Simplified BSD License; see LICENSE for the full
text.
Nearly all of the credit for this library goes to github.com/guelfey/go.dbus.

vendor/github.com/godbus/dbus/auth.go (253 lines, generated, vendored)

@ -1,253 +0,0 @@
package dbus
import (
"bufio"
"bytes"
"errors"
"io"
"os"
"strconv"
)
// AuthStatus represents the Status of an authentication mechanism.
type AuthStatus byte
const (
// AuthOk signals that authentication is finished; the next command
// from the server should be an OK.
AuthOk AuthStatus = iota
// AuthContinue signals that additional data is needed; the next command
// from the server should be a DATA.
AuthContinue
// AuthError signals an error; the server sent invalid data or some
// other unexpected thing happened and the current authentication
// process should be aborted.
AuthError
)
type authState byte
const (
waitingForData authState = iota
waitingForOk
waitingForReject
)
// Auth defines the behaviour of an authentication mechanism.
type Auth interface {
// Return the name of the mechanism, the argument to the first AUTH command
// and the next status.
FirstData() (name, resp []byte, status AuthStatus)
// Process the given DATA command, and return the argument to the DATA
// command and the next status. If len(resp) == 0, no DATA command is sent.
HandleData(data []byte) (resp []byte, status AuthStatus)
}
// Auth authenticates the connection, trying the given list of authentication
// mechanisms (in that order). If nil is passed, the EXTERNAL and
// DBUS_COOKIE_SHA1 mechanisms are tried for the current user. For private
// connections, this method must be called before sending any messages to the
// bus. Auth must not be called on shared connections.
func (conn *Conn) Auth(methods []Auth) error {
if methods == nil {
uid := strconv.Itoa(os.Getuid())
methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())}
}
in := bufio.NewReader(conn.transport)
err := conn.transport.SendNullByte()
if err != nil {
return err
}
err = authWriteLine(conn.transport, []byte("AUTH"))
if err != nil {
return err
}
s, err := authReadLine(in)
if err != nil {
return err
}
if len(s) < 2 || !bytes.Equal(s[0], []byte("REJECTED")) {
return errors.New("dbus: authentication protocol error")
}
s = s[1:]
for _, v := range s {
for _, m := range methods {
if name, data, status := m.FirstData(); bytes.Equal(v, name) {
var ok bool
err = authWriteLine(conn.transport, []byte("AUTH"), []byte(v), data)
if err != nil {
return err
}
switch status {
case AuthOk:
err, ok = conn.tryAuth(m, waitingForOk, in)
case AuthContinue:
err, ok = conn.tryAuth(m, waitingForData, in)
default:
panic("dbus: invalid authentication status")
}
if err != nil {
return err
}
if ok {
if conn.transport.SupportsUnixFDs() {
err = authWriteLine(conn, []byte("NEGOTIATE_UNIX_FD"))
if err != nil {
return err
}
line, err := authReadLine(in)
if err != nil {
return err
}
switch {
case bytes.Equal(line[0], []byte("AGREE_UNIX_FD")):
conn.EnableUnixFDs()
conn.unixFD = true
case bytes.Equal(line[0], []byte("ERROR")):
default:
return errors.New("dbus: authentication protocol error")
}
}
err = authWriteLine(conn.transport, []byte("BEGIN"))
if err != nil {
return err
}
go conn.inWorker()
go conn.outWorker()
return nil
}
}
}
}
return errors.New("dbus: authentication failed")
}
// tryAuth tries to authenticate with m as the mechanism, using state as the
// initial authState and in for reading input. It returns (nil, true) on
// success, (nil, false) on a REJECTED and (someErr, false) if some other
// error occurred.
func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) {
for {
s, err := authReadLine(in)
if err != nil {
return err, false
}
switch {
case state == waitingForData && string(s[0]) == "DATA":
if len(s) != 2 {
err = authWriteLine(conn.transport, []byte("ERROR"))
if err != nil {
return err, false
}
continue
}
data, status := m.HandleData(s[1])
switch status {
case AuthOk, AuthContinue:
if len(data) != 0 {
err = authWriteLine(conn.transport, []byte("DATA"), data)
if err != nil {
return err, false
}
}
if status == AuthOk {
state = waitingForOk
}
case AuthError:
err = authWriteLine(conn.transport, []byte("ERROR"))
if err != nil {
return err, false
}
}
case state == waitingForData && string(s[0]) == "REJECTED":
return nil, false
case state == waitingForData && string(s[0]) == "ERROR":
err = authWriteLine(conn.transport, []byte("CANCEL"))
if err != nil {
return err, false
}
state = waitingForReject
case state == waitingForData && string(s[0]) == "OK":
if len(s) != 2 {
err = authWriteLine(conn.transport, []byte("CANCEL"))
if err != nil {
return err, false
}
state = waitingForReject
}
conn.uuid = string(s[1])
return nil, true
case state == waitingForData:
err = authWriteLine(conn.transport, []byte("ERROR"))
if err != nil {
return err, false
}
case state == waitingForOk && string(s[0]) == "OK":
if len(s) != 2 {
err = authWriteLine(conn.transport, []byte("CANCEL"))
if err != nil {
return err, false
}
state = waitingForReject
}
conn.uuid = string(s[1])
return nil, true
case state == waitingForOk && string(s[0]) == "REJECTED":
return nil, false
case state == waitingForOk && (string(s[0]) == "DATA" ||
string(s[0]) == "ERROR"):
err = authWriteLine(conn.transport, []byte("CANCEL"))
if err != nil {
return err, false
}
state = waitingForReject
case state == waitingForOk:
err = authWriteLine(conn.transport, []byte("ERROR"))
if err != nil {
return err, false
}
case state == waitingForReject && string(s[0]) == "REJECTED":
return nil, false
case state == waitingForReject:
return errors.New("dbus: authentication protocol error"), false
default:
panic("dbus: invalid auth state")
}
}
}
// authReadLine reads a line and separates it into its fields.
func authReadLine(in *bufio.Reader) ([][]byte, error) {
data, err := in.ReadBytes('\n')
if err != nil {
return nil, err
}
data = bytes.TrimSuffix(data, []byte("\r\n"))
return bytes.Split(data, []byte{' '}), nil
}
// authWriteLine writes the given line in the authentication protocol format
// (elements of data separated by a " " and terminated by "\r\n").
func authWriteLine(out io.Writer, data ...[]byte) error {
buf := make([]byte, 0)
for i, v := range data {
buf = append(buf, v...)
if i != len(data)-1 {
buf = append(buf, ' ')
}
}
buf = append(buf, '\r')
buf = append(buf, '\n')
n, err := out.Write(buf)
if err != nil {
return err
}
if n != len(buf) {
return io.ErrUnexpectedEOF
}
return nil
}
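A sketch of the authentication sequence required for private connections, following the Auth doc comment above (Dial/Auth/Hello; passing nil selects the default EXTERNAL and DBUS_COOKIE_SHA1 mechanisms for the current user):

```go
package main

import (
	"log"

	"github.com/godbus/dbus"
)

func main() {
	// Private connections must be authenticated explicitly; the shared
	// connections returned by SessionBus/SystemBus do this for you.
	conn, err := dbus.SystemBusPrivate()
	if err != nil {
		log.Fatal(err)
	}

	// nil selects the default mechanisms for the current user.
	if err := conn.Auth(nil); err != nil {
		conn.Close()
		log.Fatal(err)
	}
	if err := conn.Hello(); err != nil {
		conn.Close()
		log.Fatal(err)
	}
	log.Println("authenticated private connection ready")
}
```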


@ -1,26 +0,0 @@
package dbus
import (
"encoding/hex"
)
// AuthExternal returns an Auth that authenticates as the given user with the
// EXTERNAL mechanism.
func AuthExternal(user string) Auth {
return authExternal{user}
}
// AuthExternal implements the EXTERNAL authentication mechanism.
type authExternal struct {
user string
}
func (a authExternal) FirstData() ([]byte, []byte, AuthStatus) {
b := make([]byte, 2*len(a.user))
hex.Encode(b, []byte(a.user))
return []byte("EXTERNAL"), b, AuthOk
}
func (a authExternal) HandleData(b []byte) ([]byte, AuthStatus) {
return nil, AuthError
}


@ -1,102 +0,0 @@
package dbus
import (
"bufio"
"bytes"
"crypto/rand"
"crypto/sha1"
"encoding/hex"
"os"
)
// AuthCookieSha1 returns an Auth that authenticates as the given user with the
// DBUS_COOKIE_SHA1 mechanism. The home parameter should specify the home
// directory of the user.
func AuthCookieSha1(user, home string) Auth {
return authCookieSha1{user, home}
}
type authCookieSha1 struct {
user, home string
}
func (a authCookieSha1) FirstData() ([]byte, []byte, AuthStatus) {
b := make([]byte, 2*len(a.user))
hex.Encode(b, []byte(a.user))
return []byte("DBUS_COOKIE_SHA1"), b, AuthContinue
}
func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) {
challenge := make([]byte, len(data)/2)
_, err := hex.Decode(challenge, data)
if err != nil {
return nil, AuthError
}
b := bytes.Split(challenge, []byte{' '})
if len(b) != 3 {
return nil, AuthError
}
context := b[0]
id := b[1]
svchallenge := b[2]
cookie := a.getCookie(context, id)
if cookie == nil {
return nil, AuthError
}
clchallenge := a.generateChallenge()
if clchallenge == nil {
return nil, AuthError
}
hash := sha1.New()
hash.Write(bytes.Join([][]byte{svchallenge, clchallenge, cookie}, []byte{':'}))
hexhash := make([]byte, 2*hash.Size())
hex.Encode(hexhash, hash.Sum(nil))
data = append(clchallenge, ' ')
data = append(data, hexhash...)
resp := make([]byte, 2*len(data))
hex.Encode(resp, data)
return resp, AuthOk
}
// getCookie searches for the cookie identified by id in context and returns
// the cookie content or nil. (Since HandleData can't return a specific error,
// but only whether an error occurred, this function also doesn't bother to
// return an error.)
func (a authCookieSha1) getCookie(context, id []byte) []byte {
file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context))
if err != nil {
return nil
}
defer file.Close()
rd := bufio.NewReader(file)
for {
line, err := rd.ReadBytes('\n')
if err != nil {
return nil
}
line = line[:len(line)-1]
b := bytes.Split(line, []byte{' '})
if len(b) != 3 {
return nil
}
if bytes.Equal(b[0], id) {
return b[2]
}
}
}
// generateChallenge returns a random, hex-encoded challenge, or nil on error
// (see above).
func (a authCookieSha1) generateChallenge() []byte {
b := make([]byte, 16)
n, err := rand.Read(b)
if err != nil {
return nil
}
if n != 16 {
return nil
}
enc := make([]byte, 32)
hex.Encode(enc, b)
return enc
}
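For illustration, the challenge-response arithmetic performed by HandleData above can be written as a tiny standalone helper (the input values below are made up; real challenges come from the bus and the cookie from ~/.dbus-keyrings):

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

// cookieResponse mirrors HandleData above: hash "server:client:cookie" with
// SHA-1, hex-encode the digest, prepend the client challenge, and hex-encode
// the whole reply once more before it is sent as a DATA command.
func cookieResponse(serverChallenge, clientChallenge, cookie string) string {
	sum := sha1.Sum([]byte(serverChallenge + ":" + clientChallenge + ":" + cookie))
	payload := clientChallenge + " " + hex.EncodeToString(sum[:])
	return hex.EncodeToString([]byte(payload))
}

func main() {
	// Illustrative values only.
	fmt.Println(cookieResponse("deadbeef", "cafebabe", "0123456789abcdef"))
}
```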


@ -1,36 +0,0 @@
package dbus
import (
"errors"
)
// Call represents a pending or completed method call.
type Call struct {
Destination string
Path ObjectPath
Method string
Args []interface{}
// Strobes when the call is complete.
Done chan *Call
// After completion, the error status. If this is non-nil, it may be an
// error message from the peer (with Error as its type) or some other error.
Err error
// Holds the response once the call is done.
Body []interface{}
}
var errSignature = errors.New("dbus: mismatched signature")
// Store stores the body of the reply into the provided pointers. It returns
// an error if the signatures of the body and retvalues don't match, or if
// the error status is not nil.
func (c *Call) Store(retvalues ...interface{}) error {
if c.Err != nil {
return c.Err
}
return Store(c.Body, retvalues...)
}
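A sketch of an asynchronous method call whose reply is unpacked with Store, assuming a session bus is available (the Go method on the bus object queues the call and delivers the finished *Call on the provided buffered channel):

```go
package main

import (
	"fmt"
	"log"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		log.Fatal(err)
	}

	// Issue the call asynchronously; the *Call shows up on Done (here: ch)
	// once the reply or an error has arrived.
	ch := make(chan *dbus.Call, 1)
	conn.BusObject().Go("org.freedesktop.DBus.GetNameOwner", 0, ch, "org.freedesktop.DBus")

	call := <-ch
	var owner string
	if err := call.Store(&owner); err != nil {
		log.Fatal(err)
	}
	fmt.Println("owner of org.freedesktop.DBus:", owner)
}
```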

vendor/github.com/godbus/dbus/conn.go (625 lines, generated, vendored)

@ -1,625 +0,0 @@
package dbus
import (
"errors"
"io"
"os"
"reflect"
"strings"
"sync"
)
const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
var (
systemBus *Conn
systemBusLck sync.Mutex
sessionBus *Conn
sessionBusLck sync.Mutex
)
// ErrClosed is the error returned by calls on a closed connection.
var ErrClosed = errors.New("dbus: connection closed by user")
// Conn represents a connection to a message bus (usually, the system or
// session bus).
//
// Connections are either shared or private. Shared connections
// are shared between calls to the functions that return them. As a result,
// the methods Close, Auth and Hello must not be called on them.
//
// Multiple goroutines may invoke methods on a connection simultaneously.
type Conn struct {
transport
busObj BusObject
unixFD bool
uuid string
names []string
namesLck sync.RWMutex
serialLck sync.Mutex
nextSerial uint32
serialUsed map[uint32]bool
calls map[uint32]*Call
callsLck sync.RWMutex
handlers map[ObjectPath]map[string]exportWithMapping
handlersLck sync.RWMutex
out chan *Message
closed bool
outLck sync.RWMutex
signals []chan<- *Signal
signalsLck sync.Mutex
eavesdropped chan<- *Message
eavesdroppedLck sync.Mutex
}
// SessionBus returns a shared connection to the session bus, connecting to it
// if not already done.
func SessionBus() (conn *Conn, err error) {
sessionBusLck.Lock()
defer sessionBusLck.Unlock()
if sessionBus != nil {
return sessionBus, nil
}
defer func() {
if conn != nil {
sessionBus = conn
}
}()
conn, err = SessionBusPrivate()
if err != nil {
return
}
if err = conn.Auth(nil); err != nil {
conn.Close()
conn = nil
return
}
if err = conn.Hello(); err != nil {
conn.Close()
conn = nil
}
return
}
// SessionBusPrivate returns a new private connection to the session bus.
func SessionBusPrivate() (*Conn, error) {
address := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
if address != "" && address != "autolaunch:" {
return Dial(address)
}
return sessionBusPlatform()
}
// SystemBus returns a shared connection to the system bus, connecting to it if
// not already done.
func SystemBus() (conn *Conn, err error) {
systemBusLck.Lock()
defer systemBusLck.Unlock()
if systemBus != nil {
return systemBus, nil
}
defer func() {
if conn != nil {
systemBus = conn
}
}()
conn, err = SystemBusPrivate()
if err != nil {
return
}
if err = conn.Auth(nil); err != nil {
conn.Close()
conn = nil
return
}
if err = conn.Hello(); err != nil {
conn.Close()
conn = nil
}
return
}
// SystemBusPrivate returns a new private connection to the system bus.
func SystemBusPrivate() (*Conn, error) {
address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
if address != "" {
return Dial(address)
}
return Dial(defaultSystemBusAddress)
}
// Dial establishes a new private connection to the message bus specified by address.
func Dial(address string) (*Conn, error) {
tr, err := getTransport(address)
if err != nil {
return nil, err
}
return newConn(tr)
}
// NewConn creates a new private *Conn from an already established connection.
func NewConn(conn io.ReadWriteCloser) (*Conn, error) {
return newConn(genericTransport{conn})
}
// newConn creates a new *Conn from a transport.
func newConn(tr transport) (*Conn, error) {
conn := new(Conn)
conn.transport = tr
conn.calls = make(map[uint32]*Call)
conn.out = make(chan *Message, 10)
conn.handlers = make(map[ObjectPath]map[string]exportWithMapping)
conn.nextSerial = 1
conn.serialUsed = map[uint32]bool{0: true}
conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
return conn, nil
}
// BusObject returns the object owned by the bus daemon which handles
// administrative requests.
func (conn *Conn) BusObject() BusObject {
return conn.busObj
}
// Close closes the connection. Any blocked operations will return with errors
// and the channels passed to Eavesdrop and Signal are closed. This method must
// not be called on shared connections.
func (conn *Conn) Close() error {
conn.outLck.Lock()
if conn.closed {
// inWorker calls Close on a read error. That read error may itself have
// been caused by another caller closing the connection, so guard against
// a double close here.
conn.outLck.Unlock()
return nil
}
close(conn.out)
conn.closed = true
conn.outLck.Unlock()
conn.signalsLck.Lock()
for _, ch := range conn.signals {
close(ch)
}
conn.signalsLck.Unlock()
conn.eavesdroppedLck.Lock()
if conn.eavesdropped != nil {
close(conn.eavesdropped)
}
conn.eavesdroppedLck.Unlock()
return conn.transport.Close()
}
// Eavesdrop causes conn to send all incoming messages to the given channel
// without further processing. Method replies, errors and signals will not be
// sent to the appropriate channels and method calls will not be handled. If nil
// is passed, the normal behaviour is restored.
//
// The caller has to make sure that ch is sufficiently buffered;
// if a message arrives when a write to ch is not possible, the message is
// discarded.
func (conn *Conn) Eavesdrop(ch chan<- *Message) {
conn.eavesdroppedLck.Lock()
conn.eavesdropped = ch
conn.eavesdroppedLck.Unlock()
}
// getSerial returns an unused serial.
func (conn *Conn) getSerial() uint32 {
conn.serialLck.Lock()
defer conn.serialLck.Unlock()
n := conn.nextSerial
for conn.serialUsed[n] {
n++
}
conn.serialUsed[n] = true
conn.nextSerial = n + 1
return n
}
// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be
// called after authentication, but before sending any other messages to the
// bus. Hello must not be called for shared connections.
func (conn *Conn) Hello() error {
var s string
err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s)
if err != nil {
return err
}
conn.namesLck.Lock()
conn.names = make([]string, 1)
conn.names[0] = s
conn.namesLck.Unlock()
return nil
}
// inWorker runs in an own goroutine, reading incoming messages from the
// transport and dispatching them appropriately.
func (conn *Conn) inWorker() {
for {
msg, err := conn.ReadMessage()
if err == nil {
conn.eavesdroppedLck.Lock()
if conn.eavesdropped != nil {
select {
case conn.eavesdropped <- msg:
default:
}
conn.eavesdroppedLck.Unlock()
continue
}
conn.eavesdroppedLck.Unlock()
dest, _ := msg.Headers[FieldDestination].value.(string)
found := false
if dest == "" {
found = true
} else {
conn.namesLck.RLock()
if len(conn.names) == 0 {
found = true
}
for _, v := range conn.names {
if dest == v {
found = true
break
}
}
conn.namesLck.RUnlock()
}
if !found {
// Eavesdropped a message, but no channel for it is registered.
// Ignore it.
continue
}
switch msg.Type {
case TypeMethodReply, TypeError:
serial := msg.Headers[FieldReplySerial].value.(uint32)
conn.callsLck.Lock()
if c, ok := conn.calls[serial]; ok {
if msg.Type == TypeError {
name, _ := msg.Headers[FieldErrorName].value.(string)
c.Err = Error{name, msg.Body}
} else {
c.Body = msg.Body
}
c.Done <- c
conn.serialLck.Lock()
delete(conn.serialUsed, serial)
conn.serialLck.Unlock()
delete(conn.calls, serial)
}
conn.callsLck.Unlock()
case TypeSignal:
iface := msg.Headers[FieldInterface].value.(string)
member := msg.Headers[FieldMember].value.(string)
// as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
// sender is optional for signals.
sender, _ := msg.Headers[FieldSender].value.(string)
if iface == "org.freedesktop.DBus" && sender == "org.freedesktop.DBus" {
if member == "NameLost" {
// If we lost the name on the bus, remove it from our
// tracking list.
name, ok := msg.Body[0].(string)
if !ok {
panic("Unable to read the lost name")
}
conn.namesLck.Lock()
for i, v := range conn.names {
if v == name {
conn.names = append(conn.names[:i],
conn.names[i+1:]...)
}
}
conn.namesLck.Unlock()
} else if member == "NameAcquired" {
// If we acquired the name on the bus, add it to our
// tracking list.
name, ok := msg.Body[0].(string)
if !ok {
panic("Unable to read the acquired name")
}
conn.namesLck.Lock()
conn.names = append(conn.names, name)
conn.namesLck.Unlock()
}
}
signal := &Signal{
Sender: sender,
Path: msg.Headers[FieldPath].value.(ObjectPath),
Name: iface + "." + member,
Body: msg.Body,
}
conn.signalsLck.Lock()
for _, ch := range conn.signals {
ch <- signal
}
conn.signalsLck.Unlock()
case TypeMethodCall:
go conn.handleCall(msg)
}
} else if _, ok := err.(InvalidMessageError); !ok {
// Some read error occurred (usually EOF); there is nothing to do
// but shut everything down and return the error to all pending
// replies.
conn.Close()
conn.callsLck.RLock()
for _, v := range conn.calls {
v.Err = err
v.Done <- v
}
conn.callsLck.RUnlock()
return
}
// invalid messages are ignored
}
}
// Names returns the list of all names that are currently owned by this
// connection. The slice is always at least one element long, the first element
// being the unique name of the connection.
func (conn *Conn) Names() []string {
conn.namesLck.RLock()
// copy the slice so it can't be modified
s := make([]string, len(conn.names))
copy(s, conn.names)
conn.namesLck.RUnlock()
return s
}
// Object returns the object identified by the given destination name and path.
func (conn *Conn) Object(dest string, path ObjectPath) BusObject {
return &Object{conn, dest, path}
}
// outWorker runs in an own goroutine, encoding and sending messages that are
// sent to conn.out.
func (conn *Conn) outWorker() {
for msg := range conn.out {
err := conn.SendMessage(msg)
conn.callsLck.RLock()
if err != nil {
if c := conn.calls[msg.serial]; c != nil {
c.Err = err
c.Done <- c
}
conn.serialLck.Lock()
delete(conn.serialUsed, msg.serial)
conn.serialLck.Unlock()
} else if msg.Type != TypeMethodCall {
conn.serialLck.Lock()
delete(conn.serialUsed, msg.serial)
conn.serialLck.Unlock()
}
conn.callsLck.RUnlock()
}
}
// Send sends the given message to the message bus. You usually don't need to
// use this; use the higher-level equivalents (Call / Go, Emit and Export)
// instead. If msg is a method call and NoReplyExpected is not set, a non-nil
// call is returned and the same value is sent to ch (which must be buffered)
// once the call is complete. Otherwise, ch is ignored and a Call structure is
// returned of which only the Err member is valid.
func (conn *Conn) Send(msg *Message, ch chan *Call) *Call {
var call *Call
msg.serial = conn.getSerial()
if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 {
if ch == nil {
ch = make(chan *Call, 5)
} else if cap(ch) == 0 {
panic("dbus: unbuffered channel passed to (*Conn).Send")
}
call = new(Call)
call.Destination, _ = msg.Headers[FieldDestination].value.(string)
call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath)
iface, _ := msg.Headers[FieldInterface].value.(string)
member, _ := msg.Headers[FieldMember].value.(string)
call.Method = iface + "." + member
call.Args = msg.Body
call.Done = ch
conn.callsLck.Lock()
conn.calls[msg.serial] = call
conn.callsLck.Unlock()
conn.outLck.RLock()
if conn.closed {
call.Err = ErrClosed
call.Done <- call
} else {
conn.out <- msg
}
conn.outLck.RUnlock()
} else {
conn.outLck.RLock()
if conn.closed {
call = &Call{Err: ErrClosed}
} else {
conn.out <- msg
call = &Call{Err: nil}
}
conn.outLck.RUnlock()
}
return call
}
// sendError creates an error message corresponding to the parameters and sends
// it to conn.out.
func (conn *Conn) sendError(e Error, dest string, serial uint32) {
msg := new(Message)
msg.Type = TypeError
msg.serial = conn.getSerial()
msg.Headers = make(map[HeaderField]Variant)
if dest != "" {
msg.Headers[FieldDestination] = MakeVariant(dest)
}
msg.Headers[FieldErrorName] = MakeVariant(e.Name)
msg.Headers[FieldReplySerial] = MakeVariant(serial)
msg.Body = e.Body
if len(e.Body) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...))
}
conn.outLck.RLock()
if !conn.closed {
conn.out <- msg
}
conn.outLck.RUnlock()
}
// sendReply creates a method reply message corresponding to the parameters and
// sends it to conn.out.
func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
msg := new(Message)
msg.Type = TypeMethodReply
msg.serial = conn.getSerial()
msg.Headers = make(map[HeaderField]Variant)
if dest != "" {
msg.Headers[FieldDestination] = MakeVariant(dest)
}
msg.Headers[FieldReplySerial] = MakeVariant(serial)
msg.Body = values
if len(values) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
}
conn.outLck.RLock()
if !conn.closed {
conn.out <- msg
}
conn.outLck.RUnlock()
}
// Signal registers the given channel to be passed all received signal messages.
// The caller has to make sure that ch is sufficiently buffered; if a message
// arrives when a write to ch is not possible, it is discarded.
//
// Multiple of these channels can be registered at the same time. Passing a
// channel that already is registered will remove it from the list of the
// registered channels.
//
// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a
// channel for eavesdropped messages, this channel receives all signals, and
// none of the channels passed to Signal will receive any signals.
func (conn *Conn) Signal(ch chan<- *Signal) {
conn.signalsLck.Lock()
conn.signals = append(conn.signals, ch)
conn.signalsLck.Unlock()
}
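// Illustrative sketch (not part of the vendored source): Signal only registers
// the channel, so a match rule normally has to be added on the bus first. The
// match rule string below is an assumption, conn is an established *dbus.Conn
// imported as dbus ("github.com/godbus/dbus"), and an fmt import is assumed.
func watchNameOwnerChanged(conn *dbus.Conn) {
	// Ask the bus to route NameOwnerChanged signals to us (error handling omitted).
	conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus").Call(
		"org.freedesktop.DBus.AddMatch", 0,
		"type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged'")
	ch := make(chan *dbus.Signal, 16) // sufficiently buffered, as required above
	conn.Signal(ch)
	for sig := range ch {
		fmt.Println(sig.Sender, sig.Name, sig.Body)
	}
}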
// SupportsUnixFDs returns whether the underlying transport supports passing of
// unix file descriptors. If this is false, method calls containing unix file
// descriptors will return an error and emitted signals containing them will
// not be sent.
func (conn *Conn) SupportsUnixFDs() bool {
return conn.unixFD
}
// Error represents a D-Bus message of type Error.
type Error struct {
Name string
Body []interface{}
}
func NewError(name string, body []interface{}) *Error {
return &Error{name, body}
}
func (e Error) Error() string {
if len(e.Body) >= 1 {
s, ok := e.Body[0].(string)
if ok {
return s
}
}
return e.Name
}
// Signal represents a D-Bus message of type Signal. The name member is given in
// "interface.member" notation, e.g. org.freedesktop.D-Bus.NameLost.
type Signal struct {
Sender string
Path ObjectPath
Name string
Body []interface{}
}
// transport is a D-Bus transport.
type transport interface {
// Read and Write raw data (for example, for the authentication protocol).
io.ReadWriteCloser
// Send the initial null byte used for the EXTERNAL mechanism.
SendNullByte() error
// Returns whether this transport supports passing Unix FDs.
SupportsUnixFDs() bool
// Signal the transport that Unix FD passing is enabled for this connection.
EnableUnixFDs()
// Read / send a message, handling things like Unix FDs.
ReadMessage() (*Message, error)
SendMessage(*Message) error
}
var (
transports = make(map[string]func(string) (transport, error))
)
func getTransport(address string) (transport, error) {
var err error
var t transport
addresses := strings.Split(address, ";")
for _, v := range addresses {
i := strings.IndexRune(v, ':')
if i == -1 {
err = errors.New("dbus: invalid bus address (no transport)")
continue
}
f := transports[v[:i]]
if f == nil {
err = errors.New("dbus: invalid bus address (invalid or unsupported transport)")
continue
}
t, err = f(v[i+1:])
if err == nil {
return t, nil
}
}
return nil, err
}
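// Package-internal sketch (not part of the vendored source) of how a bus
// address is resolved: alternatives are separated by ';', each one has the form
// "transport:key=value,...", and the first alternative whose transport is
// registered and dials successfully wins. The concrete socket path is made up;
// only the "unix" transport (registered in transport_unix.go) exists in this
// vendored snapshot, so the first alternative is skipped.
func dialFirstWorkingTransport() (transport, error) {
	return getTransport("nonce-tcp:host=localhost;unix:path=/run/user/1000/bus")
}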
// dereferenceAll returns a slice that, assuming that vs is a slice of pointers
// of arbitrary types, contains the values obtained by dereferencing
// all elements in vs.
func dereferenceAll(vs []interface{}) []interface{} {
for i := range vs {
v := reflect.ValueOf(vs[i])
v = v.Elem()
vs[i] = v.Interface()
}
return vs
}
// getKey extracts the value for the given key from a comma-separated list of
// key=value pairs. It returns "" if the key is missing or malformed.
func getKey(s, key string) string {
	i := strings.Index(s, key)
	if i == -1 {
		return ""
	}
	if i+len(key)+1 >= len(s) || s[i+len(key)] != '=' {
		return ""
	}
	// Look for the separating comma only after the key, so that a comma in an
	// earlier key=value pair does not truncate the result.
	j := strings.Index(s[i:], ",")
	if j == -1 {
		j = len(s)
	} else {
		j += i
	}
	return s[i+len(key)+1 : j]
}

@ -1,21 +0,0 @@
package dbus
import (
"errors"
"os/exec"
)
func sessionBusPlatform() (*Conn, error) {
cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET")
b, err := cmd.CombinedOutput()
if err != nil {
return nil, err
}
if len(b) == 0 {
return nil, errors.New("dbus: couldn't determine address of session bus")
}
return Dial("unix:path=" + string(b[:len(b)-1]))
}

@ -1,27 +0,0 @@
// +build !darwin
package dbus
import (
"bytes"
"errors"
"os/exec"
)
func sessionBusPlatform() (*Conn, error) {
cmd := exec.Command("dbus-launch")
b, err := cmd.CombinedOutput()
if err != nil {
return nil, err
}
i := bytes.IndexByte(b, '=')
j := bytes.IndexByte(b, '\n')
if i == -1 || j == -1 {
return nil, errors.New("dbus: couldn't determine address of session bus")
}
return Dial(string(b[i+1 : j]))
}

vendor/github.com/godbus/dbus/dbus.go generated vendored
@ -1,258 +0,0 @@
package dbus
import (
"errors"
"reflect"
"strings"
)
var (
byteType = reflect.TypeOf(byte(0))
boolType = reflect.TypeOf(false)
uint8Type = reflect.TypeOf(uint8(0))
int16Type = reflect.TypeOf(int16(0))
uint16Type = reflect.TypeOf(uint16(0))
int32Type = reflect.TypeOf(int32(0))
uint32Type = reflect.TypeOf(uint32(0))
int64Type = reflect.TypeOf(int64(0))
uint64Type = reflect.TypeOf(uint64(0))
float64Type = reflect.TypeOf(float64(0))
stringType = reflect.TypeOf("")
signatureType = reflect.TypeOf(Signature{""})
objectPathType = reflect.TypeOf(ObjectPath(""))
variantType = reflect.TypeOf(Variant{Signature{""}, nil})
interfacesType = reflect.TypeOf([]interface{}{})
unixFDType = reflect.TypeOf(UnixFD(0))
unixFDIndexType = reflect.TypeOf(UnixFDIndex(0))
)
// An InvalidTypeError signals that a value which cannot be represented in the
// D-Bus wire format was passed to a function.
type InvalidTypeError struct {
Type reflect.Type
}
func (e InvalidTypeError) Error() string {
return "dbus: invalid type " + e.Type.String()
}
// Store copies the values contained in src to dest, which must be a slice of
// pointers. It converts slices of interfaces from src to corresponding structs
// in dest. An error is returned if the lengths of src and dest or the types of
// their elements don't match.
func Store(src []interface{}, dest ...interface{}) error {
if len(src) != len(dest) {
return errors.New("dbus.Store: length mismatch")
}
for i := range src {
if err := store(src[i], dest[i]); err != nil {
return err
}
}
return nil
}
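// Illustrative sketch (not part of the vendored source) of the struct
// conversion described above: a D-Bus STRUCT arrives as a []interface{} and
// Store copies it field by field into a typed Go struct. The types and values
// are made up; the package is assumed imported as dbus.
type release struct {
	Major uint32
	Minor uint32
}

func storeExample() error {
	// body as it would arrive off the wire: a STRUCT (uu) followed by a STRING
	body := []interface{}{[]interface{}{uint32(1), uint32(2)}, "stable"}
	var rel release
	var channel string
	// afterwards rel == release{1, 2} and channel == "stable"
	return dbus.Store(body, &rel, &channel)
}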
func store(src, dest interface{}) error {
if reflect.TypeOf(dest).Elem() == reflect.TypeOf(src) {
reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src))
return nil
} else if hasStruct(dest) {
rv := reflect.ValueOf(dest).Elem()
switch rv.Kind() {
case reflect.Struct:
vs, ok := src.([]interface{})
if !ok {
return errors.New("dbus.Store: type mismatch")
}
t := rv.Type()
ndest := make([]interface{}, 0, rv.NumField())
for i := 0; i < rv.NumField(); i++ {
field := t.Field(i)
if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
ndest = append(ndest, rv.Field(i).Addr().Interface())
}
}
if len(vs) != len(ndest) {
return errors.New("dbus.Store: type mismatch")
}
err := Store(vs, ndest...)
if err != nil {
return errors.New("dbus.Store: type mismatch")
}
case reflect.Slice:
sv := reflect.ValueOf(src)
if sv.Kind() != reflect.Slice {
return errors.New("dbus.Store: type mismatch")
}
rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len()))
for i := 0; i < sv.Len(); i++ {
if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil {
return err
}
}
case reflect.Map:
sv := reflect.ValueOf(src)
if sv.Kind() != reflect.Map {
return errors.New("dbus.Store: type mismatch")
}
keys := sv.MapKeys()
rv.Set(reflect.MakeMap(sv.Type()))
for _, key := range keys {
v := reflect.New(sv.Type().Elem())
if err := store(v, sv.MapIndex(key).Interface()); err != nil {
return err
}
rv.SetMapIndex(key, v.Elem())
}
default:
return errors.New("dbus.Store: type mismatch")
}
return nil
} else {
return errors.New("dbus.Store: type mismatch")
}
}
func hasStruct(v interface{}) bool {
t := reflect.TypeOf(v)
for {
switch t.Kind() {
case reflect.Struct:
return true
case reflect.Slice, reflect.Ptr, reflect.Map:
t = t.Elem()
default:
return false
}
}
}
// An ObjectPath is an object path as defined by the D-Bus spec.
type ObjectPath string
// IsValid returns whether the object path is valid.
func (o ObjectPath) IsValid() bool {
s := string(o)
if len(s) == 0 {
return false
}
if s[0] != '/' {
return false
}
if s[len(s)-1] == '/' && len(s) != 1 {
return false
}
// probably not used, but technically possible
if s == "/" {
return true
}
split := strings.Split(s[1:], "/")
for _, v := range split {
if len(v) == 0 {
return false
}
for _, c := range v {
if !isMemberChar(c) {
return false
}
}
}
return true
}
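// Illustrative sketch (not part of the vendored source): a few concrete cases
// of the validation rules above. Assumes an fmt import and the package imported
// as dbus.
func objectPathExamples() {
	fmt.Println(dbus.ObjectPath("/org/freedesktop/DBus").IsValid()) // true
	fmt.Println(dbus.ObjectPath("/").IsValid())                     // true (the root path)
	fmt.Println(dbus.ObjectPath("/trailing/").IsValid())            // false (trailing slash)
	fmt.Println(dbus.ObjectPath("/with-dash").IsValid())            // false ('-' is not allowed)
	fmt.Println(dbus.ObjectPath("relative").IsValid())              // false (must start with '/')
}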
// A UnixFD is a Unix file descriptor sent over the wire. See the package-level
// documentation for more information about Unix file descriptor passing.
type UnixFD int32
// A UnixFDIndex is the representation of a Unix file descriptor in a message.
type UnixFDIndex uint32
// alignment returns the alignment of values of type t.
func alignment(t reflect.Type) int {
switch t {
case variantType:
return 1
case objectPathType:
return 4
case signatureType:
return 1
case interfacesType: // sometimes used for structs
return 8
}
switch t.Kind() {
case reflect.Uint8:
return 1
case reflect.Uint16, reflect.Int16:
return 2
case reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map:
return 4
case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct:
return 8
case reflect.Ptr:
return alignment(t.Elem())
}
return 1
}
// isKeyType returns whether t is a valid type for a D-Bus dict.
func isKeyType(t reflect.Type) bool {
switch t.Kind() {
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64,
reflect.String:
return true
}
return false
}
// isValidInterface returns whether s is a valid name for an interface.
func isValidInterface(s string) bool {
if len(s) == 0 || len(s) > 255 || s[0] == '.' {
return false
}
elem := strings.Split(s, ".")
if len(elem) < 2 {
return false
}
for _, v := range elem {
if len(v) == 0 {
return false
}
if v[0] >= '0' && v[0] <= '9' {
return false
}
for _, c := range v {
if !isMemberChar(c) {
return false
}
}
}
return true
}
// isValidMember returns whether s is a valid name for a member.
func isValidMember(s string) bool {
if len(s) == 0 || len(s) > 255 {
return false
}
i := strings.Index(s, ".")
if i != -1 {
return false
}
if s[0] >= '0' && s[0] <= '9' {
return false
}
for _, c := range s {
if !isMemberChar(c) {
return false
}
}
return true
}
func isMemberChar(c rune) bool {
return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
(c >= 'a' && c <= 'z') || c == '_'
}

@ -1,228 +0,0 @@
package dbus
import (
"encoding/binary"
"io"
"reflect"
)
type decoder struct {
in io.Reader
order binary.ByteOrder
pos int
}
// newDecoder returns a new decoder that reads values from in. The input is
// expected to be in the given byte order.
func newDecoder(in io.Reader, order binary.ByteOrder) *decoder {
dec := new(decoder)
dec.in = in
dec.order = order
return dec
}
// align aligns the input to the given boundary and panics on error.
func (dec *decoder) align(n int) {
if dec.pos%n != 0 {
newpos := (dec.pos + n - 1) & ^(n - 1)
empty := make([]byte, newpos-dec.pos)
if _, err := io.ReadFull(dec.in, empty); err != nil {
panic(err)
}
dec.pos = newpos
}
}
// Calls binary.Read(dec.in, dec.order, v) and panics on read errors.
func (dec *decoder) binread(v interface{}) {
if err := binary.Read(dec.in, dec.order, v); err != nil {
panic(err)
}
}
func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) {
defer func() {
var ok bool
v := recover()
if err, ok = v.(error); ok {
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = FormatError("unexpected EOF")
}
}
}()
vs = make([]interface{}, 0)
s := sig.str
for s != "" {
err, rem := validSingle(s, 0)
if err != nil {
return nil, err
}
v := dec.decode(s[:len(s)-len(rem)], 0)
vs = append(vs, v)
s = rem
}
return vs, nil
}
func (dec *decoder) decode(s string, depth int) interface{} {
dec.align(alignment(typeFor(s)))
switch s[0] {
case 'y':
var b [1]byte
if _, err := dec.in.Read(b[:]); err != nil {
panic(err)
}
dec.pos++
return b[0]
case 'b':
i := dec.decode("u", depth).(uint32)
switch {
case i == 0:
return false
case i == 1:
return true
default:
panic(FormatError("invalid value for boolean"))
}
case 'n':
var i int16
dec.binread(&i)
dec.pos += 2
return i
case 'i':
var i int32
dec.binread(&i)
dec.pos += 4
return i
case 'x':
var i int64
dec.binread(&i)
dec.pos += 8
return i
case 'q':
var i uint16
dec.binread(&i)
dec.pos += 2
return i
case 'u':
var i uint32
dec.binread(&i)
dec.pos += 4
return i
case 't':
var i uint64
dec.binread(&i)
dec.pos += 8
return i
case 'd':
var f float64
dec.binread(&f)
dec.pos += 8
return f
case 's':
length := dec.decode("u", depth).(uint32)
b := make([]byte, int(length)+1)
if _, err := io.ReadFull(dec.in, b); err != nil {
panic(err)
}
dec.pos += int(length) + 1
return string(b[:len(b)-1])
case 'o':
return ObjectPath(dec.decode("s", depth).(string))
case 'g':
length := dec.decode("y", depth).(byte)
b := make([]byte, int(length)+1)
if _, err := io.ReadFull(dec.in, b); err != nil {
panic(err)
}
dec.pos += int(length) + 1
sig, err := ParseSignature(string(b[:len(b)-1]))
if err != nil {
panic(err)
}
return sig
case 'v':
if depth >= 64 {
panic(FormatError("input exceeds container depth limit"))
}
var variant Variant
sig := dec.decode("g", depth).(Signature)
if len(sig.str) == 0 {
panic(FormatError("variant signature is empty"))
}
err, rem := validSingle(sig.str, 0)
if err != nil {
panic(err)
}
if rem != "" {
panic(FormatError("variant signature has multiple types"))
}
variant.sig = sig
variant.value = dec.decode(sig.str, depth+1)
return variant
case 'h':
return UnixFDIndex(dec.decode("u", depth).(uint32))
case 'a':
if len(s) > 1 && s[1] == '{' {
ksig := s[2:3]
vsig := s[3 : len(s)-1]
v := reflect.MakeMap(reflect.MapOf(typeFor(ksig), typeFor(vsig)))
if depth >= 63 {
panic(FormatError("input exceeds container depth limit"))
}
length := dec.decode("u", depth).(uint32)
// Even for empty maps, the correct padding must be included
dec.align(8)
spos := dec.pos
for dec.pos < spos+int(length) {
dec.align(8)
if !isKeyType(v.Type().Key()) {
panic(InvalidTypeError{v.Type()})
}
kv := dec.decode(ksig, depth+2)
vv := dec.decode(vsig, depth+2)
v.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
}
return v.Interface()
}
if depth >= 64 {
panic(FormatError("input exceeds container depth limit"))
}
length := dec.decode("u", depth).(uint32)
v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length))
// Even for empty arrays, the correct padding must be included
dec.align(alignment(typeFor(s[1:])))
spos := dec.pos
for dec.pos < spos+int(length) {
ev := dec.decode(s[1:], depth+1)
v = reflect.Append(v, reflect.ValueOf(ev))
}
return v.Interface()
case '(':
if depth >= 64 {
panic(FormatError("input exceeds container depth limit"))
}
dec.align(8)
v := make([]interface{}, 0)
s = s[1 : len(s)-1]
for s != "" {
err, rem := validSingle(s, 0)
if err != nil {
panic(err)
}
ev := dec.decode(s[:len(s)-len(rem)], depth+1)
v = append(v, ev)
s = rem
}
return v
default:
panic(SignatureError{Sig: s})
}
}
// A FormatError is an error in the wire format.
type FormatError string
func (e FormatError) Error() string {
return "dbus: wire format error: " + string(e)
}

vendor/github.com/godbus/dbus/doc.go generated vendored
@ -1,63 +0,0 @@
/*
Package dbus implements bindings to the D-Bus message bus system.
To use the message bus API, you first need to connect to a bus (usually the
session or system bus). The acquired connection then can be used to call methods
on remote objects and emit or receive signals. Using the Export method, you can
arrange D-Bus method calls to be directly translated to method calls on a Go
value.
Conversion Rules
For outgoing messages, Go types are automatically converted to the
corresponding D-Bus types. The following types are directly encoded as their
respective D-Bus equivalents:
Go type | D-Bus type
------------+-----------
byte | BYTE
bool | BOOLEAN
int16 | INT16
uint16 | UINT16
int32 | INT32
uint32 | UINT32
int64 | INT64
uint64 | UINT64
float64 | DOUBLE
string | STRING
ObjectPath | OBJECT_PATH
Signature | SIGNATURE
Variant | VARIANT
UnixFDIndex | UNIX_FD
Slices and arrays encode as ARRAYs of their element type.
Maps encode as DICTs, provided that their key type can be used as a key for
a DICT.
Structs other than Variant and Signature encode as a STRUCT containing their
exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will
be skipped.
Pointers encode as the value they're pointed to.
Trying to encode any other type or a slice, map or struct containing an
unsupported type will result in an InvalidTypeError.
For incoming messages, the inverse of these rules is used, with the exception
of STRUCTs. Incoming STRUCTs are represented as a slice of empty interfaces
containing the struct fields in the correct order. The Store function can be
used to convert such values to Go structs.
Unix FD passing
Handling Unix file descriptors deserves special mention. To use them, you should
first check that they are supported on a connection by calling SupportsUnixFDs.
If it returns true, all methods of Conn will translate messages containing
UnixFD's to messages that are accompanied by the given file descriptors with the
UnixFD values being substituted by the correct indices. Similarly, the indices
of incoming messages are automatically resolved. It shouldn't be necessary to use
UnixFDIndex.
*/
package dbus
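// Illustrative end-to-end sketch (not part of the vendored source) of the API
// described in the package documentation above. It assumes the SessionBus
// constructor defined in conn.go (not part of this excerpt), the Call/Store
// helpers from object.go and call.go, an fmt import, and the package imported
// as dbus; the bus name and object path are the standard ones of the message
// bus itself.
func printBusOwner() error {
	conn, err := dbus.SessionBus()
	if err != nil {
		return err
	}
	obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
	var owner string
	if err := obj.Call("org.freedesktop.DBus.GetNameOwner", 0,
		"org.freedesktop.DBus").Store(&owner); err != nil {
		return err
	}
	fmt.Println("the bus itself is owned by", owner)
	return nil
}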

@ -1,208 +0,0 @@
package dbus
import (
"bytes"
"encoding/binary"
"io"
"reflect"
)
// An encoder encodes values to the D-Bus wire format.
type encoder struct {
out io.Writer
order binary.ByteOrder
pos int
}
// NewEncoder returns a new encoder that writes to out in the given byte order.
func newEncoder(out io.Writer, order binary.ByteOrder) *encoder {
return newEncoderAtOffset(out, 0, order)
}
// newEncoderAtOffset returns a new encoder that writes to out in the given
// byte order. Specify the offset to initialize pos for proper alignment
// computation.
func newEncoderAtOffset(out io.Writer, offset int, order binary.ByteOrder) *encoder {
enc := new(encoder)
enc.out = out
enc.order = order
enc.pos = offset
return enc
}
// Aligns the next output to be on a multiple of n. Panics on write errors.
func (enc *encoder) align(n int) {
pad := enc.padding(0, n)
if pad > 0 {
empty := make([]byte, pad)
if _, err := enc.out.Write(empty); err != nil {
panic(err)
}
enc.pos += pad
}
}
// padding returns the number of bytes of padding needed to reach the given
// alignment algn, based on the current position plus the additional offset.
func (enc *encoder) padding(offset, algn int) int {
abs := enc.pos + offset
if abs%algn != 0 {
newabs := (abs + algn - 1) & ^(algn - 1)
return newabs - abs
}
return 0
}
// Calls binary.Write(enc.out, enc.order, v) and panics on write errors.
func (enc *encoder) binwrite(v interface{}) {
if err := binary.Write(enc.out, enc.order, v); err != nil {
panic(err)
}
}
// Encode encodes the given values to the underlying writer. All written values
// are aligned properly as required by the D-Bus spec.
func (enc *encoder) Encode(vs ...interface{}) (err error) {
defer func() {
err, _ = recover().(error)
}()
for _, v := range vs {
enc.encode(reflect.ValueOf(v), 0)
}
return nil
}
// encode encodes the given value to the writer and panics on error. depth holds
// the depth of the container nesting.
func (enc *encoder) encode(v reflect.Value, depth int) {
enc.align(alignment(v.Type()))
switch v.Kind() {
case reflect.Uint8:
var b [1]byte
b[0] = byte(v.Uint())
if _, err := enc.out.Write(b[:]); err != nil {
panic(err)
}
enc.pos++
case reflect.Bool:
if v.Bool() {
enc.encode(reflect.ValueOf(uint32(1)), depth)
} else {
enc.encode(reflect.ValueOf(uint32(0)), depth)
}
case reflect.Int16:
enc.binwrite(int16(v.Int()))
enc.pos += 2
case reflect.Uint16:
enc.binwrite(uint16(v.Uint()))
enc.pos += 2
case reflect.Int32:
enc.binwrite(int32(v.Int()))
enc.pos += 4
case reflect.Uint32:
enc.binwrite(uint32(v.Uint()))
enc.pos += 4
case reflect.Int64:
enc.binwrite(v.Int())
enc.pos += 8
case reflect.Uint64:
enc.binwrite(v.Uint())
enc.pos += 8
case reflect.Float64:
enc.binwrite(v.Float())
enc.pos += 8
case reflect.String:
enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth)
b := make([]byte, v.Len()+1)
copy(b, v.String())
b[len(b)-1] = 0
n, err := enc.out.Write(b)
if err != nil {
panic(err)
}
enc.pos += n
case reflect.Ptr:
enc.encode(v.Elem(), depth)
case reflect.Slice, reflect.Array:
if depth >= 64 {
panic(FormatError("input exceeds container depth limit"))
}
// Lookahead offset: 4 bytes for uint32 length (with alignment),
// plus alignment for elements.
n := enc.padding(0, 4) + 4
offset := enc.pos + n + enc.padding(n, alignment(v.Type().Elem()))
var buf bytes.Buffer
bufenc := newEncoderAtOffset(&buf, offset, enc.order)
for i := 0; i < v.Len(); i++ {
bufenc.encode(v.Index(i), depth+1)
}
enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
length := buf.Len()
enc.align(alignment(v.Type().Elem()))
if _, err := buf.WriteTo(enc.out); err != nil {
panic(err)
}
enc.pos += length
case reflect.Struct:
if depth >= 64 && v.Type() != signatureType {
panic(FormatError("input exceeds container depth limit"))
}
switch t := v.Type(); t {
case signatureType:
str := v.Field(0)
enc.encode(reflect.ValueOf(byte(str.Len())), depth+1)
b := make([]byte, str.Len()+1)
copy(b, str.String())
b[len(b)-1] = 0
n, err := enc.out.Write(b)
if err != nil {
panic(err)
}
enc.pos += n
case variantType:
variant := v.Interface().(Variant)
enc.encode(reflect.ValueOf(variant.sig), depth+1)
enc.encode(reflect.ValueOf(variant.value), depth+1)
default:
for i := 0; i < v.Type().NumField(); i++ {
field := t.Field(i)
if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
enc.encode(v.Field(i), depth+1)
}
}
}
case reflect.Map:
// Maps are arrays of structures, so they actually increase the depth by
// 2.
if depth >= 63 {
panic(FormatError("input exceeds container depth limit"))
}
if !isKeyType(v.Type().Key()) {
panic(InvalidTypeError{v.Type()})
}
keys := v.MapKeys()
// Lookahead offset: 4 bytes for uint32 length (with alignment),
// plus 8-byte alignment
n := enc.padding(0, 4) + 4
offset := enc.pos + n + enc.padding(n, 8)
var buf bytes.Buffer
bufenc := newEncoderAtOffset(&buf, offset, enc.order)
for _, k := range keys {
bufenc.align(8)
bufenc.encode(k, depth+2)
bufenc.encode(v.MapIndex(k), depth+2)
}
enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
length := buf.Len()
enc.align(8)
if _, err := buf.WriteTo(enc.out); err != nil {
panic(err)
}
enc.pos += length
default:
panic(InvalidTypeError{v.Type()})
}
}

@ -1,411 +0,0 @@
package dbus
import (
"errors"
"fmt"
"reflect"
"strings"
)
var (
errmsgInvalidArg = Error{
"org.freedesktop.DBus.Error.InvalidArgs",
[]interface{}{"Invalid type / number of args"},
}
errmsgNoObject = Error{
"org.freedesktop.DBus.Error.NoSuchObject",
[]interface{}{"No such object"},
}
errmsgUnknownMethod = Error{
"org.freedesktop.DBus.Error.UnknownMethod",
[]interface{}{"Unknown / invalid method"},
}
)
// exportWithMapping represents an exported struct along with a method name
// mapping to allow for exporting lower-case methods, etc.
type exportWithMapping struct {
export interface{}
// Method name mapping; key -> struct method, value -> dbus method.
mapping map[string]string
// Whether or not this export is for the entire subtree
includeSubtree bool
}
// Sender is a type which can be used in exported methods to receive the message
// sender.
type Sender string
func exportedMethod(export exportWithMapping, name string) reflect.Value {
if export.export == nil {
return reflect.Value{}
}
// If a mapping was included in the export, check the map to see if we
// should be looking for a different method in the export.
if export.mapping != nil {
for key, value := range export.mapping {
if value == name {
name = key
break
}
// Catch the case where a method is aliased but the client is calling
// the original, e.g. the "Foo" method was exported mapped to
// "foo," and dbus client called the original "Foo."
if key == name {
return reflect.Value{}
}
}
}
value := reflect.ValueOf(export.export)
m := value.MethodByName(name)
// Catch the case of attempting to call an unexported method
method, ok := value.Type().MethodByName(name)
if !m.IsValid() || !ok || method.PkgPath != "" {
return reflect.Value{}
}
t := m.Type()
if t.NumOut() == 0 ||
t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) {
return reflect.Value{}
}
return m
}
// searchHandlers will look through all registered handlers looking for one
// to handle the given path. If a verbatim one isn't found, it will check for
// a subtree registration for the path as well.
func (conn *Conn) searchHandlers(path ObjectPath) (map[string]exportWithMapping, bool) {
conn.handlersLck.RLock()
defer conn.handlersLck.RUnlock()
handlers, ok := conn.handlers[path]
if ok {
return handlers, ok
}
// If handlers weren't found for this exact path, look for a matching subtree
// registration
handlers = make(map[string]exportWithMapping)
path = path[:strings.LastIndex(string(path), "/")]
for len(path) > 0 {
var subtreeHandlers map[string]exportWithMapping
subtreeHandlers, ok = conn.handlers[path]
if ok {
for iface, handler := range subtreeHandlers {
// Only include this handler if it registered for the subtree
if handler.includeSubtree {
handlers[iface] = handler
}
}
break
}
path = path[:strings.LastIndex(string(path), "/")]
}
return handlers, ok
}
// handleCall handles the given method call (i.e. checks whether it is one of
// the pre-implemented ones and searches for a corresponding handler if not).
func (conn *Conn) handleCall(msg *Message) {
name := msg.Headers[FieldMember].value.(string)
path := msg.Headers[FieldPath].value.(ObjectPath)
ifaceName, hasIface := msg.Headers[FieldInterface].value.(string)
sender, hasSender := msg.Headers[FieldSender].value.(string)
serial := msg.serial
if ifaceName == "org.freedesktop.DBus.Peer" {
switch name {
case "Ping":
conn.sendReply(sender, serial)
case "GetMachineId":
conn.sendReply(sender, serial, conn.uuid)
default:
conn.sendError(errmsgUnknownMethod, sender, serial)
}
return
}
	if len(name) == 0 {
		conn.sendError(errmsgUnknownMethod, sender, serial)
		return
	}
// Find the exported handler (if any) for this path
handlers, ok := conn.searchHandlers(path)
if !ok {
conn.sendError(errmsgNoObject, sender, serial)
return
}
var m reflect.Value
if hasIface {
iface := handlers[ifaceName]
m = exportedMethod(iface, name)
} else {
for _, v := range handlers {
m = exportedMethod(v, name)
if m.IsValid() {
break
}
}
}
if !m.IsValid() {
conn.sendError(errmsgUnknownMethod, sender, serial)
return
}
t := m.Type()
vs := msg.Body
pointers := make([]interface{}, t.NumIn())
decode := make([]interface{}, 0, len(vs))
for i := 0; i < t.NumIn(); i++ {
tp := t.In(i)
val := reflect.New(tp)
pointers[i] = val.Interface()
if tp == reflect.TypeOf((*Sender)(nil)).Elem() {
val.Elem().SetString(sender)
} else if tp == reflect.TypeOf((*Message)(nil)).Elem() {
val.Elem().Set(reflect.ValueOf(*msg))
} else {
decode = append(decode, pointers[i])
}
}
if len(decode) != len(vs) {
conn.sendError(errmsgInvalidArg, sender, serial)
return
}
if err := Store(vs, decode...); err != nil {
conn.sendError(errmsgInvalidArg, sender, serial)
return
}
// Extract parameters
params := make([]reflect.Value, len(pointers))
for i := 0; i < len(pointers); i++ {
params[i] = reflect.ValueOf(pointers[i]).Elem()
}
// Call method
ret := m.Call(params)
if em := ret[t.NumOut()-1].Interface().(*Error); em != nil {
conn.sendError(*em, sender, serial)
return
}
if msg.Flags&FlagNoReplyExpected == 0 {
reply := new(Message)
reply.Type = TypeMethodReply
reply.serial = conn.getSerial()
reply.Headers = make(map[HeaderField]Variant)
if hasSender {
reply.Headers[FieldDestination] = msg.Headers[FieldSender]
}
reply.Headers[FieldReplySerial] = MakeVariant(msg.serial)
reply.Body = make([]interface{}, len(ret)-1)
for i := 0; i < len(ret)-1; i++ {
reply.Body[i] = ret[i].Interface()
}
if len(ret) != 1 {
reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
}
conn.outLck.RLock()
if !conn.closed {
conn.out <- reply
}
conn.outLck.RUnlock()
}
}
// Emit emits the given signal on the message bus. The name parameter must be
// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost".
func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error {
if !path.IsValid() {
return errors.New("dbus: invalid object path")
}
i := strings.LastIndex(name, ".")
if i == -1 {
return errors.New("dbus: invalid method name")
}
iface := name[:i]
member := name[i+1:]
if !isValidMember(member) {
return errors.New("dbus: invalid method name")
}
if !isValidInterface(iface) {
return errors.New("dbus: invalid interface name")
}
msg := new(Message)
msg.Type = TypeSignal
msg.serial = conn.getSerial()
msg.Headers = make(map[HeaderField]Variant)
msg.Headers[FieldInterface] = MakeVariant(iface)
msg.Headers[FieldMember] = MakeVariant(member)
msg.Headers[FieldPath] = MakeVariant(path)
msg.Body = values
if len(values) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
}
conn.outLck.RLock()
defer conn.outLck.RUnlock()
if conn.closed {
return ErrClosed
}
conn.out <- msg
return nil
}
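// Illustrative sketch (not part of the vendored source): emitting a custom
// signal. The object path, interface and member names are hypothetical, and
// the values are encoded according to the conversion rules in doc.go; the
// package is assumed imported as dbus.
func emitConfigChanged(conn *dbus.Conn, generation uint32) error {
	return conn.Emit("/com/example/App", "com.example.App.ConfigChanged",
		"reload", generation)
}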
// Export registers the given value to be exported as an object on the
// message bus.
//
// If a method call on the given path and interface is received, an exported
// method with the same name is called with v as the receiver if the
// parameters match and the last return value is of type *Error. If this
// *Error is not nil, it is sent back to the caller as an error.
// Otherwise, a method reply is sent with the other return values as its body.
//
// Any parameters with the special type Sender are set to the sender of the
// dbus message when the method is called. Parameters of this type do not
// contribute to the dbus signature of the method (i.e. the method is exposed
// as if the parameters of type Sender were not there).
//
// Similarly, any parameters with the type Message are set to the raw message
// received on the bus. Again, parameters of this type do not contribute to the
// dbus signature of the method.
//
// Every method call is executed in a new goroutine, so the method may be called
// in multiple goroutines at once.
//
// Method calls on the interface org.freedesktop.DBus.Peer will be automatically
// handled for every object.
//
// Passing nil as the first parameter will cause conn to cease handling calls on
// the given combination of path and interface.
//
// Export returns an error if path is not a valid path name.
func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error {
return conn.ExportWithMap(v, nil, path, iface)
}
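// Illustrative sketch (not part of the vendored source): an exported object
// satisfying the contract above, i.e. exported methods whose last return value
// is *Error (nil on success). All names are hypothetical; the package is
// assumed imported as dbus.
type calc struct{}

func (calc) Add(a, b int32) (int32, *dbus.Error) {
	return a + b, nil
}

func (calc) Div(a, b int32) (int32, *dbus.Error) {
	if b == 0 {
		return 0, dbus.NewError("com.example.Calc.Error.DivByZero",
			[]interface{}{"division by zero"})
	}
	return a / b, nil
}

func exportCalc(conn *dbus.Conn) error {
	return conn.Export(calc{}, "/com/example/Calc", "com.example.Calc")
}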
// ExportWithMap works exactly like Export but provides the ability to remap
// method names (e.g. export a lower-case method).
//
// The keys in the map are the real method names (exported on the struct), and
// the values are the method names to be exported on DBus.
func (conn *Conn) ExportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
return conn.exportWithMap(v, mapping, path, iface, false)
}
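// Continuing the calc sketch above (still illustrative, not part of the
// vendored source): the mapping makes the struct method Add appear as the
// lower-case member "add" on the bus.
func exportCalcLowerCase(conn *dbus.Conn) error {
	return conn.ExportWithMap(calc{}, map[string]string{"Add": "add"},
		"/com/example/Calc", "com.example.Calc")
}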
// ExportSubtree works exactly like Export but registers the given value for
// an entire subtree rather than just the root path provided.
//
// In order to make this useful, one parameter in each of the value's exported
// methods should be a Message, in which case it will contain the raw message
// (allowing one to get access to the path that caused the method to be called).
//
// Note that more specific export paths take precedence over less specific. For
// example, a method call using the ObjectPath /foo/bar/baz will call a method
// exported on /foo/bar before a method exported on /foo.
func (conn *Conn) ExportSubtree(v interface{}, path ObjectPath, iface string) error {
return conn.ExportSubtreeWithMap(v, nil, path, iface)
}
// ExportSubtreeWithMap works exactly like ExportSubtree but provides the
// ability to remap method names (e.g. export a lower-case method).
//
// The keys in the map are the real method names (exported on the struct), and
// the values are the method names to be exported on DBus.
func (conn *Conn) ExportSubtreeWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
return conn.exportWithMap(v, mapping, path, iface, true)
}
// exportWithMap is the worker function for all exports/registrations.
func (conn *Conn) exportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string, includeSubtree bool) error {
if !path.IsValid() {
return fmt.Errorf(`dbus: Invalid path name: "%s"`, path)
}
conn.handlersLck.Lock()
defer conn.handlersLck.Unlock()
// Remove a previous export if the interface is nil
if v == nil {
if _, ok := conn.handlers[path]; ok {
delete(conn.handlers[path], iface)
if len(conn.handlers[path]) == 0 {
delete(conn.handlers, path)
}
}
return nil
}
// If this is the first handler for this path, make a new map to hold all
// handlers for this path.
if _, ok := conn.handlers[path]; !ok {
conn.handlers[path] = make(map[string]exportWithMapping)
}
// Finally, save this handler
conn.handlers[path][iface] = exportWithMapping{export: v, mapping: mapping, includeSubtree: includeSubtree}
return nil
}
// ReleaseName calls org.freedesktop.DBus.ReleaseName and awaits a response.
func (conn *Conn) ReleaseName(name string) (ReleaseNameReply, error) {
var r uint32
err := conn.busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&r)
if err != nil {
return 0, err
}
return ReleaseNameReply(r), nil
}
// RequestName calls org.freedesktop.DBus.RequestName and awaits a response.
func (conn *Conn) RequestName(name string, flags RequestNameFlags) (RequestNameReply, error) {
var r uint32
err := conn.busObj.Call("org.freedesktop.DBus.RequestName", 0, name, flags).Store(&r)
if err != nil {
return 0, err
}
return RequestNameReply(r), nil
}
// ReleaseNameReply is the reply to a ReleaseName call.
type ReleaseNameReply uint32
const (
ReleaseNameReplyReleased ReleaseNameReply = 1 + iota
ReleaseNameReplyNonExistent
ReleaseNameReplyNotOwner
)
// RequestNameFlags represents the possible flags for a RequestName call.
type RequestNameFlags uint32
const (
NameFlagAllowReplacement RequestNameFlags = 1 << iota
NameFlagReplaceExisting
NameFlagDoNotQueue
)
// RequestNameReply is the reply to a RequestName call.
type RequestNameReply uint32
const (
RequestNameReplyPrimaryOwner RequestNameReply = 1 + iota
RequestNameReplyInQueue
RequestNameReplyExists
RequestNameReplyAlreadyOwner
)
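// Illustrative sketch (not part of the vendored source): claiming a well-known
// name with the flags and reply constants above. The bus name is hypothetical;
// an fmt import and the package imported as dbus are assumed.
func claimName(conn *dbus.Conn) error {
	reply, err := conn.RequestName("com.example.App", dbus.NameFlagDoNotQueue)
	if err != nil {
		return err
	}
	if reply != dbus.RequestNameReplyPrimaryOwner {
		return fmt.Errorf("com.example.App is already taken (reply %d)", reply)
	}
	return nil
}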

@ -1,28 +0,0 @@
package dbus
import (
"os"
"sync"
)
var (
homeDir string
homeDirLock sync.Mutex
)
func getHomeDir() string {
homeDirLock.Lock()
defer homeDirLock.Unlock()
if homeDir != "" {
return homeDir
}
homeDir = os.Getenv("HOME")
if homeDir != "" {
return homeDir
}
homeDir = lookupHomeDir()
return homeDir
}

@ -1,15 +0,0 @@
// +build !static_build
package dbus
import (
"os/user"
)
func lookupHomeDir() string {
u, err := user.Current()
if err != nil {
return "/"
}
return u.HomeDir
}

@ -1,45 +0,0 @@
// +build static_build
package dbus
import (
"bufio"
"os"
"strconv"
"strings"
)
func lookupHomeDir() string {
myUid := os.Getuid()
f, err := os.Open("/etc/passwd")
if err != nil {
return "/"
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if err := s.Err(); err != nil {
break
}
line := strings.TrimSpace(s.Text())
if line == "" {
continue
}
parts := strings.Split(line, ":")
if len(parts) >= 6 {
uid, err := strconv.Atoi(parts[2])
if err == nil && uid == myUid {
return parts[5]
}
}
}
// Default to / if we can't get a better value
return "/"
}

@ -1,346 +0,0 @@
package dbus
import (
"bytes"
"encoding/binary"
"errors"
"io"
"reflect"
"strconv"
)
const protoVersion byte = 1
// Flags represents the possible flags of a D-Bus message.
type Flags byte
const (
// FlagNoReplyExpected signals that the message is not expected to generate
// a reply. If this flag is set on outgoing messages, any possible reply
// will be discarded.
FlagNoReplyExpected Flags = 1 << iota
// FlagNoAutoStart signals that the message bus should not automatically
// start an application when handling this message.
FlagNoAutoStart
)
// Type represents the possible types of a D-Bus message.
type Type byte
const (
TypeMethodCall Type = 1 + iota
TypeMethodReply
TypeError
TypeSignal
typeMax
)
func (t Type) String() string {
switch t {
case TypeMethodCall:
return "method call"
case TypeMethodReply:
return "reply"
case TypeError:
return "error"
case TypeSignal:
return "signal"
}
return "invalid"
}
// HeaderField represents the possible byte codes for the headers
// of a D-Bus message.
type HeaderField byte
const (
FieldPath HeaderField = 1 + iota
FieldInterface
FieldMember
FieldErrorName
FieldReplySerial
FieldDestination
FieldSender
FieldSignature
FieldUnixFDs
fieldMax
)
// An InvalidMessageError describes the reason why a D-Bus message is regarded as
// invalid.
type InvalidMessageError string
func (e InvalidMessageError) Error() string {
return "dbus: invalid message: " + string(e)
}
// fieldTypes are the types of the various header fields.
var fieldTypes = [fieldMax]reflect.Type{
FieldPath: objectPathType,
FieldInterface: stringType,
FieldMember: stringType,
FieldErrorName: stringType,
FieldReplySerial: uint32Type,
FieldDestination: stringType,
FieldSender: stringType,
FieldSignature: signatureType,
FieldUnixFDs: uint32Type,
}
// requiredFields lists the header fields that are required by the different
// message types.
var requiredFields = [typeMax][]HeaderField{
TypeMethodCall: {FieldPath, FieldMember},
TypeMethodReply: {FieldReplySerial},
TypeError: {FieldErrorName, FieldReplySerial},
TypeSignal: {FieldPath, FieldInterface, FieldMember},
}
// Message represents a single D-Bus message.
type Message struct {
Type
Flags
Headers map[HeaderField]Variant
Body []interface{}
serial uint32
}
type header struct {
Field byte
Variant
}
// DecodeMessage tries to decode a single message in the D-Bus wire format
// from the given reader. The byte order is figured out from the first byte.
// The possibly returned error can be an error of the underlying reader, an
// InvalidMessageError or a FormatError.
func DecodeMessage(rd io.Reader) (msg *Message, err error) {
var order binary.ByteOrder
var hlength, length uint32
var typ, flags, proto byte
var headers []header
b := make([]byte, 1)
_, err = rd.Read(b)
if err != nil {
return
}
switch b[0] {
case 'l':
order = binary.LittleEndian
case 'B':
order = binary.BigEndian
default:
return nil, InvalidMessageError("invalid byte order")
}
dec := newDecoder(rd, order)
dec.pos = 1
msg = new(Message)
vs, err := dec.Decode(Signature{"yyyuu"})
if err != nil {
return nil, err
}
if err = Store(vs, &typ, &flags, &proto, &length, &msg.serial); err != nil {
return nil, err
}
msg.Type = Type(typ)
msg.Flags = Flags(flags)
// get the header length separately because we need it later
b = make([]byte, 4)
_, err = io.ReadFull(rd, b)
if err != nil {
return nil, err
}
binary.Read(bytes.NewBuffer(b), order, &hlength)
if hlength+length+16 > 1<<27 {
return nil, InvalidMessageError("message is too long")
}
dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order)
dec.pos = 12
vs, err = dec.Decode(Signature{"a(yv)"})
if err != nil {
return nil, err
}
if err = Store(vs, &headers); err != nil {
return nil, err
}
msg.Headers = make(map[HeaderField]Variant)
for _, v := range headers {
msg.Headers[HeaderField(v.Field)] = v.Variant
}
dec.align(8)
body := make([]byte, int(length))
if length != 0 {
_, err := io.ReadFull(rd, body)
if err != nil {
return nil, err
}
}
if err = msg.IsValid(); err != nil {
return nil, err
}
sig, _ := msg.Headers[FieldSignature].value.(Signature)
if sig.str != "" {
buf := bytes.NewBuffer(body)
dec = newDecoder(buf, order)
vs, err := dec.Decode(sig)
if err != nil {
return nil, err
}
msg.Body = vs
}
return
}
// EncodeTo encodes and sends a message to the given writer. The byte order must
// be either binary.LittleEndian or binary.BigEndian. If the message is not
// valid or an error occurs when writing, an error is returned.
func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error {
if err := msg.IsValid(); err != nil {
return err
}
var vs [7]interface{}
switch order {
case binary.LittleEndian:
vs[0] = byte('l')
case binary.BigEndian:
vs[0] = byte('B')
default:
return errors.New("dbus: invalid byte order")
}
body := new(bytes.Buffer)
enc := newEncoder(body, order)
if len(msg.Body) != 0 {
enc.Encode(msg.Body...)
}
vs[1] = msg.Type
vs[2] = msg.Flags
vs[3] = protoVersion
vs[4] = uint32(len(body.Bytes()))
vs[5] = msg.serial
headers := make([]header, 0, len(msg.Headers))
for k, v := range msg.Headers {
headers = append(headers, header{byte(k), v})
}
vs[6] = headers
var buf bytes.Buffer
enc = newEncoder(&buf, order)
enc.Encode(vs[:]...)
enc.align(8)
body.WriteTo(&buf)
if buf.Len() > 1<<27 {
return InvalidMessageError("message is too long")
}
if _, err := buf.WriteTo(out); err != nil {
return err
}
return nil
}
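// Illustrative sketch (not part of the vendored source): a wire-format round
// trip tying EncodeTo and DecodeMessage together. The signal content is
// arbitrary, the serial is left at its zero value (IsValid does not check it),
// and bytes plus encoding/binary imports are assumed, with the package imported
// as dbus.
func wireRoundTrip() (*dbus.Message, error) {
	msg := &dbus.Message{
		Type: dbus.TypeSignal,
		Headers: map[dbus.HeaderField]dbus.Variant{
			dbus.FieldPath:      dbus.MakeVariant(dbus.ObjectPath("/com/example/App")),
			dbus.FieldInterface: dbus.MakeVariant("com.example.App"),
			dbus.FieldMember:    dbus.MakeVariant("Ping"),
			dbus.FieldSignature: dbus.MakeVariant(dbus.SignatureOf("hello")),
		},
		Body: []interface{}{"hello"},
	}
	var buf bytes.Buffer
	if err := msg.EncodeTo(&buf, binary.LittleEndian); err != nil {
		return nil, err
	}
	return dbus.DecodeMessage(&buf) // decoded Body[0] == "hello"
}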
// IsValid checks whether msg is a valid message and returns an
// InvalidMessageError if it is not.
func (msg *Message) IsValid() error {
if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected) != 0 {
return InvalidMessageError("invalid flags")
}
if msg.Type == 0 || msg.Type >= typeMax {
return InvalidMessageError("invalid message type")
}
for k, v := range msg.Headers {
if k == 0 || k >= fieldMax {
return InvalidMessageError("invalid header")
}
if reflect.TypeOf(v.value) != fieldTypes[k] {
return InvalidMessageError("invalid type of header field")
}
}
for _, v := range requiredFields[msg.Type] {
if _, ok := msg.Headers[v]; !ok {
return InvalidMessageError("missing required header")
}
}
if path, ok := msg.Headers[FieldPath]; ok {
if !path.value.(ObjectPath).IsValid() {
return InvalidMessageError("invalid path name")
}
}
if iface, ok := msg.Headers[FieldInterface]; ok {
if !isValidInterface(iface.value.(string)) {
return InvalidMessageError("invalid interface name")
}
}
if member, ok := msg.Headers[FieldMember]; ok {
if !isValidMember(member.value.(string)) {
return InvalidMessageError("invalid member name")
}
}
if errname, ok := msg.Headers[FieldErrorName]; ok {
if !isValidInterface(errname.value.(string)) {
return InvalidMessageError("invalid error name")
}
}
if len(msg.Body) != 0 {
if _, ok := msg.Headers[FieldSignature]; !ok {
return InvalidMessageError("missing signature")
}
}
return nil
}
// Serial returns the message's serial number. The returned value is only valid
// for messages received by eavesdropping.
func (msg *Message) Serial() uint32 {
return msg.serial
}
// String returns a string representation of a message similar to the format of
// dbus-monitor.
func (msg *Message) String() string {
if err := msg.IsValid(); err != nil {
return "<invalid>"
}
s := msg.Type.String()
if v, ok := msg.Headers[FieldSender]; ok {
s += " from " + v.value.(string)
}
if v, ok := msg.Headers[FieldDestination]; ok {
s += " to " + v.value.(string)
}
s += " serial " + strconv.FormatUint(uint64(msg.serial), 10)
if v, ok := msg.Headers[FieldReplySerial]; ok {
s += " reply_serial " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
}
if v, ok := msg.Headers[FieldUnixFDs]; ok {
s += " unixfds " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
}
if v, ok := msg.Headers[FieldPath]; ok {
s += " path " + string(v.value.(ObjectPath))
}
if v, ok := msg.Headers[FieldInterface]; ok {
s += " interface " + v.value.(string)
}
if v, ok := msg.Headers[FieldErrorName]; ok {
s += " error " + v.value.(string)
}
if v, ok := msg.Headers[FieldMember]; ok {
s += " member " + v.value.(string)
}
if len(msg.Body) != 0 {
s += "\n"
}
for i, v := range msg.Body {
s += " " + MakeVariant(v).String()
if i != len(msg.Body)-1 {
s += "\n"
}
}
return s
}

@ -1,126 +0,0 @@
package dbus
import (
"errors"
"strings"
)
// BusObject is the interface of a remote object on which methods can be
// invoked.
type BusObject interface {
Call(method string, flags Flags, args ...interface{}) *Call
Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call
GetProperty(p string) (Variant, error)
Destination() string
Path() ObjectPath
}
// Object represents a remote object on which methods can be invoked.
type Object struct {
conn *Conn
dest string
path ObjectPath
}
// Call calls a method with (*Object).Go and waits for its reply.
func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call {
return <-o.Go(method, flags, make(chan *Call, 1), args...).Done
}
// Go calls a method with the given arguments asynchronously. It returns a
// Call structure representing this method call. The passed channel will
// return the same value once the call is done. If ch is nil, a new channel
// will be allocated. Otherwise, ch has to be buffered or Go will panic.
//
// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure
// is returned of which only the Err member is valid.
//
// If the method parameter contains a dot ('.'), the part before the last dot
// specifies the interface on which the method is called.
func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call {
iface := ""
i := strings.LastIndex(method, ".")
if i != -1 {
iface = method[:i]
}
method = method[i+1:]
msg := new(Message)
msg.Type = TypeMethodCall
msg.serial = o.conn.getSerial()
msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected)
msg.Headers = make(map[HeaderField]Variant)
msg.Headers[FieldPath] = MakeVariant(o.path)
msg.Headers[FieldDestination] = MakeVariant(o.dest)
msg.Headers[FieldMember] = MakeVariant(method)
if iface != "" {
msg.Headers[FieldInterface] = MakeVariant(iface)
}
msg.Body = args
if len(args) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...))
}
if msg.Flags&FlagNoReplyExpected == 0 {
if ch == nil {
ch = make(chan *Call, 10)
} else if cap(ch) == 0 {
panic("dbus: unbuffered channel passed to (*Object).Go")
}
call := &Call{
Destination: o.dest,
Path: o.path,
Method: method,
Args: args,
Done: ch,
}
o.conn.callsLck.Lock()
o.conn.calls[msg.serial] = call
o.conn.callsLck.Unlock()
o.conn.outLck.RLock()
if o.conn.closed {
call.Err = ErrClosed
call.Done <- call
} else {
o.conn.out <- msg
}
o.conn.outLck.RUnlock()
return call
}
o.conn.outLck.RLock()
defer o.conn.outLck.RUnlock()
if o.conn.closed {
return &Call{Err: ErrClosed}
}
o.conn.out <- msg
return &Call{Err: nil}
}
// GetProperty calls org.freedesktop.DBus.Properties.Get on the given
// object. The property name must be given in interface.member notation.
func (o *Object) GetProperty(p string) (Variant, error) {
idx := strings.LastIndex(p, ".")
if idx == -1 || idx+1 == len(p) {
return Variant{}, errors.New("dbus: invalid property " + p)
}
iface := p[:idx]
prop := p[idx+1:]
result := Variant{}
err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result)
if err != nil {
return Variant{}, err
}
return result, nil
}
// Destination returns the destination that calls on o are sent to.
func (o *Object) Destination() string {
return o.dest
}
// Path returns the path that calls on o are sent to.
func (o *Object) Path() ObjectPath {
return o.path
}
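// Illustrative sketch (not part of the vendored source): typical proxy-object
// usage combining Call, Store and GetProperty. GetId is a standard bus method;
// the Features property only exists on newer bus daemons and is used purely to
// illustrate GetProperty. An fmt import and the package imported as dbus are
// assumed.
func inspectBus(conn *dbus.Conn) error {
	obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
	var id string
	if err := obj.Call("org.freedesktop.DBus.GetId", 0).Store(&id); err != nil {
		return err
	}
	features, err := obj.GetProperty("org.freedesktop.DBus.Features")
	if err != nil {
		return err
	}
	fmt.Println("bus id:", id, "features:", features.String())
	return nil
}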

vendor/github.com/godbus/dbus/sig.go generated vendored
@ -1,257 +0,0 @@
package dbus
import (
"fmt"
"reflect"
"strings"
)
var sigToType = map[byte]reflect.Type{
'y': byteType,
'b': boolType,
'n': int16Type,
'q': uint16Type,
'i': int32Type,
'u': uint32Type,
'x': int64Type,
't': uint64Type,
'd': float64Type,
's': stringType,
'g': signatureType,
'o': objectPathType,
'v': variantType,
'h': unixFDIndexType,
}
// Signature represents a correct type signature as specified by the D-Bus
// specification. The zero value represents the empty signature, "".
type Signature struct {
str string
}
// SignatureOf returns the concatenation of all the signatures of the given
// values. It panics if one of them is not representable in D-Bus.
func SignatureOf(vs ...interface{}) Signature {
var s string
for _, v := range vs {
s += getSignature(reflect.TypeOf(v))
}
return Signature{s}
}
// SignatureOfType returns the signature of the given type. It panics if the
// type is not representable in D-Bus.
func SignatureOfType(t reflect.Type) Signature {
return Signature{getSignature(t)}
}
// getSignature returns the signature of the given type and panics on unknown types.
func getSignature(t reflect.Type) string {
// handle simple types first
switch t.Kind() {
case reflect.Uint8:
return "y"
case reflect.Bool:
return "b"
case reflect.Int16:
return "n"
case reflect.Uint16:
return "q"
case reflect.Int32:
if t == unixFDType {
return "h"
}
return "i"
case reflect.Uint32:
if t == unixFDIndexType {
return "h"
}
return "u"
case reflect.Int64:
return "x"
case reflect.Uint64:
return "t"
case reflect.Float64:
return "d"
case reflect.Ptr:
return getSignature(t.Elem())
case reflect.String:
if t == objectPathType {
return "o"
}
return "s"
case reflect.Struct:
if t == variantType {
return "v"
} else if t == signatureType {
return "g"
}
var s string
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
s += getSignature(t.Field(i).Type)
}
}
return "(" + s + ")"
case reflect.Array, reflect.Slice:
return "a" + getSignature(t.Elem())
case reflect.Map:
if !isKeyType(t.Key()) {
panic(InvalidTypeError{t})
}
return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}"
}
panic(InvalidTypeError{t})
}
// ParseSignature returns the signature represented by this string, or a
// SignatureError if the string is not a valid signature.
func ParseSignature(s string) (sig Signature, err error) {
if len(s) == 0 {
return
}
if len(s) > 255 {
return Signature{""}, SignatureError{s, "too long"}
}
sig.str = s
for err == nil && len(s) != 0 {
err, s = validSingle(s, 0)
}
if err != nil {
sig = Signature{""}
}
return
}
// ParseSignatureMust behaves like ParseSignature, except that it panics if s
// is not valid.
func ParseSignatureMust(s string) Signature {
sig, err := ParseSignature(s)
if err != nil {
panic(err)
}
return sig
}
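// Illustrative sketch (not part of the vendored source): the two directions of
// signature handling, deriving one from Go values and parsing one from a
// string. An fmt import and the package imported as dbus are assumed.
func signatureExamples() {
	fmt.Println(dbus.SignatureOf("", int32(0), map[string]dbus.Variant{})) // sia{sv}
	sig, err := dbus.ParseSignature("a(yv)")
	if err != nil {
		panic(err) // "a(yv)" is a valid signature, so this does not happen
	}
	fmt.Println(sig.String()) // a(yv)
}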
// Empty returns whether the signature is the empty signature.
func (s Signature) Empty() bool {
return s.str == ""
}
// Single returns whether the signature represents a single, complete type.
func (s Signature) Single() bool {
err, r := validSingle(s.str, 0)
	return err == nil && r == ""
}
// String returns the signature's string representation.
func (s Signature) String() string {
return s.str
}
// A SignatureError indicates that a signature passed to a function or received
// on a connection is not a valid signature.
type SignatureError struct {
Sig string
Reason string
}
func (e SignatureError) Error() string {
return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason)
}
// Try to read a single type from this string. If it was successful, err is nil
// and rem is the remaining unparsed part. Otherwise, err is a non-nil
// SignatureError and rem is "". depth is the current recursion depth which may
// not be greater than 64 and should be given as 0 on the first call.
func validSingle(s string, depth int) (err error, rem string) {
if s == "" {
return SignatureError{Sig: s, Reason: "empty signature"}, ""
}
if depth > 64 {
return SignatureError{Sig: s, Reason: "container nesting too deep"}, ""
}
switch s[0] {
case 'y', 'b', 'n', 'q', 'i', 'u', 'x', 't', 'd', 's', 'g', 'o', 'v', 'h':
return nil, s[1:]
case 'a':
if len(s) > 1 && s[1] == '{' {
i := findMatching(s[1:], '{', '}')
if i == -1 {
return SignatureError{Sig: s, Reason: "unmatched '{'"}, ""
}
i++
rem = s[i+1:]
s = s[2:i]
if err, _ = validSingle(s[:1], depth+1); err != nil {
return err, ""
}
err, nr := validSingle(s[1:], depth+1)
if err != nil {
return err, ""
}
if nr != "" {
return SignatureError{Sig: s, Reason: "too many types in dict"}, ""
}
return nil, rem
}
return validSingle(s[1:], depth+1)
case '(':
i := findMatching(s, '(', ')')
if i == -1 {
return SignatureError{Sig: s, Reason: "unmatched ')'"}, ""
}
rem = s[i+1:]
s = s[1:i]
for err == nil && s != "" {
err, s = validSingle(s, depth+1)
}
if err != nil {
rem = ""
}
return
}
return SignatureError{Sig: s, Reason: "invalid type character"}, ""
}
func findMatching(s string, left, right rune) int {
n := 0
for i, v := range s {
if v == left {
n++
} else if v == right {
n--
}
if n == 0 {
return i
}
}
return -1
}
// typeFor returns the type of the given signature. It ignores any left over
// characters and panics if s doesn't start with a valid type signature.
func typeFor(s string) (t reflect.Type) {
err, _ := validSingle(s, 0)
if err != nil {
panic(err)
}
if t, ok := sigToType[s[0]]; ok {
return t
}
switch s[0] {
case 'a':
if s[1] == '{' {
i := strings.LastIndex(s, "}")
t = reflect.MapOf(sigToType[s[2]], typeFor(s[3:i]))
} else {
t = reflect.SliceOf(typeFor(s[1:]))
}
case '(':
t = interfacesType
}
return
}

@ -1,6 +0,0 @@
package dbus
func (t *unixTransport) SendNullByte() error {
_, err := t.Write([]byte{0})
return err
}

@ -1,35 +0,0 @@
package dbus
import (
"encoding/binary"
"errors"
"io"
)
type genericTransport struct {
io.ReadWriteCloser
}
func (t genericTransport) SendNullByte() error {
_, err := t.Write([]byte{0})
return err
}
func (t genericTransport) SupportsUnixFDs() bool {
return false
}
func (t genericTransport) EnableUnixFDs() {}
func (t genericTransport) ReadMessage() (*Message, error) {
return DecodeMessage(t)
}
func (t genericTransport) SendMessage(msg *Message) error {
for _, v := range msg.Body {
if _, ok := v.(UnixFD); ok {
return errors.New("dbus: unix fd passing not enabled")
}
}
return msg.EncodeTo(t, binary.LittleEndian)
}

@ -1,196 +0,0 @@
//+build !windows
package dbus
import (
"bytes"
"encoding/binary"
"errors"
"io"
"net"
"syscall"
)
type oobReader struct {
conn *net.UnixConn
oob []byte
buf [4096]byte
}
func (o *oobReader) Read(b []byte) (n int, err error) {
n, oobn, flags, _, err := o.conn.ReadMsgUnix(b, o.buf[:])
if err != nil {
return n, err
}
if flags&syscall.MSG_CTRUNC != 0 {
return n, errors.New("dbus: control data truncated (too many fds received)")
}
o.oob = append(o.oob, o.buf[:oobn]...)
return n, nil
}
type unixTransport struct {
*net.UnixConn
hasUnixFDs bool
}
func newUnixTransport(keys string) (transport, error) {
var err error
t := new(unixTransport)
abstract := getKey(keys, "abstract")
path := getKey(keys, "path")
switch {
case abstract == "" && path == "":
return nil, errors.New("dbus: invalid address (neither path nor abstract set)")
case abstract != "" && path == "":
t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: "@" + abstract, Net: "unix"})
if err != nil {
return nil, err
}
return t, nil
case abstract == "" && path != "":
t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: path, Net: "unix"})
if err != nil {
return nil, err
}
return t, nil
default:
return nil, errors.New("dbus: invalid address (both path and abstract set)")
}
}
func init() {
transports["unix"] = newUnixTransport
}
func (t *unixTransport) EnableUnixFDs() {
t.hasUnixFDs = true
}
func (t *unixTransport) ReadMessage() (*Message, error) {
var (
blen, hlen uint32
csheader [16]byte
headers []header
order binary.ByteOrder
unixfds uint32
)
// To be sure that all bytes of out-of-band data are read, we use a special
// reader that uses ReadMsgUnix on the underlying connection instead of Read
// and gathers the out-of-band data in a buffer.
rd := &oobReader{conn: t.UnixConn}
// read the first 16 bytes (the part of the header that has a constant size),
// from which we can figure out the length of the rest of the message
if _, err := io.ReadFull(rd, csheader[:]); err != nil {
return nil, err
}
switch csheader[0] {
case 'l':
order = binary.LittleEndian
case 'B':
order = binary.BigEndian
default:
return nil, InvalidMessageError("invalid byte order")
}
// csheader[4:8] -> length of message body, csheader[12:16] -> length of
// header fields (without alignment)
binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen)
binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen)
if hlen%8 != 0 {
hlen += 8 - (hlen % 8)
}
// decode headers and look for unix fds
headerdata := make([]byte, hlen+4)
copy(headerdata, csheader[12:])
if _, err := io.ReadFull(t, headerdata[4:]); err != nil {
return nil, err
}
dec := newDecoder(bytes.NewBuffer(headerdata), order)
dec.pos = 12
vs, err := dec.Decode(Signature{"a(yv)"})
if err != nil {
return nil, err
}
Store(vs, &headers)
for _, v := range headers {
if v.Field == byte(FieldUnixFDs) {
unixfds, _ = v.Variant.value.(uint32)
}
}
all := make([]byte, 16+hlen+blen)
copy(all, csheader[:])
copy(all[16:], headerdata[4:])
if _, err := io.ReadFull(rd, all[16+hlen:]); err != nil {
return nil, err
}
if unixfds != 0 {
if !t.hasUnixFDs {
return nil, errors.New("dbus: got unix fds on unsupported transport")
}
// read the fds from the OOB data
scms, err := syscall.ParseSocketControlMessage(rd.oob)
if err != nil {
return nil, err
}
if len(scms) != 1 {
return nil, errors.New("dbus: received more than one socket control message")
}
fds, err := syscall.ParseUnixRights(&scms[0])
if err != nil {
return nil, err
}
msg, err := DecodeMessage(bytes.NewBuffer(all))
if err != nil {
return nil, err
}
// substitute the values in the message body (which are indices into the
// array of fds received via OOB) with the actual values
for i, v := range msg.Body {
if j, ok := v.(UnixFDIndex); ok {
if uint32(j) >= unixfds {
return nil, InvalidMessageError("invalid index for unix fd")
}
msg.Body[i] = UnixFD(fds[j])
}
}
return msg, nil
}
return DecodeMessage(bytes.NewBuffer(all))
}
func (t *unixTransport) SendMessage(msg *Message) error {
fds := make([]int, 0)
for i, v := range msg.Body {
if fd, ok := v.(UnixFD); ok {
msg.Body[i] = UnixFDIndex(len(fds))
fds = append(fds, int(fd))
}
}
if len(fds) != 0 {
if !t.hasUnixFDs {
return errors.New("dbus: unix fd passing not enabled")
}
msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds)))
oob := syscall.UnixRights(fds...)
buf := new(bytes.Buffer)
msg.EncodeTo(buf, binary.LittleEndian)
n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil)
if err != nil {
return err
}
if n != buf.Len() || oobn != len(oob) {
return io.ErrShortWrite
}
} else {
if err := msg.EncodeTo(t, binary.LittleEndian); err != nil {
return err
}
}
return nil
}
func (t *unixTransport) SupportsUnixFDs() bool {
return true
}

View File

@@ -1,95 +0,0 @@
// The UnixCredentials system call is currently only implemented on Linux
// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
// https://golang.org/s/go1.4-syscall
// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
// Local implementation of the UnixCredentials system call for DragonFly BSD
package dbus
/*
#include <sys/ucred.h>
*/
import "C"
import (
"io"
"os"
"syscall"
"unsafe"
)
// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go
// http://golang.org/src/pkg/syscall/ztypes_dragonfly_amd64.go
type Ucred struct {
Pid int32
Uid uint32
Gid uint32
}
// http://golang.org/src/pkg/syscall/types_linux.go
// http://golang.org/src/pkg/syscall/types_dragonfly.go
// https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/ucred.h
const (
SizeofUcred = C.sizeof_struct_ucred
)
// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
func cmsgAlignOf(salen int) int {
// From http://golang.org/src/pkg/syscall/sockcmsg_unix.go
//salign := sizeofPtr
// NOTE: It seems like 64-bit Darwin and DragonFly BSD kernels
// still require 32-bit aligned access to network subsystem.
//if darwin64Bit || dragonfly64Bit {
// salign = 4
//}
salign := 4
return (salen + salign - 1) & ^(salign - 1)
}
// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer {
return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr)))
}
// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
// UnixCredentials encodes credentials into a socket control message
// for sending to another process. This can be used for
// authentication.
func UnixCredentials(ucred *Ucred) []byte {
b := make([]byte, syscall.CmsgSpace(SizeofUcred))
h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
h.Level = syscall.SOL_SOCKET
h.Type = syscall.SCM_CREDS
h.SetLen(syscall.CmsgLen(SizeofUcred))
*((*Ucred)(cmsgData(h))) = *ucred
return b
}
// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
// ParseUnixCredentials decodes a socket control message that contains
// credentials in a Ucred structure. To receive such a message, the
// SO_PASSCRED option must be enabled on the socket.
func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) {
if m.Header.Level != syscall.SOL_SOCKET {
return nil, syscall.EINVAL
}
if m.Header.Type != syscall.SCM_CREDS {
return nil, syscall.EINVAL
}
ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0]))
return &ucred, nil
}
func (t *unixTransport) SendNullByte() error {
ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
b := UnixCredentials(ucred)
_, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
if err != nil {
return err
}
if oobn != len(b) {
return io.ErrShortWrite
}
return nil
}

View File

@@ -1,25 +0,0 @@
// The UnixCredentials system call is currently only implemented on Linux
// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
// https://golang.org/s/go1.4-syscall
// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
package dbus
import (
"io"
"os"
"syscall"
)
func (t *unixTransport) SendNullByte() error {
ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
b := syscall.UnixCredentials(ucred)
_, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
if err != nil {
return err
}
if oobn != len(b) {
return io.ErrShortWrite
}
return nil
}

View File

@@ -1,139 +0,0 @@
package dbus
import (
"bytes"
"fmt"
"reflect"
"sort"
"strconv"
)
// Variant represents the D-Bus variant type.
type Variant struct {
sig Signature
value interface{}
}
// MakeVariant converts the given value to a Variant. It panics if v cannot be
// represented as a D-Bus type.
func MakeVariant(v interface{}) Variant {
return Variant{SignatureOf(v), v}
}
// ParseVariant parses the given string as a variant as described at
// https://developer.gnome.org/glib/unstable/gvariant-text.html. If sig is not
// empty, it is taken to be the expected signature for the variant.
func ParseVariant(s string, sig Signature) (Variant, error) {
tokens := varLex(s)
p := &varParser{tokens: tokens}
n, err := varMakeNode(p)
if err != nil {
return Variant{}, err
}
if sig.str == "" {
sig, err = varInfer(n)
if err != nil {
return Variant{}, err
}
}
v, err := n.Value(sig)
if err != nil {
return Variant{}, err
}
return MakeVariant(v), nil
}
// format returns a formatted version of v and whether this string can be parsed
// unambiguously.
func (v Variant) format() (string, bool) {
switch v.sig.str[0] {
case 'b', 'i':
return fmt.Sprint(v.value), true
case 'n', 'q', 'u', 'x', 't', 'd', 'h':
return fmt.Sprint(v.value), false
case 's':
return strconv.Quote(v.value.(string)), true
case 'o':
return strconv.Quote(string(v.value.(ObjectPath))), false
case 'g':
return strconv.Quote(v.value.(Signature).str), false
case 'v':
s, unamb := v.value.(Variant).format()
if !unamb {
return "<@" + v.value.(Variant).sig.str + " " + s + ">", true
}
return "<" + s + ">", true
case 'y':
return fmt.Sprintf("%#x", v.value.(byte)), false
}
rv := reflect.ValueOf(v.value)
switch rv.Kind() {
case reflect.Slice:
if rv.Len() == 0 {
return "[]", false
}
unamb := true
buf := bytes.NewBuffer([]byte("["))
for i := 0; i < rv.Len(); i++ {
// TODO: slooow
s, b := MakeVariant(rv.Index(i).Interface()).format()
unamb = unamb && b
buf.WriteString(s)
if i != rv.Len()-1 {
buf.WriteString(", ")
}
}
buf.WriteByte(']')
return buf.String(), unamb
case reflect.Map:
if rv.Len() == 0 {
return "{}", false
}
unamb := true
var buf bytes.Buffer
kvs := make([]string, rv.Len())
for i, k := range rv.MapKeys() {
s, b := MakeVariant(k.Interface()).format()
unamb = unamb && b
buf.Reset()
buf.WriteString(s)
buf.WriteString(": ")
s, b = MakeVariant(rv.MapIndex(k).Interface()).format()
unamb = unamb && b
buf.WriteString(s)
kvs[i] = buf.String()
}
buf.Reset()
buf.WriteByte('{')
sort.Strings(kvs)
for i, kv := range kvs {
if i > 0 {
buf.WriteString(", ")
}
buf.WriteString(kv)
}
buf.WriteByte('}')
return buf.String(), unamb
}
return `"INVALID"`, true
}
// Signature returns the D-Bus signature of the underlying value of v.
func (v Variant) Signature() Signature {
return v.sig
}
// String returns the string representation of the underlying value of v as
// described at https://developer.gnome.org/glib/unstable/gvariant-text.html.
func (v Variant) String() string {
s, unamb := v.format()
if !unamb {
return "@" + v.sig.str + " " + s
}
return s
}
// Value returns the underlying value of v.
func (v Variant) Value() interface{} {
return v.value
}

View File

@@ -1,284 +0,0 @@
package dbus
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
// Heavily inspired by the lexer from text/template.
type varToken struct {
typ varTokenType
val string
}
type varTokenType byte
const (
tokEOF varTokenType = iota
tokError
tokNumber
tokString
tokBool
tokArrayStart
tokArrayEnd
tokDictStart
tokDictEnd
tokVariantStart
tokVariantEnd
tokComma
tokColon
tokType
tokByteString
)
type varLexer struct {
input string
start int
pos int
width int
tokens []varToken
}
type lexState func(*varLexer) lexState
func varLex(s string) []varToken {
l := &varLexer{input: s}
l.run()
return l.tokens
}
func (l *varLexer) accept(valid string) bool {
if strings.IndexRune(valid, l.next()) >= 0 {
return true
}
l.backup()
return false
}
func (l *varLexer) backup() {
l.pos -= l.width
}
func (l *varLexer) emit(t varTokenType) {
l.tokens = append(l.tokens, varToken{t, l.input[l.start:l.pos]})
l.start = l.pos
}
func (l *varLexer) errorf(format string, v ...interface{}) lexState {
l.tokens = append(l.tokens, varToken{
tokError,
fmt.Sprintf(format, v...),
})
return nil
}
func (l *varLexer) ignore() {
l.start = l.pos
}
func (l *varLexer) next() rune {
var r rune
if l.pos >= len(l.input) {
l.width = 0
return -1
}
r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
l.pos += l.width
return r
}
func (l *varLexer) run() {
for state := varLexNormal; state != nil; {
state = state(l)
}
}
func (l *varLexer) peek() rune {
r := l.next()
l.backup()
return r
}
func varLexNormal(l *varLexer) lexState {
for {
r := l.next()
switch {
case r == -1:
l.emit(tokEOF)
return nil
case r == '[':
l.emit(tokArrayStart)
case r == ']':
l.emit(tokArrayEnd)
case r == '{':
l.emit(tokDictStart)
case r == '}':
l.emit(tokDictEnd)
case r == '<':
l.emit(tokVariantStart)
case r == '>':
l.emit(tokVariantEnd)
case r == ':':
l.emit(tokColon)
case r == ',':
l.emit(tokComma)
case r == '\'' || r == '"':
l.backup()
return varLexString
case r == '@':
l.backup()
return varLexType
case unicode.IsSpace(r):
l.ignore()
case unicode.IsNumber(r) || r == '+' || r == '-':
l.backup()
return varLexNumber
case r == 'b':
pos := l.start
if n := l.peek(); n == '"' || n == '\'' {
return varLexByteString
}
// not a byte string; try to parse it as a type or bool below
l.pos = pos + 1
l.width = 1
fallthrough
default:
// either a bool or a type. Try bools first.
l.backup()
if l.pos+4 <= len(l.input) {
if l.input[l.pos:l.pos+4] == "true" {
l.pos += 4
l.emit(tokBool)
continue
}
}
if l.pos+5 <= len(l.input) {
if l.input[l.pos:l.pos+5] == "false" {
l.pos += 5
l.emit(tokBool)
continue
}
}
// must be a type.
return varLexType
}
}
}
var varTypeMap = map[string]string{
"boolean": "b",
"byte": "y",
"int16": "n",
"uint16": "q",
"int32": "i",
"uint32": "u",
"int64": "x",
"uint64": "t",
"double": "f",
"string": "s",
"objectpath": "o",
"signature": "g",
}
func varLexByteString(l *varLexer) lexState {
q := l.next()
Loop:
for {
switch l.next() {
case '\\':
if r := l.next(); r != -1 {
break
}
fallthrough
case -1:
return l.errorf("unterminated bytestring")
case q:
break Loop
}
}
l.emit(tokByteString)
return varLexNormal
}
func varLexNumber(l *varLexer) lexState {
l.accept("+-")
digits := "0123456789"
if l.accept("0") {
if l.accept("x") {
digits = "0123456789abcdefABCDEF"
} else {
digits = "01234567"
}
}
for strings.IndexRune(digits, l.next()) >= 0 {
}
l.backup()
if l.accept(".") {
for strings.IndexRune(digits, l.next()) >= 0 {
}
l.backup()
}
if l.accept("eE") {
l.accept("+-")
for strings.IndexRune("0123456789", l.next()) >= 0 {
}
l.backup()
}
if r := l.peek(); unicode.IsLetter(r) {
l.next()
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
}
l.emit(tokNumber)
return varLexNormal
}
func varLexString(l *varLexer) lexState {
q := l.next()
Loop:
for {
switch l.next() {
case '\\':
if r := l.next(); r != -1 {
break
}
fallthrough
case -1:
return l.errorf("unterminated string")
case q:
break Loop
}
}
l.emit(tokString)
return varLexNormal
}
func varLexType(l *varLexer) lexState {
at := l.accept("@")
for {
r := l.next()
if r == -1 {
break
}
if unicode.IsSpace(r) {
l.backup()
break
}
}
if at {
if _, err := ParseSignature(l.input[l.start+1 : l.pos]); err != nil {
return l.errorf("%s", err)
}
} else {
if _, ok := varTypeMap[l.input[l.start:l.pos]]; ok {
l.emit(tokType)
return varLexNormal
}
return l.errorf("unrecognized type %q", l.input[l.start:l.pos])
}
l.emit(tokType)
return varLexNormal
}

View File

@@ -1,817 +0,0 @@
package dbus
import (
"bytes"
"errors"
"fmt"
"io"
"reflect"
"strconv"
"strings"
"unicode/utf8"
)
type varParser struct {
tokens []varToken
i int
}
func (p *varParser) backup() {
p.i--
}
func (p *varParser) next() varToken {
if p.i < len(p.tokens) {
t := p.tokens[p.i]
p.i++
return t
}
return varToken{typ: tokEOF}
}
type varNode interface {
Infer() (Signature, error)
String() string
Sigs() sigSet
Value(Signature) (interface{}, error)
}
func varMakeNode(p *varParser) (varNode, error) {
var sig Signature
for {
t := p.next()
switch t.typ {
case tokEOF:
return nil, io.ErrUnexpectedEOF
case tokError:
return nil, errors.New(t.val)
case tokNumber:
return varMakeNumNode(t, sig)
case tokString:
return varMakeStringNode(t, sig)
case tokBool:
if sig.str != "" && sig.str != "b" {
return nil, varTypeError{t.val, sig}
}
b, err := strconv.ParseBool(t.val)
if err != nil {
return nil, err
}
return boolNode(b), nil
case tokArrayStart:
return varMakeArrayNode(p, sig)
case tokVariantStart:
return varMakeVariantNode(p, sig)
case tokDictStart:
return varMakeDictNode(p, sig)
case tokType:
if sig.str != "" {
return nil, errors.New("unexpected type annotation")
}
if t.val[0] == '@' {
sig.str = t.val[1:]
} else {
sig.str = varTypeMap[t.val]
}
case tokByteString:
if sig.str != "" && sig.str != "ay" {
return nil, varTypeError{t.val, sig}
}
b, err := varParseByteString(t.val)
if err != nil {
return nil, err
}
return byteStringNode(b), nil
default:
return nil, fmt.Errorf("unexpected %q", t.val)
}
}
}
type varTypeError struct {
val string
sig Signature
}
func (e varTypeError) Error() string {
return fmt.Sprintf("dbus: can't parse %q as type %q", e.val, e.sig.str)
}
type sigSet map[Signature]bool
func (s sigSet) Empty() bool {
return len(s) == 0
}
func (s sigSet) Intersect(s2 sigSet) sigSet {
r := make(sigSet)
for k := range s {
if s2[k] {
r[k] = true
}
}
return r
}
func (s sigSet) Single() (Signature, bool) {
if len(s) == 1 {
for k := range s {
return k, true
}
}
return Signature{}, false
}
func (s sigSet) ToArray() sigSet {
r := make(sigSet, len(s))
for k := range s {
r[Signature{"a" + k.str}] = true
}
return r
}
type numNode struct {
sig Signature
str string
val interface{}
}
var numSigSet = sigSet{
Signature{"y"}: true,
Signature{"n"}: true,
Signature{"q"}: true,
Signature{"i"}: true,
Signature{"u"}: true,
Signature{"x"}: true,
Signature{"t"}: true,
Signature{"d"}: true,
}
func (n numNode) Infer() (Signature, error) {
if strings.ContainsAny(n.str, ".e") {
return Signature{"d"}, nil
}
return Signature{"i"}, nil
}
func (n numNode) String() string {
return n.str
}
func (n numNode) Sigs() sigSet {
if n.sig.str != "" {
return sigSet{n.sig: true}
}
if strings.ContainsAny(n.str, ".e") {
return sigSet{Signature{"d"}: true}
}
return numSigSet
}
func (n numNode) Value(sig Signature) (interface{}, error) {
if n.sig.str != "" && n.sig != sig {
return nil, varTypeError{n.str, sig}
}
if n.val != nil {
return n.val, nil
}
return varNumAs(n.str, sig)
}
func varMakeNumNode(tok varToken, sig Signature) (varNode, error) {
if sig.str == "" {
return numNode{str: tok.val}, nil
}
num, err := varNumAs(tok.val, sig)
if err != nil {
return nil, err
}
return numNode{sig: sig, val: num}, nil
}
func varNumAs(s string, sig Signature) (interface{}, error) {
isUnsigned := false
size := 32
switch sig.str {
case "n":
size = 16
case "i":
case "x":
size = 64
case "y":
size = 8
isUnsigned = true
case "q":
size = 16
isUnsigned = true
case "u":
isUnsigned = true
case "t":
size = 64
isUnsigned = true
case "d":
d, err := strconv.ParseFloat(s, 64)
if err != nil {
return nil, err
}
return d, nil
default:
return nil, varTypeError{s, sig}
}
base := 10
if strings.HasPrefix(s, "0x") {
base = 16
s = s[2:]
}
if strings.HasPrefix(s, "0") && len(s) != 1 {
base = 8
s = s[1:]
}
if isUnsigned {
i, err := strconv.ParseUint(s, base, size)
if err != nil {
return nil, err
}
var v interface{} = i
switch sig.str {
case "y":
v = byte(i)
case "q":
v = uint16(i)
case "u":
v = uint32(i)
}
return v, nil
}
i, err := strconv.ParseInt(s, base, size)
if err != nil {
return nil, err
}
var v interface{} = i
switch sig.str {
case "n":
v = int16(i)
case "i":
v = int32(i)
}
return v, nil
}
type stringNode struct {
sig Signature
str string // parsed
val interface{} // has correct type
}
var stringSigSet = sigSet{
Signature{"s"}: true,
Signature{"g"}: true,
Signature{"o"}: true,
}
func (n stringNode) Infer() (Signature, error) {
return Signature{"s"}, nil
}
func (n stringNode) String() string {
return n.str
}
func (n stringNode) Sigs() sigSet {
if n.sig.str != "" {
return sigSet{n.sig: true}
}
return stringSigSet
}
func (n stringNode) Value(sig Signature) (interface{}, error) {
if n.sig.str != "" && n.sig != sig {
return nil, varTypeError{n.str, sig}
}
if n.val != nil {
return n.val, nil
}
switch {
case sig.str == "g":
return Signature{n.str}, nil
case sig.str == "o":
return ObjectPath(n.str), nil
case sig.str == "s":
return n.str, nil
default:
return nil, varTypeError{n.str, sig}
}
}
func varMakeStringNode(tok varToken, sig Signature) (varNode, error) {
if sig.str != "" && sig.str != "s" && sig.str != "g" && sig.str != "o" {
return nil, fmt.Errorf("invalid type %q for string", sig.str)
}
s, err := varParseString(tok.val)
if err != nil {
return nil, err
}
n := stringNode{str: s}
if sig.str == "" {
return stringNode{str: s}, nil
}
n.sig = sig
switch sig.str {
case "o":
n.val = ObjectPath(s)
case "g":
n.val = Signature{s}
case "s":
n.val = s
}
return n, nil
}
func varParseString(s string) (string, error) {
// quotes are guaranteed to be there
s = s[1 : len(s)-1]
buf := new(bytes.Buffer)
for len(s) != 0 {
r, size := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && size == 1 {
return "", errors.New("invalid UTF-8")
}
s = s[size:]
if r != '\\' {
buf.WriteRune(r)
continue
}
r, size = utf8.DecodeRuneInString(s)
if r == utf8.RuneError && size == 1 {
return "", errors.New("invalid UTF-8")
}
s = s[size:]
switch r {
case 'a':
buf.WriteRune(0x7)
case 'b':
buf.WriteRune(0x8)
case 'f':
buf.WriteRune(0xc)
case 'n':
buf.WriteRune('\n')
case 'r':
buf.WriteRune('\r')
case 't':
buf.WriteRune('\t')
case '\n':
case 'u':
if len(s) < 4 {
return "", errors.New("short unicode escape")
}
r, err := strconv.ParseUint(s[:4], 16, 32)
if err != nil {
return "", err
}
buf.WriteRune(rune(r))
s = s[4:]
case 'U':
if len(s) < 8 {
return "", errors.New("short unicode escape")
}
r, err := strconv.ParseUint(s[:8], 16, 32)
if err != nil {
return "", err
}
buf.WriteRune(rune(r))
s = s[8:]
default:
buf.WriteRune(r)
}
}
return buf.String(), nil
}
var boolSigSet = sigSet{Signature{"b"}: true}
type boolNode bool
func (boolNode) Infer() (Signature, error) {
return Signature{"b"}, nil
}
func (b boolNode) String() string {
if b {
return "true"
}
return "false"
}
func (boolNode) Sigs() sigSet {
return boolSigSet
}
func (b boolNode) Value(sig Signature) (interface{}, error) {
if sig.str != "b" {
return nil, varTypeError{b.String(), sig}
}
return bool(b), nil
}
type arrayNode struct {
set sigSet
children []varNode
val interface{}
}
func (n arrayNode) Infer() (Signature, error) {
for _, v := range n.children {
csig, err := varInfer(v)
if err != nil {
continue
}
return Signature{"a" + csig.str}, nil
}
return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
}
func (n arrayNode) String() string {
s := "["
for i, v := range n.children {
s += v.String()
if i != len(n.children)-1 {
s += ", "
}
}
return s + "]"
}
func (n arrayNode) Sigs() sigSet {
return n.set
}
func (n arrayNode) Value(sig Signature) (interface{}, error) {
if n.set.Empty() {
// no type information whatsoever, so this must be an empty slice
return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil
}
if !n.set[sig] {
return nil, varTypeError{n.String(), sig}
}
s := reflect.MakeSlice(typeFor(sig.str), len(n.children), len(n.children))
for i, v := range n.children {
rv, err := v.Value(Signature{sig.str[1:]})
if err != nil {
return nil, err
}
s.Index(i).Set(reflect.ValueOf(rv))
}
return s.Interface(), nil
}
func varMakeArrayNode(p *varParser, sig Signature) (varNode, error) {
var n arrayNode
if sig.str != "" {
n.set = sigSet{sig: true}
}
if t := p.next(); t.typ == tokArrayEnd {
return n, nil
} else {
p.backup()
}
Loop:
for {
t := p.next()
switch t.typ {
case tokEOF:
return nil, io.ErrUnexpectedEOF
case tokError:
return nil, errors.New(t.val)
}
p.backup()
cn, err := varMakeNode(p)
if err != nil {
return nil, err
}
if cset := cn.Sigs(); !cset.Empty() {
if n.set.Empty() {
n.set = cset.ToArray()
} else {
nset := cset.ToArray().Intersect(n.set)
if nset.Empty() {
return nil, fmt.Errorf("can't parse %q with given type information", cn.String())
}
n.set = nset
}
}
n.children = append(n.children, cn)
switch t := p.next(); t.typ {
case tokEOF:
return nil, io.ErrUnexpectedEOF
case tokError:
return nil, errors.New(t.val)
case tokArrayEnd:
break Loop
case tokComma:
continue
default:
return nil, fmt.Errorf("unexpected %q", t.val)
}
}
return n, nil
}
type variantNode struct {
n varNode
}
var variantSet = sigSet{
Signature{"v"}: true,
}
func (variantNode) Infer() (Signature, error) {
return Signature{"v"}, nil
}
func (n variantNode) String() string {
return "<" + n.n.String() + ">"
}
func (variantNode) Sigs() sigSet {
return variantSet
}
func (n variantNode) Value(sig Signature) (interface{}, error) {
if sig.str != "v" {
return nil, varTypeError{n.String(), sig}
}
sig, err := varInfer(n.n)
if err != nil {
return nil, err
}
v, err := n.n.Value(sig)
if err != nil {
return nil, err
}
return MakeVariant(v), nil
}
func varMakeVariantNode(p *varParser, sig Signature) (varNode, error) {
n, err := varMakeNode(p)
if err != nil {
return nil, err
}
if t := p.next(); t.typ != tokVariantEnd {
return nil, fmt.Errorf("unexpected %q", t.val)
}
vn := variantNode{n}
if sig.str != "" && sig.str != "v" {
return nil, varTypeError{vn.String(), sig}
}
return variantNode{n}, nil
}
type dictEntry struct {
key, val varNode
}
type dictNode struct {
kset, vset sigSet
children []dictEntry
val interface{}
}
func (n dictNode) Infer() (Signature, error) {
for _, v := range n.children {
ksig, err := varInfer(v.key)
if err != nil {
continue
}
vsig, err := varInfer(v.val)
if err != nil {
continue
}
return Signature{"a{" + ksig.str + vsig.str + "}"}, nil
}
return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
}
func (n dictNode) String() string {
s := "{"
for i, v := range n.children {
s += v.key.String() + ": " + v.val.String()
if i != len(n.children)-1 {
s += ", "
}
}
return s + "}"
}
func (n dictNode) Sigs() sigSet {
r := sigSet{}
for k := range n.kset {
for v := range n.vset {
sig := "a{" + k.str + v.str + "}"
r[Signature{sig}] = true
}
}
return r
}
func (n dictNode) Value(sig Signature) (interface{}, error) {
set := n.Sigs()
if set.Empty() {
// no type information -> empty dict
return reflect.MakeMap(typeFor(sig.str)).Interface(), nil
}
if !set[sig] {
return nil, varTypeError{n.String(), sig}
}
m := reflect.MakeMap(typeFor(sig.str))
ksig := Signature{sig.str[2:3]}
vsig := Signature{sig.str[3 : len(sig.str)-1]}
for _, v := range n.children {
kv, err := v.key.Value(ksig)
if err != nil {
return nil, err
}
vv, err := v.val.Value(vsig)
if err != nil {
return nil, err
}
m.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
}
return m.Interface(), nil
}
func varMakeDictNode(p *varParser, sig Signature) (varNode, error) {
var n dictNode
if sig.str != "" {
if len(sig.str) < 5 {
return nil, fmt.Errorf("invalid signature %q for dict type", sig)
}
ksig := Signature{string(sig.str[2])}
vsig := Signature{sig.str[3 : len(sig.str)-1]}
n.kset = sigSet{ksig: true}
n.vset = sigSet{vsig: true}
}
if t := p.next(); t.typ == tokDictEnd {
return n, nil
} else {
p.backup()
}
Loop:
for {
t := p.next()
switch t.typ {
case tokEOF:
return nil, io.ErrUnexpectedEOF
case tokError:
return nil, errors.New(t.val)
}
p.backup()
kn, err := varMakeNode(p)
if err != nil {
return nil, err
}
if kset := kn.Sigs(); !kset.Empty() {
if n.kset.Empty() {
n.kset = kset
} else {
n.kset = kset.Intersect(n.kset)
if n.kset.Empty() {
return nil, fmt.Errorf("can't parse %q with given type information", kn.String())
}
}
}
t = p.next()
switch t.typ {
case tokEOF:
return nil, io.ErrUnexpectedEOF
case tokError:
return nil, errors.New(t.val)
case tokColon:
default:
return nil, fmt.Errorf("unexpected %q", t.val)
}
t = p.next()
switch t.typ {
case tokEOF:
return nil, io.ErrUnexpectedEOF
case tokError:
return nil, errors.New(t.val)
}
p.backup()
vn, err := varMakeNode(p)
if err != nil {
return nil, err
}
if vset := vn.Sigs(); !vset.Empty() {
if n.vset.Empty() {
n.vset = vset
} else {
n.vset = n.vset.Intersect(vset)
if n.vset.Empty() {
return nil, fmt.Errorf("can't parse %q with given type information", vn.String())
}
}
}
n.children = append(n.children, dictEntry{kn, vn})
t = p.next()
switch t.typ {
case tokEOF:
return nil, io.ErrUnexpectedEOF
case tokError:
return nil, errors.New(t.val)
case tokDictEnd:
break Loop
case tokComma:
continue
default:
return nil, fmt.Errorf("unexpected %q", t.val)
}
}
return n, nil
}
type byteStringNode []byte
var byteStringSet = sigSet{
Signature{"ay"}: true,
}
func (byteStringNode) Infer() (Signature, error) {
return Signature{"ay"}, nil
}
func (b byteStringNode) String() string {
return string(b)
}
func (b byteStringNode) Sigs() sigSet {
return byteStringSet
}
func (b byteStringNode) Value(sig Signature) (interface{}, error) {
if sig.str != "ay" {
return nil, varTypeError{b.String(), sig}
}
return []byte(b), nil
}
func varParseByteString(s string) ([]byte, error) {
// quotes and b at start are guaranteed to be there
b := make([]byte, 0, 1)
s = s[2 : len(s)-1]
for len(s) != 0 {
c := s[0]
s = s[1:]
if c != '\\' {
b = append(b, c)
continue
}
c = s[0]
s = s[1:]
switch c {
case 'a':
b = append(b, 0x7)
case 'b':
b = append(b, 0x8)
case 'f':
b = append(b, 0xc)
case 'n':
b = append(b, '\n')
case 'r':
b = append(b, '\r')
case 't':
b = append(b, '\t')
case 'x':
if len(s) < 2 {
return nil, errors.New("short escape")
}
n, err := strconv.ParseUint(s[:2], 16, 8)
if err != nil {
return nil, err
}
b = append(b, byte(n))
s = s[2:]
case '0':
if len(s) < 3 {
return nil, errors.New("short escape")
}
n, err := strconv.ParseUint(s[:3], 8, 8)
if err != nil {
return nil, err
}
b = append(b, byte(n))
s = s[3:]
default:
b = append(b, c)
}
}
return append(b, 0), nil
}
func varInfer(n varNode) (Signature, error) {
if sig, ok := n.Sigs().Single(); ok {
return sig, nil
}
return n.Infer()
}

View File

@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,5 +0,0 @@
# fileutils
A collection of utilities for file manipulation in Go.
The library is based on Docker's pkg/archive but performs plain copies instead of handling archive formats.
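A minimal usage sketch of `CopyFile` and `CopyDirectory` (defined in the files that follow); the import path `github.com/mrunalp/fileutils` and the example paths are assumptions for illustration, not part of this repository:
```go
package main

import (
	"log"

	// Import path is an assumption; adjust to wherever the package is vendored.
	"github.com/mrunalp/fileutils"
)

func main() {
	// Copy a single file, preserving mode and ownership where possible.
	if err := fileutils.CopyFile("/etc/hosts", "/tmp/hosts.copy"); err != nil {
		log.Fatal(err)
	}

	// Recursively copy a directory tree; the destination is created if needed.
	if err := fileutils.CopyDirectory("/etc/default", "/tmp/default.copy"); err != nil {
		log.Fatal(err)
	}
}
```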

View File

@@ -1,161 +0,0 @@
package fileutils
import (
"fmt"
"io"
"os"
"path/filepath"
"syscall"
)
// CopyFile copies the file at source to dest
func CopyFile(source string, dest string) error {
si, err := os.Lstat(source)
if err != nil {
return err
}
st, ok := si.Sys().(*syscall.Stat_t)
if !ok {
return fmt.Errorf("could not convert to syscall.Stat_t")
}
uid := int(st.Uid)
gid := int(st.Gid)
// Handle symlinks
if si.Mode()&os.ModeSymlink != 0 {
target, err := os.Readlink(source)
if err != nil {
return err
}
if err := os.Symlink(target, dest); err != nil {
return err
}
}
// Handle device files
if st.Mode&syscall.S_IFMT == syscall.S_IFBLK || st.Mode&syscall.S_IFMT == syscall.S_IFCHR {
devMajor := int64(major(uint64(st.Rdev)))
devMinor := int64(minor(uint64(st.Rdev)))
mode := uint32(si.Mode() & 07777)
if st.Mode&syscall.S_IFMT == syscall.S_IFBLK {
mode |= syscall.S_IFBLK
}
if st.Mode&syscall.S_IFMT == syscall.S_IFCHR {
mode |= syscall.S_IFCHR
}
if err := syscall.Mknod(dest, mode, int(mkdev(devMajor, devMinor))); err != nil {
return err
}
}
// Handle regular files
if si.Mode().IsRegular() {
sf, err := os.Open(source)
if err != nil {
return err
}
defer sf.Close()
df, err := os.Create(dest)
if err != nil {
return err
}
defer df.Close()
_, err = io.Copy(df, sf)
if err != nil {
return err
}
}
// Chown the file
if err := os.Lchown(dest, uid, gid); err != nil {
return err
}
// Chmod the file
if !(si.Mode()&os.ModeSymlink == os.ModeSymlink) {
if err := os.Chmod(dest, si.Mode()); err != nil {
return err
}
}
return nil
}
// CopyDirectory copies the files under the source directory
// to dest directory. The dest directory is created if it
// does not exist.
func CopyDirectory(source string, dest string) error {
fi, err := os.Stat(source)
if err != nil {
return err
}
// Get owner.
st, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return fmt.Errorf("could not convert to syscall.Stat_t")
}
// We have to pick an owner here anyway.
if err := MkdirAllNewAs(dest, fi.Mode(), int(st.Uid), int(st.Gid)); err != nil {
return err
}
return filepath.Walk(source, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// Get the relative path
relPath, err := filepath.Rel(source, path)
if err != nil {
return err
}
if info.IsDir() {
// Skip the source directory.
if path != source {
// Get the owner.
st, ok := info.Sys().(*syscall.Stat_t)
if !ok {
return fmt.Errorf("could not convert to syscall.Stat_t")
}
uid := int(st.Uid)
gid := int(st.Gid)
if err := os.Mkdir(filepath.Join(dest, relPath), info.Mode()); err != nil {
return err
}
if err := os.Lchown(filepath.Join(dest, relPath), uid, gid); err != nil {
return err
}
}
return nil
}
// Copy the file.
if err := CopyFile(path, filepath.Join(dest, relPath)); err != nil {
return err
}
return nil
})
}
func major(device uint64) uint64 {
return (device >> 8) & 0xfff
}
func minor(device uint64) uint64 {
return (device & 0xff) | ((device >> 12) & 0xfff00)
}
func mkdev(major int64, minor int64) uint32 {
return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}

View File

@@ -1,49 +0,0 @@
package fileutils
import (
"os"
"path/filepath"
)
// MkdirAllNewAs creates a directory (including any missing directories along the
// path) and then changes ownership ONLY of the newly created directories to the
// requested uid/gid. If the directories along the path already exist, no change
// of ownership is performed.
func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
// Collect the requested path plus all path components leading up to it that do
// not yet exist, so that we can chown every newly created directory at the end.
// Directories that already exist are left untouched.
var paths []string
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
paths = []string{path}
} else if err == nil {
// nothing to do; directory path fully exists already
return nil
}
// walk back to "/" looking for directories which do not exist
// and add them to the paths array for chown after creation
dirPath := path
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
paths = append(paths, dirPath)
}
}
if err := os.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
return err
}
// even if it existed, we will chown the requested path + any subpaths that
// didn't exist when we called MkdirAll
for _, pathComponent := range paths {
if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
return err
}
}
return nil
}

View File

@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,17 +0,0 @@
runc
Copyright 2012-2015 Docker, Inc.
This product includes software developed at Docker, Inc. (http://www.docker.com).
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see http://www.bis.doc.gov
See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.

View File

@@ -1,330 +0,0 @@
# libcontainer
[![GoDoc](https://godoc.org/github.com/opencontainers/runc/libcontainer?status.svg)](https://godoc.org/github.com/opencontainers/runc/libcontainer)
Libcontainer provides a native Go implementation for creating containers
with namespaces, cgroups, capabilities, and filesystem access controls.
It allows you to manage the lifecycle of the container, performing additional operations
after the container is created.
#### Container
A container is a self-contained execution environment that shares the kernel of the
host system and is (optionally) isolated from other containers in the system.
#### Using libcontainer
Because containers are spawned in a two-step process, you will need a binary that
will be executed as the init process for the container. In libcontainer, the current
binary (/proc/self/exe) is re-executed as the init process with the argument "init".
We call this first step "bootstrap", so you always need an "init" function as the
entry point of the bootstrap stage.
In addition to the Go init function, the early-stage bootstrap is handled by importing
[nsenter](https://github.com/opencontainers/runc/blob/master/libcontainer/nsenter/README.md).
```go
import (
_ "github.com/opencontainers/runc/libcontainer/nsenter"
)
func init() {
if len(os.Args) > 1 && os.Args[1] == "init" {
runtime.GOMAXPROCS(1)
runtime.LockOSThread()
factory, _ := libcontainer.New("")
if err := factory.StartInitialization(); err != nil {
logrus.Fatal(err)
}
panic("--this line should have never been executed, congratulations--")
}
}
```
Then, to create a container, you first have to initialize an instance of a factory
that will handle the creation and initialization of the container.
```go
factory, err := libcontainer.New("/var/lib/container", libcontainer.Cgroupfs, libcontainer.InitArgs(os.Args[0], "init"))
if err != nil {
logrus.Fatal(err)
return
}
```
Once you have created an instance of the factory, you can build a configuration
struct describing how the container is to be created. A sample would look similar to this:
```go
defaultMountFlags := unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV
config := &configs.Config{
Rootfs: "/your/path/to/rootfs",
Capabilities: &configs.Capabilities{
Bounding: []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE",
},
Effective: []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE",
},
Inheritable: []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE",
},
Permitted: []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE",
},
Ambient: []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE",
},
},
Namespaces: configs.Namespaces([]configs.Namespace{
{Type: configs.NEWNS},
{Type: configs.NEWUTS},
{Type: configs.NEWIPC},
{Type: configs.NEWPID},
{Type: configs.NEWUSER},
{Type: configs.NEWNET},
{Type: configs.NEWCGROUP},
}),
Cgroups: &configs.Cgroup{
Name: "test-container",
Parent: "system",
Resources: &configs.Resources{
MemorySwappiness: nil,
AllowAllDevices: nil,
AllowedDevices: configs.DefaultAllowedDevices,
},
},
MaskPaths: []string{
"/proc/kcore",
"/sys/firmware",
},
ReadonlyPaths: []string{
"/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
},
Devices: configs.DefaultAutoCreatedDevices,
Hostname: "testing",
Mounts: []*configs.Mount{
{
Source: "proc",
Destination: "/proc",
Device: "proc",
Flags: defaultMountFlags,
},
{
Source: "tmpfs",
Destination: "/dev",
Device: "tmpfs",
Flags: unix.MS_NOSUID | unix.MS_STRICTATIME,
Data: "mode=755",
},
{
Source: "devpts",
Destination: "/dev/pts",
Device: "devpts",
Flags: unix.MS_NOSUID | unix.MS_NOEXEC,
Data: "newinstance,ptmxmode=0666,mode=0620,gid=5",
},
{
Device: "tmpfs",
Source: "shm",
Destination: "/dev/shm",
Data: "mode=1777,size=65536k",
Flags: defaultMountFlags,
},
{
Source: "mqueue",
Destination: "/dev/mqueue",
Device: "mqueue",
Flags: defaultMountFlags,
},
{
Source: "sysfs",
Destination: "/sys",
Device: "sysfs",
Flags: defaultMountFlags | unix.MS_RDONLY,
},
},
UidMappings: []configs.IDMap{
{
ContainerID: 0,
HostID: 1000,
Size: 65536,
},
},
GidMappings: []configs.IDMap{
{
ContainerID: 0,
HostID: 1000,
Size: 65536,
},
},
Networks: []*configs.Network{
{
Type: "loopback",
Address: "127.0.0.1/0",
Gateway: "localhost",
},
},
Rlimits: []configs.Rlimit{
{
Type: unix.RLIMIT_NOFILE,
Hard: uint64(1025),
Soft: uint64(1025),
},
},
}
```
Once you have the configuration populated you can create a container:
```go
container, err := factory.Create("container-id", config)
if err != nil {
logrus.Fatal(err)
return
}
```
To spawn bash as the initial process inside the container and have the
process's pid returned in order to wait on, signal, or kill the process:
```go
process := &libcontainer.Process{
Args: []string{"/bin/bash"},
Env: []string{"PATH=/bin"},
User: "daemon",
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
}
err := container.Run(process)
if err != nil {
container.Destroy()
logrus.Fatal(err)
return
}
// wait for the process to finish.
_, err = process.Wait()
if err != nil {
logrus.Fatal(err)
}
// destroy the container.
container.Destroy()
```
Additional ways to interact with a running container are:
```go
// return all the pids for all processes running inside the container.
processes, err := container.Processes()
// get detailed cpu, memory, io, and network statistics for the container and
// its processes.
stats, err := container.Stats()
// pause all processes inside the container.
container.Pause()
// resume all paused processes.
container.Resume()
// send signal to container's init process.
container.Signal(signal)
// update container resource constraints.
container.Set(config)
// get current status of the container.
status, err := container.Status()
// get current container's state information.
state, err := container.State()
```
#### Checkpoint & Restore
libcontainer now integrates [CRIU](http://criu.org/) for checkpointing and restoring containers.
This lets you save the state of a process running inside a container to disk, and then restore
that state into a new process, on the same machine or on another machine.
`criu` version 1.5.2 or higher is required to use checkpoint and restore.
If you don't already have `criu` installed, you can build it from source, following the
[online instructions](http://criu.org/Installation). `criu` is also installed in the docker image
generated when building libcontainer with docker.
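As a rough sketch (reusing `container` and `process` from the examples above, and assuming the `CriuOpts` fields shown here are available in the vendored version), checkpointing and restoring looks like this:
```go
// Dump the container's state into a directory of CRIU image files.
imagesDir := "/tmp/criu-images/container-id" // hypothetical path, must exist
opts := &libcontainer.CriuOpts{
	ImagesDirectory: imagesDir,
	LeaveRunning:    false, // stop the container after the dump
}
if err := container.Checkpoint(opts); err != nil {
	logrus.Fatal(err)
}
// Later, possibly on another machine with the same rootfs and image files,
// restore the saved state into a new process.
if err := container.Restore(process, opts); err != nil {
	logrus.Fatal(err)
}
```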
## Copyright and license
Code and documentation copyright 2014 Docker, inc.
The code and documentation are released under the [Apache 2.0 license](../LICENSE).
The documentation is also released under Creative Commons Attribution 4.0 International License.
You may obtain a copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.

@ -1,465 +0,0 @@
## Container Specification - v1
This is the standard configuration for version 1 containers. It includes
namespaces, standard filesystem setup, a default Linux capability set, and
information about resource reservations. It also has information about any
populated environment settings for the processes running inside a container.
Along with the configuration of how a container is created the standard also
discusses actions that can be performed on a container to manage and inspect
information about the processes running inside.
The v1 profile is meant to be able to accommodate the majority of applications
with a strong security configuration.
### System Requirements and Compatibility
Minimum requirements:
* Kernel version - 3.10 recommended; 2.6.2x minimum (with backported patches)
* Mounted cgroups with each subsystem in its own hierarchy
### Namespaces
| Flag | Enabled |
| --------------- | ------- |
| CLONE_NEWPID | 1 |
| CLONE_NEWUTS | 1 |
| CLONE_NEWIPC | 1 |
| CLONE_NEWNET | 1 |
| CLONE_NEWNS | 1 |
| CLONE_NEWUSER | 1 |
| CLONE_NEWCGROUP | 1 |
Namespaces are created for the container via the `unshare` syscall.
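For illustration only (this is a sketch of the raw syscall usage via `golang.org/x/sys/unix`, not libcontainer's internal code path), requesting the same set of namespaces for the calling process looks like this:
```go
// Move the calling process into new namespaces. A new PID namespace only
// takes effect for children forked afterwards, and unprivileged callers
// generally need CLONE_NEWUSER as well.
flags := unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC |
	unix.CLONE_NEWPID | unix.CLONE_NEWNET | unix.CLONE_NEWCGROUP
if err := unix.Unshare(flags); err != nil {
	logrus.Fatal(err)
}
```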
### Filesystem
A root filesystem must be provided to a container for execution. The container
will use this root filesystem (rootfs) to jail and spawn processes inside where
the binaries and system libraries are local to that directory. Any binaries
to be executed must be contained within this rootfs.
Mounts that happen inside the container are automatically cleaned up when the
container exits, as the mount namespace is destroyed and the kernel will
unmount all the mounts that were set up within that namespace.
For a container to execute properly, there are certain filesystems that
are required to be mounted within the rootfs and that the runtime will set up.
| Path | Type | Flags | Data |
| ----------- | ------ | -------------------------------------- | ---------------------------------------- |
| /proc | proc | MS_NOEXEC,MS_NOSUID,MS_NODEV | |
| /dev | tmpfs | MS_NOEXEC,MS_STRICTATIME | mode=755 |
| /dev/shm | tmpfs | MS_NOEXEC,MS_NOSUID,MS_NODEV | mode=1777,size=65536k |
| /dev/mqueue | mqueue | MS_NOEXEC,MS_NOSUID,MS_NODEV | |
| /dev/pts | devpts | MS_NOEXEC,MS_NOSUID | newinstance,ptmxmode=0666,mode=620,gid=5 |
| /sys | sysfs | MS_NOEXEC,MS_NOSUID,MS_NODEV,MS_RDONLY | |
After a container's filesystems are mounted within the newly created
mount namespace `/dev` will need to be populated with a set of device nodes.
It is expected that a rootfs does not need to have any device nodes specified
for `/dev` within the rootfs, as the container will set up the correct devices
that are required for executing a container's process.
| Path | Mode | Access |
| ------------ | ---- | ---------- |
| /dev/null | 0666 | rwm |
| /dev/zero | 0666 | rwm |
| /dev/full | 0666 | rwm |
| /dev/tty | 0666 | rwm |
| /dev/random | 0666 | rwm |
| /dev/urandom | 0666 | rwm |
**ptmx**
`/dev/ptmx` will need to be a symlink to the host's `/dev/ptmx` within
the container.
The use of a pseudo TTY is optional within a container and the runtime should support both cases.
If a pseudo TTY is provided to the container, `/dev/console` will need to be
set up by binding the console into `/dev/` after it has been populated and mounted
in tmpfs.
| Source | Destination | UID GID | Mode | Type |
| --------------- | ------------ | ------- | ---- | ---- |
| *pty host path* | /dev/console | 0 0 | 0600 | bind |
After `/dev/null` has been set up, we check for any external links between
the container's io (STDIN, STDOUT, STDERR). If the container's io is pointing
to `/dev/null` outside the container, we close it and `dup2` the `/dev/null`
that is local to the container's rootfs.
After the container has `/proc` mounted, a few standard symlinks are set up
within `/dev/` for the io.
| Source | Destination |
| --------------- | ----------- |
| /proc/self/fd | /dev/fd |
| /proc/self/fd/0 | /dev/stdin |
| /proc/self/fd/1 | /dev/stdout |
| /proc/self/fd/2 | /dev/stderr |
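A minimal sketch of creating the symlinks from the table above (assumed to run inside the container's mount namespace, after `/proc` has been mounted):
```go
// Standard io symlinks under /dev, pointing at the proc file descriptors.
links := map[string]string{
	"/proc/self/fd":   "/dev/fd",
	"/proc/self/fd/0": "/dev/stdin",
	"/proc/self/fd/1": "/dev/stdout",
	"/proc/self/fd/2": "/dev/stderr",
}
for target, link := range links {
	if err := os.Symlink(target, link); err != nil && !os.IsExist(err) {
		logrus.Fatal(err)
	}
}
```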
A `pivot_root` is used to change the root for the process, effectively
jailing the process inside the rootfs.
```c
put_old = mkdir(...);
pivot_root(rootfs, put_old);
chdir("/");
umount2(put_old, MNT_DETACH);
rmdir(put_old);
```
For containers running with a rootfs inside `ramfs`, a `MS_MOVE` combined
with a `chroot` is required as `pivot_root` is not supported in `ramfs`.
```c
mount(rootfs, "/", NULL, MS_MOVE, NULL);
chroot(".");
chdir("/");
```
The `umask` is set back to `0022` after the filesystem setup has been completed.
### Resources
Cgroups are used to handle resource allocation for containers. This includes
system resources like cpu, memory, and device access.
| Subsystem | Enabled |
| ---------- | ------- |
| devices | 1 |
| memory | 1 |
| cpu | 1 |
| cpuacct | 1 |
| cpuset | 1 |
| blkio | 1 |
| perf_event | 1 |
| freezer | 1 |
| hugetlb | 1 |
| pids | 1 |
All cgroup subsystems are joined so that statistics can be collected from
each of the subsystems. Freezer does not expose any stats but is joined
so that containers can be paused and resumed.
The parent process of the container's init must place the init pid inside
the correct cgroups before the initialization begins. This is done so
that no processes or threads escape the cgroups. This synchronization is
done via a pipe ( specified in the runtime section below ) on which the container's
init process blocks until the parent finishes its setup.
### IntelRdt
Intel platforms with newer Xeon CPUs support Resource Director Technology (RDT).
Cache Allocation Technology (CAT) and Memory Bandwidth Allocation (MBA) are
two sub-features of RDT.
Cache Allocation Technology (CAT) provides a way for the software to restrict
cache allocation to a defined 'subset' of L3 cache which may be overlapping
with other 'subsets'. The different subsets are identified by class of
service (CLOS) and each CLOS has a capacity bitmask (CBM).
Memory Bandwidth Allocation (MBA) provides indirect and approximate throttle
over memory bandwidth for the software. A user controls the resource by
indicating the percentage of maximum memory bandwidth or memory bandwidth limit
in MBps unit if MBA Software Controller is enabled.
It can be used to handle L3 cache and memory bandwidth resources allocation
for containers if hardware and kernel support Intel RDT CAT and MBA features.
In Linux 4.10 kernel or newer, the interface is defined and exposed via
"resource control" filesystem, which is a "cgroup-like" interface.
Compared with cgroups, it has a similar process management lifecycle and
similar interfaces within a container. But unlike the cgroups hierarchy, it has a
single-level filesystem layout.
CAT and MBA features are introduced in Linux 4.10 and 4.12 kernel via
"resource control" filesystem.
Intel RDT "resource control" filesystem hierarchy:
```
mount -t resctrl resctrl /sys/fs/resctrl
tree /sys/fs/resctrl
/sys/fs/resctrl/
|-- info
| |-- L3
| | |-- cbm_mask
| | |-- min_cbm_bits
| | |-- num_closids
| |-- MB
| |-- bandwidth_gran
| |-- delay_linear
| |-- min_bandwidth
| |-- num_closids
|-- ...
|-- schemata
|-- tasks
|-- <container_id>
|-- ...
|-- schemata
|-- tasks
```
For runc, we can make use of `tasks` and `schemata` configuration for L3
cache and memory bandwidth resources constraints.
The file `tasks` has a list of tasks that belong to this group (e.g., the
"<container_id>" group). Tasks can be added to a group by writing the task ID
to the "tasks" file (which will automatically remove them from the previous
group to which they belonged). New tasks created by fork(2) and clone(2) are
added to the same group as their parent.
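As an illustrative sketch (not runc's actual implementation), moving a task into a group amounts to writing its pid to that group's `tasks` file; the group name here is hypothetical and the usual imports (`path/filepath`, `io/ioutil`, `strconv`) are assumed:
```go
// Add pid (the task to move) to the "container_id" resctrl group.
tasksFile := filepath.Join("/sys/fs/resctrl", "container_id", "tasks")
if err := ioutil.WriteFile(tasksFile, []byte(strconv.Itoa(pid)), 0700); err != nil {
	logrus.Fatal(err)
}
```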
The file `schemata` has a list of all the resources available to this group.
Each resource (L3 cache, memory bandwidth) has its own line and format.
L3 cache schema:
It has allocation bitmasks/values for L3 cache on each socket, which
contains L3 cache id and capacity bitmask (CBM).
```
Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
```
For example, on a two-socket machine, the schema line could be "L3:0=ff;1=c0"
which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0.
A valid L3 cache CBM is a *contiguous* set of bits, and the number of bits that
can be set is at most the maximum CBM length. The maximum CBM length varies among
supported Intel CPU models; the kernel checks validity when writing. For example,
the default value 0xfffff in the root group indicates that the max CBM length is 20
bits, mapping to the entire L3 cache capacity. Some valid CBM values to set in a
group: 0xf, 0xf0, 0x3ff, 0x1f00, etc.
Memory bandwidth schema:
It has allocation values for memory bandwidth on each socket, which contains
L3 cache id and memory bandwidth.
```
Format: "MB:<cache_id0>=bandwidth0;<cache_id1>=bandwidth1;..."
```
For example, on a two-socket machine, the schema line could be "MB:0=20;1=70".
The minimum bandwidth percentage value for each CPU model is predefined and
can be looked up through "info/MB/min_bandwidth". The bandwidth granularity
that is allocated is also dependent on the CPU model and can be looked up at
"info/MB/bandwidth_gran". The available bandwidth control steps are:
min_bw + N * bw_gran. Intermediate values are rounded to the next control
step available on the hardware.
If the MBA Software Controller is enabled through the mount option "-o mba_MBps":
mount -t resctrl resctrl -o mba_MBps /sys/fs/resctrl
we can specify memory bandwidth in "MBps" (Mega Bytes per second) units
instead of percentages. The kernel underneath uses a software feedback
mechanism, or "Software Controller", which reads the actual bandwidth using
MBM counters and adjusts the memory bandwidth percentages to ensure:
"actual memory bandwidth < user specified memory bandwidth".
For example, on a two-socket machine, the schema line could be
"MB:0=5000;1=7000" which means 5000 MBps memory bandwidth limit on socket 0
and 7000 MBps memory bandwidth limit on socket 1.
For more information about Intel RDT kernel interface:
https://www.kernel.org/doc/Documentation/x86/intel_rdt_ui.txt
```
An example for runc:
Consider a two-socket machine with two L3 caches where the default CBM is
0x7ff and the max CBM length is 11 bits, and minimum memory bandwidth of 10%
with a memory bandwidth granularity of 10%.
Tasks inside the container only have access to the "upper" 7/11 of L3 cache
on socket 0 and the "lower" 5/11 L3 cache on socket 1, and may use a
maximum memory bandwidth of 20% on socket 0 and 70% on socket 1.
"linux": {
"intelRdt": {
"closID": "guaranteed_group",
"l3CacheSchema": "L3:0=7f0;1=1f",
"memBwSchema": "MB:0=20;1=70"
}
}
```
### Security
The standard set of Linux capabilities that are set in a container
provides a good default for security and flexibility for the applications.
| Capability | Enabled |
| -------------------- | ------- |
| CAP_NET_RAW | 1 |
| CAP_NET_BIND_SERVICE | 1 |
| CAP_AUDIT_READ | 1 |
| CAP_AUDIT_WRITE | 1 |
| CAP_DAC_OVERRIDE | 1 |
| CAP_SETFCAP | 1 |
| CAP_SETPCAP | 1 |
| CAP_SETGID | 1 |
| CAP_SETUID | 1 |
| CAP_MKNOD | 1 |
| CAP_CHOWN | 1 |
| CAP_FOWNER | 1 |
| CAP_FSETID | 1 |
| CAP_KILL | 1 |
| CAP_SYS_CHROOT | 1 |
| CAP_NET_BROADCAST | 0 |
| CAP_SYS_MODULE | 0 |
| CAP_SYS_RAWIO | 0 |
| CAP_SYS_PACCT | 0 |
| CAP_SYS_ADMIN | 0 |
| CAP_SYS_NICE | 0 |
| CAP_SYS_RESOURCE | 0 |
| CAP_SYS_TIME | 0 |
| CAP_SYS_TTY_CONFIG | 0 |
| CAP_AUDIT_CONTROL | 0 |
| CAP_MAC_OVERRIDE | 0 |
| CAP_MAC_ADMIN | 0 |
| CAP_NET_ADMIN | 0 |
| CAP_SYSLOG | 0 |
| CAP_DAC_READ_SEARCH | 0 |
| CAP_LINUX_IMMUTABLE | 0 |
| CAP_IPC_LOCK | 0 |
| CAP_IPC_OWNER | 0 |
| CAP_SYS_PTRACE | 0 |
| CAP_SYS_BOOT | 0 |
| CAP_LEASE | 0 |
| CAP_WAKE_ALARM | 0 |
| CAP_BLOCK_SUSPEND | 0 |
Additional security layers like [apparmor](https://wiki.ubuntu.com/AppArmor)
and [selinux](http://selinuxproject.org/page/Main_Page) can be used with
the containers. A container should support setting an apparmor profile or
selinux process and mount labels if provided in the configuration.
Standard apparmor profile:
```c
#include <tunables/global>
profile <profile_name> flags=(attach_disconnected,mediate_deleted) {
#include <abstractions/base>
network,
capability,
file,
umount,
deny @{PROC}/sys/fs/** wklx,
deny @{PROC}/sysrq-trigger rwklx,
deny @{PROC}/mem rwklx,
deny @{PROC}/kmem rwklx,
deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
deny @{PROC}/sys/kernel/*/** wklx,
deny mount,
deny /sys/[^f]*/** wklx,
deny /sys/f[^s]*/** wklx,
deny /sys/fs/[^c]*/** wklx,
deny /sys/fs/c[^g]*/** wklx,
deny /sys/fs/cg[^r]*/** wklx,
deny /sys/firmware/efi/efivars/** rwklx,
deny /sys/kernel/security/** rwklx,
}
```
*TODO: seccomp work is being done to find a good default config*
### Runtime and Init Process
During container creation the parent process needs to talk to the container's init
process and have a form of synchronization. This is accomplished by creating
a pipe that is passed to the container's init. When the init process first spawns
it will block on its side of the pipe until the parent closes its side. This
allows the parent to have time to set the new process inside a cgroup hierarchy
and/or write any uid/gid mappings required for user namespaces.
The pipe is passed to the init process via FD 3.
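A simplified sketch of this handshake using `os/exec` (libcontainer's real bootstrap is more involved, and the `"init"` re-exec argument is hypothetical): the first entry in `ExtraFiles` becomes FD 3 in the child, and the child's read unblocks once the parent closes its end of the pipe.
```go
// Parent side: create the pipe and pass the read end to the child as FD 3.
r, w, err := os.Pipe()
if err != nil {
	logrus.Fatal(err)
}
cmd := exec.Command("/proc/self/exe", "init")
cmd.ExtraFiles = []*os.File{r} // becomes FD 3 in the child
if err := cmd.Start(); err != nil {
	logrus.Fatal(err)
}
// ... place cmd.Process.Pid into the cgroups, write uid/gid mappings ...
w.Close() // lets the child's read on FD 3 return so init can continue

// Child side: block until the parent has finished its setup.
syncPipe := os.NewFile(3, "sync")
buf := make([]byte, 1)
syncPipe.Read(buf) // returns once the parent closes its end
```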
The application consuming libcontainer should be compiled statically. libcontainer
does not define any init process and the arguments provided are used to `exec` the
process inside the application. There should be no long running init within the
container spec.
If a pseudo tty is provided to a container it will open and `dup2` the console
as the container's STDIN, STDOUT, STDERR as well as mounting the console
as `/dev/console`.
An extra set of mounts is provided to a container and set up for use. A container's
rootfs can contain some non-portable files that can cause side effects during the
execution of a process. These files are usually created and populated with the
container-specific information by the runtime.
**Extra runtime files:**
* /etc/hosts
* /etc/resolv.conf
* /etc/hostname
* /etc/localtime
#### Defaults
There are a few defaults that can be overridden by users; when omitted,
these apply to processes within a container.
| Type | Value |
| ------------------- | ------------------------------ |
| Parent Death Signal | SIGKILL |
| UID | 0 |
| GID | 0 |
| GROUPS | 0, NULL |
| CWD | "/" |
| $HOME | Current user's home dir or "/" |
| Readonly rootfs | false |
| Pseudo TTY | false |
## Actions
After a container is created there is a standard set of actions that can
be done to the container. These actions are part of the public API for
a container.
| Action | Description |
| -------------- | ------------------------------------------------------------------ |
| Get processes | Return all the pids for processes running inside a container |
| Get Stats | Return resource statistics for the container as a whole |
| Wait | Waits on the container's init process ( pid 1 ) |
| Wait Process | Wait on any of the container's processes returning the exit status |
| Destroy | Kill the container's init process and remove any filesystem state |
| Signal | Send a signal to the container's init process |
| Signal Process | Send a signal to any of the container's processes |
| Pause | Pause all processes inside the container |
| Resume | Resume all processes inside the container if paused |
| Exec | Execute a new process inside of the container ( requires setns ) |
| Set | Setup configs of the container after it's created |
### Execute a new process inside of a running container
A user can execute a new process inside of a running container. Any binaries to be
executed must be accessible within the container's rootfs.
The started process will run inside the container's rootfs. Any changes
made by the process to the container's filesystem will persist after the
process has finished executing.
The started process will join all the container's existing namespaces. When the
container is paused, the process will also be paused and will resume when
the container is unpaused. The started process will only run when the container's
primary process (PID 1) is running, and will not be restarted when the container
is restarted.
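Using the libcontainer API (as in the README examples above), starting an additional process in an already running container looks roughly like the sketch below; depending on the libcontainer version, the process struct may also need a flag distinguishing it from the init process:
```go
// Run `ps aux` inside the existing container; /bin/ps must exist in the rootfs.
ps := &libcontainer.Process{
	Args:   []string{"/bin/ps", "aux"},
	Env:    []string{"PATH=/bin:/usr/bin"},
	Stdout: os.Stdout,
	Stderr: os.Stderr,
}
if err := container.Run(ps); err != nil {
	logrus.Fatal(err)
}
if _, err := ps.Wait(); err != nil {
	logrus.Fatal(err)
}
```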
#### Planned additions
The started process will have its own cgroups nested inside the container's
cgroups. This is used for process tracking and optionally resource allocation
handling for the new process. The freezer cgroup is required; the rest of the cgroups
are optional. The process executor must place its pid inside the correct
cgroups before starting the process. This is done so that no child processes or
threads can escape the cgroups.
When the process is stopped, the process executor will try (in a best-effort way)
to stop all its children and remove the sub-cgroups.

@ -1,54 +0,0 @@
// +build apparmor,linux
package apparmor
import (
"fmt"
"io/ioutil"
"os"
)
// IsEnabled returns true if apparmor is enabled for the host.
func IsEnabled() bool {
if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" {
if _, err = os.Stat("/sbin/apparmor_parser"); err == nil {
buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
return err == nil && len(buf) > 1 && buf[0] == 'Y'
}
}
return false
}
func setprocattr(attr, value string) error {
// Under AppArmor you can only change your own attr, so use /proc/self/
// instead of /proc/<tid>/ like libapparmor does
path := fmt.Sprintf("/proc/self/attr/%s", attr)
f, err := os.OpenFile(path, os.O_WRONLY, 0)
if err != nil {
return err
}
defer f.Close()
_, err = fmt.Fprintf(f, "%s", value)
return err
}
// changeOnExec reimplements aa_change_onexec from libapparmor in Go
func changeOnExec(name string) error {
value := "exec " + name
if err := setprocattr("exec", value); err != nil {
return fmt.Errorf("apparmor failed to apply profile: %s", err)
}
return nil
}
// ApplyProfile will apply the profile with the specified name to the process after
// the next exec.
func ApplyProfile(name string) error {
if name == "" {
return nil
}
return changeOnExec(name)
}

@ -1,20 +0,0 @@
// +build !apparmor !linux
package apparmor
import (
"errors"
)
var ErrApparmorNotEnabled = errors.New("apparmor: config provided but apparmor not supported")
func IsEnabled() bool {
return false
}
func ApplyProfile(name string) error {
if name != "" {
return ErrApparmorNotEnabled
}
return nil
}

@ -1,113 +0,0 @@
// +build linux
package libcontainer
import (
"fmt"
"strings"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/syndtr/gocapability/capability"
)
const allCapabilityTypes = capability.CAPS | capability.BOUNDS | capability.AMBS
var capabilityMap map[string]capability.Cap
func init() {
capabilityMap = make(map[string]capability.Cap)
last := capability.CAP_LAST_CAP
// workaround for RHEL6 which has no /proc/sys/kernel/cap_last_cap
if last == capability.Cap(63) {
last = capability.CAP_BLOCK_SUSPEND
}
for _, cap := range capability.List() {
if cap > last {
continue
}
capKey := fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String()))
capabilityMap[capKey] = cap
}
}
func newContainerCapList(capConfig *configs.Capabilities) (*containerCapabilities, error) {
bounding := []capability.Cap{}
for _, c := range capConfig.Bounding {
v, ok := capabilityMap[c]
if !ok {
return nil, fmt.Errorf("unknown capability %q", c)
}
bounding = append(bounding, v)
}
effective := []capability.Cap{}
for _, c := range capConfig.Effective {
v, ok := capabilityMap[c]
if !ok {
return nil, fmt.Errorf("unknown capability %q", c)
}
effective = append(effective, v)
}
inheritable := []capability.Cap{}
for _, c := range capConfig.Inheritable {
v, ok := capabilityMap[c]
if !ok {
return nil, fmt.Errorf("unknown capability %q", c)
}
inheritable = append(inheritable, v)
}
permitted := []capability.Cap{}
for _, c := range capConfig.Permitted {
v, ok := capabilityMap[c]
if !ok {
return nil, fmt.Errorf("unknown capability %q", c)
}
permitted = append(permitted, v)
}
ambient := []capability.Cap{}
for _, c := range capConfig.Ambient {
v, ok := capabilityMap[c]
if !ok {
return nil, fmt.Errorf("unknown capability %q", c)
}
ambient = append(ambient, v)
}
pid, err := capability.NewPid(0)
if err != nil {
return nil, err
}
return &containerCapabilities{
bounding: bounding,
effective: effective,
inheritable: inheritable,
permitted: permitted,
ambient: ambient,
pid: pid,
}, nil
}
type containerCapabilities struct {
pid capability.Capabilities
bounding []capability.Cap
effective []capability.Cap
inheritable []capability.Cap
permitted []capability.Cap
ambient []capability.Cap
}
// ApplyBoundingSet sets the capability bounding set to those specified in the whitelist.
func (c *containerCapabilities) ApplyBoundingSet() error {
c.pid.Clear(capability.BOUNDS)
c.pid.Set(capability.BOUNDS, c.bounding...)
return c.pid.Apply(capability.BOUNDS)
}
// Apply sets all the capabilities for the current process in the config.
func (c *containerCapabilities) ApplyCaps() error {
c.pid.Clear(allCapabilityTypes)
c.pid.Set(capability.BOUNDS, c.bounding...)
c.pid.Set(capability.PERMITTED, c.permitted...)
c.pid.Set(capability.INHERITABLE, c.inheritable...)
c.pid.Set(capability.EFFECTIVE, c.effective...)
c.pid.Set(capability.AMBIENT, c.ambient...)
return c.pid.Apply(allCapabilityTypes)
}

@ -1,64 +0,0 @@
// +build linux
package cgroups
import (
"fmt"
"github.com/opencontainers/runc/libcontainer/configs"
)
type Manager interface {
// Applies cgroup configuration to the process with the specified pid
Apply(pid int) error
// Returns the PIDs inside the cgroup set
GetPids() ([]int, error)
// Returns the PIDs inside the cgroup set & all sub-cgroups
GetAllPids() ([]int, error)
// Returns statistics for the cgroup set
GetStats() (*Stats, error)
// Toggles the freezer cgroup according with specified state
Freeze(state configs.FreezerState) error
// Destroys the cgroup set
Destroy() error
// The option funcs SystemdCgroups() and Cgroupfs() require the following attributes:
// Paths map[string]string
// Cgroups *configs.Cgroup
// Paths maps cgroup subsystem to path at which it is mounted.
// Cgroups specifies specific cgroup settings for the various subsystems
// Returns cgroup paths to save in a state file and to be able to
// restore the object later.
GetPaths() map[string]string
// Sets the cgroup as configured.
Set(container *configs.Config) error
}
type NotFoundError struct {
Subsystem string
}
func (e *NotFoundError) Error() string {
return fmt.Sprintf("mountpoint for %s not found", e.Subsystem)
}
func NewNotFoundError(sub string) error {
return &NotFoundError{
Subsystem: sub,
}
}
func IsNotFound(err error) bool {
if err == nil {
return false
}
_, ok := err.(*NotFoundError)
return ok
}

@ -1,3 +0,0 @@
// +build !linux
package cgroups

@ -1,409 +0,0 @@
// +build linux
package fs
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
var (
subsystems = subsystemSet{
&CpusetGroup{},
&DevicesGroup{},
&MemoryGroup{},
&CpuGroup{},
&CpuacctGroup{},
&PidsGroup{},
&BlkioGroup{},
&HugetlbGroup{},
&NetClsGroup{},
&NetPrioGroup{},
&PerfEventGroup{},
&FreezerGroup{},
&NameGroup{GroupName: "name=systemd", Join: true},
}
HugePageSizes, _ = cgroups.GetHugePageSize()
)
var errSubsystemDoesNotExist = fmt.Errorf("cgroup: subsystem does not exist")
type subsystemSet []subsystem
func (s subsystemSet) Get(name string) (subsystem, error) {
for _, ss := range s {
if ss.Name() == name {
return ss, nil
}
}
return nil, errSubsystemDoesNotExist
}
type subsystem interface {
// Name returns the name of the subsystem.
Name() string
// Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
GetStats(path string, stats *cgroups.Stats) error
// Removes the cgroup represented by 'cgroupData'.
Remove(*cgroupData) error
// Creates and joins the cgroup represented by 'cgroupData'.
Apply(*cgroupData) error
// Set the cgroup represented by cgroup.
Set(path string, cgroup *configs.Cgroup) error
}
type Manager struct {
mu sync.Mutex
Cgroups *configs.Cgroup
Rootless bool // ignore permission-related errors
Paths map[string]string
}
// The absolute path to the root of the cgroup hierarchies.
var cgroupRootLock sync.Mutex
var cgroupRoot string
// Gets the cgroupRoot.
func getCgroupRoot() (string, error) {
cgroupRootLock.Lock()
defer cgroupRootLock.Unlock()
if cgroupRoot != "" {
return cgroupRoot, nil
}
root, err := cgroups.FindCgroupMountpointDir()
if err != nil {
return "", err
}
if _, err := os.Stat(root); err != nil {
return "", err
}
cgroupRoot = root
return cgroupRoot, nil
}
type cgroupData struct {
root string
innerPath string
config *configs.Cgroup
pid int
}
// isIgnorableError returns whether err is a permission error (in the loose
// sense of the word). This includes EROFS (which for an unprivileged user is
// basically a permission error) and EACCES (for similar reasons) as well as
// the normal EPERM.
func isIgnorableError(rootless bool, err error) bool {
// We do not ignore errors if we are root.
if !rootless {
return false
}
// Is it an ordinary EPERM?
if os.IsPermission(errors.Cause(err)) {
return true
}
// Try to handle other errnos.
var errno error
switch err := errors.Cause(err).(type) {
case *os.PathError:
errno = err.Err
case *os.LinkError:
errno = err.Err
case *os.SyscallError:
errno = err.Err
}
return errno == unix.EROFS || errno == unix.EPERM || errno == unix.EACCES
}
func (m *Manager) Apply(pid int) (err error) {
if m.Cgroups == nil {
return nil
}
m.mu.Lock()
defer m.mu.Unlock()
var c = m.Cgroups
d, err := getCgroupData(m.Cgroups, pid)
if err != nil {
return err
}
m.Paths = make(map[string]string)
if c.Paths != nil {
for name, path := range c.Paths {
_, err := d.path(name)
if err != nil {
if cgroups.IsNotFound(err) {
continue
}
return err
}
m.Paths[name] = path
}
return cgroups.EnterPid(m.Paths, pid)
}
for _, sys := range subsystems {
// TODO: Apply should, ideally, be reentrant or be broken up into a separate
// create and join phase so that the cgroup hierarchy for a container can be
// created then join consists of writing the process pids to cgroup.procs
p, err := d.path(sys.Name())
if err != nil {
// The non-presence of the devices subsystem is
// considered fatal for security reasons.
if cgroups.IsNotFound(err) && sys.Name() != "devices" {
continue
}
return err
}
m.Paths[sys.Name()] = p
if err := sys.Apply(d); err != nil {
// In the case of rootless (including euid=0 in userns), where an explicit cgroup path hasn't
// been set, we don't bail on error in case of permission problems.
// Cases where limits have been set (and we couldn't create our own
// cgroup) are handled by Set.
if isIgnorableError(m.Rootless, err) && m.Cgroups.Path == "" {
delete(m.Paths, sys.Name())
continue
}
return err
}
}
return nil
}
func (m *Manager) Destroy() error {
if m.Cgroups == nil || m.Cgroups.Paths != nil {
return nil
}
m.mu.Lock()
defer m.mu.Unlock()
if err := cgroups.RemovePaths(m.Paths); err != nil {
return err
}
m.Paths = make(map[string]string)
return nil
}
func (m *Manager) GetPaths() map[string]string {
m.mu.Lock()
paths := m.Paths
m.mu.Unlock()
return paths
}
func (m *Manager) GetStats() (*cgroups.Stats, error) {
m.mu.Lock()
defer m.mu.Unlock()
stats := cgroups.NewStats()
for name, path := range m.Paths {
sys, err := subsystems.Get(name)
if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) {
continue
}
if err := sys.GetStats(path, stats); err != nil {
return nil, err
}
}
return stats, nil
}
func (m *Manager) Set(container *configs.Config) error {
// If Paths are set, then we are just joining cgroups paths
// and there is no need to set any values.
if m.Cgroups.Paths != nil {
return nil
}
paths := m.GetPaths()
for _, sys := range subsystems {
path := paths[sys.Name()]
if err := sys.Set(path, container.Cgroups); err != nil {
if m.Rootless && sys.Name() == "devices" {
continue
}
// When m.Rootless is true, errors from the device subsystem are ignored because it is really not expected to work.
// However, errors from other subsystems are not ignored.
// see @test "runc create (rootless + limits + no cgrouppath + no permission) fails with informative error"
if path == "" {
// We never created a path for this cgroup, so we cannot set
// limits for it (though we have already tried at this point).
return fmt.Errorf("cannot set %s limit: container could not join or create cgroup", sys.Name())
}
return err
}
}
if m.Paths["cpu"] != "" {
if err := CheckCpushares(m.Paths["cpu"], container.Cgroups.Resources.CpuShares); err != nil {
return err
}
}
return nil
}
// Freeze toggles the container's freezer cgroup depending on the state
// provided
func (m *Manager) Freeze(state configs.FreezerState) error {
paths := m.GetPaths()
dir := paths["freezer"]
prevState := m.Cgroups.Resources.Freezer
m.Cgroups.Resources.Freezer = state
freezer, err := subsystems.Get("freezer")
if err != nil {
return err
}
err = freezer.Set(dir, m.Cgroups)
if err != nil {
m.Cgroups.Resources.Freezer = prevState
return err
}
return nil
}
func (m *Manager) GetPids() ([]int, error) {
paths := m.GetPaths()
return cgroups.GetPids(paths["devices"])
}
func (m *Manager) GetAllPids() ([]int, error) {
paths := m.GetPaths()
return cgroups.GetAllPids(paths["devices"])
}
func getCgroupData(c *configs.Cgroup, pid int) (*cgroupData, error) {
root, err := getCgroupRoot()
if err != nil {
return nil, err
}
if (c.Name != "" || c.Parent != "") && c.Path != "" {
return nil, fmt.Errorf("cgroup: either Path or Name and Parent should be used")
}
// XXX: Do not remove this code. Path safety is important! -- cyphar
cgPath := libcontainerUtils.CleanPath(c.Path)
cgParent := libcontainerUtils.CleanPath(c.Parent)
cgName := libcontainerUtils.CleanPath(c.Name)
innerPath := cgPath
if innerPath == "" {
innerPath = filepath.Join(cgParent, cgName)
}
return &cgroupData{
root: root,
innerPath: innerPath,
config: c,
pid: pid,
}, nil
}
func (raw *cgroupData) path(subsystem string) (string, error) {
mnt, err := cgroups.FindCgroupMountpoint(raw.root, subsystem)
// If we didn't mount the subsystem, there is no point in making the path.
if err != nil {
return "", err
}
// If the cgroup name/path is absolute do not look relative to the cgroup of the init process.
if filepath.IsAbs(raw.innerPath) {
// Sometimes subsystems can be mounted together as 'cpu,cpuacct'.
return filepath.Join(raw.root, filepath.Base(mnt), raw.innerPath), nil
}
// Use GetOwnCgroupPath instead of GetInitCgroupPath, because the creating
// process could be in a container that shares its pid namespace with the host,
// and /proc/1/cgroup could point to a whole other world of cgroups.
parentPath, err := cgroups.GetOwnCgroupPath(subsystem)
if err != nil {
return "", err
}
return filepath.Join(parentPath, raw.innerPath), nil
}
func (raw *cgroupData) join(subsystem string) (string, error) {
path, err := raw.path(subsystem)
if err != nil {
return "", err
}
if err := os.MkdirAll(path, 0755); err != nil {
return "", err
}
if err := cgroups.WriteCgroupProc(path, raw.pid); err != nil {
return "", err
}
return path, nil
}
func writeFile(dir, file, data string) error {
// Normally dir should not be empty; one case where it is empty is when the
// cgroup subsystem is not mounted, and in that case we want to fail here.
if dir == "" {
return fmt.Errorf("no such directory for %s", file)
}
if err := ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700); err != nil {
return fmt.Errorf("failed to write %v to %v: %v", data, file, err)
}
return nil
}
func readFile(dir, file string) (string, error) {
data, err := ioutil.ReadFile(filepath.Join(dir, file))
return string(data), err
}
func removePath(p string, err error) error {
if err != nil {
return err
}
if p != "" {
return os.RemoveAll(p)
}
return nil
}
func CheckCpushares(path string, c uint64) error {
var cpuShares uint64
if c == 0 {
return nil
}
fd, err := os.Open(filepath.Join(path, "cpu.shares"))
if err != nil {
return err
}
defer fd.Close()
_, err = fmt.Fscanf(fd, "%d", &cpuShares)
if err != nil && err != io.EOF {
return err
}
if c > cpuShares {
return fmt.Errorf("The maximum allowed cpu-shares is %d", cpuShares)
} else if c < cpuShares {
return fmt.Errorf("The minimum allowed cpu-shares is %d", cpuShares)
}
return nil
}

@ -1,237 +0,0 @@
// +build linux
package fs
import (
"bufio"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
type BlkioGroup struct {
}
func (s *BlkioGroup) Name() string {
return "blkio"
}
func (s *BlkioGroup) Apply(d *cgroupData) error {
_, err := d.join("blkio")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
return nil
}
func (s *BlkioGroup) Set(path string, cgroup *configs.Cgroup) error {
if cgroup.Resources.BlkioWeight != 0 {
if err := writeFile(path, "blkio.weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioWeight), 10)); err != nil {
return err
}
}
if cgroup.Resources.BlkioLeafWeight != 0 {
if err := writeFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioLeafWeight), 10)); err != nil {
return err
}
}
for _, wd := range cgroup.Resources.BlkioWeightDevice {
if err := writeFile(path, "blkio.weight_device", wd.WeightString()); err != nil {
return err
}
if err := writeFile(path, "blkio.leaf_weight_device", wd.LeafWeightString()); err != nil {
return err
}
}
for _, td := range cgroup.Resources.BlkioThrottleReadBpsDevice {
if err := writeFile(path, "blkio.throttle.read_bps_device", td.String()); err != nil {
return err
}
}
for _, td := range cgroup.Resources.BlkioThrottleWriteBpsDevice {
if err := writeFile(path, "blkio.throttle.write_bps_device", td.String()); err != nil {
return err
}
}
for _, td := range cgroup.Resources.BlkioThrottleReadIOPSDevice {
if err := writeFile(path, "blkio.throttle.read_iops_device", td.String()); err != nil {
return err
}
}
for _, td := range cgroup.Resources.BlkioThrottleWriteIOPSDevice {
if err := writeFile(path, "blkio.throttle.write_iops_device", td.String()); err != nil {
return err
}
}
return nil
}
func (s *BlkioGroup) Remove(d *cgroupData) error {
return removePath(d.path("blkio"))
}
/*
examples:
blkio.sectors
8:0 6792
blkio.io_service_bytes
8:0 Read 1282048
8:0 Write 2195456
8:0 Sync 2195456
8:0 Async 1282048
8:0 Total 3477504
Total 3477504
blkio.io_serviced
8:0 Read 124
8:0 Write 104
8:0 Sync 104
8:0 Async 124
8:0 Total 228
Total 228
blkio.io_queued
8:0 Read 0
8:0 Write 0
8:0 Sync 0
8:0 Async 0
8:0 Total 0
Total 0
*/
func splitBlkioStatLine(r rune) bool {
return r == ' ' || r == ':'
}
func getBlkioStat(path string) ([]cgroups.BlkioStatEntry, error) {
var blkioStats []cgroups.BlkioStatEntry
f, err := os.Open(path)
if err != nil {
if os.IsNotExist(err) {
return blkioStats, nil
}
return nil, err
}
defer f.Close()
sc := bufio.NewScanner(f)
for sc.Scan() {
// format: dev type amount
fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine)
if len(fields) < 3 {
if len(fields) == 2 && fields[0] == "Total" {
// skip total line
continue
} else {
return nil, fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text())
}
}
v, err := strconv.ParseUint(fields[0], 10, 64)
if err != nil {
return nil, err
}
major := v
v, err = strconv.ParseUint(fields[1], 10, 64)
if err != nil {
return nil, err
}
minor := v
op := ""
valueField := 2
if len(fields) == 4 {
op = fields[2]
valueField = 3
}
v, err = strconv.ParseUint(fields[valueField], 10, 64)
if err != nil {
return nil, err
}
blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v})
}
return blkioStats, nil
}
func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error {
// Try to read CFQ stats available on all CFQ enabled kernels first
if blkioStats, err := getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err == nil && blkioStats != nil {
return getCFQStats(path, stats)
}
return getStats(path, stats) // Use generic stats as fallback
}
func getCFQStats(path string, stats *cgroups.Stats) error {
var blkioStats []cgroups.BlkioStatEntry
var err error
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil {
return err
}
stats.BlkioStats.SectorsRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil {
return err
}
stats.BlkioStats.IoServiceBytesRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil {
return err
}
stats.BlkioStats.IoServicedRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil {
return err
}
stats.BlkioStats.IoQueuedRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_time_recursive")); err != nil {
return err
}
stats.BlkioStats.IoServiceTimeRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_wait_time_recursive")); err != nil {
return err
}
stats.BlkioStats.IoWaitTimeRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_merged_recursive")); err != nil {
return err
}
stats.BlkioStats.IoMergedRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.time_recursive")); err != nil {
return err
}
stats.BlkioStats.IoTimeRecursive = blkioStats
return nil
}
func getStats(path string, stats *cgroups.Stats) error {
var blkioStats []cgroups.BlkioStatEntry
var err error
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_service_bytes")); err != nil {
return err
}
stats.BlkioStats.IoServiceBytesRecursive = blkioStats
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_serviced")); err != nil {
return err
}
stats.BlkioStats.IoServicedRecursive = blkioStats
return nil
}

@ -1,117 +0,0 @@
// +build linux
package fs
import (
"bufio"
"os"
"path/filepath"
"strconv"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
type CpuGroup struct {
}
func (s *CpuGroup) Name() string {
return "cpu"
}
func (s *CpuGroup) Apply(d *cgroupData) error {
// We always want to join the cpu group, to allow fair cpu scheduling
// on a container basis
path, err := d.path("cpu")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
return s.ApplyDir(path, d.config, d.pid)
}
func (s *CpuGroup) ApplyDir(path string, cgroup *configs.Cgroup, pid int) error {
// This might happen if we have no cpu cgroup mounted.
// Just do nothing and don't fail.
if path == "" {
return nil
}
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
// We should set the real-Time group scheduling settings before moving
// in the process because if the process is already in SCHED_RR mode
// and no RT bandwidth is set, adding it will fail.
if err := s.SetRtSched(path, cgroup); err != nil {
return err
}
// because we are not using d.join we need to place the pid into the procs file
// unlike the other subsystems
return cgroups.WriteCgroupProc(path, pid)
}
func (s *CpuGroup) SetRtSched(path string, cgroup *configs.Cgroup) error {
if cgroup.Resources.CpuRtPeriod != 0 {
if err := writeFile(path, "cpu.rt_period_us", strconv.FormatUint(cgroup.Resources.CpuRtPeriod, 10)); err != nil {
return err
}
}
if cgroup.Resources.CpuRtRuntime != 0 {
if err := writeFile(path, "cpu.rt_runtime_us", strconv.FormatInt(cgroup.Resources.CpuRtRuntime, 10)); err != nil {
return err
}
}
return nil
}
func (s *CpuGroup) Set(path string, cgroup *configs.Cgroup) error {
if cgroup.Resources.CpuShares != 0 {
if err := writeFile(path, "cpu.shares", strconv.FormatUint(cgroup.Resources.CpuShares, 10)); err != nil {
return err
}
}
if cgroup.Resources.CpuPeriod != 0 {
if err := writeFile(path, "cpu.cfs_period_us", strconv.FormatUint(cgroup.Resources.CpuPeriod, 10)); err != nil {
return err
}
}
if cgroup.Resources.CpuQuota != 0 {
if err := writeFile(path, "cpu.cfs_quota_us", strconv.FormatInt(cgroup.Resources.CpuQuota, 10)); err != nil {
return err
}
}
return s.SetRtSched(path, cgroup)
}
func (s *CpuGroup) Remove(d *cgroupData) error {
return removePath(d.path("cpu"))
}
func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error {
f, err := os.Open(filepath.Join(path, "cpu.stat"))
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
defer f.Close()
sc := bufio.NewScanner(f)
for sc.Scan() {
t, v, err := getCgroupParamKeyValue(sc.Text())
if err != nil {
return err
}
switch t {
case "nr_periods":
stats.CpuStats.ThrottlingData.Periods = v
case "nr_throttled":
stats.CpuStats.ThrottlingData.ThrottledPeriods = v
case "throttled_time":
stats.CpuStats.ThrottlingData.ThrottledTime = v
}
}
return nil
}

@ -1,121 +0,0 @@
// +build linux
package fs
import (
"fmt"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/system"
)
const (
cgroupCpuacctStat = "cpuacct.stat"
nanosecondsInSecond = 1000000000
)
var clockTicks = uint64(system.GetClockTicks())
type CpuacctGroup struct {
}
func (s *CpuacctGroup) Name() string {
return "cpuacct"
}
func (s *CpuacctGroup) Apply(d *cgroupData) error {
// we just want to join this group even though we don't set anything
if _, err := d.join("cpuacct"); err != nil && !cgroups.IsNotFound(err) {
return err
}
return nil
}
func (s *CpuacctGroup) Set(path string, cgroup *configs.Cgroup) error {
return nil
}
func (s *CpuacctGroup) Remove(d *cgroupData) error {
return removePath(d.path("cpuacct"))
}
func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error {
userModeUsage, kernelModeUsage, err := getCpuUsageBreakdown(path)
if err != nil {
return err
}
totalUsage, err := getCgroupParamUint(path, "cpuacct.usage")
if err != nil {
return err
}
percpuUsage, err := getPercpuUsage(path)
if err != nil {
return err
}
stats.CpuStats.CpuUsage.TotalUsage = totalUsage
stats.CpuStats.CpuUsage.PercpuUsage = percpuUsage
stats.CpuStats.CpuUsage.UsageInUsermode = userModeUsage
stats.CpuStats.CpuUsage.UsageInKernelmode = kernelModeUsage
return nil
}
// Returns user and kernel usage breakdown in nanoseconds.
func getCpuUsageBreakdown(path string) (uint64, uint64, error) {
userModeUsage := uint64(0)
kernelModeUsage := uint64(0)
const (
userField = "user"
systemField = "system"
)
// Expected format:
// user <usage in ticks>
// system <usage in ticks>
data, err := ioutil.ReadFile(filepath.Join(path, cgroupCpuacctStat))
if err != nil {
return 0, 0, err
}
fields := strings.Fields(string(data))
if len(fields) != 4 {
return 0, 0, fmt.Errorf("failure - %s is expected to have 4 fields", filepath.Join(path, cgroupCpuacctStat))
}
if fields[0] != userField {
return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[0], cgroupCpuacctStat, userField)
}
if fields[2] != systemField {
return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[2], cgroupCpuacctStat, systemField)
}
if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil {
return 0, 0, err
}
if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil {
return 0, 0, err
}
return (userModeUsage * nanosecondsInSecond) / clockTicks, (kernelModeUsage * nanosecondsInSecond) / clockTicks, nil
}
func getPercpuUsage(path string) ([]uint64, error) {
percpuUsage := []uint64{}
data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu"))
if err != nil {
return percpuUsage, err
}
for _, value := range strings.Fields(string(data)) {
value, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return percpuUsage, fmt.Errorf("Unable to convert param value to uint64: %s", err)
}
percpuUsage = append(percpuUsage, value)
}
return percpuUsage, nil
}

@ -1,159 +0,0 @@
// +build linux
package fs
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
)
type CpusetGroup struct {
}
func (s *CpusetGroup) Name() string {
return "cpuset"
}
func (s *CpusetGroup) Apply(d *cgroupData) error {
dir, err := d.path("cpuset")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
return s.ApplyDir(dir, d.config, d.pid)
}
func (s *CpusetGroup) Set(path string, cgroup *configs.Cgroup) error {
if cgroup.Resources.CpusetCpus != "" {
if err := writeFile(path, "cpuset.cpus", cgroup.Resources.CpusetCpus); err != nil {
return err
}
}
if cgroup.Resources.CpusetMems != "" {
if err := writeFile(path, "cpuset.mems", cgroup.Resources.CpusetMems); err != nil {
return err
}
}
return nil
}
func (s *CpusetGroup) Remove(d *cgroupData) error {
return removePath(d.path("cpuset"))
}
func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error {
return nil
}
func (s *CpusetGroup) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) error {
// This might happen if we have no cpuset cgroup mounted.
// Just do nothing and don't fail.
if dir == "" {
return nil
}
mountInfo, err := ioutil.ReadFile("/proc/self/mountinfo")
if err != nil {
return err
}
root := filepath.Dir(cgroups.GetClosestMountpointAncestor(dir, string(mountInfo)))
// 'ensureParent' starts with the parent because we don't want to
// explicitly inherit from the parent; it could conflict with
// 'cpuset.cpu_exclusive'.
if err := s.ensureParent(filepath.Dir(dir), root); err != nil {
return err
}
if err := os.MkdirAll(dir, 0755); err != nil {
return err
}
// We didn't inherit cpuset configs from parent, but we have
// to ensure cpuset configs are set before moving task into the
// cgroup.
// The logic is, if user specified cpuset configs, use these
// specified configs, otherwise, inherit from parent. This makes
// cpuset configs work correctly with 'cpuset.cpu_exclusive', and
// keep backward compatibility.
if err := s.ensureCpusAndMems(dir, cgroup); err != nil {
return err
}
// because we are not using d.join we need to place the pid into the procs file
// unlike the other subsystems
return cgroups.WriteCgroupProc(dir, pid)
}
func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) {
if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil {
return
}
if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil {
return
}
return cpus, mems, nil
}
// ensureParent makes sure that the parent directory of current is created
// and populated with the proper cpus and mems files copied from
// its parent.
func (s *CpusetGroup) ensureParent(current, root string) error {
parent := filepath.Dir(current)
if libcontainerUtils.CleanPath(parent) == root {
return nil
}
// Avoid infinite recursion.
if parent == current {
return fmt.Errorf("cpuset: cgroup parent path outside cgroup root")
}
if err := s.ensureParent(parent, root); err != nil {
return err
}
if err := os.MkdirAll(current, 0755); err != nil {
return err
}
return s.copyIfNeeded(current, parent)
}
// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent
// directory to the current directory if the file's contents are 0
func (s *CpusetGroup) copyIfNeeded(current, parent string) error {
var (
err error
currentCpus, currentMems []byte
parentCpus, parentMems []byte
)
if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil {
return err
}
if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil {
return err
}
if s.isEmpty(currentCpus) {
if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil {
return err
}
}
if s.isEmpty(currentMems) {
if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil {
return err
}
}
return nil
}
func (s *CpusetGroup) isEmpty(b []byte) bool {
return len(bytes.Trim(b, "\n")) == 0
}
func (s *CpusetGroup) ensureCpusAndMems(path string, cgroup *configs.Cgroup) error {
if err := s.Set(path, cgroup); err != nil {
return err
}
return s.copyIfNeeded(path, filepath.Dir(path))
}

@ -1,80 +0,0 @@
// +build linux
package fs
import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/system"
)
type DevicesGroup struct {
}
func (s *DevicesGroup) Name() string {
return "devices"
}
func (s *DevicesGroup) Apply(d *cgroupData) error {
_, err := d.join("devices")
if err != nil {
// We will return an error even if it's a `not found` error, as the devices
// cgroup is a hard requirement for the container's security.
return err
}
return nil
}
func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
if system.RunningInUserNS() {
return nil
}
devices := cgroup.Resources.Devices
if len(devices) > 0 {
for _, dev := range devices {
file := "devices.deny"
if dev.Allow {
file = "devices.allow"
}
if err := writeFile(path, file, dev.CgroupString()); err != nil {
return err
}
}
return nil
}
if cgroup.Resources.AllowAllDevices != nil {
if *cgroup.Resources.AllowAllDevices == false {
if err := writeFile(path, "devices.deny", "a"); err != nil {
return err
}
for _, dev := range cgroup.Resources.AllowedDevices {
if err := writeFile(path, "devices.allow", dev.CgroupString()); err != nil {
return err
}
}
return nil
}
if err := writeFile(path, "devices.allow", "a"); err != nil {
return err
}
}
for _, dev := range cgroup.Resources.DeniedDevices {
if err := writeFile(path, "devices.deny", dev.CgroupString()); err != nil {
return err
}
}
return nil
}
func (s *DevicesGroup) Remove(d *cgroupData) error {
return removePath(d.path("devices"))
}
func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error {
return nil
}

@ -1,66 +0,0 @@
// +build linux
package fs
import (
"fmt"
"strings"
"time"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
type FreezerGroup struct {
}
func (s *FreezerGroup) Name() string {
return "freezer"
}
func (s *FreezerGroup) Apply(d *cgroupData) error {
_, err := d.join("freezer")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
return nil
}
func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error {
switch cgroup.Resources.Freezer {
case configs.Frozen, configs.Thawed:
for {
// In case this loop does not exit because it doesn't get the expected
// state, let's write again this state, hoping it's going to be properly
// set this time. Otherwise, this loop could run infinitely, waiting for
// a state change that would never happen.
if err := writeFile(path, "freezer.state", string(cgroup.Resources.Freezer)); err != nil {
return err
}
state, err := readFile(path, "freezer.state")
if err != nil {
return err
}
if strings.TrimSpace(state) == string(cgroup.Resources.Freezer) {
break
}
time.Sleep(1 * time.Millisecond)
}
case configs.Undefined:
return nil
default:
return fmt.Errorf("Invalid argument '%s' to freezer.state", string(cgroup.Resources.Freezer))
}
return nil
}
func (s *FreezerGroup) Remove(d *cgroupData) error {
return removePath(d.path("freezer"))
}
func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error {
return nil
}

@ -1,3 +0,0 @@
// +build !linux
package fs

@ -1,71 +0,0 @@
// +build linux
package fs
import (
"fmt"
"strconv"
"strings"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
type HugetlbGroup struct {
}
func (s *HugetlbGroup) Name() string {
return "hugetlb"
}
func (s *HugetlbGroup) Apply(d *cgroupData) error {
_, err := d.join("hugetlb")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
return nil
}
func (s *HugetlbGroup) Set(path string, cgroup *configs.Cgroup) error {
for _, hugetlb := range cgroup.Resources.HugetlbLimit {
if err := writeFile(path, strings.Join([]string{"hugetlb", hugetlb.Pagesize, "limit_in_bytes"}, "."), strconv.FormatUint(hugetlb.Limit, 10)); err != nil {
return err
}
}
return nil
}
func (s *HugetlbGroup) Remove(d *cgroupData) error {
return removePath(d.path("hugetlb"))
}
func (s *HugetlbGroup) GetStats(path string, stats *cgroups.Stats) error {
hugetlbStats := cgroups.HugetlbStats{}
for _, pageSize := range HugePageSizes {
usage := strings.Join([]string{"hugetlb", pageSize, "usage_in_bytes"}, ".")
value, err := getCgroupParamUint(path, usage)
if err != nil {
return fmt.Errorf("failed to parse %s - %v", usage, err)
}
hugetlbStats.Usage = value
maxUsage := strings.Join([]string{"hugetlb", pageSize, "max_usage_in_bytes"}, ".")
value, err = getCgroupParamUint(path, maxUsage)
if err != nil {
return fmt.Errorf("failed to parse %s - %v", maxUsage, err)
}
hugetlbStats.MaxUsage = value
failcnt := strings.Join([]string{"hugetlb", pageSize, "failcnt"}, ".")
value, err = getCgroupParamUint(path, failcnt)
if err != nil {
return fmt.Errorf("failed to parse %s - %v", failcnt, err)
}
hugetlbStats.Failcnt = value
stats.HugetlbStats[pageSize] = hugetlbStats
}
return nil
}

@ -1,62 +0,0 @@
// +build linux,!nokmem
package fs
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"syscall" // for Errno type only
"github.com/opencontainers/runc/libcontainer/cgroups"
"golang.org/x/sys/unix"
)
const cgroupKernelMemoryLimit = "memory.kmem.limit_in_bytes"
func EnableKernelMemoryAccounting(path string) error {
// Ensure that kernel memory is available in this kernel build. If it
// isn't, we just ignore it because EnableKernelMemoryAccounting is
// automatically called for all memory limits.
if !cgroups.PathExists(filepath.Join(path, cgroupKernelMemoryLimit)) {
return nil
}
// We have to limit the kernel memory here as it won't be accounted at all
// until a limit is set on the cgroup and limit cannot be set once the
// cgroup has children, or if there are already tasks in the cgroup.
for _, i := range []int64{1, -1} {
if err := setKernelMemory(path, i); err != nil {
return err
}
}
return nil
}
func setKernelMemory(path string, kernelMemoryLimit int64) error {
if path == "" {
return fmt.Errorf("no such directory for %s", cgroupKernelMemoryLimit)
}
if !cgroups.PathExists(filepath.Join(path, cgroupKernelMemoryLimit)) {
// We have specifically been asked to set a kmem limit. If the kernel
// doesn't support it we *must* error out.
return errors.New("kernel memory accounting not supported by this kernel")
}
if err := ioutil.WriteFile(filepath.Join(path, cgroupKernelMemoryLimit), []byte(strconv.FormatInt(kernelMemoryLimit, 10)), 0700); err != nil {
// Check if the error number returned by the syscall is "EBUSY"
// The EBUSY signal is returned on attempts to write to the
// memory.kmem.limit_in_bytes file if the cgroup has children or
// once tasks have been attached to the cgroup
if pathErr, ok := err.(*os.PathError); ok {
if errNo, ok := pathErr.Err.(syscall.Errno); ok {
if errNo == unix.EBUSY {
return fmt.Errorf("failed to set %s, because either tasks have already joined this cgroup or it has children", cgroupKernelMemoryLimit)
}
}
}
return fmt.Errorf("failed to write %v to %v: %v", kernelMemoryLimit, cgroupKernelMemoryLimit, err)
}
return nil
}
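
A small sketch that mirrors the EBUSY error-unwrapping pattern above; isEBUSY and the example *os.PathError are hypothetical and only exercise the check, they are not taken from the removed sources.

package main

import (
	"fmt"
	"os"
	"syscall"

	"golang.org/x/sys/unix"
)

// isEBUSY mirrors the pattern above: a failed write surfaces as an
// *os.PathError wrapping a syscall.Errno, which can be compared to unix.EBUSY.
func isEBUSY(err error) bool {
	if pathErr, ok := err.(*os.PathError); ok {
		if errNo, ok := pathErr.Err.(syscall.Errno); ok {
			return errNo == unix.EBUSY
		}
	}
	return false
}

func main() {
	// Hypothetical error, constructed only to exercise the check.
	err := &os.PathError{Op: "write", Path: "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes", Err: unix.EBUSY}
	fmt.Println(isEBUSY(err)) // true
}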

View File

@ -1,15 +0,0 @@
// +build linux,nokmem
package fs
import (
"errors"
)
func EnableKernelMemoryAccounting(path string) error {
return nil
}
func setKernelMemory(path string, kernelMemoryLimit int64) error {
return errors.New("kernel memory accounting disabled in this runc build")
}

View File

@ -1,270 +0,0 @@
// +build linux
package fs
import (
"bufio"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
const (
cgroupMemorySwapLimit = "memory.memsw.limit_in_bytes"
cgroupMemoryLimit = "memory.limit_in_bytes"
)
type MemoryGroup struct {
}
func (s *MemoryGroup) Name() string {
return "memory"
}
func (s *MemoryGroup) Apply(d *cgroupData) (err error) {
path, err := d.path("memory")
if err != nil && !cgroups.IsNotFound(err) {
return err
} else if path == "" {
return nil
}
if memoryAssigned(d.config) {
if _, err := os.Stat(path); os.IsNotExist(err) {
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
// Only enable kernel memory accounting when this cgroup
// is created by libcontainer; otherwise we might get an
// error when people use `cgroupsPath` to join an existing
// cgroup whose kernel memory is not initialized.
if err := EnableKernelMemoryAccounting(path); err != nil {
return err
}
}
}
defer func() {
if err != nil {
os.RemoveAll(path)
}
}()
// We need to join the memory cgroup after setting memory limits, because
// kmem.limit_in_bytes can only be set when the cgroup is empty.
_, err = d.join("memory")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
return nil
}
func setMemoryAndSwap(path string, cgroup *configs.Cgroup) error {
// If the memory update is set to -1, we should also
// set swap to -1; it means unlimited memory.
if cgroup.Resources.Memory == -1 {
// Only set swap if it's enabled in kernel
if cgroups.PathExists(filepath.Join(path, cgroupMemorySwapLimit)) {
cgroup.Resources.MemorySwap = -1
}
}
// When memory and swap memory are both set, we need to handle the cases
// for updating the container.
if cgroup.Resources.Memory != 0 && cgroup.Resources.MemorySwap != 0 {
memoryUsage, err := getMemoryData(path, "")
if err != nil {
return err
}
// When updating the memory limit, we should adapt the write order
// for memory and swap, so the update won't fail because the new
// value and the old value don't fit the kernel's validation.
if cgroup.Resources.MemorySwap == -1 || memoryUsage.Limit < uint64(cgroup.Resources.MemorySwap) {
if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
return err
}
if err := writeFile(path, cgroupMemoryLimit, strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
return err
}
} else {
if err := writeFile(path, cgroupMemoryLimit, strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
return err
}
if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
return err
}
}
} else {
if cgroup.Resources.Memory != 0 {
if err := writeFile(path, cgroupMemoryLimit, strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
return err
}
}
if cgroup.Resources.MemorySwap != 0 {
if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
return err
}
}
}
return nil
}
func (s *MemoryGroup) Set(path string, cgroup *configs.Cgroup) error {
if err := setMemoryAndSwap(path, cgroup); err != nil {
return err
}
if cgroup.Resources.KernelMemory != 0 {
if err := setKernelMemory(path, cgroup.Resources.KernelMemory); err != nil {
return err
}
}
if cgroup.Resources.MemoryReservation != 0 {
if err := writeFile(path, "memory.soft_limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemoryReservation, 10)); err != nil {
return err
}
}
if cgroup.Resources.KernelMemoryTCP != 0 {
if err := writeFile(path, "memory.kmem.tcp.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemoryTCP, 10)); err != nil {
return err
}
}
if cgroup.Resources.OomKillDisable {
if err := writeFile(path, "memory.oom_control", "1"); err != nil {
return err
}
}
if cgroup.Resources.MemorySwappiness == nil || int64(*cgroup.Resources.MemorySwappiness) == -1 {
return nil
} else if *cgroup.Resources.MemorySwappiness <= 100 {
if err := writeFile(path, "memory.swappiness", strconv.FormatUint(*cgroup.Resources.MemorySwappiness, 10)); err != nil {
return err
}
} else {
return fmt.Errorf("invalid value:%d. valid memory swappiness range is 0-100", *cgroup.Resources.MemorySwappiness)
}
return nil
}
func (s *MemoryGroup) Remove(d *cgroupData) error {
return removePath(d.path("memory"))
}
func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error {
// Set stats from memory.stat.
statsFile, err := os.Open(filepath.Join(path, "memory.stat"))
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
defer statsFile.Close()
sc := bufio.NewScanner(statsFile)
for sc.Scan() {
t, v, err := getCgroupParamKeyValue(sc.Text())
if err != nil {
return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err)
}
stats.MemoryStats.Stats[t] = v
}
stats.MemoryStats.Cache = stats.MemoryStats.Stats["cache"]
memoryUsage, err := getMemoryData(path, "")
if err != nil {
return err
}
stats.MemoryStats.Usage = memoryUsage
swapUsage, err := getMemoryData(path, "memsw")
if err != nil {
return err
}
stats.MemoryStats.SwapUsage = swapUsage
kernelUsage, err := getMemoryData(path, "kmem")
if err != nil {
return err
}
stats.MemoryStats.KernelUsage = kernelUsage
kernelTCPUsage, err := getMemoryData(path, "kmem.tcp")
if err != nil {
return err
}
stats.MemoryStats.KernelTCPUsage = kernelTCPUsage
useHierarchy := strings.Join([]string{"memory", "use_hierarchy"}, ".")
value, err := getCgroupParamUint(path, useHierarchy)
if err != nil {
return err
}
if value == 1 {
stats.MemoryStats.UseHierarchy = true
}
return nil
}
func memoryAssigned(cgroup *configs.Cgroup) bool {
return cgroup.Resources.Memory != 0 ||
cgroup.Resources.MemoryReservation != 0 ||
cgroup.Resources.MemorySwap > 0 ||
cgroup.Resources.KernelMemory > 0 ||
cgroup.Resources.KernelMemoryTCP > 0 ||
cgroup.Resources.OomKillDisable ||
(cgroup.Resources.MemorySwappiness != nil && int64(*cgroup.Resources.MemorySwappiness) != -1)
}
func getMemoryData(path, name string) (cgroups.MemoryData, error) {
memoryData := cgroups.MemoryData{}
moduleName := "memory"
if name != "" {
moduleName = strings.Join([]string{"memory", name}, ".")
}
usage := strings.Join([]string{moduleName, "usage_in_bytes"}, ".")
maxUsage := strings.Join([]string{moduleName, "max_usage_in_bytes"}, ".")
failcnt := strings.Join([]string{moduleName, "failcnt"}, ".")
limit := strings.Join([]string{moduleName, "limit_in_bytes"}, ".")
value, err := getCgroupParamUint(path, usage)
if err != nil {
if moduleName != "memory" && os.IsNotExist(err) {
return cgroups.MemoryData{}, nil
}
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", usage, err)
}
memoryData.Usage = value
value, err = getCgroupParamUint(path, maxUsage)
if err != nil {
if moduleName != "memory" && os.IsNotExist(err) {
return cgroups.MemoryData{}, nil
}
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", maxUsage, err)
}
memoryData.MaxUsage = value
value, err = getCgroupParamUint(path, failcnt)
if err != nil {
if moduleName != "memory" && os.IsNotExist(err) {
return cgroups.MemoryData{}, nil
}
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", failcnt, err)
}
memoryData.Failcnt = value
value, err = getCgroupParamUint(path, limit)
if err != nil {
if moduleName != "memory" && os.IsNotExist(err) {
return cgroups.MemoryData{}, nil
}
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", limit, err)
}
memoryData.Limit = value
return memoryData, nil
}
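
A minimal sketch (not part of the removed sources) of the control-file naming rule that getMemoryData above relies on: names are composed as "memory[.<name>].<knob>", with an empty name selecting the plain memory counters.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// The same name values getMemoryData is called with above: "", "memsw", "kmem", "kmem.tcp".
	for _, name := range []string{"", "memsw", "kmem", "kmem.tcp"} {
		module := "memory"
		if name != "" {
			module = strings.Join([]string{"memory", name}, ".")
		}
		fmt.Println(strings.Join([]string{module, "usage_in_bytes"}, "."))
	}
}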

View File

@ -1,40 +0,0 @@
// +build linux
package fs
import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
type NameGroup struct {
GroupName string
Join bool
}
func (s *NameGroup) Name() string {
return s.GroupName
}
func (s *NameGroup) Apply(d *cgroupData) error {
if s.Join {
// ignore errors if the named cgroup does not exist
d.join(s.GroupName)
}
return nil
}
func (s *NameGroup) Set(path string, cgroup *configs.Cgroup) error {
return nil
}
func (s *NameGroup) Remove(d *cgroupData) error {
if s.Join {
removePath(d.path(s.GroupName))
}
return nil
}
func (s *NameGroup) GetStats(path string, stats *cgroups.Stats) error {
return nil
}

View File

@ -1,43 +0,0 @@
// +build linux
package fs
import (
"strconv"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
type NetClsGroup struct {
}
func (s *NetClsGroup) Name() string {
return "net_cls"
}
func (s *NetClsGroup) Apply(d *cgroupData) error {
_, err := d.join("net_cls")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
return nil
}
func (s *NetClsGroup) Set(path string, cgroup *configs.Cgroup) error {
if cgroup.Resources.NetClsClassid != 0 {
if err := writeFile(path, "net_cls.classid", strconv.FormatUint(uint64(cgroup.Resources.NetClsClassid), 10)); err != nil {
return err
}
}
return nil
}
func (s *NetClsGroup) Remove(d *cgroupData) error {
return removePath(d.path("net_cls"))
}
func (s *NetClsGroup) GetStats(path string, stats *cgroups.Stats) error {
return nil
}

View File

@ -1,41 +0,0 @@
// +build linux
package fs
import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
type NetPrioGroup struct {
}
func (s *NetPrioGroup) Name() string {
return "net_prio"
}
func (s *NetPrioGroup) Apply(d *cgroupData) error {
_, err := d.join("net_prio")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
return nil
}
func (s *NetPrioGroup) Set(path string, cgroup *configs.Cgroup) error {
for _, prioMap := range cgroup.Resources.NetPrioIfpriomap {
if err := writeFile(path, "net_prio.ifpriomap", prioMap.CgroupString()); err != nil {
return err
}
}
return nil
}
func (s *NetPrioGroup) Remove(d *cgroupData) error {
return removePath(d.path("net_prio"))
}
func (s *NetPrioGroup) GetStats(path string, stats *cgroups.Stats) error {
return nil
}

View File

@ -1,35 +0,0 @@
// +build linux
package fs
import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
type PerfEventGroup struct {
}
func (s *PerfEventGroup) Name() string {
return "perf_event"
}
func (s *PerfEventGroup) Apply(d *cgroupData) error {
// we just want to join this group even though we don't set anything
if _, err := d.join("perf_event"); err != nil && !cgroups.IsNotFound(err) {
return err
}
return nil
}
func (s *PerfEventGroup) Set(path string, cgroup *configs.Cgroup) error {
return nil
}
func (s *PerfEventGroup) Remove(d *cgroupData) error {
return removePath(d.path("perf_event"))
}
func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error {
return nil
}

View File

@ -1,73 +0,0 @@
// +build linux
package fs
import (
"fmt"
"path/filepath"
"strconv"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
type PidsGroup struct {
}
func (s *PidsGroup) Name() string {
return "pids"
}
func (s *PidsGroup) Apply(d *cgroupData) error {
_, err := d.join("pids")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
return nil
}
func (s *PidsGroup) Set(path string, cgroup *configs.Cgroup) error {
if cgroup.Resources.PidsLimit != 0 {
// "max" is the fallback value.
limit := "max"
if cgroup.Resources.PidsLimit > 0 {
limit = strconv.FormatInt(cgroup.Resources.PidsLimit, 10)
}
if err := writeFile(path, "pids.max", limit); err != nil {
return err
}
}
return nil
}
func (s *PidsGroup) Remove(d *cgroupData) error {
return removePath(d.path("pids"))
}
func (s *PidsGroup) GetStats(path string, stats *cgroups.Stats) error {
current, err := getCgroupParamUint(path, "pids.current")
if err != nil {
return fmt.Errorf("failed to parse pids.current - %s", err)
}
maxString, err := getCgroupParamString(path, "pids.max")
if err != nil {
return fmt.Errorf("failed to parse pids.max - %s", err)
}
// Default if pids.max == "max" is 0 -- which represents "no limit".
var max uint64
if maxString != "max" {
max, err = parseUint(maxString, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse pids.max - unable to parse %q as a uint from Cgroup file %q", maxString, filepath.Join(path, "pids.max"))
}
}
stats.PidsStats.Current = current
stats.PidsStats.Limit = max
return nil
}
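
A small sketch of the pids.max parsing rule used in GetStats above: the literal "max" means no limit and is reported as 0. The helper parsePidsMax is hypothetical and only illustrates the rule.

package main

import (
	"fmt"
	"strconv"
)

// parsePidsMax mirrors the GetStats logic above: "max" means no limit and is
// reported as 0; anything else must parse as an unsigned integer.
func parsePidsMax(s string) (uint64, error) {
	if s == "max" {
		return 0, nil
	}
	return strconv.ParseUint(s, 10, 64)
}

func main() {
	for _, v := range []string{"max", "512"} {
		limit, err := parsePidsMax(v)
		fmt.Println(v, "->", limit, err)
	}
}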

View File

@ -1,78 +0,0 @@
// +build linux
package fs
import (
"errors"
"fmt"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
)
var (
ErrNotValidFormat = errors.New("line is not a valid key value format")
)
// Saturates negative values at zero and returns a uint64.
// Due to kernel bugs, some of the memory cgroup stats can be negative.
func parseUint(s string, base, bitSize int) (uint64, error) {
value, err := strconv.ParseUint(s, base, bitSize)
if err != nil {
intValue, intErr := strconv.ParseInt(s, base, bitSize)
// 1. Handle negative values greater than MinInt64, and
// 2. Handle negative values less than MinInt64.
if intErr == nil && intValue < 0 {
return 0, nil
} else if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 {
return 0, nil
}
return value, err
}
return value, nil
}
// Parses a cgroup param and returns its name and value,
// e.g. "io_service_bytes 1234" returns io_service_bytes, 1234.
func getCgroupParamKeyValue(t string) (string, uint64, error) {
parts := strings.Fields(t)
switch len(parts) {
case 2:
value, err := parseUint(parts[1], 10, 64)
if err != nil {
return "", 0, fmt.Errorf("unable to convert param value (%q) to uint64: %v", parts[1], err)
}
return parts[0], value, nil
default:
return "", 0, ErrNotValidFormat
}
}
// Gets a single uint64 value from the specified cgroup file.
func getCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) {
fileName := filepath.Join(cgroupPath, cgroupFile)
contents, err := ioutil.ReadFile(fileName)
if err != nil {
return 0, err
}
res, err := parseUint(strings.TrimSpace(string(contents)), 10, 64)
if err != nil {
return res, fmt.Errorf("unable to parse %q as a uint from Cgroup file %q", string(contents), fileName)
}
return res, nil
}
// Gets a string value from the specified cgroup file
func getCgroupParamString(cgroupPath, cgroupFile string) (string, error) {
contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile))
if err != nil {
return "", err
}
return strings.TrimSpace(string(contents)), nil
}
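
A minimal standalone sketch (the helpers above are unexported, so the splitting is reimplemented inline) of the key/value parsing these utilities perform, and of why parseUint saturates negative kernel values at zero.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// A typical cgroup stat line; splitting it the same way as
	// getCgroupParamKeyValue above yields a name and a uint64 value.
	line := "io_service_bytes 1234"
	parts := strings.Fields(line)
	value, _ := strconv.ParseUint(parts[1], 10, 64)
	fmt.Println(parts[0], value) // io_service_bytes 1234

	// parseUint above additionally saturates negative kernel values at zero;
	// a plain ParseUint would fail on them instead.
	if _, err := strconv.ParseUint("-1", 10, 64); err != nil {
		fmt.Println("ParseUint rejects -1; parseUint above would return 0")
	}
}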

View File

@ -1,108 +0,0 @@
// +build linux
package cgroups
type ThrottlingData struct {
// Number of periods with throttling active
Periods uint64 `json:"periods,omitempty"`
// Number of periods when the container hit its throttling limit.
ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
// Aggregate time the container was throttled for in nanoseconds.
ThrottledTime uint64 `json:"throttled_time,omitempty"`
}
// CpuUsage denotes the usage of a CPU.
// All CPU stats are aggregate since container inception.
type CpuUsage struct {
// Total CPU time consumed.
// Units: nanoseconds.
TotalUsage uint64 `json:"total_usage,omitempty"`
// Total CPU time consumed per core.
// Units: nanoseconds.
PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
// Time spent by tasks of the cgroup in kernel mode.
// Units: nanoseconds.
UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
// Time spent by tasks of the cgroup in user mode.
// Units: nanoseconds.
UsageInUsermode uint64 `json:"usage_in_usermode"`
}
type CpuStats struct {
CpuUsage CpuUsage `json:"cpu_usage,omitempty"`
ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
}
type MemoryData struct {
Usage uint64 `json:"usage,omitempty"`
MaxUsage uint64 `json:"max_usage,omitempty"`
Failcnt uint64 `json:"failcnt"`
Limit uint64 `json:"limit"`
}
type MemoryStats struct {
// memory used for cache
Cache uint64 `json:"cache,omitempty"`
// usage of memory
Usage MemoryData `json:"usage,omitempty"`
// usage of memory + swap
SwapUsage MemoryData `json:"swap_usage,omitempty"`
// usage of kernel memory
KernelUsage MemoryData `json:"kernel_usage,omitempty"`
// usage of kernel TCP memory
KernelTCPUsage MemoryData `json:"kernel_tcp_usage,omitempty"`
// if true, memory usage is accounted for throughout a hierarchy of cgroups.
UseHierarchy bool `json:"use_hierarchy"`
Stats map[string]uint64 `json:"stats,omitempty"`
}
type PidsStats struct {
// number of pids in the cgroup
Current uint64 `json:"current,omitempty"`
// active pids hard limit
Limit uint64 `json:"limit,omitempty"`
}
type BlkioStatEntry struct {
Major uint64 `json:"major,omitempty"`
Minor uint64 `json:"minor,omitempty"`
Op string `json:"op,omitempty"`
Value uint64 `json:"value,omitempty"`
}
type BlkioStats struct {
// number of bytes transferred to and from the block device
IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"`
IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"`
IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"`
IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive,omitempty"`
IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"`
IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive,omitempty"`
IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive,omitempty"`
SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"`
}
type HugetlbStats struct {
// current res_counter usage for hugetlb
Usage uint64 `json:"usage,omitempty"`
// maximum usage ever recorded.
MaxUsage uint64 `json:"max_usage,omitempty"`
// number of times hugetlb usage allocation failed.
Failcnt uint64 `json:"failcnt"`
}
type Stats struct {
CpuStats CpuStats `json:"cpu_stats,omitempty"`
MemoryStats MemoryStats `json:"memory_stats,omitempty"`
PidsStats PidsStats `json:"pids_stats,omitempty"`
BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
// the map is in the format "size of hugepage: stats of the hugepage"
HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"`
}
func NewStats() *Stats {
memoryStats := MemoryStats{Stats: make(map[string]uint64)}
hugetlbStats := make(map[string]HugetlbStats)
return &Stats{MemoryStats: memoryStats, HugetlbStats: hugetlbStats}
}
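
A short sketch of how callers could populate the Stats structures defined above; the import path is the vendored runc package being removed by this commit, and the numeric values are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/opencontainers/runc/libcontainer/cgroups"
)

func main() {
	stats := cgroups.NewStats()
	stats.MemoryStats.Usage = cgroups.MemoryData{Usage: 64 << 20, Limit: 128 << 20}
	stats.HugetlbStats["2MB"] = cgroups.HugetlbStats{Usage: 4 << 20, MaxUsage: 8 << 20}

	// The json tags above drive the serialized field names.
	out, _ := json.MarshalIndent(stats, "", "  ")
	fmt.Println(string(out))
}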

View File

@ -1,59 +0,0 @@
// +build !linux static_build
package systemd
import (
"fmt"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
type Manager struct {
Cgroups *configs.Cgroup
Paths map[string]string
}
func UseSystemd() bool {
return false
}
func NewSystemdCgroupsManager() (func(config *configs.Cgroup, paths map[string]string) cgroups.Manager, error) {
return nil, fmt.Errorf("Systemd not supported")
}
func (m *Manager) Apply(pid int) error {
return fmt.Errorf("Systemd not supported")
}
func (m *Manager) GetPids() ([]int, error) {
return nil, fmt.Errorf("Systemd not supported")
}
func (m *Manager) GetAllPids() ([]int, error) {
return nil, fmt.Errorf("Systemd not supported")
}
func (m *Manager) Destroy() error {
return fmt.Errorf("Systemd not supported")
}
func (m *Manager) GetPaths() map[string]string {
return nil
}
func (m *Manager) GetStats() (*cgroups.Stats, error) {
return nil, fmt.Errorf("Systemd not supported")
}
func (m *Manager) Set(container *configs.Config) error {
return fmt.Errorf("Systemd not supported")
}
func (m *Manager) Freeze(state configs.FreezerState) error {
return fmt.Errorf("Systemd not supported")
}
func Freeze(c *configs.Cgroup, state configs.FreezerState) error {
return fmt.Errorf("Systemd not supported")
}

View File

@ -1,574 +0,0 @@
// +build linux,!static_build
package systemd
import (
"errors"
"fmt"
"io/ioutil"
"math"
"os"
"path/filepath"
"strings"
"sync"
"time"
systemdDbus "github.com/coreos/go-systemd/dbus"
systemdUtil "github.com/coreos/go-systemd/util"
"github.com/godbus/dbus"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fs"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/sirupsen/logrus"
)
type Manager struct {
mu sync.Mutex
Cgroups *configs.Cgroup
Paths map[string]string
}
type subsystem interface {
// Name returns the name of the subsystem.
Name() string
// Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
GetStats(path string, stats *cgroups.Stats) error
// Set the cgroup represented by cgroup.
Set(path string, cgroup *configs.Cgroup) error
}
var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist")
type subsystemSet []subsystem
func (s subsystemSet) Get(name string) (subsystem, error) {
for _, ss := range s {
if ss.Name() == name {
return ss, nil
}
}
return nil, errSubsystemDoesNotExist
}
var subsystems = subsystemSet{
&fs.CpusetGroup{},
&fs.DevicesGroup{},
&fs.MemoryGroup{},
&fs.CpuGroup{},
&fs.CpuacctGroup{},
&fs.PidsGroup{},
&fs.BlkioGroup{},
&fs.HugetlbGroup{},
&fs.PerfEventGroup{},
&fs.FreezerGroup{},
&fs.NetPrioGroup{},
&fs.NetClsGroup{},
&fs.NameGroup{GroupName: "name=systemd"},
}
const (
testScopeWait = 4
testSliceWait = 4
)
var (
connLock sync.Mutex
theConn *systemdDbus.Conn
hasStartTransientUnit bool
hasStartTransientSliceUnit bool
hasDelegateSlice bool
)
func newProp(name string, units interface{}) systemdDbus.Property {
return systemdDbus.Property{
Name: name,
Value: dbus.MakeVariant(units),
}
}
func UseSystemd() bool {
if !systemdUtil.IsRunningSystemd() {
return false
}
connLock.Lock()
defer connLock.Unlock()
if theConn == nil {
var err error
theConn, err = systemdDbus.New()
if err != nil {
return false
}
// Assume we have StartTransientUnit
hasStartTransientUnit = true
// But if we get UnknownMethod error we don't
if _, err := theConn.StartTransientUnit("test.scope", "invalid", nil, nil); err != nil {
if dbusError, ok := err.(dbus.Error); ok {
if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" {
hasStartTransientUnit = false
return hasStartTransientUnit
}
}
}
// Assume we have the ability to start a transient unit as a slice
// This was broken until systemd v229, but has been back-ported on RHEL environments >= 219
// For details, see: https://bugzilla.redhat.com/show_bug.cgi?id=1370299
hasStartTransientSliceUnit = true
// To ensure simple clean-up, we create a slice off the root with no hierarchy
slice := fmt.Sprintf("libcontainer_%d_systemd_test_default.slice", os.Getpid())
if _, err := theConn.StartTransientUnit(slice, "replace", nil, nil); err != nil {
if _, ok := err.(dbus.Error); ok {
hasStartTransientSliceUnit = false
}
}
for i := 0; i <= testSliceWait; i++ {
if _, err := theConn.StopUnit(slice, "replace", nil); err != nil {
if dbusError, ok := err.(dbus.Error); ok {
if strings.Contains(dbusError.Name, "org.freedesktop.systemd1.NoSuchUnit") {
hasStartTransientSliceUnit = false
break
}
}
} else {
break
}
time.Sleep(time.Millisecond)
}
// Not critical because of the stop unit logic above.
theConn.StopUnit(slice, "replace", nil)
// Assume StartTransientUnit on a slice allows Delegate
hasDelegateSlice = true
dlSlice := newProp("Delegate", true)
if _, err := theConn.StartTransientUnit(slice, "replace", []systemdDbus.Property{dlSlice}, nil); err != nil {
if dbusError, ok := err.(dbus.Error); ok {
// Starting with systemd v237, Delegate is not even a property of slices anymore,
// so the D-Bus call fails with "InvalidArgs" error.
if strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.PropertyReadOnly") || strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.InvalidArgs") {
hasDelegateSlice = false
}
}
}
// Not critical because of the stop unit logic above.
theConn.StopUnit(slice, "replace", nil)
}
return hasStartTransientUnit
}
func NewSystemdCgroupsManager() (func(config *configs.Cgroup, paths map[string]string) cgroups.Manager, error) {
if !systemdUtil.IsRunningSystemd() {
return nil, fmt.Errorf("systemd not running on this host, can't use systemd as a cgroups.Manager")
}
return func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {
return &Manager{
Cgroups: config,
Paths: paths,
}
}, nil
}
func (m *Manager) Apply(pid int) error {
var (
c = m.Cgroups
unitName = getUnitName(c)
slice = "system.slice"
properties []systemdDbus.Property
)
if c.Paths != nil {
paths := make(map[string]string)
for name, path := range c.Paths {
_, err := getSubsystemPath(m.Cgroups, name)
if err != nil {
// Don't fail if a cgroup hierarchy was not found, just skip this subsystem
if cgroups.IsNotFound(err) {
continue
}
return err
}
paths[name] = path
}
m.Paths = paths
return cgroups.EnterPid(m.Paths, pid)
}
if c.Parent != "" {
slice = c.Parent
}
properties = append(properties, systemdDbus.PropDescription("libcontainer container "+c.Name))
// if we create a slice, the parent is defined via a Wants=
if strings.HasSuffix(unitName, ".slice") {
// This was broken until systemd v229, but has been back-ported on RHEL environments >= 219
if !hasStartTransientSliceUnit {
return fmt.Errorf("systemd version does not support ability to start a slice as transient unit")
}
properties = append(properties, systemdDbus.PropWants(slice))
} else {
// otherwise, we use Slice=
properties = append(properties, systemdDbus.PropSlice(slice))
}
// only add the pid if it's valid; -1 is used with general slice creation.
if pid != -1 {
properties = append(properties, newProp("PIDs", []uint32{uint32(pid)}))
}
// Check if we can delegate. This is only supported on systemd versions 218 and above.
if strings.HasSuffix(unitName, ".slice") {
if hasDelegateSlice {
// systemd 237 and above no longer allows delegation on a slice
properties = append(properties, newProp("Delegate", true))
}
} else {
// Assume scopes always support delegation.
properties = append(properties, newProp("Delegate", true))
}
// Always enable accounting; this gets us the same behaviour as the fs implementation,
// plus the kernel has some problems with joining the memory cgroup at a later time.
properties = append(properties,
newProp("MemoryAccounting", true),
newProp("CPUAccounting", true),
newProp("BlockIOAccounting", true))
// Assume DefaultDependencies= will always work (the check for it was previously broken.)
properties = append(properties,
newProp("DefaultDependencies", false))
if c.Resources.Memory != 0 {
properties = append(properties,
newProp("MemoryLimit", uint64(c.Resources.Memory)))
}
if c.Resources.CpuShares != 0 {
properties = append(properties,
newProp("CPUShares", c.Resources.CpuShares))
}
// cpu.cfs_quota_us and cpu.cfs_period_us are controlled by systemd.
if c.Resources.CpuQuota != 0 && c.Resources.CpuPeriod != 0 {
// corresponds to USEC_INFINITY in systemd
// if USEC_INFINITY is provided, CPUQuota is left unbound by systemd
// always setting a property value ensures we can apply a quota and remove it later
cpuQuotaPerSecUSec := uint64(math.MaxUint64)
if c.Resources.CpuQuota > 0 {
// systemd converts CPUQuotaPerSecUSec (microseconds per CPU second) to CPUQuota
// (integer percentage of CPU) internally. This means that if a fractional percent of
// CPU is indicated by Resources.CpuQuota, we need to round up to the nearest
// 10ms (1% of a second) such that child cgroups can set the cpu.cfs_quota_us they expect.
cpuQuotaPerSecUSec = uint64(c.Resources.CpuQuota*1000000) / c.Resources.CpuPeriod
if cpuQuotaPerSecUSec%10000 != 0 {
cpuQuotaPerSecUSec = ((cpuQuotaPerSecUSec / 10000) + 1) * 10000
}
}
properties = append(properties,
newProp("CPUQuotaPerSecUSec", cpuQuotaPerSecUSec))
}
if c.Resources.BlkioWeight != 0 {
properties = append(properties,
newProp("BlockIOWeight", uint64(c.Resources.BlkioWeight)))
}
if c.Resources.PidsLimit > 0 {
properties = append(properties,
newProp("TasksAccounting", true),
newProp("TasksMax", uint64(c.Resources.PidsLimit)))
}
// We have to set kernel memory here, as we can't change it once
// processes have been attached to the cgroup.
if c.Resources.KernelMemory != 0 {
if err := setKernelMemory(c); err != nil {
return err
}
}
statusChan := make(chan string, 1)
if _, err := theConn.StartTransientUnit(unitName, "replace", properties, statusChan); err == nil {
select {
case <-statusChan:
case <-time.After(time.Second):
logrus.Warnf("Timed out while waiting for StartTransientUnit(%s) completion signal from dbus. Continuing...", unitName)
}
} else if !isUnitExists(err) {
return err
}
if err := joinCgroups(c, pid); err != nil {
return err
}
paths := make(map[string]string)
for _, s := range subsystems {
subsystemPath, err := getSubsystemPath(m.Cgroups, s.Name())
if err != nil {
// Don't fail if a cgroup hierarchy was not found, just skip this subsystem
if cgroups.IsNotFound(err) {
continue
}
return err
}
paths[s.Name()] = subsystemPath
}
m.Paths = paths
return nil
}
func (m *Manager) Destroy() error {
if m.Cgroups.Paths != nil {
return nil
}
m.mu.Lock()
defer m.mu.Unlock()
theConn.StopUnit(getUnitName(m.Cgroups), "replace", nil)
if err := cgroups.RemovePaths(m.Paths); err != nil {
return err
}
m.Paths = make(map[string]string)
return nil
}
func (m *Manager) GetPaths() map[string]string {
m.mu.Lock()
paths := m.Paths
m.mu.Unlock()
return paths
}
func join(c *configs.Cgroup, subsystem string, pid int) (string, error) {
path, err := getSubsystemPath(c, subsystem)
if err != nil {
return "", err
}
if err := os.MkdirAll(path, 0755); err != nil {
return "", err
}
if err := cgroups.WriteCgroupProc(path, pid); err != nil {
return "", err
}
return path, nil
}
func joinCgroups(c *configs.Cgroup, pid int) error {
for _, sys := range subsystems {
name := sys.Name()
switch name {
case "name=systemd":
// let systemd handle this
case "cpuset":
path, err := getSubsystemPath(c, name)
if err != nil && !cgroups.IsNotFound(err) {
return err
}
s := &fs.CpusetGroup{}
if err := s.ApplyDir(path, c, pid); err != nil {
return err
}
default:
_, err := join(c, name, pid)
if err != nil {
// Even if it's a `not found` error, we'll return err
// because the devices cgroup is a hard requirement for
// container security.
if name == "devices" {
return err
}
// For other subsystems, omit the `not found` error
// because they are optional.
if !cgroups.IsNotFound(err) {
return err
}
}
}
}
return nil
}
// systemd represents slice hierarchy using `-`, so we need to follow suit when
// generating the path of slice. Essentially, test-a-b.slice becomes
// /test.slice/test-a.slice/test-a-b.slice.
func ExpandSlice(slice string) (string, error) {
suffix := ".slice"
// Name has to end with ".slice", but can't be just ".slice".
if len(slice) < len(suffix) || !strings.HasSuffix(slice, suffix) {
return "", fmt.Errorf("invalid slice name: %s", slice)
}
// Path-separators are not allowed.
if strings.Contains(slice, "/") {
return "", fmt.Errorf("invalid slice name: %s", slice)
}
var path, prefix string
sliceName := strings.TrimSuffix(slice, suffix)
// if input was -.slice, we should just return root now
if sliceName == "-" {
return "/", nil
}
for _, component := range strings.Split(sliceName, "-") {
// test--a.slice isn't permitted, nor is -test.slice.
if component == "" {
return "", fmt.Errorf("invalid slice name: %s", slice)
}
// Append the component to the path and to the prefix.
path += "/" + prefix + component + suffix
prefix += component + "-"
}
return path, nil
}
func getSubsystemPath(c *configs.Cgroup, subsystem string) (string, error) {
mountpoint, err := cgroups.FindCgroupMountpoint(c.Path, subsystem)
if err != nil {
return "", err
}
initPath, err := cgroups.GetInitCgroup(subsystem)
if err != nil {
return "", err
}
// if pid 1 is systemd 226 or later, it will be in init.scope, not the root
initPath = strings.TrimSuffix(filepath.Clean(initPath), "init.scope")
slice := "system.slice"
if c.Parent != "" {
slice = c.Parent
}
slice, err = ExpandSlice(slice)
if err != nil {
return "", err
}
return filepath.Join(mountpoint, initPath, slice, getUnitName(c)), nil
}
func (m *Manager) Freeze(state configs.FreezerState) error {
path, err := getSubsystemPath(m.Cgroups, "freezer")
if err != nil {
return err
}
prevState := m.Cgroups.Resources.Freezer
m.Cgroups.Resources.Freezer = state
freezer, err := subsystems.Get("freezer")
if err != nil {
return err
}
err = freezer.Set(path, m.Cgroups)
if err != nil {
m.Cgroups.Resources.Freezer = prevState
return err
}
return nil
}
func (m *Manager) GetPids() ([]int, error) {
path, err := getSubsystemPath(m.Cgroups, "devices")
if err != nil {
return nil, err
}
return cgroups.GetPids(path)
}
func (m *Manager) GetAllPids() ([]int, error) {
path, err := getSubsystemPath(m.Cgroups, "devices")
if err != nil {
return nil, err
}
return cgroups.GetAllPids(path)
}
func (m *Manager) GetStats() (*cgroups.Stats, error) {
m.mu.Lock()
defer m.mu.Unlock()
stats := cgroups.NewStats()
for name, path := range m.Paths {
sys, err := subsystems.Get(name)
if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) {
continue
}
if err := sys.GetStats(path, stats); err != nil {
return nil, err
}
}
return stats, nil
}
func (m *Manager) Set(container *configs.Config) error {
// If Paths are set, then we are just joining cgroups paths
// and there is no need to set any values.
if m.Cgroups.Paths != nil {
return nil
}
for _, sys := range subsystems {
// Get the subsystem path, but don't error out for not found cgroups.
path, err := getSubsystemPath(container.Cgroups, sys.Name())
if err != nil && !cgroups.IsNotFound(err) {
return err
}
if err := sys.Set(path, container.Cgroups); err != nil {
return err
}
}
if m.Paths["cpu"] != "" {
if err := fs.CheckCpushares(m.Paths["cpu"], container.Cgroups.Resources.CpuShares); err != nil {
return err
}
}
return nil
}
func getUnitName(c *configs.Cgroup) string {
// by default, we create a scope unless the user explicitly asks for a slice.
if !strings.HasSuffix(c.Name, ".slice") {
return fmt.Sprintf("%s-%s.scope", c.ScopePrefix, c.Name)
}
return c.Name
}
func setKernelMemory(c *configs.Cgroup) error {
path, err := getSubsystemPath(c, "memory")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
// do not try to enable kernel memory accounting if we already have
// tasks in the cgroup.
content, err := ioutil.ReadFile(filepath.Join(path, "tasks"))
if err != nil {
return err
}
if len(content) > 0 {
return nil
}
return fs.EnableKernelMemoryAccounting(path)
}
// isUnitExists returns true if the error is that a systemd unit already exists.
func isUnitExists(err error) bool {
if err != nil {
if dbusError, ok := err.(dbus.Error); ok {
return strings.Contains(dbusError.Name, "org.freedesktop.systemd1.UnitExists")
}
}
return false
}
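
A minimal usage sketch of the exported ExpandSlice helper above (importable from the vendored systemd package on a Linux, non-static build); it only demonstrates the dash-to-path expansion rule documented in the comment.

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
)

func main() {
	// ExpandSlice turns systemd's dash-encoded slice names into nested paths.
	path, err := systemd.ExpandSlice("test-a-b.slice")
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // /test.slice/test-a.slice/test-a-b.slice
}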

View File

@ -1,517 +0,0 @@
// +build linux
package cgroups
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
units "github.com/docker/go-units"
"golang.org/x/sys/unix"
)
const (
CgroupNamePrefix = "name="
CgroupProcesses = "cgroup.procs"
)
// HugePageSizeUnitList is a list of the units used by the linux kernel when
// naming the HugePage control files.
// https://www.kernel.org/doc/Documentation/cgroup-v1/hugetlb.txt
// TODO Since the kernel only uses KB, MB and GB, TB and PB should be removed
// (depends on https://github.com/docker/go-units/commit/a09cd47f892041a4fac473133d181f5aea6fa393).
var HugePageSizeUnitList = []string{"B", "KB", "MB", "GB", "TB", "PB"}
// https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt
func FindCgroupMountpoint(cgroupPath, subsystem string) (string, error) {
mnt, _, err := FindCgroupMountpointAndRoot(cgroupPath, subsystem)
return mnt, err
}
func FindCgroupMountpointAndRoot(cgroupPath, subsystem string) (string, string, error) {
// We are not using mount.GetMounts() because it's super-inefficient;
// parsing mountinfo directly is about 10x faster because it avoids Sscanf.
// It was one of two major performance drawbacks in container start.
if !isSubsystemAvailable(subsystem) {
return "", "", NewNotFoundError(subsystem)
}
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return "", "", err
}
defer f.Close()
return findCgroupMountpointAndRootFromReader(f, cgroupPath, subsystem)
}
func findCgroupMountpointAndRootFromReader(reader io.Reader, cgroupPath, subsystem string) (string, string, error) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
txt := scanner.Text()
fields := strings.Fields(txt)
if len(fields) < 5 {
continue
}
if strings.HasPrefix(fields[4], cgroupPath) {
for _, opt := range strings.Split(fields[len(fields)-1], ",") {
if opt == subsystem {
return fields[4], fields[3], nil
}
}
}
}
if err := scanner.Err(); err != nil {
return "", "", err
}
return "", "", NewNotFoundError(subsystem)
}
func isSubsystemAvailable(subsystem string) bool {
cgroups, err := ParseCgroupFile("/proc/self/cgroup")
if err != nil {
return false
}
_, avail := cgroups[subsystem]
return avail
}
func GetClosestMountpointAncestor(dir, mountinfo string) string {
deepestMountPoint := ""
for _, mountInfoEntry := range strings.Split(mountinfo, "\n") {
mountInfoParts := strings.Fields(mountInfoEntry)
if len(mountInfoParts) < 5 {
continue
}
mountPoint := mountInfoParts[4]
if strings.HasPrefix(mountPoint, deepestMountPoint) && strings.HasPrefix(dir, mountPoint) {
deepestMountPoint = mountPoint
}
}
return deepestMountPoint
}
func FindCgroupMountpointDir() (string, error) {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return "", err
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
text := scanner.Text()
fields := strings.Split(text, " ")
// Safe as mountinfo encodes mountpoints with spaces as \040.
index := strings.Index(text, " - ")
postSeparatorFields := strings.Fields(text[index+3:])
numPostFields := len(postSeparatorFields)
// This is an error as we can't detect if the mount is for "cgroup"
if numPostFields == 0 {
return "", fmt.Errorf("Found no fields post '-' in %q", text)
}
if postSeparatorFields[0] == "cgroup" {
// Check that the mount is properly formatted.
if numPostFields < 3 {
return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
}
return filepath.Dir(fields[4]), nil
}
}
if err := scanner.Err(); err != nil {
return "", err
}
return "", NewNotFoundError("cgroup")
}
type Mount struct {
Mountpoint string
Root string
Subsystems []string
}
func (m Mount) GetOwnCgroup(cgroups map[string]string) (string, error) {
if len(m.Subsystems) == 0 {
return "", fmt.Errorf("no subsystem for mount")
}
return getControllerPath(m.Subsystems[0], cgroups)
}
func getCgroupMountsHelper(ss map[string]bool, mi io.Reader, all bool) ([]Mount, error) {
res := make([]Mount, 0, len(ss))
scanner := bufio.NewScanner(mi)
numFound := 0
for scanner.Scan() && numFound < len(ss) {
txt := scanner.Text()
sepIdx := strings.Index(txt, " - ")
if sepIdx == -1 {
return nil, fmt.Errorf("invalid mountinfo format")
}
if txt[sepIdx+3:sepIdx+10] == "cgroup2" || txt[sepIdx+3:sepIdx+9] != "cgroup" {
continue
}
fields := strings.Split(txt, " ")
m := Mount{
Mountpoint: fields[4],
Root: fields[3],
}
for _, opt := range strings.Split(fields[len(fields)-1], ",") {
seen, known := ss[opt]
if !known || (!all && seen) {
continue
}
ss[opt] = true
if strings.HasPrefix(opt, CgroupNamePrefix) {
opt = opt[len(CgroupNamePrefix):]
}
m.Subsystems = append(m.Subsystems, opt)
numFound++
}
if len(m.Subsystems) > 0 || all {
res = append(res, m)
}
}
if err := scanner.Err(); err != nil {
return nil, err
}
return res, nil
}
// GetCgroupMounts returns the mounts for the cgroup subsystems.
// all indicates whether to return just the first instance or all the mounts.
func GetCgroupMounts(all bool) ([]Mount, error) {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return nil, err
}
defer f.Close()
allSubsystems, err := ParseCgroupFile("/proc/self/cgroup")
if err != nil {
return nil, err
}
allMap := make(map[string]bool)
for s := range allSubsystems {
allMap[s] = false
}
return getCgroupMountsHelper(allMap, f, all)
}
// GetAllSubsystems returns all the cgroup subsystems supported by the kernel
func GetAllSubsystems() ([]string, error) {
f, err := os.Open("/proc/cgroups")
if err != nil {
return nil, err
}
defer f.Close()
subsystems := []string{}
s := bufio.NewScanner(f)
for s.Scan() {
text := s.Text()
if text[0] != '#' {
parts := strings.Fields(text)
if len(parts) >= 4 && parts[3] != "0" {
subsystems = append(subsystems, parts[0])
}
}
}
if err := s.Err(); err != nil {
return nil, err
}
return subsystems, nil
}
// GetOwnCgroup returns the relative path to the cgroup docker is running in.
func GetOwnCgroup(subsystem string) (string, error) {
cgroups, err := ParseCgroupFile("/proc/self/cgroup")
if err != nil {
return "", err
}
return getControllerPath(subsystem, cgroups)
}
func GetOwnCgroupPath(subsystem string) (string, error) {
cgroup, err := GetOwnCgroup(subsystem)
if err != nil {
return "", err
}
return getCgroupPathHelper(subsystem, cgroup)
}
func GetInitCgroup(subsystem string) (string, error) {
cgroups, err := ParseCgroupFile("/proc/1/cgroup")
if err != nil {
return "", err
}
return getControllerPath(subsystem, cgroups)
}
func GetInitCgroupPath(subsystem string) (string, error) {
cgroup, err := GetInitCgroup(subsystem)
if err != nil {
return "", err
}
return getCgroupPathHelper(subsystem, cgroup)
}
func getCgroupPathHelper(subsystem, cgroup string) (string, error) {
mnt, root, err := FindCgroupMountpointAndRoot("", subsystem)
if err != nil {
return "", err
}
// This is needed for nested containers, because in /proc/self/cgroup we
// see paths from the host, which don't exist in the container.
relCgroup, err := filepath.Rel(root, cgroup)
if err != nil {
return "", err
}
return filepath.Join(mnt, relCgroup), nil
}
func readProcsFile(dir string) ([]int, error) {
f, err := os.Open(filepath.Join(dir, CgroupProcesses))
if err != nil {
return nil, err
}
defer f.Close()
var (
s = bufio.NewScanner(f)
out = []int{}
)
for s.Scan() {
if t := s.Text(); t != "" {
pid, err := strconv.Atoi(t)
if err != nil {
return nil, err
}
out = append(out, pid)
}
}
return out, nil
}
// ParseCgroupFile parses the given cgroup file, typically from
// /proc/<pid>/cgroup, into a map of subsystems to cgroup paths.
func ParseCgroupFile(path string) (map[string]string, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return parseCgroupFromReader(f)
}
// helper function for ParseCgroupFile to make testing easier
func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
s := bufio.NewScanner(r)
cgroups := make(map[string]string)
for s.Scan() {
text := s.Text()
// from cgroups(7):
// /proc/[pid]/cgroup
// ...
// For each cgroup hierarchy ... there is one entry
// containing three colon-separated fields of the form:
// hierarchy-ID:subsystem-list:cgroup-path
parts := strings.SplitN(text, ":", 3)
if len(parts) < 3 {
return nil, fmt.Errorf("invalid cgroup entry: must contain at least two colons: %v", text)
}
for _, subs := range strings.Split(parts[1], ",") {
cgroups[subs] = parts[2]
}
}
if err := s.Err(); err != nil {
return nil, err
}
return cgroups, nil
}
func getControllerPath(subsystem string, cgroups map[string]string) (string, error) {
if p, ok := cgroups[subsystem]; ok {
return p, nil
}
if p, ok := cgroups[CgroupNamePrefix+subsystem]; ok {
return p, nil
}
return "", NewNotFoundError(subsystem)
}
func PathExists(path string) bool {
if _, err := os.Stat(path); err != nil {
return false
}
return true
}
func EnterPid(cgroupPaths map[string]string, pid int) error {
for _, path := range cgroupPaths {
if PathExists(path) {
if err := WriteCgroupProc(path, pid); err != nil {
return err
}
}
}
return nil
}
// RemovePaths iterates over the provided paths, removing them.
// We try to remove all paths five times with an increasing delay between tries.
// If some cgroups are still not removed after all retries, an appropriate error
// is returned.
func RemovePaths(paths map[string]string) (err error) {
delay := 10 * time.Millisecond
for i := 0; i < 5; i++ {
if i != 0 {
time.Sleep(delay)
delay *= 2
}
for s, p := range paths {
os.RemoveAll(p)
// TODO: there should probably be logging here
_, err := os.Stat(p)
// We need this strange way of checking cgroup existence because
// RemoveAll almost always returns an error, even on already removed
// cgroups.
if os.IsNotExist(err) {
delete(paths, s)
}
}
if len(paths) == 0 {
return nil
}
}
return fmt.Errorf("Failed to remove paths: %v", paths)
}
func GetHugePageSize() ([]string, error) {
files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages")
if err != nil {
return []string{}, err
}
var fileNames []string
for _, st := range files {
fileNames = append(fileNames, st.Name())
}
return getHugePageSizeFromFilenames(fileNames)
}
func getHugePageSizeFromFilenames(fileNames []string) ([]string, error) {
var pageSizes []string
for _, fileName := range fileNames {
nameArray := strings.Split(fileName, "-")
pageSize, err := units.RAMInBytes(nameArray[1])
if err != nil {
return []string{}, err
}
sizeString := units.CustomSize("%g%s", float64(pageSize), 1024.0, HugePageSizeUnitList)
pageSizes = append(pageSizes, sizeString)
}
return pageSizes, nil
}
// GetPids returns all pids that were added to the cgroup at path.
func GetPids(path string) ([]int, error) {
return readProcsFile(path)
}
// GetAllPids returns all pids that were added to the cgroup at path and to all
// of its subcgroups.
func GetAllPids(path string) ([]int, error) {
var pids []int
// collect pids from all sub-cgroups
err := filepath.Walk(path, func(p string, info os.FileInfo, iErr error) error {
dir, file := filepath.Split(p)
if file != CgroupProcesses {
return nil
}
if iErr != nil {
return iErr
}
cPids, err := readProcsFile(dir)
if err != nil {
return err
}
pids = append(pids, cPids...)
return nil
})
return pids, err
}
// WriteCgroupProc writes the specified pid into the cgroup's cgroup.procs file
func WriteCgroupProc(dir string, pid int) error {
// Normally dir should not be empty; one case where it is empty is when the
// cgroup subsystem is not mounted, and we want the call to fail here.
if dir == "" {
return fmt.Errorf("no such directory for %s", CgroupProcesses)
}
// Don't attach any pid to the cgroup if -1 is specified as the pid
if pid == -1 {
return nil
}
cgroupProcessesFile, err := os.OpenFile(filepath.Join(dir, CgroupProcesses), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0700)
if err != nil {
return fmt.Errorf("failed to write %v to %v: %v", pid, CgroupProcesses, err)
}
defer cgroupProcessesFile.Close()
for i := 0; i < 5; i++ {
_, err = cgroupProcessesFile.WriteString(strconv.Itoa(pid))
if err == nil {
return nil
}
// EINVAL might mean that the task being added to cgroup.procs is in state
// TASK_NEW. We should attempt the write again.
if isEINVAL(err) {
time.Sleep(30 * time.Millisecond)
continue
}
return fmt.Errorf("failed to write %v to %v: %v", pid, CgroupProcesses, err)
}
return err
}
func isEINVAL(err error) bool {
switch err := err.(type) {
case *os.PathError:
return err.Err == unix.EINVAL
default:
return false
}
}
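
A small sketch using two of the exported helpers above (ParseCgroupFile and GetAllSubsystems) from the vendored package removed by this commit; it assumes a Linux host with cgroup v1 controllers mounted.

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/cgroups"
)

func main() {
	// Map of subsystem -> cgroup path for the current process.
	self, err := cgroups.ParseCgroupFile("/proc/self/cgroup")
	if err != nil {
		panic(err)
	}
	for subsystem, path := range self {
		fmt.Printf("%-12s %s\n", subsystem, path)
	}

	// All cgroup controllers the running kernel supports.
	subsystems, err := cgroups.GetAllSubsystems()
	if err != nil {
		panic(err)
	}
	fmt.Println(subsystems)
}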

View File

@ -1,61 +0,0 @@
package configs
import "fmt"
// blockIODevice holds major:minor format supported in blkio cgroup
type blockIODevice struct {
// Major is the device's major number
Major int64 `json:"major"`
// Minor is the device's minor number
Minor int64 `json:"minor"`
}
// WeightDevice struct holds a `major:minor weight`|`major:minor leaf_weight` pair
type WeightDevice struct {
blockIODevice
// Weight is the bandwidth rate for the device, range is from 10 to 1000
Weight uint16 `json:"weight"`
// LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only
LeafWeight uint16 `json:"leafWeight"`
}
// NewWeightDevice returns a configured WeightDevice pointer
func NewWeightDevice(major, minor int64, weight, leafWeight uint16) *WeightDevice {
wd := &WeightDevice{}
wd.Major = major
wd.Minor = minor
wd.Weight = weight
wd.LeafWeight = leafWeight
return wd
}
// WeightString formats the struct to be writable to the cgroup specific file
func (wd *WeightDevice) WeightString() string {
return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight)
}
// LeafWeightString formats the struct to be writable to the cgroup specific file
func (wd *WeightDevice) LeafWeightString() string {
return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight)
}
// ThrottleDevice struct holds a `major:minor rate_per_second` pair
type ThrottleDevice struct {
blockIODevice
// Rate is the IO rate limit per cgroup per device
Rate uint64 `json:"rate"`
}
// NewThrottleDevice returns a configured ThrottleDevice pointer
func NewThrottleDevice(major, minor int64, rate uint64) *ThrottleDevice {
td := &ThrottleDevice{}
td.Major = major
td.Minor = minor
td.Rate = rate
return td
}
// String formats the struct to be writable to the cgroup specific file
func (td *ThrottleDevice) String() string {
return fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate)
}
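
A brief usage sketch of the device helpers above (NewWeightDevice, NewThrottleDevice and their string formatters); 8:0 is merely an illustrative major:minor pair, as are the weights and the rate.

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	// Weight and leaf weight for an example block device.
	wd := configs.NewWeightDevice(8, 0, 500, 300)
	fmt.Println(wd.WeightString())     // "8:0 500"
	fmt.Println(wd.LeafWeightString()) // "8:0 300"

	// A 10 MiB/s throttle on the same device.
	td := configs.NewThrottleDevice(8, 0, 10*1024*1024)
	fmt.Println(td.String()) // "8:0 10485760"
}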

View File

@ -1,122 +0,0 @@
package configs
type FreezerState string
const (
Undefined FreezerState = ""
Frozen FreezerState = "FROZEN"
Thawed FreezerState = "THAWED"
)
type Cgroup struct {
// Deprecated, use Path instead
Name string `json:"name,omitempty"`
// name of parent of cgroup or slice
// Deprecated, use Path instead
Parent string `json:"parent,omitempty"`
// Path specifies the path to cgroups that are created and/or joined by the container.
// The path is assumed to be relative to the host system cgroup mountpoint.
Path string `json:"path"`
// ScopePrefix describes prefix for the scope name
ScopePrefix string `json:"scope_prefix"`
// Paths represent the absolute cgroups paths to join.
// This takes precedence over Path.
Paths map[string]string
// Resources contains various cgroups settings to apply
*Resources
}
type Resources struct {
// If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list.
// Deprecated
AllowAllDevices *bool `json:"allow_all_devices,omitempty"`
// Deprecated
AllowedDevices []*Device `json:"allowed_devices,omitempty"`
// Deprecated
DeniedDevices []*Device `json:"denied_devices,omitempty"`
Devices []*Device `json:"devices"`
// Memory limit (in bytes)
Memory int64 `json:"memory"`
// Memory reservation or soft_limit (in bytes)
MemoryReservation int64 `json:"memory_reservation"`
// Total memory usage (memory + swap); set `-1` to enable unlimited swap
MemorySwap int64 `json:"memory_swap"`
// Kernel memory limit (in bytes)
KernelMemory int64 `json:"kernel_memory"`
// Kernel memory limit for TCP use (in bytes)
KernelMemoryTCP int64 `json:"kernel_memory_tcp"`
// CPU shares (relative weight vs. other containers)
CpuShares uint64 `json:"cpu_shares"`
// CPU hardcap limit (in usecs). Allowed cpu time in a given period.
CpuQuota int64 `json:"cpu_quota"`
// CPU period to be used for hardcapping (in usecs). 0 to use system default.
CpuPeriod uint64 `json:"cpu_period"`
// How much CPU time can be used for realtime scheduling (in usecs).
CpuRtRuntime int64 `json:"cpu_rt_quota"`
// CPU period to be used for realtime scheduling (in usecs).
CpuRtPeriod uint64 `json:"cpu_rt_period"`
// CPUs to use (cpuset)
CpusetCpus string `json:"cpuset_cpus"`
// Memory nodes to use (cpuset)
CpusetMems string `json:"cpuset_mems"`
// Process limit; set <= 0 to disable the limit.
PidsLimit int64 `json:"pids_limit"`
// Specifies per cgroup weight, range is from 10 to 1000.
BlkioWeight uint16 `json:"blkio_weight"`
// Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only
BlkioLeafWeight uint16 `json:"blkio_leaf_weight"`
// Weight per cgroup per device, can override BlkioWeight.
BlkioWeightDevice []*WeightDevice `json:"blkio_weight_device"`
// IO read rate limit per cgroup per device, bytes per second.
BlkioThrottleReadBpsDevice []*ThrottleDevice `json:"blkio_throttle_read_bps_device"`
// IO write rate limit per cgroup per device, bytes per second.
BlkioThrottleWriteBpsDevice []*ThrottleDevice `json:"blkio_throttle_write_bps_device"`
// IO read rate limit per cgroup per device, IO per second.
BlkioThrottleReadIOPSDevice []*ThrottleDevice `json:"blkio_throttle_read_iops_device"`
// IO write rate limit per cgroup per device, IO per second.
BlkioThrottleWriteIOPSDevice []*ThrottleDevice `json:"blkio_throttle_write_iops_device"`
// set the freeze value for the process
Freezer FreezerState `json:"freezer"`
// Hugetlb limit (in bytes)
HugetlbLimit []*HugepageLimit `json:"hugetlb_limit"`
// Whether to disable OOM Killer
OomKillDisable bool `json:"oom_kill_disable"`
// Tuning swappiness behaviour per cgroup
MemorySwappiness *uint64 `json:"memory_swappiness"`
// Set priority of network traffic for container
NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap"`
// Set class identifier for container's network packets
NetClsClassid uint32 `json:"net_cls_classid_u"`
}
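
A minimal sketch of constructing the Cgroup/Resources configuration defined above; the path and numeric limits are illustrative, and the import path is the vendored runc configs package removed by this commit.

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	swappiness := uint64(10)
	cg := &configs.Cgroup{
		Path: "/mycontainer", // hypothetical cgroup path
		Resources: &configs.Resources{
			Memory:           256 * 1024 * 1024, // 256 MiB
			MemorySwap:       512 * 1024 * 1024, // memory + swap
			CpuShares:        512,
			CpusetCpus:       "0-1",
			PidsLimit:        100,
			MemorySwappiness: &swappiness,
		},
	}
	fmt.Printf("%+v\n", *cg.Resources)
	fmt.Println("path:", cg.Path)
}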

View File

@ -1,6 +0,0 @@
package configs
// TODO Windows: This can ultimately be entirely factored out on Windows as
// cgroups are a Unix-specific construct.
type Cgroup struct {
}

View File

@ -1,353 +0,0 @@
package configs
import (
"bytes"
"encoding/json"
"fmt"
"os/exec"
"time"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
type Rlimit struct {
Type int `json:"type"`
Hard uint64 `json:"hard"`
Soft uint64 `json:"soft"`
}
// IDMap represents UID/GID Mappings for User Namespaces.
type IDMap struct {
ContainerID int `json:"container_id"`
HostID int `json:"host_id"`
Size int `json:"size"`
}
// Seccomp represents syscall restrictions
// By default, only the native architecture of the kernel is allowed to be used
// for syscalls. Additional architectures can be added by specifying them in
// Architectures.
type Seccomp struct {
DefaultAction Action `json:"default_action"`
Architectures []string `json:"architectures"`
Syscalls []*Syscall `json:"syscalls"`
}
// Action is taken upon rule match in Seccomp
type Action int
const (
Kill Action = iota + 1
Errno
Trap
Allow
Trace
)
// Operator is a comparison operator to be used when matching syscall arguments in Seccomp
type Operator int
const (
EqualTo Operator = iota + 1
NotEqualTo
GreaterThan
GreaterThanOrEqualTo
LessThan
LessThanOrEqualTo
MaskEqualTo
)
// Arg is a rule to match a specific syscall argument in Seccomp
type Arg struct {
Index uint `json:"index"`
Value uint64 `json:"value"`
ValueTwo uint64 `json:"value_two"`
Op Operator `json:"op"`
}
// Syscall is a rule to match a syscall in Seccomp
type Syscall struct {
Name string `json:"name"`
Action Action `json:"action"`
Args []*Arg `json:"args"`
}
// TODO Windows. Many of these fields should be factored out into those parts
// which are common across platforms, and those which are platform specific.
// Config defines configuration options for executing a process inside a contained environment.
type Config struct {
// NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs.
// This is a common option when the container is running in a ramdisk.
NoPivotRoot bool `json:"no_pivot_root"`
// ParentDeathSignal specifies the signal that is sent to the container's process in the case
// that the parent process dies.
ParentDeathSignal int `json:"parent_death_signal"`
// Path to a directory containing the container's root filesystem.
Rootfs string `json:"rootfs"`
// Readonlyfs will remount the container's rootfs as readonly, where only externally mounted
// bind mounts are writable.
Readonlyfs bool `json:"readonlyfs"`
// Specifies the mount propagation flags to be applied to /.
RootPropagation int `json:"rootPropagation"`
// Mounts specify additional source and destination paths that will be mounted inside the container's
// rootfs and mount namespace if specified
Mounts []*Mount `json:"mounts"`
// The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well!
Devices []*Device `json:"devices"`
MountLabel string `json:"mount_label"`
// Hostname optionally sets the container's hostname if provided
Hostname string `json:"hostname"`
// Namespaces specifies the container's namespaces that it should set up when cloning the init process.
// If a namespace is not provided, that namespace is shared from the container's parent process.
Namespaces Namespaces `json:"namespaces"`
// Capabilities specify the capabilities to keep when executing the process inside the container.
// All capabilities not specified will be dropped from the process's capability mask.
Capabilities *Capabilities `json:"capabilities"`
// Networks specifies the container's network setup to be created
Networks []*Network `json:"networks"`
// Routes can be specified to create entries in the route table as the container is started
Routes []*Route `json:"routes"`
// Cgroups specifies the cgroup settings for the various subsystems that the container is
// placed into to limit the resources the container has available
Cgroups *Cgroup `json:"cgroups"`
// AppArmorProfile specifies the profile to apply to the process running in the container and is
// changed at the time the process is execed.
AppArmorProfile string `json:"apparmor_profile,omitempty"`
// ProcessLabel specifies the label to apply to the process running in the container. It is
// commonly used by SELinux.
ProcessLabel string `json:"process_label,omitempty"`
// Rlimits specifies the resource limits, such as max open files, to set in the container
// If Rlimits are not set, the container will inherit rlimits from the parent process
Rlimits []Rlimit `json:"rlimits,omitempty"`
// OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores
// for a process. Valid values are in the range [-1000, 1000], where processes with
// higher scores are preferred for being killed. If it is unset then we don't touch the current
// value.
// More information about kernel oom score calculation here: https://lwn.net/Articles/317814/
OomScoreAdj *int `json:"oom_score_adj,omitempty"`
// UidMappings is an array of User ID mappings for User Namespaces
UidMappings []IDMap `json:"uid_mappings"`
// GidMappings is an array of Group ID mappings for User Namespaces
GidMappings []IDMap `json:"gid_mappings"`
// MaskPaths specifies paths within the container's rootfs to mask over with a bind
// mount pointing to /dev/null, so as to prevent reads of those files.
MaskPaths []string `json:"mask_paths"`
// ReadonlyPaths specifies paths within the container's rootfs to remount as read-only
// so that writes to these paths are prevented.
ReadonlyPaths []string `json:"readonly_paths"`
// Sysctl is a map of properties and their values. It is the equivalent of using
// sysctl -w my.property.name value in Linux.
Sysctl map[string]string `json:"sysctl"`
// Seccomp allows actions to be taken whenever a syscall is made within the container.
// A number of rules are given, each having an action to be taken if a syscall matches it.
// A default action to be taken if no rules match is also given.
Seccomp *Seccomp `json:"seccomp"`
// NoNewPrivileges controls whether processes in the container can gain additional privileges.
NoNewPrivileges bool `json:"no_new_privileges,omitempty"`
// Hooks are a collection of actions to perform at various container lifecycle events.
// CommandHooks are serialized to JSON, but other hooks are not.
Hooks *Hooks
// Version is the version of the Open Container specification that is supported.
Version string `json:"version"`
// Labels are user-defined metadata that is stored in the config and populated on the state.
Labels []string `json:"labels"`
// NoNewKeyring, if set, prevents a new session keyring from being allocated for the container.
// The caller's keyring is used in this case.
NoNewKeyring bool `json:"no_new_keyring"`
// IntelRdt specifies settings for Intel RDT group that the container is placed into
// to limit the resources (e.g., L3 cache, memory bandwidth) the container has available
IntelRdt *IntelRdt `json:"intel_rdt,omitempty"`
// RootlessEUID is set when runc was launched with a non-zero EUID.
// Note that RootlessEUID is set to false when launched with EUID=0 in userns.
// When RootlessEUID is set, runc creates a new userns for the container.
// (config.json needs to contain userns settings)
RootlessEUID bool `json:"rootless_euid,omitempty"`
// RootlessCgroups is set when the process is unlikely to have full access to cgroups.
// When RootlessCgroups is set, cgroups errors are ignored.
RootlessCgroups bool `json:"rootless_cgroups,omitempty"`
}
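// Illustrative sketch (not part of the original runc sources): a minimal
// Config. The rootfs path and hostname are placeholders, and a realistic
// configuration also sets Capabilities, Cgroups, Devices, Mounts and Seccomp.
// The NEW* namespace constants are defined in the Linux-specific file of this package.
func exampleMinimalConfig() *Config {
    return &Config{
        Rootfs:     "/var/lib/example/rootfs", // hypothetical path
        Readonlyfs: true,
        Hostname:   "example",
        Namespaces: Namespaces{
            {Type: NEWNS},
            {Type: NEWUTS},
            {Type: NEWIPC},
            {Type: NEWPID},
            {Type: NEWNET},
        },
    }
}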
type Hooks struct {
// Prestart commands are executed after the container namespaces are created,
// but before the user supplied command is executed from init.
Prestart []Hook
// Poststart commands are executed after the container init process starts.
Poststart []Hook
// Poststop commands are executed after the container init process exits.
Poststop []Hook
}
type Capabilities struct {
// Bounding is the bounding set, which limits the capabilities the process can ever gain.
Bounding []string
// Effective is the set of capabilities checked by the kernel when performing permission checks.
Effective []string
// Inheritable is the capabilities preserved across execve.
Inheritable []string
// Permitted is the limiting superset for effective capabilities.
Permitted []string
// Ambient is the ambient set of capabilities that are kept.
Ambient []string
}
func (hooks *Hooks) UnmarshalJSON(b []byte) error {
var state struct {
Prestart []CommandHook
Poststart []CommandHook
Poststop []CommandHook
}
if err := json.Unmarshal(b, &state); err != nil {
return err
}
deserialize := func(shooks []CommandHook) (hooks []Hook) {
for _, shook := range shooks {
hooks = append(hooks, shook)
}
return hooks
}
hooks.Prestart = deserialize(state.Prestart)
hooks.Poststart = deserialize(state.Poststart)
hooks.Poststop = deserialize(state.Poststop)
return nil
}
func (hooks Hooks) MarshalJSON() ([]byte, error) {
serialize := func(hooks []Hook) (serializableHooks []CommandHook) {
for _, hook := range hooks {
switch chook := hook.(type) {
case CommandHook:
serializableHooks = append(serializableHooks, chook)
default:
logrus.Warnf("cannot serialize hook of type %T, skipping", hook)
}
}
return serializableHooks
}
return json.Marshal(map[string]interface{}{
"prestart": serialize(hooks.Prestart),
"poststart": serialize(hooks.Poststart),
"poststop": serialize(hooks.Poststop),
})
}
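// Illustrative sketch (not part of the original runc sources): only
// CommandHooks survive a JSON round trip; FuncHooks are skipped with a
// warning by MarshalJSON above. The hook path is a placeholder.
func exampleHooksRoundTrip() (*Hooks, error) {
    in := Hooks{
        Prestart: []Hook{
            NewCommandHook(Command{Path: "/usr/local/bin/netns-setup"}), // hypothetical binary
            NewFunctionHook(func(*specs.State) error { return nil }),    // dropped on marshal
        },
    }
    data, err := json.Marshal(in)
    if err != nil {
        return nil, err
    }
    out := new(Hooks)
    if err := json.Unmarshal(data, out); err != nil {
        return nil, err
    }
    // out.Prestart now holds a single CommandHook.
    return out, nil
}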
type Hook interface {
// Run executes the hook with the provided state.
Run(*specs.State) error
}
// NewFunctionHook will call the provided function when the hook is run.
func NewFunctionHook(f func(*specs.State) error) FuncHook {
return FuncHook{
run: f,
}
}
type FuncHook struct {
run func(*specs.State) error
}
func (f FuncHook) Run(s *specs.State) error {
return f.run(s)
}
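// Illustrative sketch (not part of the original runc sources): a FuncHook
// that logs the container state in-process. This is only useful when
// libcontainer is embedded in a Go program, since FuncHooks cannot be
// serialized into a saved configuration.
func exampleLoggingHook() FuncHook {
    return NewFunctionHook(func(s *specs.State) error {
        logrus.Infof("container %s reached state %s (pid %d)", s.ID, s.Status, s.Pid)
        return nil
    })
}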
type Command struct {
Path string `json:"path"`
Args []string `json:"args"`
Env []string `json:"env"`
Dir string `json:"dir"`
Timeout *time.Duration `json:"timeout"`
}
// NewCommandHook will execute the provided command when the hook is run.
func NewCommandHook(cmd Command) CommandHook {
return CommandHook{
Command: cmd,
}
}
type CommandHook struct {
Command
}
func (c Command) Run(s *specs.State) error {
b, err := json.Marshal(s)
if err != nil {
return err
}
var stdout, stderr bytes.Buffer
cmd := exec.Cmd{
Path: c.Path,
Args: c.Args,
Env: c.Env,
Stdin: bytes.NewReader(b),
Stdout: &stdout,
Stderr: &stderr,
}
if err := cmd.Start(); err != nil {
return err
}
errC := make(chan error, 1)
go func() {
err := cmd.Wait()
if err != nil {
err = fmt.Errorf("error running hook: %v, stdout: %s, stderr: %s", err, stdout.String(), stderr.String())
}
errC <- err
}()
var timerCh <-chan time.Time
if c.Timeout != nil {
timer := time.NewTimer(*c.Timeout)
defer timer.Stop()
timerCh = timer.C
}
select {
case err := <-errC:
return err
case <-timerCh:
cmd.Process.Kill()
cmd.Wait()
return fmt.Errorf("hook ran past specified timeout of %.1fs", c.Timeout.Seconds())
}
}
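// Illustrative sketch (not part of the original runc sources): a CommandHook
// with a 5-second timeout, enforced by the timer logic in Run above. The
// binary path and arguments are placeholders.
func exampleCommandHook() CommandHook {
    timeout := 5 * time.Second
    return NewCommandHook(Command{
        Path:    "/usr/local/bin/setup-network", // hypothetical binary
        Args:    []string{"setup-network", "--bridge", "br0"},
        Env:     []string{"PATH=/usr/sbin:/usr/bin:/sbin:/bin"},
        Timeout: &timeout,
    })
}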

View File

@ -1,61 +0,0 @@
package configs
import "fmt"
// HostUID gets the translated uid for the process on host which could be
// different when user namespaces are enabled.
func (c Config) HostUID(containerId int) (int, error) {
if c.Namespaces.Contains(NEWUSER) {
if c.UidMappings == nil {
return -1, fmt.Errorf("User namespaces enabled, but no uid mappings found.")
}
id, found := c.hostIDFromMapping(containerId, c.UidMappings)
if !found {
return -1, fmt.Errorf("User namespaces enabled, but no user mapping found.")
}
return id, nil
}
// Return unchanged id.
return containerId, nil
}
// HostRootUID gets the root uid for the process on host which could be non-zero
// when user namespaces are enabled.
func (c Config) HostRootUID() (int, error) {
return c.HostUID(0)
}
// HostGID gets the translated gid for the process on host which could be
// different when user namespaces are enabled.
func (c Config) HostGID(containerId int) (int, error) {
if c.Namespaces.Contains(NEWUSER) {
if c.GidMappings == nil {
return -1, fmt.Errorf("User namespaces enabled, but no gid mappings found.")
}
id, found := c.hostIDFromMapping(containerId, c.GidMappings)
if !found {
return -1, fmt.Errorf("User namespaces enabled, but no group mapping found.")
}
return id, nil
}
// Return unchanged id.
return containerId, nil
}
// HostRootGID gets the root gid for the process on host which could be non-zero
// when user namespaces are enabled.
func (c Config) HostRootGID() (int, error) {
return c.HostGID(0)
}
// hostIDFromMapping is a utility function that gets a host ID for a container ID from the
// user namespace map if that ID is present in the map.
func (c Config) hostIDFromMapping(containerID int, uMap []IDMap) (int, bool) {
for _, m := range uMap {
if (containerID >= m.ContainerID) && (containerID <= (m.ContainerID + m.Size - 1)) {
hostID := m.HostID + (containerID - m.ContainerID)
return hostID, true
}
}
return -1, false
}
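// Illustrative sketch (not part of the original runc sources): with the
// mapping below, container uid 5 resolves to host uid 100005, following
// hostID = HostID + (containerID - ContainerID).
func exampleHostUID() (int, error) {
    c := Config{
        Namespaces:  Namespaces{{Type: NEWUSER}},
        UidMappings: []IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
    }
    return c.HostUID(5) // 100005, nil
}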

View File

@ -1,57 +0,0 @@
package configs
import (
"fmt"
"os"
)
const (
Wildcard = -1
)
// TODO Windows: This can be factored out in the future
type Device struct {
// Device type, block, char, etc.
Type rune `json:"type"`
// Path to the device.
Path string `json:"path"`
// Major is the device's major number.
Major int64 `json:"major"`
// Minor is the device's minor number.
Minor int64 `json:"minor"`
// Cgroup permissions format, rwm.
Permissions string `json:"permissions"`
// FileMode permission bits for the device.
FileMode os.FileMode `json:"file_mode"`
// Uid of the device.
Uid uint32 `json:"uid"`
// Gid of the device.
Gid uint32 `json:"gid"`
// Allow, if true, writes the device rule to the cgroup's allow list; otherwise to the deny list.
Allow bool `json:"allow"`
}
func (d *Device) CgroupString() string {
return fmt.Sprintf("%c %s:%s %s", d.Type, deviceNumberString(d.Major), deviceNumberString(d.Minor), d.Permissions)
}
func (d *Device) Mkdev() int {
return int((d.Major << 8) | (d.Minor & 0xff) | ((d.Minor & 0xfff00) << 12))
}
// deviceNumberString converts the device number to its string form, returning "*" for the Wildcard value.
func deviceNumberString(number int64) string {
if number == Wildcard {
return "*"
}
return fmt.Sprint(number)
}
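// Illustrative sketch (not part of the original runc sources): the cgroup
// device rule for /dev/null, as produced by CgroupString above, is "c 1:3 rwm".
func exampleNullDeviceRule() string {
    d := &Device{
        Type:        'c',
        Path:        "/dev/null",
        Major:       1,
        Minor:       3,
        Permissions: "rwm",
        FileMode:    0666,
    }
    return d.CgroupString() // "c 1:3 rwm"
}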

View File

@ -1,111 +0,0 @@
// +build linux
package configs
var (
// DefaultSimpleDevices are devices that are to be both allowed and created.
DefaultSimpleDevices = []*Device{
// /dev/null and zero
{
Path: "/dev/null",
Type: 'c',
Major: 1,
Minor: 3,
Permissions: "rwm",
FileMode: 0666,
},
{
Path: "/dev/zero",
Type: 'c',
Major: 1,
Minor: 5,
Permissions: "rwm",
FileMode: 0666,
},
{
Path: "/dev/full",
Type: 'c',
Major: 1,
Minor: 7,
Permissions: "rwm",
FileMode: 0666,
},
// consoles and ttys
{
Path: "/dev/tty",
Type: 'c',
Major: 5,
Minor: 0,
Permissions: "rwm",
FileMode: 0666,
},
// /dev/urandom,/dev/random
{
Path: "/dev/urandom",
Type: 'c',
Major: 1,
Minor: 9,
Permissions: "rwm",
FileMode: 0666,
},
{
Path: "/dev/random",
Type: 'c',
Major: 1,
Minor: 8,
Permissions: "rwm",
FileMode: 0666,
},
}
DefaultAllowedDevices = append([]*Device{
// allow mknod for any device
{
Type: 'c',
Major: Wildcard,
Minor: Wildcard,
Permissions: "m",
},
{
Type: 'b',
Major: Wildcard,
Minor: Wildcard,
Permissions: "m",
},
{
Path: "/dev/console",
Type: 'c',
Major: 5,
Minor: 1,
Permissions: "rwm",
},
// /dev/pts/ - pts namespaces are "coming soon"
{
Path: "",
Type: 'c',
Major: 136,
Minor: Wildcard,
Permissions: "rwm",
},
{
Path: "",
Type: 'c',
Major: 5,
Minor: 2,
Permissions: "rwm",
},
// tuntap
{
Path: "",
Type: 'c',
Major: 10,
Minor: 200,
Permissions: "rwm",
},
}, DefaultSimpleDevices...)
DefaultAutoCreatedDevices = append([]*Device{}, DefaultSimpleDevices...)
)
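// Illustrative sketch (not part of the original runc sources): allowing one
// extra device on top of the defaults, here /dev/fuse (char 10:229). The
// slice is copied first so the package-level default is not mutated.
func exampleWithFuse() []*Device {
    devices := append([]*Device{}, DefaultAllowedDevices...)
    return append(devices, &Device{
        Path:        "/dev/fuse",
        Type:        'c',
        Major:       10,
        Minor:       229,
        Permissions: "rwm",
        FileMode:    0666,
    })
}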

View File

@ -1,9 +0,0 @@
package configs
type HugepageLimit struct {
// Pagesize is the hugepage size to limit, e.g. "2MB".
Pagesize string `json:"page_size"`
// Limit is the usage limit, in bytes, for hugepages of this size.
Limit uint64 `json:"limit"`
}

View File

@ -1,13 +0,0 @@
package configs
type IntelRdt struct {
// The schema for L3 cache id and capacity bitmask (CBM)
// Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
L3CacheSchema string `json:"l3_cache_schema,omitempty"`
// The schema of memory bandwidth per L3 cache id
// Format: "MB:<cache_id0>=bandwidth0;<cache_id1>=bandwidth1;..."
// The unit of memory bandwidth is specified in "percentages" by
// default, and in "MBps" if MBA Software Controller is enabled.
MemBwSchema string `json:"memBwSchema,omitempty"`
}
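// Illustrative sketch (not part of the original runc sources): an IntelRdt
// configuration using the schema formats documented above. The cache ids,
// bitmasks and bandwidth values are placeholders and depend on the hardware.
func exampleIntelRdt() *IntelRdt {
    return &IntelRdt{
        L3CacheSchema: "L3:0=3ff;1=3ff",
        MemBwSchema:   "MB:0=60;1=60",
    }
}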

View File

@ -1,14 +0,0 @@
package configs
import (
"fmt"
)
type IfPrioMap struct {
Interface string `json:"interface"`
Priority int64 `json:"priority"`
}
func (i *IfPrioMap) CgroupString() string {
return fmt.Sprintf("%s %d", i.Interface, i.Priority)
}

View File

@ -1,39 +0,0 @@
package configs
const (
// EXT_COPYUP is a directive to copy up the contents of a directory when
// a tmpfs is mounted over it.
EXT_COPYUP = 1 << iota
)
type Mount struct {
// Source path for the mount.
Source string `json:"source"`
// Destination path for the mount inside the container.
Destination string `json:"destination"`
// Device the mount is for.
Device string `json:"device"`
// Mount flags.
Flags int `json:"flags"`
// Propagation Flags
PropagationFlags []int `json:"propagation_flags"`
// Mount data applied to the mount.
Data string `json:"data"`
// Relabel the source if set; "z" indicates shared, "Z" indicates unshared.
Relabel string `json:"relabel"`
// Extensions are additional flags that are specific to runc.
Extensions int `json:"extensions"`
// PremountCmds are optional commands to be run before Source is mounted.
PremountCmds []Command `json:"premount_cmds"`
// PostmountCmds are optional commands to be run after Source is mounted.
PostmountCmds []Command `json:"postmount_cmds"`
}
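// Illustrative sketch (not part of the original runc sources): a tmpfs mount
// at /run inside the container. Flags carries raw MS_* values; 0x2|0x4 is
// MS_NOSUID|MS_NODEV (normally written as unix.MS_NOSUID|unix.MS_NODEV).
func exampleTmpfsMount() *Mount {
    return &Mount{
        Source:      "tmpfs",
        Destination: "/run",
        Device:      "tmpfs",
        Flags:       0x2 | 0x4,
        Data:        "mode=755,size=65536k",
    }
}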

View File

@ -1,5 +0,0 @@
package configs
type NamespaceType string
type Namespaces []Namespace

View File

@ -1,126 +0,0 @@
package configs
import (
"fmt"
"os"
"sync"
)
const (
NEWNET NamespaceType = "NEWNET"
NEWPID NamespaceType = "NEWPID"
NEWNS NamespaceType = "NEWNS"
NEWUTS NamespaceType = "NEWUTS"
NEWIPC NamespaceType = "NEWIPC"
NEWUSER NamespaceType = "NEWUSER"
NEWCGROUP NamespaceType = "NEWCGROUP"
)
var (
nsLock sync.Mutex
supportedNamespaces = make(map[NamespaceType]bool)
)
// NsName converts the namespace type to its filename
func NsName(ns NamespaceType) string {
switch ns {
case NEWNET:
return "net"
case NEWNS:
return "mnt"
case NEWPID:
return "pid"
case NEWIPC:
return "ipc"
case NEWUSER:
return "user"
case NEWUTS:
return "uts"
case NEWCGROUP:
return "cgroup"
}
return ""
}
// IsNamespaceSupported returns whether a namespace is available or
// not
func IsNamespaceSupported(ns NamespaceType) bool {
nsLock.Lock()
defer nsLock.Unlock()
supported, ok := supportedNamespaces[ns]
if ok {
return supported
}
nsFile := NsName(ns)
// if the namespace type is unknown, just return false
if nsFile == "" {
return false
}
_, err := os.Stat(fmt.Sprintf("/proc/self/ns/%s", nsFile))
// a namespace is supported if it exists and we have permissions to read it
supported = err == nil
supportedNamespaces[ns] = supported
return supported
}
func NamespaceTypes() []NamespaceType {
return []NamespaceType{
NEWUSER, // Keep user NS always first, don't move it.
NEWIPC,
NEWUTS,
NEWNET,
NEWPID,
NEWNS,
NEWCGROUP,
}
}
// Namespace defines configuration for each namespace. It specifies an
// alternate path that can be joined via setns.
type Namespace struct {
Type NamespaceType `json:"type"`
Path string `json:"path"`
}
func (n *Namespace) GetPath(pid int) string {
return fmt.Sprintf("/proc/%d/ns/%s", pid, NsName(n.Type))
}
func (n *Namespaces) Remove(t NamespaceType) bool {
i := n.index(t)
if i == -1 {
return false
}
*n = append((*n)[:i], (*n)[i+1:]...)
return true
}
func (n *Namespaces) Add(t NamespaceType, path string) {
i := n.index(t)
if i == -1 {
*n = append(*n, Namespace{Type: t, Path: path})
return
}
(*n)[i].Path = path
}
func (n *Namespaces) index(t NamespaceType) int {
for i, ns := range *n {
if ns.Type == t {
return i
}
}
return -1
}
func (n *Namespaces) Contains(t NamespaceType) bool {
return n.index(t) != -1
}
func (n *Namespaces) PathOf(t NamespaceType) string {
i := n.index(t)
if i == -1 {
return ""
}
return (*n)[i].Path
}
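// Illustrative sketch (not part of the original runc sources): an empty Path
// means "create a new namespace", while a non-empty Path is joined via setns.
// The netns path below is a placeholder.
func exampleNamespaces() Namespaces {
    var ns Namespaces
    ns.Add(NEWNS, "")
    ns.Add(NEWNET, "/var/run/netns/example") // hypothetical bind-mounted netns
    _ = ns.Contains(NEWNET)                  // true
    _ = ns.PathOf(NEWNET)                    // "/var/run/netns/example"
    return ns
}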

View File

@ -1,32 +0,0 @@
// +build linux
package configs
import "golang.org/x/sys/unix"
func (n *Namespace) Syscall() int {
return namespaceInfo[n.Type]
}
var namespaceInfo = map[NamespaceType]int{
NEWNET: unix.CLONE_NEWNET,
NEWNS: unix.CLONE_NEWNS,
NEWUSER: unix.CLONE_NEWUSER,
NEWIPC: unix.CLONE_NEWIPC,
NEWUTS: unix.CLONE_NEWUTS,
NEWPID: unix.CLONE_NEWPID,
NEWCGROUP: unix.CLONE_NEWCGROUP,
}
// CloneFlags parses the container's Namespaces options to set the correct
// flags on clone, unshare. This function returns flags only for new namespaces.
func (n *Namespaces) CloneFlags() uintptr {
var flag int
for _, v := range *n {
if v.Path != "" {
continue
}
flag |= namespaceInfo[v.Type]
}
return uintptr(flag)
}
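// Illustrative sketch (not part of the original runc sources): only
// namespaces without a Path contribute to the clone flags, so the result
// below is CLONE_NEWNS|CLONE_NEWPID; the network namespace is joined by path.
func exampleCloneFlags() uintptr {
    ns := Namespaces{
        {Type: NEWNS},
        {Type: NEWPID},
        {Type: NEWNET, Path: "/proc/12345/ns/net"}, // hypothetical pid
    }
    return ns.CloneFlags()
}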

View File

@ -1,13 +0,0 @@
// +build !linux,!windows
package configs
func (n *Namespace) Syscall() int {
panic("No namespace syscall support")
}
// CloneFlags parses the container's Namespaces options to set the correct
// flags on clone, unshare. This function returns flags only for new namespaces.
func (n *Namespaces) CloneFlags() uintptr {
panic("No namespace syscall support")
}

View File

@ -1,8 +0,0 @@
// +build !linux
package configs
// Namespace defines configuration for each namespace. It specifies an
// alternate path that can be joined via setns.
type Namespace struct {
}

View File

@ -1,72 +0,0 @@
package configs
// Network defines configuration for a container's networking stack
//
// The network configuration can be omitted from a container, causing the
// container to be set up with the host's networking stack
type Network struct {
// Type sets the network's type, commonly veth and loopback
Type string `json:"type"`
// Name of the network interface
Name string `json:"name"`
// The bridge to use.
Bridge string `json:"bridge"`
// MacAddress contains the MAC address to set on the network interface
MacAddress string `json:"mac_address"`
// Address contains the IPv4 and mask to set on the network interface
Address string `json:"address"`
// Gateway sets the gateway address that is used as the default for the interface
Gateway string `json:"gateway"`
// IPv6Address contains the IPv6 and mask to set on the network interface
IPv6Address string `json:"ipv6_address"`
// IPv6Gateway sets the ipv6 gateway address that is used as the default for the interface
IPv6Gateway string `json:"ipv6_gateway"`
// Mtu sets the mtu value for the interface and will be mirrored on both the host and
// container's interfaces if a pair is created, specifically in the case of type veth
// Note: This does not apply to loopback interfaces.
Mtu int `json:"mtu"`
// TxQueueLen sets the tx_queuelen value for the interface and will be mirrored on both the host and
// container's interfaces if a pair is created, specifically in the case of type veth
// Note: This does not apply to loopback interfaces.
TxQueueLen int `json:"txqueuelen"`
// HostInterfaceName is the unique name of the host side of the veth pair created for the
// container.
HostInterfaceName string `json:"host_interface_name"`
// HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
// bridge port in the case of type veth
// Note: This is unsupported on some systems.
// Note: This does not apply to loopback interfaces.
HairpinMode bool `json:"hairpin_mode"`
}
// Routes can be specified to create entries in the route table as the container is started
//
// All of destination, source, and gateway should be either IPv4 or IPv6.
// One of the three options must be present, and omitted entries will use their
// IP family default for the route table. For IPv4, for example, setting the
// gateway to 1.2.3.4 and the interface to eth0 will set up a standard
// destination of 0.0.0.0 (or *) when viewed in the route table.
type Route struct {
// Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6
Destination string `json:"destination"`
// Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6
Source string `json:"source"`
// Sets the gateway. Accepts IPv4 and IPv6
Gateway string `json:"gateway"`
// The device to set this route up for, for example: eth0
InterfaceName string `json:"interface_name"`
}
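// Illustrative sketch (not part of the original runc sources): a veth-backed
// interface with a default route through the bridge gateway. All addresses
// and interface names are placeholders.
var exampleVethNetwork = &Network{
    Type:              "veth",
    Name:              "eth0",
    Bridge:            "br0",
    Address:           "172.17.0.2/16",
    Gateway:           "172.17.0.1",
    Mtu:               1500,
    HostInterfaceName: "veth-example",
}

var exampleDefaultRoute = &Route{
    Destination:   "0.0.0.0/0",
    Gateway:       "172.17.0.1",
    InterfaceName: "eth0",
}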

View File

@ -1,89 +0,0 @@
package validate
import (
"fmt"
"strings"
"github.com/opencontainers/runc/libcontainer/configs"
)
// rootlessEUID makes sure that the config can be applied when runc
// is being executed as a non-root user (euid != 0) in the current user namespace.
func (v *ConfigValidator) rootlessEUID(config *configs.Config) error {
if err := rootlessEUIDMappings(config); err != nil {
return err
}
if err := rootlessEUIDMount(config); err != nil {
return err
}
// XXX: We currently can't verify the user config at all, because
// configs.Config doesn't store the user-related configs. So this
// has to be verified by setupUser() in init_linux.go.
return nil
}
func hasIDMapping(id int, mappings []configs.IDMap) bool {
for _, m := range mappings {
if id >= m.ContainerID && id < m.ContainerID+m.Size {
return true
}
}
return false
}
func rootlessEUIDMappings(config *configs.Config) error {
if !config.Namespaces.Contains(configs.NEWUSER) {
return fmt.Errorf("rootless container requires user namespaces")
}
if len(config.UidMappings) == 0 {
return fmt.Errorf("rootless containers requires at least one UID mapping")
}
if len(config.GidMappings) == 0 {
return fmt.Errorf("rootless containers requires at least one GID mapping")
}
return nil
}
// rootlessEUIDMount verifies that the user isn't trying to set up any mounts they don't have
// the rights to perform. In addition, it makes sure that no mount has a `uid=` or
// `gid=` option that doesn't resolve to root.
func rootlessEUIDMount(config *configs.Config) error {
// XXX: We could whitelist allowed devices at this point, but I'm not
// convinced that's a good idea. The kernel is the best arbiter of
// access control.
for _, mount := range config.Mounts {
// Check that the options list doesn't contain any uid= or gid= entries
// that don't resolve to root.
for _, opt := range strings.Split(mount.Data, ",") {
if strings.HasPrefix(opt, "uid=") {
var uid int
n, err := fmt.Sscanf(opt, "uid=%d", &uid)
if n != 1 || err != nil {
// Ignore unknown mount options.
continue
}
if !hasIDMapping(uid, config.UidMappings) {
return fmt.Errorf("cannot specify uid= mount options for unmapped uid in rootless containers")
}
}
if strings.HasPrefix(opt, "gid=") {
var gid int
n, err := fmt.Sscanf(opt, "gid=%d", &gid)
if n != 1 || err != nil {
// Ignore unknown mount options.
continue
}
if !hasIDMapping(gid, config.GidMappings) {
return fmt.Errorf("cannot specify gid= mount options for unmapped gid in rootless containers")
}
}
}
}
return nil
}
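// Illustrative sketch (not part of the original runc sources): with a single
// uid/gid mapping of container id 0 to host id 1000, a mount carrying
// "uid=0,gid=0" passes the check above, while "uid=5" would be rejected
// because container uid 5 is not mapped.
func exampleRootlessMountCheck() error {
    cfg := &configs.Config{
        Mounts: []*configs.Mount{
            {Device: "tmpfs", Destination: "/tmp", Data: "uid=0,gid=0"},
        },
        UidMappings: []configs.IDMap{{ContainerID: 0, HostID: 1000, Size: 1}},
        GidMappings: []configs.IDMap{{ContainerID: 0, HostID: 1000, Size: 1}},
    }
    return rootlessEUIDMount(cfg) // nil
}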

Some files were not shown because too many files have changed in this diff.