Add TLS options per Nomad backend (#8083)

parent 26ee62a027
commit 13ebf5460c
@@ -62,6 +62,15 @@ func (b *backend) client(ctx context.Context, s logical.Storage) (*api.Client, error) {
 		if conf.Token != "" {
 			nomadConf.SecretID = conf.Token
 		}
+		if conf.CACert != "" {
+			nomadConf.TLSConfig.CACertPEM = []byte(conf.CACert)
+		}
+		if conf.ClientCert != "" {
+			nomadConf.TLSConfig.ClientCertPEM = []byte(conf.ClientCert)
+		}
+		if conf.ClientKey != "" {
+			nomadConf.TLSConfig.ClientKeyPEM = []byte(conf.ClientKey)
+		}
 	}
 
 	client, err := api.NewClient(nomadConf)
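For context, the PEM fields set above map directly onto the Nomad API client's TLS configuration. Below is a minimal, self-contained sketch (not part of this diff) of building such a client by hand; the address, token, and file paths are made-up placeholders:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// Placeholder paths; in the secrets backend these PEM blobs come from
	// the stored access config (conf.CACert, conf.ClientCert, conf.ClientKey)
	// rather than from disk.
	caPEM, err := ioutil.ReadFile("nomad-ca.pem")
	if err != nil {
		log.Fatal(err)
	}
	certPEM, err := ioutil.ReadFile("nomad-client.pem")
	if err != nil {
		log.Fatal(err)
	}
	keyPEM, err := ioutil.ReadFile("nomad-client-key.pem")
	if err != nil {
		log.Fatal(err)
	}

	conf := api.DefaultConfig()
	conf.Address = "https://nomad.example.com:4646" // placeholder
	conf.SecretID = "<nomad management token>"      // placeholder
	conf.TLSConfig = &api.TLSConfig{
		CACertPEM:     caPEM,
		ClientCertPEM: certPEM,
		ClientKeyPEM:  keyPEM,
	}

	client, err := api.NewClient(conf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Nomad client configured with mutual TLS:", client != nil)
}
```

Using the in-memory PEM fields (CACertPEM, ClientCertPEM, ClientKeyPEM) rather than file-path fields is what lets the backend keep the certificates in Vault's stored config instead of on the host filesystem.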
@@ -28,6 +28,21 @@ func pathConfigAccess(b *backend) *framework.Path {
 				Type: framework.TypeInt,
 				Description: "Max length for name of generated Nomad tokens",
 			},
+			"ca_cert": &framework.FieldSchema{
+				Type: framework.TypeString,
+				Description: `CA certificate to use when verifying Nomad server certificate,
+must be x509 PEM encoded.`,
+			},
+			"client_cert": &framework.FieldSchema{
+				Type: framework.TypeString,
+				Description: `Client certificate used for Nomad's TLS communication,
+must be x509 PEM encoded and if this is set you need to also set client_key.`,
+			},
+			"client_key": &framework.FieldSchema{
+				Type: framework.TypeString,
+				Description: `Client key used for Nomad's TLS communication,
+must be x509 PEM encoded and if this is set you need to also set client_cert.`,
+			},
 		},
 
 		Callbacks: map[logical.Operation]framework.OperationFunc{
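Given the new ca_cert, client_cert, and client_key parameters above, configuring the engine from an application could look roughly like the sketch below, which uses the official Vault Go client. It assumes the Nomad secrets engine is mounted at nomad/ and that VAULT_ADDR/VAULT_TOKEN are set; the certificate paths and Nomad address are placeholders:

```go
package main

import (
	"io/ioutil"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig picks up VAULT_ADDR; NewClient picks up VAULT_TOKEN.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Small helper for reading the placeholder PEM files.
	read := func(path string) string {
		b, err := ioutil.ReadFile(path)
		if err != nil {
			log.Fatal(err)
		}
		return string(b)
	}

	// "nomad" is the assumed mount path of the secrets engine.
	_, err = client.Logical().Write("nomad/config/access", map[string]interface{}{
		"address":     "https://nomad.example.com:4646", // placeholder
		"token":       "<nomad management token>",       // placeholder
		"ca_cert":     read("nomad-ca.pem"),
		"client_cert": read("nomad-client.pem"),
		"client_key":  read("nomad-client-key.pem"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("wrote TLS settings to nomad/config/access")
}
```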
@@ -101,6 +116,18 @@ func (b *backend) pathConfigAccessWrite(ctx context.Context, req *logical.Request
 	if ok {
 		conf.Token = token.(string)
 	}
+	caCert, ok := data.GetOk("ca_cert")
+	if ok {
+		conf.CACert = caCert.(string)
+	}
+	clientCert, ok := data.GetOk("client_cert")
+	if ok {
+		conf.ClientCert = clientCert.(string)
+	}
+	clientKey, ok := data.GetOk("client_key")
+	if ok {
+		conf.ClientKey = clientKey.(string)
+	}
 
 	conf.MaxTokenNameLength = data.Get("max_token_name_length").(int)
 
@@ -126,4 +153,7 @@ type accessConfig struct {
 	Address string `json:"address"`
 	Token string `json:"token"`
 	MaxTokenNameLength int `json:"max_token_name_length"`
+	CACert string `json:"ca_cert"`
+	ClientCert string `json:"client_cert"`
+	ClientKey string `json:"client_key"`
 }
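For reference, a small self-contained sketch of roughly the JSON shape this struct serializes to when the config is persisted, using a local copy of the type; all values are placeholders:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local copy of accessConfig, reproduced here only to keep the example
// self-contained; the real type lives in the Nomad secrets backend.
type accessConfig struct {
	Address            string `json:"address"`
	Token              string `json:"token"`
	MaxTokenNameLength int    `json:"max_token_name_length"`
	CACert             string `json:"ca_cert"`
	ClientCert         string `json:"client_cert"`
	ClientKey          string `json:"client_key"`
}

func main() {
	c := accessConfig{
		Address:            "https://nomad.example.com:4646",      // placeholder
		Token:              "<nomad management token>",            // placeholder
		MaxTokenNameLength: 256,                                   // placeholder
		CACert:             "-----BEGIN CERTIFICATE-----\n...",    // placeholder PEM
		ClientCert:         "-----BEGIN CERTIFICATE-----\n...",    // placeholder PEM
		ClientKey:          "-----BEGIN RSA PRIVATE KEY-----\n...", // placeholder PEM
	}
	out, _ := json.MarshalIndent(c, "", "  ")
	fmt.Println(string(out)) // approximate shape kept in Vault's backing storage
}
```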
go.mod (4 changes)

@@ -61,14 +61,14 @@ require (
 	github.com/hashicorp/go-msgpack v0.5.5
 	github.com/hashicorp/go-multierror v1.0.0
 	github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a
-	github.com/hashicorp/go-rootcerts v1.0.1
+	github.com/hashicorp/go-rootcerts v1.0.2
 	github.com/hashicorp/go-sockaddr v1.0.2
 	github.com/hashicorp/go-syslog v1.0.0
 	github.com/hashicorp/go-uuid v1.0.2
 	github.com/hashicorp/gokrb5 v7.3.1-0.20191209171754-1a6fa9886ec3+incompatible
 	github.com/hashicorp/golang-lru v0.5.3
 	github.com/hashicorp/hcl v1.0.0
-	github.com/hashicorp/nomad/api v0.0.0-20190412184103-1c38ced33adf
+	github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d
 	github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17
 	github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab
 	github.com/hashicorp/vault-plugin-auth-alicloud v0.5.2-0.20190814210027-93970f08f2ec
go.sum (6 changes)

@@ -265,6 +265,8 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c h1:Lh2aW+HnU2Nbe1gqD9SOJLJxW1jBMmQOktN2acDyJk8=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
+github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
 github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
@@ -326,6 +328,8 @@ github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6
 github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
 github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8=
 github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
 github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
 github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
 github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
@@ -357,6 +361,8 @@ github.com/hashicorp/memberlist v0.1.4 h1:gkyML/r71w3FL8gUi74Vk76avkj/9lYAY9lvg0
 github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
 github.com/hashicorp/nomad/api v0.0.0-20190412184103-1c38ced33adf h1:U/40PQvWkaXCDdK9QHKf1pVDVcA+NIDVbzzonFGkgIA=
 github.com/hashicorp/nomad/api v0.0.0-20190412184103-1c38ced33adf/go.mod h1:BDngVi1f4UA6aJq9WYTgxhfWSE1+42xshvstLU2fRGk=
+github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d h1:BXqsASWhyiAiEVm6FcltF0dg8XvoookQwmpHn8lstu8=
+github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d/go.mod h1:WKCL+tLVhN1D+APwH3JiTRZoxcdwRk86bWu1LVCUPaE=
 github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI=
 github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17 h1:p+2EISNdFCnD9R+B4xCiqSn429MCFtvM41aHJDJ6qW4=
 github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
vendor/github.com/cloudfoundry-community/go-cfclient/gen_error.go (generated, vendored, new file, 115 lines)
@ -0,0 +1,115 @@
|
|||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"go/format"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type CFCode int
|
||||
type HTTPCode int
|
||||
|
||||
type Definition struct {
|
||||
CFCode `yaml:"-"`
|
||||
Name string `yaml:"name"`
|
||||
HTTPCode `yaml:"http_code"`
|
||||
Message string `yaml:"message"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
const url = "https://raw.githubusercontent.com/cloudfoundry/cloud_controller_ng/master/vendor/errors/v2.yml"
|
||||
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var m map[CFCode]Definition
|
||||
|
||||
if err := yaml.Unmarshal(body, &m); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var definitions []Definition
|
||||
|
||||
for c, d := range m {
|
||||
d.CFCode = c
|
||||
definitions = append(definitions, d)
|
||||
}
|
||||
|
||||
sort.Slice(definitions, func(i, j int) bool {
|
||||
return definitions[i].CFCode < definitions[j].CFCode
|
||||
})
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
if err := packageTemplate.Execute(buf, struct {
|
||||
Timestamp time.Time
|
||||
Definitions []Definition
|
||||
}{
|
||||
Timestamp: time.Now(),
|
||||
Definitions: definitions,
|
||||
}); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
dst, err := format.Source(buf.Bytes())
|
||||
if err != nil {
|
||||
log.Printf("%s", buf.Bytes())
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile("cf_error.go", dst, 0600); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// destutter ensures that s does not end in "Error".
|
||||
func destutter(s string) string {
|
||||
return strings.TrimSuffix(s, "Error")
|
||||
}
|
||||
|
||||
var packageTemplate = template.Must(template.New("").Funcs(template.FuncMap{
|
||||
"destutter": destutter,
|
||||
}).Parse(`
|
||||
package cfclient
|
||||
|
||||
// Code generated by go generate. DO NOT EDIT.
|
||||
// This file was generated by robots at
|
||||
// {{ .Timestamp }}
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
{{- range .Definitions }}
|
||||
{{$method := printf "Is%sError" (.Name | destutter) }}
|
||||
// {{ $method }} returns a boolean indicating whether
|
||||
// the error is known to report the Cloud Foundry error:
|
||||
// - Cloud Foundry code: {{ .CFCode }}
|
||||
// - HTTP code: {{ .HTTPCode }}
|
||||
// - message: {{ printf "%q" .Message }}
|
||||
func Is{{ .Name | destutter }}Error(err error) bool {
|
||||
cause := errors.Cause(err)
|
||||
cferr, ok := cause.(CloudFoundryError)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return cferr.Code == {{ .CFCode }}
|
||||
}
|
||||
{{- end }}
|
||||
`))
|
|
@ -0,0 +1,703 @@
|
|||
// Copyright 2017, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"go/format"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if len(os.Args) != 3 {
|
||||
log.Fatalf("Usage: %s GO_TYPE OUTPUT_FILE", os.Args[0])
|
||||
}
|
||||
typ := os.Args[1]
|
||||
path := os.Args[2]
|
||||
|
||||
b := new(bytes.Buffer)
|
||||
t := template.Must(template.New("source").Parse(source))
|
||||
if err := t.Execute(b, struct {
|
||||
Type, GeneratedMessage string
|
||||
}{typ, "// Code generated by sais_gen.go. DO NOT EDIT."}); err != nil {
|
||||
log.Fatalf("Template.Execute error: %v", err)
|
||||
}
|
||||
out, err := format.Source(bytes.TrimSpace(b.Bytes()))
|
||||
if err != nil {
|
||||
log.Fatalf("format.Source error: %v", err)
|
||||
}
|
||||
if err := ioutil.WriteFile(path, out, 0644); err != nil {
|
||||
log.Fatalf("ioutil.WriteFile error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
const source = `
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
{{.GeneratedMessage}}
|
||||
|
||||
// ====================================================
|
||||
// Copyright (c) 2008-2010 Yuta Mori All Rights Reserved.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person
|
||||
// obtaining a copy of this software and associated documentation
|
||||
// files (the "Software"), to deal in the Software without
|
||||
// restriction, including without limitation the rights to use,
|
||||
// copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be
|
||||
// included in all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
// OTHER DEALINGS IN THE SOFTWARE.
|
||||
// ====================================================
|
||||
|
||||
package sais
|
||||
|
||||
func getCounts_{{.Type}}(T []{{.Type}}, C []int, n, k int) {
|
||||
var i int
|
||||
for i = 0; i < k; i++ {
|
||||
C[i] = 0
|
||||
}
|
||||
for i = 0; i < n; i++ {
|
||||
C[T[i]]++
|
||||
}
|
||||
}
|
||||
|
||||
func getBuckets_{{.Type}}(C, B []int, k int, end bool) {
|
||||
var i, sum int
|
||||
if end {
|
||||
for i = 0; i < k; i++ {
|
||||
sum += C[i]
|
||||
B[i] = sum
|
||||
}
|
||||
} else {
|
||||
for i = 0; i < k; i++ {
|
||||
sum += C[i]
|
||||
B[i] = sum - C[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sortLMS1_{{.Type}}(T []{{.Type}}, SA, C, B []int, n, k int) {
|
||||
var b, i, j int
|
||||
var c0, c1 int
|
||||
|
||||
// Compute SAl.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_{{.Type}}(T, C, n, k)
|
||||
}
|
||||
getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets
|
||||
j = n - 1
|
||||
c1 = int(T[j])
|
||||
b = B[c1]
|
||||
j--
|
||||
if int(T[j]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
for i = 0; i < n; i++ {
|
||||
if j = SA[i]; j > 0 {
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
if int(T[j]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
SA[i] = 0
|
||||
} else if j < 0 {
|
||||
SA[i] = ^j
|
||||
}
|
||||
}
|
||||
|
||||
// Compute SAs.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_{{.Type}}(T, C, n, k)
|
||||
}
|
||||
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
|
||||
c1 = 0
|
||||
b = B[c1]
|
||||
for i = n - 1; i >= 0; i-- {
|
||||
if j = SA[i]; j > 0 {
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
b--
|
||||
if int(T[j]) > c1 {
|
||||
SA[b] = ^(j + 1)
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
SA[i] = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func postProcLMS1_{{.Type}}(T []{{.Type}}, SA []int, n, m int) int {
|
||||
var i, j, p, q, plen, qlen, name int
|
||||
var c0, c1 int
|
||||
var diff bool
|
||||
|
||||
// Compact all the sorted substrings into the first m items of SA.
|
||||
// 2*m must be not larger than n (provable).
|
||||
for i = 0; SA[i] < 0; i++ {
|
||||
SA[i] = ^SA[i]
|
||||
}
|
||||
if i < m {
|
||||
for j, i = i, i+1; ; i++ {
|
||||
if p = SA[i]; p < 0 {
|
||||
SA[j] = ^p
|
||||
j++
|
||||
SA[i] = 0
|
||||
if j == m {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Store the length of all substrings.
|
||||
i = n - 1
|
||||
j = n - 1
|
||||
c0 = int(T[n-1])
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for i >= 0 {
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 > c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i >= 0 {
|
||||
SA[m+((i+1)>>1)] = j - i
|
||||
j = i + 1
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Find the lexicographic names of all substrings.
|
||||
name = 0
|
||||
qlen = 0
|
||||
for i, q = 0, n; i < m; i++ {
|
||||
p = SA[i]
|
||||
plen = SA[m+(p>>1)]
|
||||
diff = true
|
||||
if (plen == qlen) && ((q + plen) < n) {
|
||||
for j = 0; (j < plen) && (T[p+j] == T[q+j]); j++ {
|
||||
}
|
||||
if j == plen {
|
||||
diff = false
|
||||
}
|
||||
}
|
||||
if diff {
|
||||
name++
|
||||
q = p
|
||||
qlen = plen
|
||||
}
|
||||
SA[m+(p>>1)] = name
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func sortLMS2_{{.Type}}(T []{{.Type}}, SA, C, B, D []int, n, k int) {
|
||||
var b, i, j, t, d int
|
||||
var c0, c1 int
|
||||
|
||||
// Compute SAl.
|
||||
getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets
|
||||
j = n - 1
|
||||
c1 = int(T[j])
|
||||
b = B[c1]
|
||||
j--
|
||||
if int(T[j]) < c1 {
|
||||
t = 1
|
||||
} else {
|
||||
t = 0
|
||||
}
|
||||
j += n
|
||||
if t&1 > 0 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
for i, d = 0, 0; i < n; i++ {
|
||||
if j = SA[i]; j > 0 {
|
||||
if n <= j {
|
||||
d += 1
|
||||
j -= n
|
||||
}
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
t = int(c0) << 1
|
||||
if int(T[j]) < c1 {
|
||||
t |= 1
|
||||
}
|
||||
if D[t] != d {
|
||||
j += n
|
||||
D[t] = d
|
||||
}
|
||||
if t&1 > 0 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
SA[i] = 0
|
||||
} else if j < 0 {
|
||||
SA[i] = ^j
|
||||
}
|
||||
}
|
||||
for i = n - 1; 0 <= i; i-- {
|
||||
if SA[i] > 0 {
|
||||
if SA[i] < n {
|
||||
SA[i] += n
|
||||
for j = i - 1; SA[j] < n; j-- {
|
||||
}
|
||||
SA[j] -= n
|
||||
i = j
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Compute SAs.
|
||||
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
|
||||
c1 = 0
|
||||
b = B[c1]
|
||||
for i, d = n-1, d+1; i >= 0; i-- {
|
||||
if j = SA[i]; j > 0 {
|
||||
if n <= j {
|
||||
d += 1
|
||||
j -= n
|
||||
}
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
t = int(c0) << 1
|
||||
if int(T[j]) > c1 {
|
||||
t |= 1
|
||||
}
|
||||
if D[t] != d {
|
||||
j += n
|
||||
D[t] = d
|
||||
}
|
||||
b--
|
||||
if t&1 > 0 {
|
||||
SA[b] = ^(j + 1)
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
SA[i] = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func postProcLMS2_{{.Type}}(SA []int, n, m int) int {
|
||||
var i, j, d, name int
|
||||
|
||||
// Compact all the sorted LMS substrings into the first m items of SA.
|
||||
name = 0
|
||||
for i = 0; SA[i] < 0; i++ {
|
||||
j = ^SA[i]
|
||||
if n <= j {
|
||||
name += 1
|
||||
}
|
||||
SA[i] = j
|
||||
}
|
||||
if i < m {
|
||||
for d, i = i, i+1; ; i++ {
|
||||
if j = SA[i]; j < 0 {
|
||||
j = ^j
|
||||
if n <= j {
|
||||
name += 1
|
||||
}
|
||||
SA[d] = j
|
||||
d++
|
||||
SA[i] = 0
|
||||
if d == m {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if name < m {
|
||||
// Store the lexicographic names.
|
||||
for i, d = m-1, name+1; 0 <= i; i-- {
|
||||
if j = SA[i]; n <= j {
|
||||
j -= n
|
||||
d--
|
||||
}
|
||||
SA[m+(j>>1)] = d
|
||||
}
|
||||
} else {
|
||||
// Unset flags.
|
||||
for i = 0; i < m; i++ {
|
||||
if j = SA[i]; n <= j {
|
||||
j -= n
|
||||
SA[i] = j
|
||||
}
|
||||
}
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func induceSA_{{.Type}}(T []{{.Type}}, SA, C, B []int, n, k int) {
|
||||
var b, i, j int
|
||||
var c0, c1 int
|
||||
|
||||
// Compute SAl.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_{{.Type}}(T, C, n, k)
|
||||
}
|
||||
getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets
|
||||
j = n - 1
|
||||
c1 = int(T[j])
|
||||
b = B[c1]
|
||||
if j > 0 && int(T[j-1]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
for i = 0; i < n; i++ {
|
||||
j = SA[i]
|
||||
SA[i] = ^j
|
||||
if j > 0 {
|
||||
j--
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
if j > 0 && int(T[j-1]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
}
|
||||
}
|
||||
|
||||
// Compute SAs.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_{{.Type}}(T, C, n, k)
|
||||
}
|
||||
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
|
||||
c1 = 0
|
||||
b = B[c1]
|
||||
for i = n - 1; i >= 0; i-- {
|
||||
if j = SA[i]; j > 0 {
|
||||
j--
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
b--
|
||||
if (j == 0) || (int(T[j-1]) > c1) {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
} else {
|
||||
SA[i] = ^j
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func computeSA_{{.Type}}(T []{{.Type}}, SA []int, fs, n, k int) {
|
||||
const (
|
||||
minBucketSize = 512
|
||||
sortLMS2Limit = 0x3fffffff
|
||||
)
|
||||
|
||||
var C, B, D, RA []int
|
||||
var bo int // Offset of B relative to SA
|
||||
var b, i, j, m, p, q, name, newfs int
|
||||
var c0, c1 int
|
||||
var flags uint
|
||||
|
||||
if k <= minBucketSize {
|
||||
C = make([]int, k)
|
||||
if k <= fs {
|
||||
bo = n + fs - k
|
||||
B = SA[bo:]
|
||||
flags = 1
|
||||
} else {
|
||||
B = make([]int, k)
|
||||
flags = 3
|
||||
}
|
||||
} else if k <= fs {
|
||||
C = SA[n+fs-k:]
|
||||
if k <= fs-k {
|
||||
bo = n + fs - 2*k
|
||||
B = SA[bo:]
|
||||
flags = 0
|
||||
} else if k <= 4*minBucketSize {
|
||||
B = make([]int, k)
|
||||
flags = 2
|
||||
} else {
|
||||
B = C
|
||||
flags = 8
|
||||
}
|
||||
} else {
|
||||
C = make([]int, k)
|
||||
B = C
|
||||
flags = 4 | 8
|
||||
}
|
||||
if n <= sortLMS2Limit && 2 <= (n/k) {
|
||||
if flags&1 > 0 {
|
||||
if 2*k <= fs-k {
|
||||
flags |= 32
|
||||
} else {
|
||||
flags |= 16
|
||||
}
|
||||
} else if flags == 0 && 2*k <= (fs-2*k) {
|
||||
flags |= 32
|
||||
}
|
||||
}
|
||||
|
||||
// Stage 1: Reduce the problem by at least 1/2.
|
||||
// Sort all the LMS-substrings.
|
||||
getCounts_{{.Type}}(T, C, n, k)
|
||||
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
|
||||
for i = 0; i < n; i++ {
|
||||
SA[i] = 0
|
||||
}
|
||||
b = -1
|
||||
i = n - 1
|
||||
j = n
|
||||
m = 0
|
||||
c0 = int(T[n-1])
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for i >= 0 {
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 > c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i >= 0 {
|
||||
if b >= 0 {
|
||||
SA[b] = j
|
||||
}
|
||||
B[c1]--
|
||||
b = B[c1]
|
||||
j = i
|
||||
m++
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if m > 1 {
|
||||
if flags&(16|32) > 0 {
|
||||
if flags&16 > 0 {
|
||||
D = make([]int, 2*k)
|
||||
} else {
|
||||
D = SA[bo-2*k:]
|
||||
}
|
||||
B[T[j+1]]++
|
||||
for i, j = 0, 0; i < k; i++ {
|
||||
j += C[i]
|
||||
if B[i] != j {
|
||||
SA[B[i]] += n
|
||||
}
|
||||
D[i] = 0
|
||||
D[i+k] = 0
|
||||
}
|
||||
sortLMS2_{{.Type}}(T, SA, C, B, D, n, k)
|
||||
name = postProcLMS2_{{.Type}}(SA, n, m)
|
||||
} else {
|
||||
sortLMS1_{{.Type}}(T, SA, C, B, n, k)
|
||||
name = postProcLMS1_{{.Type}}(T, SA, n, m)
|
||||
}
|
||||
} else if m == 1 {
|
||||
SA[b] = j + 1
|
||||
name = 1
|
||||
} else {
|
||||
name = 0
|
||||
}
|
||||
|
||||
// Stage 2: Solve the reduced problem.
|
||||
// Recurse if names are not yet unique.
|
||||
if name < m {
|
||||
newfs = n + fs - 2*m
|
||||
if flags&(1|4|8) == 0 {
|
||||
if k+name <= newfs {
|
||||
newfs -= k
|
||||
} else {
|
||||
flags |= 8
|
||||
}
|
||||
}
|
||||
RA = SA[m+newfs:]
|
||||
for i, j = m+(n>>1)-1, m-1; m <= i; i-- {
|
||||
if SA[i] != 0 {
|
||||
RA[j] = SA[i] - 1
|
||||
j--
|
||||
}
|
||||
}
|
||||
computeSA_int(RA, SA, newfs, m, name)
|
||||
|
||||
i = n - 1
|
||||
j = m - 1
|
||||
c0 = int(T[n-1])
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for i >= 0 {
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 > c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i >= 0 {
|
||||
RA[j] = i + 1
|
||||
j--
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for i = 0; i < m; i++ {
|
||||
SA[i] = RA[SA[i]]
|
||||
}
|
||||
if flags&4 > 0 {
|
||||
B = make([]int, k)
|
||||
C = B
|
||||
}
|
||||
if flags&2 > 0 {
|
||||
B = make([]int, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Stage 3: Induce the result for the original problem.
|
||||
if flags&8 > 0 {
|
||||
getCounts_{{.Type}}(T, C, n, k)
|
||||
}
|
||||
// Put all left-most S characters into their buckets.
|
||||
if m > 1 {
|
||||
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
|
||||
i = m - 1
|
||||
j = n
|
||||
p = SA[m-1]
|
||||
c1 = int(T[p])
|
||||
for {
|
||||
c0 = c1
|
||||
q = B[c0]
|
||||
for q < j {
|
||||
j--
|
||||
SA[j] = 0
|
||||
}
|
||||
for {
|
||||
j--
|
||||
SA[j] = p
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
p = SA[i]
|
||||
if c1 = int(T[p]); c1 != c0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i < 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for j > 0 {
|
||||
j--
|
||||
SA[j] = 0
|
||||
}
|
||||
}
|
||||
induceSA_{{.Type}}(T, SA, C, B, n, k)
|
||||
}
|
||||
`
|
|
@ -0,0 +1,332 @@
|
|||
// Copyright 2017 The go-github AUTHORS. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// gen-accessors generates accessor methods for structs with pointer fields.
|
||||
//
|
||||
// It is meant to be used by the go-github authors in conjunction with the
|
||||
// go generate tool before sending a commit to GitHub.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
const (
|
||||
fileSuffix = "-accessors.go"
|
||||
)
|
||||
|
||||
var (
|
||||
verbose = flag.Bool("v", false, "Print verbose log messages")
|
||||
|
||||
sourceTmpl = template.Must(template.New("source").Parse(source))
|
||||
|
||||
// blacklistStructMethod lists "struct.method" combos to skip.
|
||||
blacklistStructMethod = map[string]bool{
|
||||
"RepositoryContent.GetContent": true,
|
||||
"Client.GetBaseURL": true,
|
||||
"Client.GetUploadURL": true,
|
||||
"ErrorResponse.GetResponse": true,
|
||||
"RateLimitError.GetResponse": true,
|
||||
"AbuseRateLimitError.GetResponse": true,
|
||||
}
|
||||
// blacklistStruct lists structs to skip.
|
||||
blacklistStruct = map[string]bool{
|
||||
"Client": true,
|
||||
}
|
||||
)
|
||||
|
||||
func logf(fmt string, args ...interface{}) {
|
||||
if *verbose {
|
||||
log.Printf(fmt, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
fset := token.NewFileSet()
|
||||
|
||||
pkgs, err := parser.ParseDir(fset, ".", sourceFilter, 0)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
for pkgName, pkg := range pkgs {
|
||||
t := &templateData{
|
||||
filename: pkgName + fileSuffix,
|
||||
Year: 2017,
|
||||
Package: pkgName,
|
||||
Imports: map[string]string{},
|
||||
}
|
||||
for filename, f := range pkg.Files {
|
||||
logf("Processing %v...", filename)
|
||||
if err := t.processAST(f); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
if err := t.dump(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
logf("Done.")
|
||||
}
|
||||
|
||||
func (t *templateData) processAST(f *ast.File) error {
|
||||
for _, decl := range f.Decls {
|
||||
gd, ok := decl.(*ast.GenDecl)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
for _, spec := range gd.Specs {
|
||||
ts, ok := spec.(*ast.TypeSpec)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
// Skip unexported identifiers.
|
||||
if !ts.Name.IsExported() {
|
||||
logf("Struct %v is unexported; skipping.", ts.Name)
|
||||
continue
|
||||
}
|
||||
// Check if the struct is blacklisted.
|
||||
if blacklistStruct[ts.Name.Name] {
|
||||
logf("Struct %v is blacklisted; skipping.", ts.Name)
|
||||
continue
|
||||
}
|
||||
st, ok := ts.Type.(*ast.StructType)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
for _, field := range st.Fields.List {
|
||||
se, ok := field.Type.(*ast.StarExpr)
|
||||
if len(field.Names) == 0 || !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldName := field.Names[0]
|
||||
// Skip unexported identifiers.
|
||||
if !fieldName.IsExported() {
|
||||
logf("Field %v is unexported; skipping.", fieldName)
|
||||
continue
|
||||
}
|
||||
// Check if "struct.method" is blacklisted.
|
||||
if key := fmt.Sprintf("%v.Get%v", ts.Name, fieldName); blacklistStructMethod[key] {
|
||||
logf("Method %v is blacklisted; skipping.", key)
|
||||
continue
|
||||
}
|
||||
|
||||
switch x := se.X.(type) {
|
||||
case *ast.ArrayType:
|
||||
t.addArrayType(x, ts.Name.String(), fieldName.String())
|
||||
case *ast.Ident:
|
||||
t.addIdent(x, ts.Name.String(), fieldName.String())
|
||||
case *ast.MapType:
|
||||
t.addMapType(x, ts.Name.String(), fieldName.String())
|
||||
case *ast.SelectorExpr:
|
||||
t.addSelectorExpr(x, ts.Name.String(), fieldName.String())
|
||||
default:
|
||||
logf("processAST: type %q, field %q, unknown %T: %+v", ts.Name, fieldName, x, x)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func sourceFilter(fi os.FileInfo) bool {
|
||||
return !strings.HasSuffix(fi.Name(), "_test.go") && !strings.HasSuffix(fi.Name(), fileSuffix)
|
||||
}
|
||||
|
||||
func (t *templateData) dump() error {
|
||||
if len(t.Getters) == 0 {
|
||||
logf("No getters for %v; skipping.", t.filename)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sort getters by ReceiverType.FieldName.
|
||||
sort.Sort(byName(t.Getters))
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := sourceTmpl.Execute(&buf, t); err != nil {
|
||||
return err
|
||||
}
|
||||
clean, err := format.Source(buf.Bytes())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logf("Writing %v...", t.filename)
|
||||
return ioutil.WriteFile(t.filename, clean, 0644)
|
||||
}
|
||||
|
||||
func newGetter(receiverType, fieldName, fieldType, zeroValue string, namedStruct bool) *getter {
|
||||
return &getter{
|
||||
sortVal: strings.ToLower(receiverType) + "." + strings.ToLower(fieldName),
|
||||
ReceiverVar: strings.ToLower(receiverType[:1]),
|
||||
ReceiverType: receiverType,
|
||||
FieldName: fieldName,
|
||||
FieldType: fieldType,
|
||||
ZeroValue: zeroValue,
|
||||
NamedStruct: namedStruct,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *templateData) addArrayType(x *ast.ArrayType, receiverType, fieldName string) {
|
||||
var eltType string
|
||||
switch elt := x.Elt.(type) {
|
||||
case *ast.Ident:
|
||||
eltType = elt.String()
|
||||
default:
|
||||
logf("addArrayType: type %q, field %q: unknown elt type: %T %+v; skipping.", receiverType, fieldName, elt, elt)
|
||||
return
|
||||
}
|
||||
|
||||
t.Getters = append(t.Getters, newGetter(receiverType, fieldName, "[]"+eltType, "nil", false))
|
||||
}
|
||||
|
||||
func (t *templateData) addIdent(x *ast.Ident, receiverType, fieldName string) {
|
||||
var zeroValue string
|
||||
var namedStruct = false
|
||||
switch x.String() {
|
||||
case "int", "int64":
|
||||
zeroValue = "0"
|
||||
case "string":
|
||||
zeroValue = `""`
|
||||
case "bool":
|
||||
zeroValue = "false"
|
||||
case "Timestamp":
|
||||
zeroValue = "Timestamp{}"
|
||||
default:
|
||||
zeroValue = "nil"
|
||||
namedStruct = true
|
||||
}
|
||||
|
||||
t.Getters = append(t.Getters, newGetter(receiverType, fieldName, x.String(), zeroValue, namedStruct))
|
||||
}
|
||||
|
||||
func (t *templateData) addMapType(x *ast.MapType, receiverType, fieldName string) {
|
||||
var keyType string
|
||||
switch key := x.Key.(type) {
|
||||
case *ast.Ident:
|
||||
keyType = key.String()
|
||||
default:
|
||||
logf("addMapType: type %q, field %q: unknown key type: %T %+v; skipping.", receiverType, fieldName, key, key)
|
||||
return
|
||||
}
|
||||
|
||||
var valueType string
|
||||
switch value := x.Value.(type) {
|
||||
case *ast.Ident:
|
||||
valueType = value.String()
|
||||
default:
|
||||
logf("addMapType: type %q, field %q: unknown value type: %T %+v; skipping.", receiverType, fieldName, value, value)
|
||||
return
|
||||
}
|
||||
|
||||
fieldType := fmt.Sprintf("map[%v]%v", keyType, valueType)
|
||||
zeroValue := fmt.Sprintf("map[%v]%v{}", keyType, valueType)
|
||||
t.Getters = append(t.Getters, newGetter(receiverType, fieldName, fieldType, zeroValue, false))
|
||||
}
|
||||
|
||||
func (t *templateData) addSelectorExpr(x *ast.SelectorExpr, receiverType, fieldName string) {
|
||||
if strings.ToLower(fieldName[:1]) == fieldName[:1] { // Non-exported field.
|
||||
return
|
||||
}
|
||||
|
||||
var xX string
|
||||
if xx, ok := x.X.(*ast.Ident); ok {
|
||||
xX = xx.String()
|
||||
}
|
||||
|
||||
switch xX {
|
||||
case "time", "json":
|
||||
if xX == "json" {
|
||||
t.Imports["encoding/json"] = "encoding/json"
|
||||
} else {
|
||||
t.Imports[xX] = xX
|
||||
}
|
||||
fieldType := fmt.Sprintf("%v.%v", xX, x.Sel.Name)
|
||||
zeroValue := fmt.Sprintf("%v.%v{}", xX, x.Sel.Name)
|
||||
if xX == "time" && x.Sel.Name == "Duration" {
|
||||
zeroValue = "0"
|
||||
}
|
||||
t.Getters = append(t.Getters, newGetter(receiverType, fieldName, fieldType, zeroValue, false))
|
||||
default:
|
||||
logf("addSelectorExpr: xX %q, type %q, field %q: unknown x=%+v; skipping.", xX, receiverType, fieldName, x)
|
||||
}
|
||||
}
|
||||
|
||||
type templateData struct {
|
||||
filename string
|
||||
Year int
|
||||
Package string
|
||||
Imports map[string]string
|
||||
Getters []*getter
|
||||
}
|
||||
|
||||
type getter struct {
|
||||
sortVal string // Lower-case version of "ReceiverType.FieldName".
|
||||
ReceiverVar string // The one-letter variable name to match the ReceiverType.
|
||||
ReceiverType string
|
||||
FieldName string
|
||||
FieldType string
|
||||
ZeroValue string
|
||||
NamedStruct bool // Getter for named struct.
|
||||
}
|
||||
|
||||
type byName []*getter
|
||||
|
||||
func (b byName) Len() int { return len(b) }
|
||||
func (b byName) Less(i, j int) bool { return b[i].sortVal < b[j].sortVal }
|
||||
func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
|
||||
const source = `// Copyright {{.Year}} The go-github AUTHORS. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Code generated by gen-accessors; DO NOT EDIT.
|
||||
|
||||
package {{.Package}}
|
||||
{{with .Imports}}
|
||||
import (
|
||||
{{- range . -}}
|
||||
"{{.}}"
|
||||
{{end -}}
|
||||
)
|
||||
{{end}}
|
||||
{{range .Getters}}
|
||||
{{if .NamedStruct}}
|
||||
// Get{{.FieldName}} returns the {{.FieldName}} field.
|
||||
func ({{.ReceiverVar}} *{{.ReceiverType}}) Get{{.FieldName}}() *{{.FieldType}} {
|
||||
if {{.ReceiverVar}} == nil {
|
||||
return {{.ZeroValue}}
|
||||
}
|
||||
return {{.ReceiverVar}}.{{.FieldName}}
|
||||
}
|
||||
{{else}}
|
||||
// Get{{.FieldName}} returns the {{.FieldName}} field if it's non-nil, zero value otherwise.
|
||||
func ({{.ReceiverVar}} *{{.ReceiverType}}) Get{{.FieldName}}() {{.FieldType}} {
|
||||
if {{.ReceiverVar}} == nil || {{.ReceiverVar}}.{{.FieldName}} == nil {
|
||||
return {{.ZeroValue}}
|
||||
}
|
||||
return *{{.ReceiverVar}}.{{.FieldName}}
|
||||
}
|
||||
{{end}}
|
||||
{{end}}
|
||||
`
|
|
@ -0,0 +1,25 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
|
||||
.idea/
|
||||
*.iml
|
|
@ -0,0 +1,9 @@
|
|||
# This is the official list of Gorilla WebSocket authors for copyright
|
||||
# purposes.
|
||||
#
|
||||
# Please keep the list sorted.
|
||||
|
||||
Gary Burd <gary@beagledreams.com>
|
||||
Google LLC (https://opensource.google.com/)
|
||||
Joachim Bauch <mail@joachim-bauch.de>
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,64 @@
|
|||
# Gorilla WebSocket
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket)
|
||||
[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket)
|
||||
|
||||
Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
|
||||
[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
|
||||
|
||||
### Documentation
|
||||
|
||||
* [API Reference](http://godoc.org/github.com/gorilla/websocket)
|
||||
* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
|
||||
* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
|
||||
* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
|
||||
* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
|
||||
|
||||
### Status
|
||||
|
||||
The Gorilla WebSocket package provides a complete and tested implementation of
|
||||
the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
|
||||
package API is stable.
|
||||
|
||||
### Installation
|
||||
|
||||
go get github.com/gorilla/websocket
|
||||
|
||||
### Protocol Compliance
|
||||
|
||||
The Gorilla WebSocket package passes the server tests in the [Autobahn Test
|
||||
Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
|
||||
subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
|
||||
|
||||
### Gorilla WebSocket compared with other packages
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th></th>
|
||||
<th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th>
|
||||
<th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th>
|
||||
</tr>
|
||||
<tr>
|
||||
<tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr>
|
||||
<tr><td>Passes <a href="https://github.com/crossbario/autobahn-testsuite">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr>
|
||||
<tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message<td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr>
|
||||
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr>
|
||||
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr>
|
||||
<tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
|
||||
<tr><td colspan="3">Other Features</tr></td>
|
||||
<tr><td><a href="https://tools.ietf.org/html/rfc7692">Compression Extensions</a></td><td>Experimental</td><td>No</td></tr>
|
||||
<tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr>
|
||||
<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
|
||||
</table>
|
||||
|
||||
Notes:
|
||||
|
||||
1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
|
||||
2. The application can get the type of a received data message by implementing
|
||||
a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
|
||||
function.
|
||||
3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
|
||||
Read returns when the input buffer is full or a frame boundary is
|
||||
encountered. Each call to Write sends a single frame message. The Gorilla
|
||||
io.Reader and io.WriteCloser operate on a single WebSocket message.
|
||||
|
|
@ -0,0 +1,395 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrBadHandshake is returned when the server response to opening handshake is
|
||||
// invalid.
|
||||
var ErrBadHandshake = errors.New("websocket: bad handshake")
|
||||
|
||||
var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
|
||||
|
||||
// NewClient creates a new client connection using the given net connection.
|
||||
// The URL u specifies the host and request URI. Use requestHeader to specify
|
||||
// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
|
||||
// (Cookie). Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etc.
|
||||
//
|
||||
// Deprecated: Use Dialer instead.
|
||||
func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
|
||||
d := Dialer{
|
||||
ReadBufferSize: readBufSize,
|
||||
WriteBufferSize: writeBufSize,
|
||||
NetDial: func(net, addr string) (net.Conn, error) {
|
||||
return netConn, nil
|
||||
},
|
||||
}
|
||||
return d.Dial(u.String(), requestHeader)
|
||||
}
|
||||
|
||||
// A Dialer contains options for connecting to WebSocket server.
|
||||
type Dialer struct {
|
||||
// NetDial specifies the dial function for creating TCP connections. If
|
||||
// NetDial is nil, net.Dial is used.
|
||||
NetDial func(network, addr string) (net.Conn, error)
|
||||
|
||||
// NetDialContext specifies the dial function for creating TCP connections. If
|
||||
// NetDialContext is nil, net.DialContext is used.
|
||||
NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
|
||||
|
||||
// Proxy specifies a function to return a proxy for a given
|
||||
// Request. If the function returns a non-nil error, the
|
||||
// request is aborted with the provided error.
|
||||
// If Proxy is nil or returns a nil *URL, no proxy is used.
|
||||
Proxy func(*http.Request) (*url.URL, error)
|
||||
|
||||
// TLSClientConfig specifies the TLS configuration to use with tls.Client.
|
||||
// If nil, the default configuration is used.
|
||||
TLSClientConfig *tls.Config
|
||||
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
|
||||
// size is zero, then a useful default size is used. The I/O buffer sizes
|
||||
// do not limit the size of the messages that can be sent or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the client's requested subprotocols.
|
||||
Subprotocols []string
|
||||
|
||||
// EnableCompression specifies if the client should attempt to negotiate
|
||||
// per message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
|
||||
// Jar specifies the cookie jar.
|
||||
// If Jar is nil, cookies are not sent in requests and ignored
|
||||
// in responses.
|
||||
Jar http.CookieJar
|
||||
}
|
||||
|
||||
// Dial creates a new client connection by calling DialContext with a background context.
|
||||
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
return d.DialContext(context.Background(), urlStr, requestHeader)
|
||||
}
|
||||
|
||||
var errMalformedURL = errors.New("malformed ws or wss URL")
|
||||
|
||||
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
|
||||
hostPort = u.Host
|
||||
hostNoPort = u.Host
|
||||
if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
|
||||
hostNoPort = hostNoPort[:i]
|
||||
} else {
|
||||
switch u.Scheme {
|
||||
case "wss":
|
||||
hostPort += ":443"
|
||||
case "https":
|
||||
hostPort += ":443"
|
||||
default:
|
||||
hostPort += ":80"
|
||||
}
|
||||
}
|
||||
return hostPort, hostNoPort
|
||||
}
|
||||
|
||||
// DefaultDialer is a dialer with all fields set to the default values.
|
||||
var DefaultDialer = &Dialer{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
HandshakeTimeout: 45 * time.Second,
|
||||
}
|
||||
|
||||
// nilDialer is dialer to use when receiver is nil.
|
||||
var nilDialer = *DefaultDialer
|
||||
|
||||
// DialContext creates a new client connection. Use requestHeader to specify the
|
||||
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
|
||||
// Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// The context will be used in the request and in the Dialer.
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etcetera. The response body may not contain the entire response and does not
|
||||
// need to be closed by the application.
|
||||
func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
if d == nil {
|
||||
d = &nilDialer
|
||||
}
|
||||
|
||||
challengeKey, err := generateChallengeKey()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
u, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "ws":
|
||||
u.Scheme = "http"
|
||||
case "wss":
|
||||
u.Scheme = "https"
|
||||
default:
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
if u.User != nil {
|
||||
// User name and password are not allowed in websocket URIs.
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
req := &http.Request{
|
||||
Method: "GET",
|
||||
URL: u,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: make(http.Header),
|
||||
Host: u.Host,
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
// Set the cookies present in the cookie jar of the dialer
|
||||
if d.Jar != nil {
|
||||
for _, cookie := range d.Jar.Cookies(u) {
|
||||
req.AddCookie(cookie)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the request headers using the capitalization for names and values in
|
||||
// RFC examples. Although the capitalization shouldn't matter, there are
|
||||
// servers that depend on it. The Header.Set method is not used because the
|
||||
// method canonicalizes the header names.
|
||||
req.Header["Upgrade"] = []string{"websocket"}
|
||||
req.Header["Connection"] = []string{"Upgrade"}
|
||||
req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
|
||||
req.Header["Sec-WebSocket-Version"] = []string{"13"}
|
||||
if len(d.Subprotocols) > 0 {
|
||||
req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
|
||||
}
|
||||
for k, vs := range requestHeader {
|
||||
switch {
|
||||
case k == "Host":
|
||||
if len(vs) > 0 {
|
||||
req.Host = vs[0]
|
||||
}
|
||||
case k == "Upgrade" ||
|
||||
k == "Connection" ||
|
||||
k == "Sec-Websocket-Key" ||
|
||||
k == "Sec-Websocket-Version" ||
|
||||
k == "Sec-Websocket-Extensions" ||
|
||||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
|
||||
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
|
||||
case k == "Sec-Websocket-Protocol":
|
||||
req.Header["Sec-WebSocket-Protocol"] = vs
|
||||
default:
|
||||
req.Header[k] = vs
|
||||
}
|
||||
}
|
||||
|
||||
if d.EnableCompression {
|
||||
req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
|
||||
}
|
||||
|
||||
if d.HandshakeTimeout != 0 {
|
||||
var cancel func()
|
||||
ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// Get network dial function.
|
||||
var netDial func(network, add string) (net.Conn, error)
|
||||
|
||||
if d.NetDialContext != nil {
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return d.NetDialContext(ctx, network, addr)
|
||||
}
|
||||
} else if d.NetDial != nil {
|
||||
netDial = d.NetDial
|
||||
} else {
|
||||
netDialer := &net.Dialer{}
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return netDialer.DialContext(ctx, network, addr)
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to set the connection deadline.
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
forwardDial := netDial
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
c, err := forwardDial(network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = c.SetDeadline(deadline)
|
||||
if err != nil {
|
||||
c.Close()
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to connect through a proxy.
|
||||
if d.Proxy != nil {
|
||||
proxyURL, err := d.Proxy(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if proxyURL != nil {
|
||||
dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
netDial = dialer.Dial
|
||||
}
|
||||
}
|
||||
|
||||
hostPort, hostNoPort := hostPortNoPort(u)
|
||||
trace := httptrace.ContextClientTrace(ctx)
|
||||
if trace != nil && trace.GetConn != nil {
|
||||
trace.GetConn(hostPort)
|
||||
}
|
||||
|
||||
netConn, err := netDial("tcp", hostPort)
|
||||
if trace != nil && trace.GotConn != nil {
|
||||
trace.GotConn(httptrace.GotConnInfo{
|
||||
Conn: netConn,
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if netConn != nil {
|
||||
netConn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if u.Scheme == "https" {
|
||||
cfg := cloneTLSConfig(d.TLSClientConfig)
|
||||
if cfg.ServerName == "" {
|
||||
cfg.ServerName = hostNoPort
|
||||
}
|
||||
tlsConn := tls.Client(netConn, cfg)
|
||||
netConn = tlsConn
|
||||
|
||||
var err error
|
||||
if trace != nil {
|
||||
err = doHandshakeWithTrace(trace, tlsConn, cfg)
|
||||
} else {
|
||||
err = doHandshake(tlsConn, cfg)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
|
||||
|
||||
if err := req.Write(netConn); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if trace != nil && trace.GotFirstResponseByte != nil {
|
||||
if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
|
||||
trace.GotFirstResponseByte()
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := http.ReadResponse(conn.br, req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if d.Jar != nil {
|
||||
if rc := resp.Cookies(); len(rc) > 0 {
|
||||
d.Jar.SetCookies(u, rc)
|
||||
}
|
||||
}
|
||||
|
||||
if resp.StatusCode != 101 ||
|
||||
!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
|
||||
!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
|
||||
resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
|
||||
// Before closing the network connection on return from this
|
||||
// function, slurp up some of the response to aid application
|
||||
// debugging.
|
||||
buf := make([]byte, 1024)
|
||||
n, _ := io.ReadFull(resp.Body, buf)
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
|
||||
return nil, resp, ErrBadHandshake
|
||||
}
|
||||
|
||||
for _, ext := range parseExtensions(resp.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
_, snct := ext["server_no_context_takeover"]
|
||||
_, cnct := ext["client_no_context_takeover"]
|
||||
if !snct || !cnct {
|
||||
return nil, resp, errInvalidCompression
|
||||
}
|
||||
conn.newCompressionWriter = compressNoContextTakeover
|
||||
conn.newDecompressionReader = decompressNoContextTakeover
|
||||
break
|
||||
}
|
||||
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
|
||||
conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
|
||||
|
||||
netConn.SetDeadline(time.Time{})
|
||||
netConn = nil // to avoid close in defer.
|
||||
return conn, resp, nil
|
||||
}
|
||||
|
||||
func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
|
||||
if err := tlsConn.Handshake(); err != nil {
|
||||
return err
|
||||
}
|
||||
if !cfg.InsecureSkipVerify {
|
||||
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,16 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import "crypto/tls"
|
||||
|
||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
return cfg.Clone()
|
||||
}
|
|
@ -0,0 +1,38 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import "crypto/tls"
|
||||
|
||||
// cloneTLSConfig clones all public fields except the fields
|
||||
// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
|
||||
// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
|
||||
// config in active use.
|
||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
return &tls.Config{
|
||||
Rand: cfg.Rand,
|
||||
Time: cfg.Time,
|
||||
Certificates: cfg.Certificates,
|
||||
NameToCertificate: cfg.NameToCertificate,
|
||||
GetCertificate: cfg.GetCertificate,
|
||||
RootCAs: cfg.RootCAs,
|
||||
NextProtos: cfg.NextProtos,
|
||||
ServerName: cfg.ServerName,
|
||||
ClientAuth: cfg.ClientAuth,
|
||||
ClientCAs: cfg.ClientCAs,
|
||||
InsecureSkipVerify: cfg.InsecureSkipVerify,
|
||||
CipherSuites: cfg.CipherSuites,
|
||||
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
|
||||
ClientSessionCache: cfg.ClientSessionCache,
|
||||
MinVersion: cfg.MinVersion,
|
||||
MaxVersion: cfg.MaxVersion,
|
||||
CurvePreferences: cfg.CurvePreferences,
|
||||
}
|
||||
}
|
|
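Both cloneTLSConfig variants exist so the dialer can set fields such as ServerName without mutating the caller's tls.Config while it may still be in use. A small sketch of the behavior being preserved, assuming Go 1.8+ and using the standard library Clone directly:

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	orig := &tls.Config{}
	clone := orig.Clone() // what cloneTLSConfig does on go1.8+
	clone.ServerName = "example.com"
	fmt.Println(orig.ServerName == "") // true: the caller's config is untouched
}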
@ -0,0 +1,148 @@
|
|||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
|
||||
maxCompressionLevel = flate.BestCompression
|
||||
defaultCompressionLevel = 1
|
||||
)
|
||||
|
||||
var (
|
||||
flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
|
||||
flateReaderPool = sync.Pool{New: func() interface{} {
|
||||
return flate.NewReader(nil)
|
||||
}}
|
||||
)
|
||||
|
||||
func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
|
||||
const tail =
|
||||
// Add four bytes as specified in RFC
|
||||
"\x00\x00\xff\xff" +
|
||||
// Add final block to squelch unexpected EOF error from flate reader.
|
||||
"\x01\x00\x00\xff\xff"
|
||||
|
||||
fr, _ := flateReaderPool.Get().(io.ReadCloser)
|
||||
fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
|
||||
return &flateReadWrapper{fr}
|
||||
}
|
||||
|
||||
func isValidCompressionLevel(level int) bool {
|
||||
return minCompressionLevel <= level && level <= maxCompressionLevel
|
||||
}
|
||||
|
||||
func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
|
||||
p := &flateWriterPools[level-minCompressionLevel]
|
||||
tw := &truncWriter{w: w}
|
||||
fw, _ := p.Get().(*flate.Writer)
|
||||
if fw == nil {
|
||||
fw, _ = flate.NewWriter(tw, level)
|
||||
} else {
|
||||
fw.Reset(tw)
|
||||
}
|
||||
return &flateWriteWrapper{fw: fw, tw: tw, p: p}
|
||||
}
|
||||
|
||||
// truncWriter is an io.Writer that writes all but the last four bytes of the
|
||||
// stream to another io.Writer.
|
||||
type truncWriter struct {
|
||||
w io.WriteCloser
|
||||
n int
|
||||
p [4]byte
|
||||
}
|
||||
|
||||
func (w *truncWriter) Write(p []byte) (int, error) {
|
||||
n := 0
|
||||
|
||||
// fill buffer first for simplicity.
|
||||
if w.n < len(w.p) {
|
||||
n = copy(w.p[w.n:], p)
|
||||
p = p[n:]
|
||||
w.n += n
|
||||
if len(p) == 0 {
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
|
||||
m := len(p)
|
||||
if m > len(w.p) {
|
||||
m = len(w.p)
|
||||
}
|
||||
|
||||
if nn, err := w.w.Write(w.p[:m]); err != nil {
|
||||
return n + nn, err
|
||||
}
|
||||
|
||||
copy(w.p[:], w.p[m:])
|
||||
copy(w.p[len(w.p)-m:], p[len(p)-m:])
|
||||
nn, err := w.w.Write(p[:len(p)-m])
|
||||
return n + nn, err
|
||||
}
|
||||
|
||||
type flateWriteWrapper struct {
|
||||
fw *flate.Writer
|
||||
tw *truncWriter
|
||||
p *sync.Pool
|
||||
}
|
||||
|
||||
func (w *flateWriteWrapper) Write(p []byte) (int, error) {
|
||||
if w.fw == nil {
|
||||
return 0, errWriteClosed
|
||||
}
|
||||
return w.fw.Write(p)
|
||||
}
|
||||
|
||||
func (w *flateWriteWrapper) Close() error {
|
||||
if w.fw == nil {
|
||||
return errWriteClosed
|
||||
}
|
||||
err1 := w.fw.Flush()
|
||||
w.p.Put(w.fw)
|
||||
w.fw = nil
|
||||
if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
|
||||
return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
|
||||
}
|
||||
err2 := w.tw.w.Close()
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
return err2
|
||||
}
|
||||
|
||||
type flateReadWrapper struct {
|
||||
fr io.ReadCloser
|
||||
}
|
||||
|
||||
func (r *flateReadWrapper) Read(p []byte) (int, error) {
|
||||
if r.fr == nil {
|
||||
return 0, io.ErrClosedPipe
|
||||
}
|
||||
n, err := r.fr.Read(p)
|
||||
if err == io.EOF {
|
||||
// Preemptively place the reader back in the pool. This helps with
|
||||
// scenarios where the application does not call NextReader() soon after
|
||||
// this final read.
|
||||
r.Close()
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *flateReadWrapper) Close() error {
|
||||
if r.fr == nil {
|
||||
return io.ErrClosedPipe
|
||||
}
|
||||
err := r.fr.Close()
|
||||
flateReaderPool.Put(r.fr)
|
||||
r.fr = nil
|
||||
return err
|
||||
}
|
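The flate pools above back the "no context takeover" compression mode. A client-side sketch of opting in, assuming a reachable endpoint (the URL is a placeholder) and the Conn compression methods referenced in the package documentation:

package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	d := websocket.Dialer{EnableCompression: true}
	conn, _, err := d.Dial("ws://example.com/ws", nil) // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The level must fall inside [minCompressionLevel, maxCompressionLevel].
	if err := conn.SetCompressionLevel(6); err != nil {
		log.Fatal(err)
	}
	conn.EnableWriteCompression(true)
	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatal(err)
	}
}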
File diff suppressed because it is too large
@ -0,0 +1,15 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package websocket

import "net"

func (c *Conn) writeBufs(bufs ...[]byte) error {
	b := net.Buffers(bufs)
	_, err := b.WriteTo(c.conn)
	return err
}
@ -0,0 +1,18 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.8

package websocket

func (c *Conn) writeBufs(bufs ...[]byte) error {
	for _, buf := range bufs {
		if len(buf) > 0 {
			if _, err := c.conn.Write(buf); err != nil {
				return err
			}
		}
	}
	return nil
}
@ -0,0 +1,227 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package websocket implements the WebSocket protocol defined in RFC 6455.
|
||||
//
|
||||
// Overview
|
||||
//
|
||||
// The Conn type represents a WebSocket connection. A server application calls
|
||||
// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// ReadBufferSize: 1024,
|
||||
// WriteBufferSize: 1024,
|
||||
// }
|
||||
//
|
||||
// func handler(w http.ResponseWriter, r *http.Request) {
|
||||
// conn, err := upgrader.Upgrade(w, r, nil)
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// ... Use conn to send and receive messages.
|
||||
// }
|
||||
//
|
||||
// Call the connection's WriteMessage and ReadMessage methods to send and
|
||||
// receive messages as a slice of bytes. This snippet of code shows how to echo
|
||||
// messages using these methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, p, err := conn.ReadMessage()
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// if err := conn.WriteMessage(messageType, p); err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// In the above snippet of code, p is a []byte and messageType is an int with value
// websocket.BinaryMessage or websocket.TextMessage.
|
||||
//
|
||||
// An application can also send and receive messages using the io.WriteCloser
|
||||
// and io.Reader interfaces. To send a message, call the connection NextWriter
|
||||
// method to get an io.WriteCloser, write the message to the writer and close
|
||||
// the writer when done. To receive a message, call the connection NextReader
|
||||
// method to get an io.Reader and read until io.EOF is returned. This snippet
|
||||
// shows how to echo messages using the NextWriter and NextReader methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, r, err := conn.NextReader()
|
||||
// if err != nil {
|
||||
// return
|
||||
// }
|
||||
// w, err := conn.NextWriter(messageType)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if _, err := io.Copy(w, r); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if err := w.Close(); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Data Messages
|
||||
//
|
||||
// The WebSocket protocol distinguishes between text and binary data messages.
|
||||
// Text messages are interpreted as UTF-8 encoded text. The interpretation of
|
||||
// binary messages is left to the application.
|
||||
//
|
||||
// This package uses the TextMessage and BinaryMessage integer constants to
|
||||
// identify the two data message types. The ReadMessage and NextReader methods
|
||||
// return the type of the received message. The messageType argument to the
|
||||
// WriteMessage and NextWriter methods specifies the type of a sent message.
|
||||
//
|
||||
// It is the application's responsibility to ensure that text messages are
|
||||
// valid UTF-8 encoded text.
|
||||
//
|
||||
// Control Messages
|
||||
//
|
||||
// The WebSocket protocol defines three types of control messages: close, ping
|
||||
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
|
||||
// methods to send a control message to the peer.
|
||||
//
|
||||
// Connections handle received close messages by calling the handler function
|
||||
// set with the SetCloseHandler method and by returning a *CloseError from the
|
||||
// NextReader, ReadMessage or the message Read method. The default close
|
||||
// handler sends a close message to the peer.
|
||||
//
|
||||
// Connections handle received ping messages by calling the handler function
|
||||
// set with the SetPingHandler method. The default ping handler sends a pong
|
||||
// message to the peer.
|
||||
//
|
||||
// Connections handle received pong messages by calling the handler function
|
||||
// set with the SetPongHandler method. The default pong handler does nothing.
|
||||
// If an application sends ping messages, then the application should set a
|
||||
// pong handler to receive the corresponding pong.
|
||||
//
|
||||
// The control message handler functions are called from the NextReader,
|
||||
// ReadMessage and message reader Read methods. The default close and ping
|
||||
// handlers can block these methods for a short time when the handler writes to
|
||||
// the connection.
|
||||
//
|
||||
// The application must read the connection to process close, ping and pong
|
||||
// messages sent from the peer. If the application is not otherwise interested
|
||||
// in messages from the peer, then the application should start a goroutine to
|
||||
// read and discard messages from the peer. A simple example is:
|
||||
//
|
||||
// func readLoop(c *websocket.Conn) {
|
||||
// for {
|
||||
// if _, _, err := c.NextReader(); err != nil {
|
||||
// c.Close()
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Concurrency
|
||||
//
|
||||
// Connections support one concurrent reader and one concurrent writer.
|
||||
//
|
||||
// Applications are responsible for ensuring that no more than one goroutine
|
||||
// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
|
||||
// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
|
||||
// that no more than one goroutine calls the read methods (NextReader,
|
||||
// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
|
||||
// concurrently.
|
||||
//
|
||||
// The Close and WriteControl methods can be called concurrently with all other
|
||||
// methods.
|
||||
//
|
||||
// Origin Considerations
|
||||
//
|
||||
// Web browsers allow Javascript applications to open a WebSocket connection to
|
||||
// any host. It's up to the server to enforce an origin policy using the Origin
|
||||
// request header sent by the browser.
|
||||
//
|
||||
// The Upgrader calls the function specified in the CheckOrigin field to check
|
||||
// the origin. If the CheckOrigin function returns false, then the Upgrade
|
||||
// method fails the WebSocket handshake with HTTP status 403.
|
||||
//
|
||||
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
|
||||
// the handshake if the Origin request header is present and the Origin host is
|
||||
// not equal to the Host request header.
|
||||
//
|
||||
// The deprecated package-level Upgrade function does not perform origin
|
||||
// checking. The application is responsible for checking the Origin header
|
||||
// before calling the Upgrade function.
|
||||
//
|
||||
// Buffers
|
||||
//
|
||||
// Connections buffer network input and output to reduce the number
|
||||
// of system calls when reading or writing messages.
|
||||
//
|
||||
// Write buffers are also used for constructing WebSocket frames. See RFC 6455,
|
||||
// Section 5 for a discussion of message framing. A WebSocket frame header is
|
||||
// written to the network each time a write buffer is flushed to the network.
|
||||
// Decreasing the size of the write buffer can increase the amount of framing
|
||||
// overhead on the connection.
|
||||
//
|
||||
// The buffer sizes in bytes are specified by the ReadBufferSize and
|
||||
// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default
|
||||
// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
|
||||
// buffers created by the HTTP server when a buffer size field is set to zero.
|
||||
// The HTTP server buffers have a size of 4096 at the time of this writing.
|
||||
//
|
||||
// The buffer sizes do not limit the size of a message that can be read or
|
||||
// written by a connection.
|
||||
//
|
||||
// Buffers are held for the lifetime of the connection by default. If the
|
||||
// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
|
||||
// write buffer only when writing a message.
|
||||
//
|
||||
// Applications should tune the buffer sizes to balance memory use and
|
||||
// performance. Increasing the buffer size uses more memory, but can reduce the
|
||||
// number of system calls to read or write the network. In the case of writing,
|
||||
// increasing the buffer size can reduce the number of frame headers written to
|
||||
// the network.
|
||||
//
|
||||
// Some guidelines for setting buffer parameters are:
|
||||
//
|
||||
// Limit the buffer sizes to the maximum expected message size. Buffers larger
|
||||
// than the largest message do not provide any benefit.
|
||||
//
|
||||
// Depending on the distribution of message sizes, setting the buffer size to
// a value less than the maximum expected message size can greatly reduce
|
||||
// memory use with a small impact on performance. Here's an example: If 99% of
|
||||
// the messages are smaller than 256 bytes and the maximum message size is 512
|
||||
// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls
|
||||
// than a buffer size of 512 bytes. The memory savings is 50%.
|
||||
//
|
||||
// A write buffer pool is useful when the application has a modest number of
// writes over a large number of connections. When buffers are pooled, a larger
|
||||
// buffer size has a reduced impact on total memory use and has the benefit of
|
||||
// reducing system calls and frame overhead.
|
||||
//
|
||||
// Compression EXPERIMENTAL
|
||||
//
|
||||
// Per message compression extensions (RFC 7692) are experimentally supported
|
||||
// by this package in a limited capacity. Setting the EnableCompression option
|
||||
// to true in Dialer or Upgrader will attempt to negotiate per message deflate
|
||||
// support.
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// EnableCompression: true,
|
||||
// }
|
||||
//
|
||||
// If compression was successfully negotiated with the connection's peer, any
|
||||
// message received in compressed form will be automatically decompressed.
|
||||
// All Read methods will return uncompressed bytes.
|
||||
//
|
||||
// Per message compression of messages written to a connection can be enabled
|
||||
// or disabled by calling the corresponding Conn method:
|
||||
//
|
||||
// conn.EnableWriteCompression(false)
|
||||
//
|
||||
// Currently this package does not support compression with "context takeover".
|
||||
// This means that messages must be compressed and decompressed in isolation,
|
||||
// without retaining sliding window or dictionary state across messages. For
|
||||
// more details refer to RFC 7692.
|
||||
//
|
||||
// Use of compression is experimental and may result in decreased performance.
|
||||
package websocket
|
|
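The Control Messages section above notes that an application sending pings should install a pong handler and must keep reading the connection. A sketch of a client-side keepalive that ties both together; the interval values are arbitrary:

package example

import (
	"log"
	"time"

	"github.com/gorilla/websocket"
)

func keepAlive(conn *websocket.Conn, interval time.Duration) {
	conn.SetReadDeadline(time.Now().Add(2 * interval))
	conn.SetPongHandler(func(string) error {
		// Each pong extends the read deadline.
		return conn.SetReadDeadline(time.Now().Add(2 * interval))
	})

	go func() {
		// The reader must run so ping, pong and close handlers are invoked.
		for {
			if _, _, err := conn.NextReader(); err != nil {
				conn.Close()
				return
			}
		}
	}()

	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for range ticker.C {
		if err := conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(interval)); err != nil {
			log.Println("ping:", err)
			return
		}
	}
}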
@ -0,0 +1,3 @@
module github.com/gorilla/websocket

go 1.12
@ -0,0 +1,2 @@
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
@ -0,0 +1,42 @@
|
|||
// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// JoinMessages concatenates received messages to create a single io.Reader.
|
||||
// The string term is appended to each message. The returned reader does not
|
||||
// support concurrent calls to the Read method.
|
||||
func JoinMessages(c *Conn, term string) io.Reader {
|
||||
return &joinReader{c: c, term: term}
|
||||
}
|
||||
|
||||
type joinReader struct {
|
||||
c *Conn
|
||||
term string
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
func (r *joinReader) Read(p []byte) (int, error) {
|
||||
if r.r == nil {
|
||||
var err error
|
||||
_, r.r, err = r.c.NextReader()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if r.term != "" {
|
||||
r.r = io.MultiReader(r.r, strings.NewReader(r.term))
|
||||
}
|
||||
}
|
||||
n, err := r.r.Read(p)
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
r.r = nil
|
||||
}
|
||||
return n, err
|
||||
}
|
|
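JoinMessages above turns the message stream into a single io.Reader. A sketch pairing it with bufio.Scanner, assuming the peer sends text messages that should be treated as newline-delimited lines:

package example

import (
	"bufio"
	"log"

	"github.com/gorilla/websocket"
)

// scanLines logs each message as one line; the "\n" terminator appended by
// JoinMessages gives bufio.Scanner its delimiter.
func scanLines(c *websocket.Conn) {
	s := bufio.NewScanner(websocket.JoinMessages(c, "\n"))
	for s.Scan() {
		log.Println("message:", s.Text())
	}
	if err := s.Err(); err != nil {
		log.Println("read:", err)
	}
}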
@ -0,0 +1,60 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
)
|
||||
|
||||
// WriteJSON writes the JSON encoding of v as a message.
|
||||
//
|
||||
// Deprecated: Use c.WriteJSON instead.
|
||||
func WriteJSON(c *Conn, v interface{}) error {
|
||||
return c.WriteJSON(v)
|
||||
}
|
||||
|
||||
// WriteJSON writes the JSON encoding of v as a message.
|
||||
//
|
||||
// See the documentation for encoding/json Marshal for details about the
|
||||
// conversion of Go values to JSON.
|
||||
func (c *Conn) WriteJSON(v interface{}) error {
|
||||
w, err := c.NextWriter(TextMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err1 := json.NewEncoder(w).Encode(v)
|
||||
err2 := w.Close()
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
return err2
|
||||
}
|
||||
|
||||
// ReadJSON reads the next JSON-encoded message from the connection and stores
|
||||
// it in the value pointed to by v.
|
||||
//
|
||||
// Deprecated: Use c.ReadJSON instead.
|
||||
func ReadJSON(c *Conn, v interface{}) error {
|
||||
return c.ReadJSON(v)
|
||||
}
|
||||
|
||||
// ReadJSON reads the next JSON-encoded message from the connection and stores
|
||||
// it in the value pointed to by v.
|
||||
//
|
||||
// See the documentation for the encoding/json Unmarshal function for details
|
||||
// about the conversion of JSON to a Go value.
|
||||
func (c *Conn) ReadJSON(v interface{}) error {
|
||||
_, r, err := c.NextReader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = json.NewDecoder(r).Decode(v)
|
||||
if err == io.EOF {
|
||||
// One value is expected in the message.
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
|
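A short round-trip sketch for the JSON helpers above; the struct and its fields are placeholders:

package example

import "github.com/gorilla/websocket"

type event struct {
	Kind string `json:"kind"`
	Seq  int    `json:"seq"`
}

// echoJSON decodes one JSON message into an event and writes it back.
func echoJSON(c *websocket.Conn) error {
	var ev event
	if err := c.ReadJSON(&ev); err != nil {
		return err // io.ErrUnexpectedEOF if the message held no value
	}
	return c.WriteJSON(ev)
}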
@ -0,0 +1,54 @@
|
|||
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
|
||||
// this source code is governed by a BSD-style license that can be found in the
|
||||
// LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
|
||||
package websocket
|
||||
|
||||
import "unsafe"
|
||||
|
||||
const wordSize = int(unsafe.Sizeof(uintptr(0)))
|
||||
|
||||
func maskBytes(key [4]byte, pos int, b []byte) int {
|
||||
// Mask one byte at a time for small buffers.
|
||||
if len(b) < 2*wordSize {
|
||||
for i := range b {
|
||||
b[i] ^= key[pos&3]
|
||||
pos++
|
||||
}
|
||||
return pos & 3
|
||||
}
|
||||
|
||||
// Mask one byte at a time to word boundary.
|
||||
if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
|
||||
n = wordSize - n
|
||||
for i := range b[:n] {
|
||||
b[i] ^= key[pos&3]
|
||||
pos++
|
||||
}
|
||||
b = b[n:]
|
||||
}
|
||||
|
||||
// Create aligned word size key.
|
||||
var k [wordSize]byte
|
||||
for i := range k {
|
||||
k[i] = key[(pos+i)&3]
|
||||
}
|
||||
kw := *(*uintptr)(unsafe.Pointer(&k))
|
||||
|
||||
// Mask one word at a time.
|
||||
n := (len(b) / wordSize) * wordSize
|
||||
for i := 0; i < n; i += wordSize {
|
||||
*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
|
||||
}
|
||||
|
||||
// Mask one byte at a time for remaining bytes.
|
||||
b = b[n:]
|
||||
for i := range b {
|
||||
b[i] ^= key[pos&3]
|
||||
pos++
|
||||
}
|
||||
|
||||
return pos & 3
|
||||
}
|
|
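maskBytes applies the RFC 6455 Section 5.3 frame mask, an XOR that is its own inverse; the word-at-a-time path above is only an optimization of the byte loop. A tiny self-contained check of the self-inverse property using the portable form:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	key := [4]byte{0x12, 0x34, 0x56, 0x78}
	msg := []byte("Hello")
	orig := append([]byte(nil), msg...)

	mask := func(pos int, b []byte) { // portable form of maskBytes
		for i := range b {
			b[i] ^= key[pos&3]
			pos++
		}
	}
	mask(0, msg) // mask
	mask(0, msg) // unmasking is the same operation
	fmt.Println(bytes.Equal(orig, msg)) // true
}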
@ -0,0 +1,15 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found in the
// LICENSE file.

// +build appengine

package websocket

func maskBytes(key [4]byte, pos int, b []byte) int {
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}
	return pos & 3
}
@ -0,0 +1,102 @@
|
|||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PreparedMessage caches on the wire representations of a message payload.
|
||||
// Use PreparedMessage to efficiently send a message payload to multiple
|
||||
// connections. PreparedMessage is especially useful when compression is used
|
||||
// because the CPU and memory expensive compression operation can be executed
|
||||
// once for a given set of compression options.
|
||||
type PreparedMessage struct {
|
||||
messageType int
|
||||
data []byte
|
||||
mu sync.Mutex
|
||||
frames map[prepareKey]*preparedFrame
|
||||
}
|
||||
|
||||
// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
|
||||
type prepareKey struct {
|
||||
isServer bool
|
||||
compress bool
|
||||
compressionLevel int
|
||||
}
|
||||
|
||||
// preparedFrame contains data in wire representation.
|
||||
type preparedFrame struct {
|
||||
once sync.Once
|
||||
data []byte
|
||||
}
|
||||
|
||||
// NewPreparedMessage returns an initialized PreparedMessage. You can then send
// it to a connection using the WritePreparedMessage method. The valid wire
// representation is calculated lazily, only once for a given set of
// connection options.
|
||||
func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
|
||||
pm := &PreparedMessage{
|
||||
messageType: messageType,
|
||||
frames: make(map[prepareKey]*preparedFrame),
|
||||
data: data,
|
||||
}
|
||||
|
||||
// Prepare a plain server frame.
|
||||
_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// To protect against caller modifying the data argument, remember the data
|
||||
// copied to the plain server frame.
|
||||
pm.data = frameData[len(frameData)-len(data):]
|
||||
return pm, nil
|
||||
}
|
||||
|
||||
func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
|
||||
pm.mu.Lock()
|
||||
frame, ok := pm.frames[key]
|
||||
if !ok {
|
||||
frame = &preparedFrame{}
|
||||
pm.frames[key] = frame
|
||||
}
|
||||
pm.mu.Unlock()
|
||||
|
||||
var err error
|
||||
frame.once.Do(func() {
|
||||
// Prepare a frame using a 'fake' connection.
|
||||
// TODO: Refactor code in conn.go to allow more direct construction of
|
||||
// the frame.
|
||||
mu := make(chan bool, 1)
|
||||
mu <- true
|
||||
var nc prepareConn
|
||||
c := &Conn{
|
||||
conn: &nc,
|
||||
mu: mu,
|
||||
isServer: key.isServer,
|
||||
compressionLevel: key.compressionLevel,
|
||||
enableWriteCompression: true,
|
||||
writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
|
||||
}
|
||||
if key.compress {
|
||||
c.newCompressionWriter = compressNoContextTakeover
|
||||
}
|
||||
err = c.WriteMessage(pm.messageType, pm.data)
|
||||
frame.data = nc.buf.Bytes()
|
||||
})
|
||||
return pm.messageType, frame.data, err
|
||||
}
|
||||
|
||||
type prepareConn struct {
|
||||
buf bytes.Buffer
|
||||
net.Conn
|
||||
}
|
||||
|
||||
func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
|
||||
func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
|
|
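PreparedMessage caches one wire encoding per (isServer, compress, level) key, so a broadcast does the framing and compression work once. A sketch, assuming the Conn.WritePreparedMessage method this version of the package provides and a caller-managed connection list:

package example

import (
	"log"

	"github.com/gorilla/websocket"
)

// broadcast encodes the payload once and reuses the cached frame for every
// connection instead of re-framing (and re-compressing) per subscriber.
func broadcast(conns []*websocket.Conn, payload []byte) {
	pm, err := websocket.NewPreparedMessage(websocket.TextMessage, payload)
	if err != nil {
		log.Println("prepare:", err)
		return
	}
	for _, c := range conns {
		if err := c.WritePreparedMessage(pm); err != nil {
			log.Println("write:", err)
		}
	}
}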
@ -0,0 +1,77 @@
|
|||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type netDialerFunc func(network, addr string) (net.Conn, error)
|
||||
|
||||
func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
|
||||
return fn(network, addr)
|
||||
}
|
||||
|
||||
func init() {
|
||||
proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
|
||||
return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
|
||||
})
|
||||
}
|
||||
|
||||
type httpProxyDialer struct {
|
||||
proxyURL *url.URL
|
||||
forwardDial func(network, addr string) (net.Conn, error)
|
||||
}
|
||||
|
||||
func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
|
||||
hostPort, _ := hostPortNoPort(hpd.proxyURL)
|
||||
conn, err := hpd.forwardDial(network, hostPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
connectHeader := make(http.Header)
|
||||
if user := hpd.proxyURL.User; user != nil {
|
||||
proxyUser := user.Username()
|
||||
if proxyPassword, passwordSet := user.Password(); passwordSet {
|
||||
credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
|
||||
connectHeader.Set("Proxy-Authorization", "Basic "+credential)
|
||||
}
|
||||
}
|
||||
|
||||
connectReq := &http.Request{
|
||||
Method: "CONNECT",
|
||||
URL: &url.URL{Opaque: addr},
|
||||
Host: addr,
|
||||
Header: connectHeader,
|
||||
}
|
||||
|
||||
if err := connectReq.Write(conn); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read response. It's OK to use and discard buffered reader here because
// the remote server does not speak until spoken to.
|
||||
br := bufio.NewReader(conn)
|
||||
resp, err := http.ReadResponse(br, connectReq)
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
conn.Close()
|
||||
f := strings.SplitN(resp.Status, " ", 2)
|
||||
return nil, errors.New(f[1])
|
||||
}
|
||||
return conn, nil
|
||||
}
|
|
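The httpProxyDialer above is selected through the Dialer's Proxy function. A sketch of dialing through an HTTP CONNECT proxy taken from the environment; the URL argument is supplied by the caller:

package example

import (
	"net/http"

	"github.com/gorilla/websocket"
)

// dialViaProxy honors HTTP_PROXY/HTTPS_PROXY/NO_PROXY when establishing the
// WebSocket connection; the CONNECT handling lives in the code above.
func dialViaProxy(url string) (*websocket.Conn, error) {
	d := websocket.Dialer{Proxy: http.ProxyFromEnvironment}
	conn, _, err := d.Dial(url, nil)
	return conn, err
}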
@ -0,0 +1,363 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HandshakeError describes an error with the handshake from the peer.
|
||||
type HandshakeError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func (e HandshakeError) Error() string { return e.message }
|
||||
|
||||
// Upgrader specifies parameters for upgrading an HTTP connection to a
|
||||
// WebSocket connection.
|
||||
type Upgrader struct {
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
|
||||
// size is zero, then buffers allocated by the HTTP server are used. The
|
||||
// I/O buffer sizes do not limit the size of the messages that can be sent
|
||||
// or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the server's supported protocols in order of
|
||||
// preference. If this field is not nil, then the Upgrade method negotiates a
|
||||
// subprotocol by selecting the first match in this list with a protocol
|
||||
// requested by the client. If there's no match, then no protocol is
|
||||
// negotiated (the Sec-Websocket-Protocol header is not included in the
|
||||
// handshake response).
|
||||
Subprotocols []string
|
||||
|
||||
// Error specifies the function for generating HTTP error responses. If Error
|
||||
// is nil, then http.Error is used to generate the HTTP response.
|
||||
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
|
||||
|
||||
// CheckOrigin returns true if the request Origin header is acceptable. If
|
||||
// CheckOrigin is nil, then a safe default is used: return false if the
|
||||
// Origin request header is present and the origin host is not equal to
|
||||
// request Host header.
|
||||
//
|
||||
// A CheckOrigin function should carefully validate the request origin to
|
||||
// prevent cross-site request forgery.
|
||||
CheckOrigin func(r *http.Request) bool
|
||||
|
||||
// EnableCompression specify if the server should attempt to negotiate per
|
||||
// message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
}
|
||||
|
||||
func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
|
||||
err := HandshakeError{reason}
|
||||
if u.Error != nil {
|
||||
u.Error(w, r, status, err)
|
||||
} else {
|
||||
w.Header().Set("Sec-Websocket-Version", "13")
|
||||
http.Error(w, http.StatusText(status), status)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// checkSameOrigin returns true if the origin is not set or is equal to the request host.
|
||||
func checkSameOrigin(r *http.Request) bool {
|
||||
origin := r.Header["Origin"]
|
||||
if len(origin) == 0 {
|
||||
return true
|
||||
}
|
||||
u, err := url.Parse(origin[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return equalASCIIFold(u.Host, r.Host)
|
||||
}
|
||||
|
||||
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
|
||||
if u.Subprotocols != nil {
|
||||
clientProtocols := Subprotocols(r)
|
||||
for _, serverProtocol := range u.Subprotocols {
|
||||
for _, clientProtocol := range clientProtocols {
|
||||
if clientProtocol == serverProtocol {
|
||||
return clientProtocol
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if responseHeader != nil {
|
||||
return responseHeader.Get("Sec-Websocket-Protocol")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// application negotiated subprotocol (Sec-WebSocket-Protocol).
|
||||
//
|
||||
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
|
||||
// response.
|
||||
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
|
||||
const badHandshake = "websocket: the client is not using the websocket protocol: "
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
|
||||
}
|
||||
|
||||
if r.Method != "GET" {
|
||||
return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
|
||||
}
|
||||
|
||||
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
|
||||
}
|
||||
|
||||
checkOrigin := u.CheckOrigin
|
||||
if checkOrigin == nil {
|
||||
checkOrigin = checkSameOrigin
|
||||
}
|
||||
if !checkOrigin(r) {
|
||||
return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
|
||||
}
|
||||
|
||||
challengeKey := r.Header.Get("Sec-Websocket-Key")
|
||||
if challengeKey == "" {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank")
|
||||
}
|
||||
|
||||
subprotocol := u.selectSubprotocol(r, responseHeader)
|
||||
|
||||
// Negotiate PMCE
|
||||
var compress bool
|
||||
if u.EnableCompression {
|
||||
for _, ext := range parseExtensions(r.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
compress = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
h, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
|
||||
}
|
||||
var brw *bufio.ReadWriter
|
||||
netConn, brw, err := h.Hijack()
|
||||
if err != nil {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
|
||||
if brw.Reader.Buffered() > 0 {
|
||||
netConn.Close()
|
||||
return nil, errors.New("websocket: client sent data before handshake is complete")
|
||||
}
|
||||
|
||||
var br *bufio.Reader
|
||||
if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
|
||||
// Reuse hijacked buffered reader as connection reader.
|
||||
br = brw.Reader
|
||||
}
|
||||
|
||||
buf := bufioWriterBuffer(netConn, brw.Writer)
|
||||
|
||||
var writeBuf []byte
|
||||
if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
|
||||
// Reuse hijacked write buffer as connection buffer.
|
||||
writeBuf = buf
|
||||
}
|
||||
|
||||
c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
|
||||
c.subprotocol = subprotocol
|
||||
|
||||
if compress {
|
||||
c.newCompressionWriter = compressNoContextTakeover
|
||||
c.newDecompressionReader = decompressNoContextTakeover
|
||||
}
|
||||
|
||||
// Use larger of hijacked buffer and connection write buffer for header.
|
||||
p := buf
|
||||
if len(c.writeBuf) > len(p) {
|
||||
p = c.writeBuf
|
||||
}
|
||||
p = p[:0]
|
||||
|
||||
p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
|
||||
p = append(p, computeAcceptKey(challengeKey)...)
|
||||
p = append(p, "\r\n"...)
|
||||
if c.subprotocol != "" {
|
||||
p = append(p, "Sec-WebSocket-Protocol: "...)
|
||||
p = append(p, c.subprotocol...)
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
if compress {
|
||||
p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
|
||||
}
|
||||
for k, vs := range responseHeader {
|
||||
if k == "Sec-Websocket-Protocol" {
|
||||
continue
|
||||
}
|
||||
for _, v := range vs {
|
||||
p = append(p, k...)
|
||||
p = append(p, ": "...)
|
||||
for i := 0; i < len(v); i++ {
|
||||
b := v[i]
|
||||
if b <= 31 {
|
||||
// prevent response splitting.
|
||||
b = ' '
|
||||
}
|
||||
p = append(p, b)
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
|
||||
// Clear deadlines set by HTTP server.
|
||||
netConn.SetDeadline(time.Time{})
|
||||
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
|
||||
}
|
||||
if _, err = netConn.Write(p); err != nil {
|
||||
netConn.Close()
|
||||
return nil, err
|
||||
}
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Time{})
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// Deprecated: Use websocket.Upgrader instead.
|
||||
//
|
||||
// Upgrade does not perform origin checking. The application is responsible for
|
||||
// checking the Origin header before calling Upgrade. An example implementation
|
||||
// of the same origin policy check is:
|
||||
//
|
||||
// if req.Header.Get("Origin") != "http://"+req.Host {
|
||||
// http.Error(w, "Origin not allowed", http.StatusForbidden)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// If the endpoint supports subprotocols, then the application is responsible
|
||||
// for negotiating the protocol used on the connection. Use the Subprotocols()
|
||||
// function to get the subprotocols requested by the client. Use the
|
||||
// Sec-Websocket-Protocol response header to specify the subprotocol selected
|
||||
// by the application.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// negotiated subprotocol (Sec-Websocket-Protocol).
|
||||
//
|
||||
// The connection buffers IO to the underlying network connection. The
|
||||
// readBufSize and writeBufSize parameters specify the size of the buffers to
|
||||
// use. Messages can be larger than the buffers.
|
||||
//
|
||||
// If the request is not a valid WebSocket handshake, then Upgrade returns an
|
||||
// error of type HandshakeError. Applications should handle this error by
|
||||
// replying to the client with an HTTP error response.
|
||||
func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
|
||||
u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
|
||||
u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
|
||||
// don't return errors to maintain backwards compatibility
|
||||
}
|
||||
u.CheckOrigin = func(r *http.Request) bool {
|
||||
// allow all connections by default
|
||||
return true
|
||||
}
|
||||
return u.Upgrade(w, r, responseHeader)
|
||||
}
|
||||
|
||||
// Subprotocols returns the subprotocols requested by the client in the
|
||||
// Sec-Websocket-Protocol header.
|
||||
func Subprotocols(r *http.Request) []string {
|
||||
h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
|
||||
if h == "" {
|
||||
return nil
|
||||
}
|
||||
protocols := strings.Split(h, ",")
|
||||
for i := range protocols {
|
||||
protocols[i] = strings.TrimSpace(protocols[i])
|
||||
}
|
||||
return protocols
|
||||
}
|
||||
|
||||
// IsWebSocketUpgrade returns true if the client requested upgrade to the
|
||||
// WebSocket protocol.
|
||||
func IsWebSocketUpgrade(r *http.Request) bool {
|
||||
return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
|
||||
tokenListContainsValue(r.Header, "Upgrade", "websocket")
|
||||
}
|
||||
|
||||
// bufioReaderSize returns the size of a bufio.Reader.
|
||||
func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
|
||||
// This code assumes that peek on a reset reader returns
|
||||
// bufio.Reader.buf[:0].
|
||||
// TODO: Use bufio.Reader.Size() after Go 1.10
|
||||
br.Reset(originalReader)
|
||||
if p, err := br.Peek(0); err == nil {
|
||||
return cap(p)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// writeHook is an io.Writer that records the last slice passed to it via
// io.Writer.Write.
|
||||
type writeHook struct {
|
||||
p []byte
|
||||
}
|
||||
|
||||
func (wh *writeHook) Write(p []byte) (int, error) {
|
||||
wh.p = p
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// bufioWriterBuffer grabs the buffer from a bufio.Writer.
|
||||
func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
|
||||
// This code assumes that bufio.Writer.buf[:1] is passed to the
|
||||
// bufio.Writer's underlying writer.
|
||||
var wh writeHook
|
||||
bw.Reset(&wh)
|
||||
bw.WriteByte(0)
|
||||
bw.Flush()
|
||||
|
||||
bw.Reset(originalWriter)
|
||||
|
||||
return wh.p[:cap(wh.p)]
|
||||
}
|
|
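Upgrade above negotiates the first entry of Upgrader.Subprotocols that the client also offered. A server sketch that advertises two protocols (the names are placeholders) and inspects what was agreed:

package example

import (
	"log"
	"net/http"
	"time"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{
	HandshakeTimeout: 10 * time.Second,
	Subprotocols:     []string{"chat.v2", "chat.v1"}, // placeholder protocol names
}

func handle(w http.ResponseWriter, r *http.Request) {
	if !websocket.IsWebSocketUpgrade(r) {
		http.Error(w, "expected a WebSocket handshake", http.StatusBadRequest)
		return
	}
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		// Upgrade has already written an HTTP error response.
		log.Println("upgrade:", err)
		return
	}
	defer conn.Close()
	log.Println("negotiated subprotocol:", conn.Subprotocol())
}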
@ -0,0 +1,19 @@
// +build go1.8

package websocket

import (
	"crypto/tls"
	"net/http/httptrace"
)

func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
	if trace.TLSHandshakeStart != nil {
		trace.TLSHandshakeStart()
	}
	err := doHandshake(tlsConn, cfg)
	if trace.TLSHandshakeDone != nil {
		trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
	}
	return err
}
|
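doHandshakeWithTrace reports TLS handshake timing to an httptrace.ClientTrace attached to the dial context. A client sketch, assuming Dialer.DialContext is available in this version of the package:

package example

import (
	"context"
	"crypto/tls"
	"log"
	"net/http/httptrace"

	"github.com/gorilla/websocket"
)

// dialWithTrace logs when the TLS handshake starts and finishes, which is what
// the go1.8 path above feeds into the trace.
func dialWithTrace(url string) (*websocket.Conn, error) {
	trace := &httptrace.ClientTrace{
		TLSHandshakeStart: func() { log.Println("TLS handshake started") },
		TLSHandshakeDone: func(cs tls.ConnectionState, err error) {
			log.Println("TLS handshake done:", cs.Version, err)
		},
	}
	ctx := httptrace.WithClientTrace(context.Background(), trace)
	conn, _, err := websocket.DefaultDialer.DialContext(ctx, url, nil)
	return conn, err
}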
@ -0,0 +1,12 @@
// +build !go1.8

package websocket

import (
	"crypto/tls"
	"net/http/httptrace"
)

func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
	return doHandshake(tlsConn, cfg)
}
@ -0,0 +1,283 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
|
||||
|
||||
func computeAcceptKey(challengeKey string) string {
|
||||
h := sha1.New()
|
||||
h.Write([]byte(challengeKey))
|
||||
h.Write(keyGUID)
|
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
func generateChallengeKey() (string, error) {
|
||||
p := make([]byte, 16)
|
||||
if _, err := io.ReadFull(rand.Reader, p); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(p), nil
|
||||
}
|
||||
|
||||
// Token octets per RFC 2616.
|
||||
var isTokenOctet = [256]bool{
|
||||
'!': true,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'W': true,
|
||||
'V': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'|': true,
|
||||
'~': true,
|
||||
}
|
||||
|
||||
// skipSpace returns a slice of the string s with all leading RFC 2616 linear
|
||||
// whitespace removed.
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if b := s[i]; b != ' ' && b != '\t' {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
// nextToken returns the leading RFC 2616 token of s and the string following
|
||||
// the token.
|
||||
func nextToken(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if !isTokenOctet[s[i]] {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616
|
||||
// and the string following the token or quoted string.
|
||||
func nextTokenOrQuoted(s string) (value string, rest string) {
|
||||
if !strings.HasPrefix(s, "\"") {
|
||||
return nextToken(s)
|
||||
}
|
||||
s = s[1:]
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '"':
|
||||
return s[:i], s[i+1:]
|
||||
case '\\':
|
||||
p := make([]byte, len(s)-1)
|
||||
j := copy(p, s[:i])
|
||||
escape := true
|
||||
for i = i + 1; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case escape:
|
||||
escape = false
|
||||
p[j] = b
|
||||
j++
|
||||
case b == '\\':
|
||||
escape = true
|
||||
case b == '"':
|
||||
return string(p[:j]), s[i+1:]
|
||||
default:
|
||||
p[j] = b
|
||||
j++
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// equalASCIIFold returns true if s is equal to t with ASCII case folding as
|
||||
// defined in RFC 4790.
|
||||
func equalASCIIFold(s, t string) bool {
|
||||
for s != "" && t != "" {
|
||||
sr, size := utf8.DecodeRuneInString(s)
|
||||
s = s[size:]
|
||||
tr, size := utf8.DecodeRuneInString(t)
|
||||
t = t[size:]
|
||||
if sr == tr {
|
||||
continue
|
||||
}
|
||||
if 'A' <= sr && sr <= 'Z' {
|
||||
sr = sr + 'a' - 'A'
|
||||
}
|
||||
if 'A' <= tr && tr <= 'Z' {
|
||||
tr = tr + 'a' - 'A'
|
||||
}
|
||||
if sr != tr {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return s == t
|
||||
}
|
||||
|
||||
// tokenListContainsValue returns true if the 1#token header with the given
|
||||
// name contains a token equal to value with ASCII case folding.
|
||||
func tokenListContainsValue(header http.Header, name string, value string) bool {
|
||||
headers:
|
||||
for _, s := range header[name] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
if equalASCIIFold(t, value) {
|
||||
return true
|
||||
}
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// parseExtensions parses WebSocket extensions from a header.
|
||||
func parseExtensions(header http.Header) []map[string]string {
|
||||
// From RFC 6455:
|
||||
//
|
||||
// Sec-WebSocket-Extensions = extension-list
|
||||
// extension-list = 1#extension
|
||||
// extension = extension-token *( ";" extension-param )
|
||||
// extension-token = registered-token
|
||||
// registered-token = token
|
||||
// extension-param = token [ "=" (token | quoted-string) ]
|
||||
// ;When using the quoted-string syntax variant, the value
|
||||
// ;after quoted-string unescaping MUST conform to the
|
||||
// ;'token' ABNF.
|
||||
|
||||
var result []map[string]string
|
||||
headers:
|
||||
for _, s := range header["Sec-Websocket-Extensions"] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
ext := map[string]string{"": t}
|
||||
for {
|
||||
s = skipSpace(s)
|
||||
if !strings.HasPrefix(s, ";") {
|
||||
break
|
||||
}
|
||||
var k string
|
||||
k, s = nextToken(skipSpace(s[1:]))
|
||||
if k == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
var v string
|
||||
if strings.HasPrefix(s, "=") {
|
||||
v, s = nextTokenOrQuoted(skipSpace(s[1:]))
|
||||
s = skipSpace(s)
|
||||
}
|
||||
if s != "" && s[0] != ',' && s[0] != ';' {
|
||||
continue headers
|
||||
}
|
||||
ext[k] = v
|
||||
}
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
result = append(result, ext)
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
|
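parseExtensions returns one map per offered extension, with the extension token stored under the empty key and each parameter under its own name. A hypothetical in-package test sketch showing the expected shape for a common permessage-deflate offer:

package websocket

import (
	"net/http"
	"reflect"
	"testing"
)

// TestParseExtensionsSketch is illustrative only; the header value and the
// expected map mirror the grammar quoted in parseExtensions.
func TestParseExtensionsSketch(t *testing.T) {
	h := http.Header{"Sec-Websocket-Extensions": {"permessage-deflate; client_max_window_bits"}}
	got := parseExtensions(h)
	want := []map[string]string{{"": "permessage-deflate", "client_max_window_bits": ""}}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
}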
@ -0,0 +1,473 @@
|
|||
// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
|
||||
//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
|
||||
|
||||
// Package proxy provides support for a variety of protocols to proxy network
|
||||
// data.
|
||||
//
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type proxy_direct struct{}
|
||||
|
||||
// Direct is a direct proxy: one that makes network connections directly.
|
||||
var proxy_Direct = proxy_direct{}
|
||||
|
||||
func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
|
||||
return net.Dial(network, addr)
|
||||
}
|
||||
|
||||
// A PerHost directs connections to a default Dialer unless the host name
|
||||
// requested matches one of a number of exceptions.
|
||||
type proxy_PerHost struct {
|
||||
def, bypass proxy_Dialer
|
||||
|
||||
bypassNetworks []*net.IPNet
|
||||
bypassIPs []net.IP
|
||||
bypassZones []string
|
||||
bypassHosts []string
|
||||
}
|
||||
|
||||
// NewPerHost returns a PerHost Dialer that directs connections to either
|
||||
// defaultDialer or bypass, depending on whether the connection matches one of
|
||||
// the configured rules.
|
||||
func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
|
||||
return &proxy_PerHost{
|
||||
def: defaultDialer,
|
||||
bypass: bypass,
|
||||
}
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network through either
|
||||
// defaultDialer or bypass.
|
||||
func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p.dialerForRequest(host).Dial(network, addr)
|
||||
}
|
||||
|
||||
func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
for _, net := range p.bypassNetworks {
|
||||
if net.Contains(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassIP := range p.bypassIPs {
|
||||
if bypassIP.Equal(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
for _, zone := range p.bypassZones {
|
||||
if strings.HasSuffix(host, zone) {
|
||||
return p.bypass
|
||||
}
|
||||
if host == zone[1:] {
|
||||
// For a zone ".example.com", we match "example.com"
|
||||
// too.
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassHost := range p.bypassHosts {
|
||||
if bypassHost == host {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
// AddFromString parses a string that contains comma-separated values
|
||||
// specifying hosts that should use the bypass proxy. Each value is either an
|
||||
// IP address, a CIDR range, a zone (*.example.com) or a host name
|
||||
// (localhost). A best effort is made to parse the string and errors are
|
||||
// ignored.
|
||||
func (p *proxy_PerHost) AddFromString(s string) {
|
||||
hosts := strings.Split(s, ",")
|
||||
for _, host := range hosts {
|
||||
host = strings.TrimSpace(host)
|
||||
if len(host) == 0 {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(host, "/") {
|
||||
// We assume that it's a CIDR address like 127.0.0.0/8
|
||||
if _, net, err := net.ParseCIDR(host); err == nil {
|
||||
p.AddNetwork(net)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
p.AddIP(ip)
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(host, "*.") {
|
||||
p.AddZone(host[1:])
|
||||
continue
|
||||
}
|
||||
p.AddHost(host)
|
||||
}
|
||||
}
|
||||
|
||||
// AddIP specifies an IP address that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match an IP.
|
||||
func (p *proxy_PerHost) AddIP(ip net.IP) {
|
||||
p.bypassIPs = append(p.bypassIPs, ip)
|
||||
}
|
||||
|
||||
// AddNetwork specifies an IP range that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match.
|
||||
func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
|
||||
p.bypassNetworks = append(p.bypassNetworks, net)
|
||||
}
|
||||
|
||||
// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
|
||||
// "example.com" matches "example.com" and all of its subdomains.
|
||||
func (p *proxy_PerHost) AddZone(zone string) {
|
||||
if strings.HasSuffix(zone, ".") {
|
||||
zone = zone[:len(zone)-1]
|
||||
}
|
||||
if !strings.HasPrefix(zone, ".") {
|
||||
zone = "." + zone
|
||||
}
|
||||
p.bypassZones = append(p.bypassZones, zone)
|
||||
}
|
||||
|
||||
// AddHost specifies a host name that will use the bypass proxy.
|
||||
func (p *proxy_PerHost) AddHost(host string) {
|
||||
if strings.HasSuffix(host, ".") {
|
||||
host = host[:len(host)-1]
|
||||
}
|
||||
p.bypassHosts = append(p.bypassHosts, host)
|
||||
}
|
||||
|
||||
// A Dialer is a means to establish a connection.
|
||||
type proxy_Dialer interface {
|
||||
// Dial connects to the given address via the proxy.
|
||||
Dial(network, addr string) (c net.Conn, err error)
|
||||
}
|
||||
|
||||
// Auth contains authentication parameters that specific Dialers may require.
|
||||
type proxy_Auth struct {
|
||||
User, Password string
|
||||
}
|
||||
|
||||
// FromEnvironment returns the dialer specified by the proxy related variables in
|
||||
// the environment.
|
||||
func proxy_FromEnvironment() proxy_Dialer {
|
||||
allProxy := proxy_allProxyEnv.Get()
|
||||
if len(allProxy) == 0 {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
proxyURL, err := url.Parse(allProxy)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
noProxy := proxy_noProxyEnv.Get()
|
||||
if len(noProxy) == 0 {
|
||||
return proxy
|
||||
}
|
||||
|
||||
perHost := proxy_NewPerHost(proxy, proxy_Direct)
|
||||
perHost.AddFromString(noProxy)
|
||||
return perHost
|
||||
}
|
||||
|
||||
// proxySchemes is a map from URL schemes to a function that creates a Dialer
|
||||
// from a URL with such a scheme.
|
||||
var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
|
||||
|
||||
// RegisterDialerType takes a URL scheme and a function to generate Dialers from
|
||||
// a URL with that scheme and a forwarding Dialer. Registered schemes are used
|
||||
// by FromURL.
|
||||
func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
|
||||
if proxy_proxySchemes == nil {
|
||||
proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
|
||||
}
|
||||
proxy_proxySchemes[scheme] = f
|
||||
}
|
||||
|
||||
// FromURL returns a Dialer given a URL specification and an underlying
|
||||
// Dialer for it to make network requests.
|
||||
func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
var auth *proxy_Auth
|
||||
if u.User != nil {
|
||||
auth = new(proxy_Auth)
|
||||
auth.User = u.User.Username()
|
||||
if p, ok := u.User.Password(); ok {
|
||||
auth.Password = p
|
||||
}
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "socks5":
|
||||
return proxy_SOCKS5("tcp", u.Host, auth, forward)
|
||||
}
|
||||
|
||||
// If the scheme doesn't match any of the built-in schemes, see if it
|
||||
// was registered by another package.
|
||||
if proxy_proxySchemes != nil {
|
||||
if f, ok := proxy_proxySchemes[u.Scheme]; ok {
|
||||
return f(u, forward)
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
|
||||
}
|
||||
|
||||
var (
|
||||
proxy_allProxyEnv = &proxy_envOnce{
|
||||
names: []string{"ALL_PROXY", "all_proxy"},
|
||||
}
|
||||
proxy_noProxyEnv = &proxy_envOnce{
|
||||
names: []string{"NO_PROXY", "no_proxy"},
|
||||
}
|
||||
)
|
||||
|
||||
// envOnce looks up an environment variable (optionally by multiple
|
||||
// names) once. It mitigates expensive lookups on some platforms
|
||||
// (e.g. Windows).
|
||||
// (Borrowed from net/http/transport.go)
|
||||
type proxy_envOnce struct {
|
||||
names []string
|
||||
once sync.Once
|
||||
val string
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) Get() string {
|
||||
e.once.Do(e.init)
|
||||
return e.val
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) init() {
|
||||
for _, n := range e.names {
|
||||
e.val = os.Getenv(n)
|
||||
if e.val != "" {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
|
||||
// with an optional username and password. See RFC 1928 and RFC 1929.
|
||||
func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
s := &proxy_socks5{
|
||||
network: network,
|
||||
addr: addr,
|
||||
forward: forward,
|
||||
}
|
||||
if auth != nil {
|
||||
s.user = auth.User
|
||||
s.password = auth.Password
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
type proxy_socks5 struct {
|
||||
user, password string
|
||||
network, addr string
|
||||
forward proxy_Dialer
|
||||
}
|
||||
|
||||
const proxy_socks5Version = 5
|
||||
|
||||
const (
|
||||
proxy_socks5AuthNone = 0
|
||||
proxy_socks5AuthPassword = 2
|
||||
)
|
||||
|
||||
const proxy_socks5Connect = 1
|
||||
|
||||
const (
|
||||
proxy_socks5IP4 = 1
|
||||
proxy_socks5Domain = 3
|
||||
proxy_socks5IP6 = 4
|
||||
)
|
||||
|
||||
var proxy_socks5Errors = []string{
|
||||
"",
|
||||
"general failure",
|
||||
"connection forbidden",
|
||||
"network unreachable",
|
||||
"host unreachable",
|
||||
"connection refused",
|
||||
"TTL expired",
|
||||
"command not supported",
|
||||
"address type not supported",
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network via the SOCKS5 proxy.
|
||||
func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
|
||||
switch network {
|
||||
case "tcp", "tcp6", "tcp4":
|
||||
default:
|
||||
return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
|
||||
}
|
||||
|
||||
conn, err := s.forward.Dial(s.network, s.addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.connect(conn, addr); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// connect takes an existing connection to a socks5 proxy server,
|
||||
// and commands the server to extend that connection to target,
|
||||
// which must be a canonical address with a host and port.
|
||||
func (s *proxy_socks5) connect(conn net.Conn, target string) error {
|
||||
host, portStr, err := net.SplitHostPort(target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to parse port number: " + portStr)
|
||||
}
|
||||
if port < 1 || port > 0xffff {
|
||||
return errors.New("proxy: port number out of range: " + portStr)
|
||||
}
|
||||
|
||||
// the size here is just an estimate
|
||||
buf := make([]byte, 0, 6+len(host))
|
||||
|
||||
buf = append(buf, proxy_socks5Version)
|
||||
if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
|
||||
buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
|
||||
} else {
|
||||
buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
|
||||
}
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
if buf[0] != 5 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
|
||||
}
|
||||
if buf[1] == 0xff {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
|
||||
}
|
||||
|
||||
// See RFC 1929
|
||||
if buf[1] == proxy_socks5AuthPassword {
|
||||
buf = buf[:0]
|
||||
buf = append(buf, 1 /* password protocol version */)
|
||||
buf = append(buf, uint8(len(s.user)))
|
||||
buf = append(buf, s.user...)
|
||||
buf = append(buf, uint8(len(s.password)))
|
||||
buf = append(buf, s.password...)
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if buf[1] != 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
|
||||
}
|
||||
}
|
||||
|
||||
buf = buf[:0]
|
||||
buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
|
||||
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
if ip4 := ip.To4(); ip4 != nil {
|
||||
buf = append(buf, proxy_socks5IP4)
|
||||
ip = ip4
|
||||
} else {
|
||||
buf = append(buf, proxy_socks5IP6)
|
||||
}
|
||||
buf = append(buf, ip...)
|
||||
} else {
|
||||
if len(host) > 255 {
|
||||
return errors.New("proxy: destination host name too long: " + host)
|
||||
}
|
||||
buf = append(buf, proxy_socks5Domain)
|
||||
buf = append(buf, byte(len(host)))
|
||||
buf = append(buf, host...)
|
||||
}
|
||||
buf = append(buf, byte(port>>8), byte(port))
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
|
||||
return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
failure := "unknown error"
|
||||
if int(buf[1]) < len(proxy_socks5Errors) {
|
||||
failure = proxy_socks5Errors[buf[1]]
|
||||
}
|
||||
|
||||
if len(failure) > 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
|
||||
}
|
||||
|
||||
bytesToDiscard := 0
|
||||
switch buf[3] {
|
||||
case proxy_socks5IP4:
|
||||
bytesToDiscard = net.IPv4len
|
||||
case proxy_socks5IP6:
|
||||
bytesToDiscard = net.IPv6len
|
||||
case proxy_socks5Domain:
|
||||
_, err := io.ReadFull(conn, buf[:1])
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
bytesToDiscard = int(buf[0])
|
||||
default:
|
||||
return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
|
||||
}
|
||||
|
||||
if cap(buf) < bytesToDiscard {
|
||||
buf = make([]byte, bytesToDiscard)
|
||||
} else {
|
||||
buf = buf[:bytesToDiscard]
|
||||
}
|
||||
if _, err := io.ReadFull(conn, buf); err != nil {
|
||||
return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
// Also need to discard the port number
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
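The vendored proxy_* helpers above mirror the public golang.org/x/net/proxy package. A minimal sketch of how that surface is normally consumed, assuming a SOCKS5 proxy is reachable at the illustrative address below:

package main

import (
    "log"
    "net/http"

    "golang.org/x/net/proxy"
)

func main() {
    // Build a SOCKS5 dialer; passing nil auth skips username/password negotiation.
    dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
    if err != nil {
        log.Fatal(err)
    }

    // Route an HTTP client through the proxy by swapping in the dialer.
    httpClient := &http.Client{
        Transport: &http.Transport{Dial: dialer.Dial},
    }
    resp, err := httpClient.Get("https://example.com")
    if err != nil {
        log.Fatal(err)
    }
    resp.Body.Close()
}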
@ -28,8 +28,9 @@ Here's a snippet demonstrating how this library is meant to be used:
|
|||
func httpClient() (*http.Client, error)
|
||||
tlsConfig := &tls.Config{}
|
||||
err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
|
||||
CAFile: os.Getenv("MYAPP_CAFILE"),
|
||||
CAPath: os.Getenv("MYAPP_CAPATH"),
|
||||
CAFile: os.Getenv("MYAPP_CAFILE"),
|
||||
CAPath: os.Getenv("MYAPP_CAPATH"),
|
||||
Certificate: os.Getenv("MYAPP_CERTIFICATE"),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -3,21 +3,26 @@ package rootcerts
|
|||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Config determines where LoadCACerts will load certificates from. When both
|
||||
// CAFile and CAPath are blank, this library's functions will either load
|
||||
// Config determines where LoadCACerts will load certificates from. When CAFile,
|
||||
// CACertificate and CAPath are blank, this library's functions will either load
|
||||
// system roots explicitly and return them, or set the CertPool to nil to allow
|
||||
// Go's standard library to load system certs.
|
||||
type Config struct {
|
||||
// CAFile is a path to a PEM-encoded certificate file or bundle. Takes
|
||||
// precedence over CAPath.
|
||||
// precedence over CACertificate and CAPath.
|
||||
CAFile string
|
||||
|
||||
// CACertificate is a PEM-encoded certificate or bundle. Takes precedence
|
||||
// over CAPath.
|
||||
CACertificate []byte
|
||||
|
||||
// CAPath is a path to a directory populated with PEM-encoded certificates.
|
||||
CAPath string
|
||||
}
|
||||
|
@ -44,6 +49,9 @@ func LoadCACerts(c *Config) (*x509.CertPool, error) {
|
|||
if c.CAFile != "" {
|
||||
return LoadCAFile(c.CAFile)
|
||||
}
|
||||
if len(c.CACertificate) != 0 {
|
||||
return AppendCertificate(c.CACertificate)
|
||||
}
|
||||
if c.CAPath != "" {
|
||||
return LoadCAPath(c.CAPath)
|
||||
}
|
||||
|
@ -68,6 +76,18 @@ func LoadCAFile(caFile string) (*x509.CertPool, error) {
|
|||
return pool, nil
|
||||
}
|
||||
|
||||
// AppendCertificate appends an in-memory PEM-encoded certificate or bundle and returns a pool.
|
||||
func AppendCertificate(ca []byte) (*x509.CertPool, error) {
|
||||
pool := x509.NewCertPool()
|
||||
|
||||
ok := pool.AppendCertsFromPEM(ca)
|
||||
if !ok {
|
||||
return nil, errors.New("Error appending CA: Couldn't parse PEM")
|
||||
}
|
||||
|
||||
return pool, nil
|
||||
}
|
||||
|
||||
// LoadCAPath walks the provided path and loads all certificates encountered into
|
||||
// a pool.
|
||||
func LoadCAPath(caPath string) (*x509.CertPool, error) {
|
||||
|
|
|
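A minimal sketch of the new in-memory CACertificate option added above, assuming the PEM bundle is already loaded in memory; the helper name is illustrative:

package main

import (
    "crypto/tls"
    "net/http"

    rootcerts "github.com/hashicorp/go-rootcerts"
)

// newTLSClient is a hypothetical helper: it builds an HTTP client whose root CAs
// come from an in-memory PEM bundle rather than a file or directory on disk.
func newTLSClient(caCertPEM []byte) (*http.Client, error) {
    tlsConfig := &tls.Config{}
    err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
        CACertificate: caCertPEM, // takes precedence over CAPath, same as CAFile does
    })
    if err != nil {
        return nil, err
    }
    return &http.Client{
        Transport: &http.Transport{TLSClientConfig: tlsConfig},
    }, nil
}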
@ -237,6 +237,57 @@ func (a *Agent) Health() (*AgentHealthResponse, error) {
|
|||
return nil, fmt.Errorf("unable to unmarshal response with status %d: %v", resp.StatusCode, err)
|
||||
}
|
||||
|
||||
// Monitor returns a channel which will receive streaming logs from the agent
|
||||
// Providing a non-nil stopCh can be used to close the connection and stop log streaming
|
||||
func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) {
|
||||
errCh := make(chan error, 1)
|
||||
r, err := a.client.newRequest("GET", "/v1/agent/monitor")
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return nil, errCh
|
||||
}
|
||||
|
||||
r.setQueryOptions(q)
|
||||
_, resp, err := requireOK(a.client.doRequest(r))
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return nil, errCh
|
||||
}
|
||||
|
||||
frames := make(chan *StreamFrame, 10)
|
||||
go func() {
|
||||
defer resp.Body.Close()
|
||||
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-stopCh:
|
||||
close(frames)
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Decode the next frame
|
||||
var frame StreamFrame
|
||||
if err := dec.Decode(&frame); err != nil {
|
||||
close(frames)
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
|
||||
// Discard heartbeat frame
|
||||
if frame.IsHeartbeat() {
|
||||
continue
|
||||
}
|
||||
|
||||
frames <- &frame
|
||||
}
|
||||
}()
|
||||
|
||||
return frames, errCh
|
||||
}
|
||||
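A minimal sketch of consuming Monitor from the caller's side, assuming an already-configured api.Client and the usual os and github.com/hashicorp/nomad/api imports; the helper name and output handling are illustrative:

// tailAgentLogs streams agent logs to stdout until the frame channel closes or an error arrives.
func tailAgentLogs(client *api.Client) error {
    stopCh := make(chan struct{})
    defer close(stopCh)

    frames, errCh := client.Agent().Monitor(stopCh, nil)
    for {
        select {
        case frame, ok := <-frames:
            if !ok {
                return nil // streaming finished
            }
            os.Stdout.Write(frame.Data)
        case err := <-errCh:
            return err
        }
    }
}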
|
||||
// joinResponse is used to decode the response we get while
|
||||
// sending a member join request.
|
||||
type joinResponse struct {
|
||||
|
|
|
@ -1,9 +1,18 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -61,6 +70,219 @@ func (a *Allocations) Info(allocID string, q *QueryOptions) (*Allocation, *Query
|
|||
return &resp, qm, nil
|
||||
}
|
||||
|
||||
// Exec is used to execute a command inside a running task. The command is to run inside
|
||||
// the task environment.
|
||||
//
|
||||
// The parameters are:
|
||||
// * ctx: context to set deadlines or timeout
|
||||
// * allocation: the allocation to execute command inside
|
||||
// * task: the task's name to execute command in
|
||||
// * tty: indicates whether to start a pseudo-tty for the command
|
||||
// * stdin, stdout, stderr: the std io to pass to command.
|
||||
// If tty is true, then streams need to point to a tty that's alive for the whole process
|
||||
// * terminalSizeCh: A channel to send new tty terminal sizes
|
||||
//
|
||||
// The call blocks until command terminates (or an error occurs), and returns the exit code.
|
||||
func (a *Allocations) Exec(ctx context.Context,
|
||||
alloc *Allocation, task string, tty bool, command []string,
|
||||
stdin io.Reader, stdout, stderr io.Writer,
|
||||
terminalSizeCh <-chan TerminalSize, q *QueryOptions) (exitCode int, err error) {
|
||||
|
||||
ctx, cancelFn := context.WithCancel(ctx)
|
||||
defer cancelFn()
|
||||
|
||||
errCh := make(chan error, 4)
|
||||
|
||||
sender, output := a.execFrames(ctx, alloc, task, tty, command, errCh, q)
|
||||
|
||||
select {
|
||||
case err := <-errCh:
|
||||
return -2, err
|
||||
default:
|
||||
}
|
||||
|
||||
// Errors resulting from sending input (in goroutines) are silently dropped.
|
||||
// To mitigate this, extra care is needed to distinguish between actual send errors
|
||||
// and send errors caused by the command terminating before we detect the failure.
|
||||
// If we have an actual network failure or send a bad input, we'd get an
|
||||
// error in the reading side of websocket.
|
||||
|
||||
go func() {
|
||||
|
||||
bytes := make([]byte, 2048)
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
input := ExecStreamingInput{Stdin: &ExecStreamingIOOperation{}}
|
||||
|
||||
n, err := stdin.Read(bytes)
|
||||
|
||||
// always send data if we read some
|
||||
if n != 0 {
|
||||
input.Stdin.Data = bytes[:n]
|
||||
sender(&input)
|
||||
}
|
||||
|
||||
// then handle error
|
||||
if err == io.EOF {
|
||||
// if n != 0, send data and we'll get n = 0 on next read
|
||||
if n == 0 {
|
||||
input.Stdin.Close = true
|
||||
sender(&input)
|
||||
return
|
||||
}
|
||||
} else if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// forwarding terminal size
|
||||
go func() {
|
||||
for {
|
||||
resizeInput := ExecStreamingInput{}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case size, ok := <-terminalSizeCh:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
resizeInput.TTYSize = &size
|
||||
sender(&resizeInput)
|
||||
}
|
||||
|
||||
}
|
||||
}()
|
||||
|
||||
// send a heartbeat every 10 seconds
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
// heartbeat message
|
||||
case <-time.After(10 * time.Second):
|
||||
sender(&execStreamingInputHeartbeat)
|
||||
}
|
||||
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case err := <-errCh:
|
||||
// drop websocket code, not relevant to user
|
||||
if wsErr, ok := err.(*websocket.CloseError); ok && wsErr.Text != "" {
|
||||
return -2, errors.New(wsErr.Text)
|
||||
}
|
||||
return -2, err
|
||||
case <-ctx.Done():
|
||||
return -2, ctx.Err()
|
||||
case frame, ok := <-output:
|
||||
if !ok {
|
||||
return -2, errors.New("disconnected without receiving the exit code")
|
||||
}
|
||||
|
||||
switch {
|
||||
case frame.Stdout != nil:
|
||||
if len(frame.Stdout.Data) != 0 {
|
||||
stdout.Write(frame.Stdout.Data)
|
||||
}
|
||||
// don't really do anything if stdout is closing
|
||||
case frame.Stderr != nil:
|
||||
if len(frame.Stderr.Data) != 0 {
|
||||
stderr.Write(frame.Stderr.Data)
|
||||
}
|
||||
// don't really do anything if stderr is closing
|
||||
case frame.Exited && frame.Result != nil:
|
||||
return frame.Result.ExitCode, nil
|
||||
default:
|
||||
// noop - heartbeat
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
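A minimal sketch of calling the Exec API above from client code, assuming a configured api.Client; the task name, command, and timeout are illustrative, and terminal resizing is not wired up:

// runInAlloc runs a one-off command inside a task of the given allocation and
// returns its exit code. It assumes a non-interactive (tty=false) invocation.
func runInAlloc(client *api.Client, alloc *api.Allocation) (int, error) {
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    sizeCh := make(chan api.TerminalSize) // required by the signature, unused without a tty
    return client.Allocations().Exec(ctx, alloc, "web", false,
        []string{"/bin/sh", "-c", "uptime"},
        os.Stdin, os.Stdout, os.Stderr,
        sizeCh, nil)
}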
|
||||
func (a *Allocations) execFrames(ctx context.Context, alloc *Allocation, task string, tty bool, command []string,
|
||||
errCh chan<- error, q *QueryOptions) (sendFn func(*ExecStreamingInput) error, output <-chan *ExecStreamingOutput) {
|
||||
nodeClient, _ := a.client.GetNodeClientWithTimeout(alloc.NodeID, ClientConnTimeout, q)
|
||||
|
||||
if q == nil {
|
||||
q = &QueryOptions{}
|
||||
}
|
||||
if q.Params == nil {
|
||||
q.Params = make(map[string]string)
|
||||
}
|
||||
|
||||
commandBytes, err := json.Marshal(command)
|
||||
if err != nil {
|
||||
errCh <- fmt.Errorf("failed to marshal command: %s", err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
q.Params["tty"] = strconv.FormatBool(tty)
|
||||
q.Params["task"] = task
|
||||
q.Params["command"] = string(commandBytes)
|
||||
|
||||
reqPath := fmt.Sprintf("/v1/client/allocation/%s/exec", alloc.ID)
|
||||
|
||||
var conn *websocket.Conn
|
||||
|
||||
if nodeClient != nil {
|
||||
conn, _, err = nodeClient.websocket(reqPath, q)
|
||||
if _, ok := err.(net.Error); err != nil && !ok {
|
||||
errCh <- err
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
if conn == nil {
|
||||
conn, _, err = a.client.websocket(reqPath, q)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Create the output channel
|
||||
frames := make(chan *ExecStreamingOutput, 10)
|
||||
|
||||
go func() {
|
||||
defer conn.Close()
|
||||
for ctx.Err() == nil {
|
||||
|
||||
// Decode the next frame
|
||||
var frame ExecStreamingOutput
|
||||
err := conn.ReadJSON(&frame)
|
||||
if websocket.IsCloseError(err, websocket.CloseNormalClosure) {
|
||||
close(frames)
|
||||
return
|
||||
} else if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
|
||||
frames <- &frame
|
||||
}
|
||||
}()
|
||||
|
||||
var sendLock sync.Mutex
|
||||
send := func(v *ExecStreamingInput) error {
|
||||
sendLock.Lock()
|
||||
defer sendLock.Unlock()
|
||||
|
||||
return conn.WriteJSON(v)
|
||||
}
|
||||
|
||||
return send, frames
|
||||
|
||||
}
|
||||
|
||||
func (a *Allocations) Stats(alloc *Allocation, q *QueryOptions) (*AllocResourceUsage, error) {
|
||||
var resp AllocResourceUsage
|
||||
path := fmt.Sprintf("/v1/client/allocation/%s/stats", alloc.ID)
|
||||
|
@ -89,6 +311,36 @@ func (a *Allocations) Restart(alloc *Allocation, taskName string, q *QueryOption
|
|||
return err
|
||||
}
|
||||
|
||||
func (a *Allocations) Stop(alloc *Allocation, q *QueryOptions) (*AllocStopResponse, error) {
|
||||
var resp AllocStopResponse
|
||||
_, err := a.client.putQuery("/v1/allocation/"+alloc.ID+"/stop", nil, &resp, q)
|
||||
return &resp, err
|
||||
}
|
||||
|
||||
// AllocStopResponse is the response to an `AllocStopRequest`
|
||||
type AllocStopResponse struct {
|
||||
// EvalID is the ID of the follow-up evaluation for the rescheduled alloc.
|
||||
EvalID string
|
||||
|
||||
WriteMeta
|
||||
}
|
||||
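A minimal sketch of the new Stop call and its response, assuming a configured api.Client; the helper name and handling of EvalID are illustrative:

// stopAlloc stops the allocation and returns the ID of the follow-up evaluation
// that will reschedule it.
func stopAlloc(client *api.Client, alloc *api.Allocation) (string, error) {
    resp, err := client.Allocations().Stop(alloc, nil)
    if err != nil {
        return "", err
    }
    return resp.EvalID, nil
}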
|
||||
func (a *Allocations) Signal(alloc *Allocation, q *QueryOptions, task, signal string) error {
|
||||
nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req := AllocSignalRequest{
|
||||
Signal: signal,
|
||||
Task: task,
|
||||
}
|
||||
|
||||
var resp GenericResponse
|
||||
_, err = nodeClient.putQuery("/v1/client/allocation/"+alloc.ID+"/signal", &req, &resp, q)
|
||||
return err
|
||||
}
|
||||
|
||||
// Allocation is used for serialization of allocations.
|
||||
type Allocation struct {
|
||||
ID string
|
||||
|
@ -155,28 +407,30 @@ type NodeScoreMeta struct {
|
|||
// AllocationListStub is used to return a subset of an allocation
|
||||
// during list operations.
|
||||
type AllocationListStub struct {
|
||||
ID string
|
||||
EvalID string
|
||||
Name string
|
||||
Namespace string
|
||||
NodeID string
|
||||
NodeName string
|
||||
JobID string
|
||||
JobType string
|
||||
JobVersion uint64
|
||||
TaskGroup string
|
||||
DesiredStatus string
|
||||
DesiredDescription string
|
||||
ClientStatus string
|
||||
ClientDescription string
|
||||
TaskStates map[string]*TaskState
|
||||
DeploymentStatus *AllocDeploymentStatus
|
||||
FollowupEvalID string
|
||||
RescheduleTracker *RescheduleTracker
|
||||
CreateIndex uint64
|
||||
ModifyIndex uint64
|
||||
CreateTime int64
|
||||
ModifyTime int64
|
||||
ID string
|
||||
EvalID string
|
||||
Name string
|
||||
Namespace string
|
||||
NodeID string
|
||||
NodeName string
|
||||
JobID string
|
||||
JobType string
|
||||
JobVersion uint64
|
||||
TaskGroup string
|
||||
DesiredStatus string
|
||||
DesiredDescription string
|
||||
ClientStatus string
|
||||
ClientDescription string
|
||||
TaskStates map[string]*TaskState
|
||||
DeploymentStatus *AllocDeploymentStatus
|
||||
FollowupEvalID string
|
||||
RescheduleTracker *RescheduleTracker
|
||||
PreemptedAllocations []string
|
||||
PreemptedByAllocation string
|
||||
CreateIndex uint64
|
||||
ModifyIndex uint64
|
||||
CreateTime int64
|
||||
ModifyTime int64
|
||||
}
|
||||
|
||||
// AllocDeploymentStatus captures the status of the allocation as part of the
|
||||
|
@ -201,7 +455,8 @@ type AllocatedTaskResources struct {
|
|||
}
|
||||
|
||||
type AllocatedSharedResources struct {
|
||||
DiskMB int64
|
||||
DiskMB int64
|
||||
Networks []*NetworkResource
|
||||
}
|
||||
|
||||
type AllocatedCpuResources struct {
|
||||
|
@ -260,6 +515,17 @@ type AllocationRestartRequest struct {
|
|||
TaskName string
|
||||
}
|
||||
|
||||
type AllocSignalRequest struct {
|
||||
Task string
|
||||
Signal string
|
||||
}
|
||||
|
||||
// GenericResponse is used to respond to a request where no
|
||||
// specific response information is needed.
|
||||
type GenericResponse struct {
|
||||
WriteMeta
|
||||
}
|
||||
|
||||
// RescheduleTracker encapsulates previous reschedule events
|
||||
type RescheduleTracker struct {
|
||||
Events []*RescheduleEvent
|
||||
|
@ -294,3 +560,42 @@ type DesiredTransition struct {
|
|||
func (d DesiredTransition) ShouldMigrate() bool {
|
||||
return d.Migrate != nil && *d.Migrate
|
||||
}
|
||||
|
||||
// ExecStreamingIOOperation represents a stream write operation: either appending data or close (exclusively)
|
||||
type ExecStreamingIOOperation struct {
|
||||
Data []byte `json:"data,omitempty"`
|
||||
Close bool `json:"close,omitempty"`
|
||||
}
|
||||
|
||||
// TerminalSize represents the size of the terminal
|
||||
type TerminalSize struct {
|
||||
Height int `json:"height,omitempty"`
|
||||
Width int `json:"width,omitempty"`
|
||||
}
|
||||
|
||||
var execStreamingInputHeartbeat = ExecStreamingInput{}
|
||||
|
||||
// ExecStreamingInput represents user input to be sent to nomad exec handler.
|
||||
//
|
||||
// At most one field should be set.
|
||||
type ExecStreamingInput struct {
|
||||
Stdin *ExecStreamingIOOperation `json:"stdin,omitempty"`
|
||||
TTYSize *TerminalSize `json:"tty_size,omitempty"`
|
||||
}
|
||||
|
||||
// ExecStreamingExitResult captures the exit code of a just-completed nomad exec command
|
||||
type ExecStreamingExitResult struct {
|
||||
ExitCode int `json:"exit_code"`
|
||||
}
|
||||
|
||||
// ExecStreamingOutput represents an output streaming entity, e.g. a stdout/stderr update or termination
|
||||
//
|
||||
// At most one of these fields should be set: `Stdout`, `Stderr`, or `Result`.
|
||||
// If `Exited` is true, then `Result` is non-nil, and other fields are nil.
|
||||
type ExecStreamingOutput struct {
|
||||
Stdout *ExecStreamingIOOperation `json:"stdout,omitempty"`
|
||||
Stderr *ExecStreamingIOOperation `json:"stderr,omitempty"`
|
||||
|
||||
Exited bool `json:"exited,omitempty"`
|
||||
Result *ExecStreamingExitResult `json:"result,omitempty"`
|
||||
}
|
||||
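For reference, a minimal sketch of the wire shapes implied by the json tags above, assuming the encoding/json and fmt imports; the command bytes are illustrative, and []byte fields are base64-encoded by encoding/json:

// A stdin write and the empty heartbeat frame as they appear on the websocket.
in := api.ExecStreamingInput{Stdin: &api.ExecStreamingIOOperation{Data: []byte("ls\n")}}
b, _ := json.Marshal(in)
fmt.Println(string(b)) // {"stdin":{"data":"bHMK"}}

hb, _ := json.Marshal(api.ExecStreamingInput{})
fmt.Println(string(hb)) // {} - the heartbeat frame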
|
|
|
@ -15,6 +15,7 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
||||
rootcerts "github.com/hashicorp/go-rootcerts"
|
||||
)
|
||||
|
@ -120,8 +121,11 @@ type Config struct {
|
|||
// Namespace to use. If not provided the default namespace is used.
|
||||
Namespace string
|
||||
|
||||
// httpClient is the client to use. Default will be used if not provided.
|
||||
httpClient *http.Client
|
||||
// HttpClient is the client to use. Default will be used if not provided.
|
||||
//
|
||||
// If set, it is expected to be configured for TLS already, and TLSConfig is ignored.
|
||||
// You may use ConfigureTLS() function to aid with initialization.
|
||||
HttpClient *http.Client
|
||||
|
||||
// HttpAuth is the auth info to use for http access.
|
||||
HttpAuth *HttpBasicAuth
|
||||
|
@ -131,7 +135,9 @@ type Config struct {
|
|||
WaitTime time.Duration
|
||||
|
||||
// TLSConfig provides the various TLS related configurations for the http
|
||||
// client
|
||||
// client.
|
||||
//
|
||||
// TLSConfig is ignored if HttpClient is set.
|
||||
TLSConfig *TLSConfig
|
||||
}
|
||||
|
||||
|
@ -142,12 +148,11 @@ func (c *Config) ClientConfig(region, address string, tlsEnabled bool) *Config {
|
|||
if tlsEnabled {
|
||||
scheme = "https"
|
||||
}
|
||||
defaultConfig := DefaultConfig()
|
||||
config := &Config{
|
||||
Address: fmt.Sprintf("%s://%s", scheme, address),
|
||||
Region: region,
|
||||
Namespace: c.Namespace,
|
||||
httpClient: defaultConfig.httpClient,
|
||||
HttpClient: c.HttpClient,
|
||||
SecretID: c.SecretID,
|
||||
HttpAuth: c.HttpAuth,
|
||||
WaitTime: c.WaitTime,
|
||||
|
@ -173,12 +178,22 @@ type TLSConfig struct {
|
|||
// the Nomad server SSL certificate.
|
||||
CAPath string
|
||||
|
||||
// CACertPem is the PEM-encoded CA cert to use to verify the Nomad server
|
||||
// SSL certificate.
|
||||
CACertPEM []byte
|
||||
|
||||
// ClientCert is the path to the certificate for Nomad communication
|
||||
ClientCert string
|
||||
|
||||
// ClientCertPEM is the PEM-encoded certificate for Nomad communication
|
||||
ClientCertPEM []byte
|
||||
|
||||
// ClientKey is the path to the private key for Nomad communication
|
||||
ClientKey string
|
||||
|
||||
// ClientKeyPEM is the PEM-encoded private key for Nomad communication
|
||||
ClientKeyPEM []byte
|
||||
|
||||
// TLSServerName, if set, is used to set the SNI host when connecting via
|
||||
// TLS.
|
||||
TLSServerName string
|
||||
|
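A minimal sketch of the new *PEM fields, which let callers pass certificate material in memory instead of file paths; the address, helper name, and PEM variables are illustrative:

// newNomadClient builds an API client that trusts an in-memory CA bundle and
// presents an in-memory client certificate and key.
func newNomadClient(caPEM, certPEM, keyPEM []byte) (*api.Client, error) {
    conf := api.DefaultConfig()
    conf.Address = "https://nomad.example.com:4646"
    conf.TLSConfig.CACertPEM = caPEM
    conf.TLSConfig.ClientCertPEM = certPEM
    conf.TLSConfig.ClientKeyPEM = keyPEM
    return api.NewClient(conf)
}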
@ -197,19 +212,23 @@ func (t *TLSConfig) Copy() *TLSConfig {
|
|||
return nt
|
||||
}
|
||||
|
||||
// DefaultConfig returns a default configuration for the client
|
||||
func DefaultConfig() *Config {
|
||||
config := &Config{
|
||||
Address: "http://127.0.0.1:4646",
|
||||
httpClient: cleanhttp.DefaultClient(),
|
||||
TLSConfig: &TLSConfig{},
|
||||
}
|
||||
transport := config.httpClient.Transport.(*http.Transport)
|
||||
func defaultHttpClient() *http.Client {
|
||||
httpClient := cleanhttp.DefaultClient()
|
||||
transport := httpClient.Transport.(*http.Transport)
|
||||
transport.TLSHandshakeTimeout = 10 * time.Second
|
||||
transport.TLSClientConfig = &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
}
|
||||
|
||||
return httpClient
|
||||
}
|
||||
|
||||
// DefaultConfig returns a default configuration for the client
|
||||
func DefaultConfig() *Config {
|
||||
config := &Config{
|
||||
Address: "http://127.0.0.1:4646",
|
||||
TLSConfig: &TLSConfig{},
|
||||
}
|
||||
if addr := os.Getenv("NOMAD_ADDR"); addr != "" {
|
||||
config.Address = addr
|
||||
}
|
||||
|
@ -248,6 +267,9 @@ func DefaultConfig() *Config {
|
|||
if v := os.Getenv("NOMAD_CLIENT_KEY"); v != "" {
|
||||
config.TLSConfig.ClientKey = v
|
||||
}
|
||||
if v := os.Getenv("NOMAD_TLS_SERVER_NAME"); v != "" {
|
||||
config.TLSConfig.TLSServerName = v
|
||||
}
|
||||
if v := os.Getenv("NOMAD_SKIP_VERIFY"); v != "" {
|
||||
if insecure, err := strconv.ParseBool(v); err == nil {
|
||||
config.TLSConfig.Insecure = insecure
|
||||
|
@ -259,49 +281,83 @@ func DefaultConfig() *Config {
|
|||
return config
|
||||
}
|
||||
|
||||
// SetTimeout is used to place a timeout for connecting to Nomad. A negative
|
||||
// duration is ignored, a duration of zero means no timeout, and any other value
|
||||
// will add a timeout.
|
||||
func (c *Config) SetTimeout(t time.Duration) error {
|
||||
if c == nil {
|
||||
return fmt.Errorf("nil config")
|
||||
} else if c.httpClient == nil {
|
||||
return fmt.Errorf("nil HTTP client")
|
||||
} else if c.httpClient.Transport == nil {
|
||||
return fmt.Errorf("nil HTTP client transport")
|
||||
// cloneWithTimeout returns a cloned httpClient with the timeout set if positive;
|
||||
// otherwise, it returns the same client.
|
||||
func cloneWithTimeout(httpClient *http.Client, t time.Duration) (*http.Client, error) {
|
||||
if httpClient == nil {
|
||||
return nil, fmt.Errorf("nil HTTP client")
|
||||
} else if httpClient.Transport == nil {
|
||||
return nil, fmt.Errorf("nil HTTP client transport")
|
||||
}
|
||||
|
||||
// Apply a timeout.
|
||||
if t.Nanoseconds() >= 0 {
|
||||
transport, ok := c.httpClient.Transport.(*http.Transport)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected HTTP transport: %T", c.httpClient.Transport)
|
||||
}
|
||||
|
||||
transport.DialContext = (&net.Dialer{
|
||||
Timeout: t,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).DialContext
|
||||
if t.Nanoseconds() < 0 {
|
||||
return httpClient, nil
|
||||
}
|
||||
|
||||
return nil
|
||||
tr, ok := httpClient.Transport.(*http.Transport)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected HTTP transport: %T", httpClient.Transport)
|
||||
}
|
||||
|
||||
// copy all public fields, to avoid copying transient state and locks
|
||||
ntr := &http.Transport{
|
||||
Proxy: tr.Proxy,
|
||||
DialContext: tr.DialContext,
|
||||
Dial: tr.Dial,
|
||||
DialTLS: tr.DialTLS,
|
||||
TLSClientConfig: tr.TLSClientConfig,
|
||||
TLSHandshakeTimeout: tr.TLSHandshakeTimeout,
|
||||
DisableKeepAlives: tr.DisableKeepAlives,
|
||||
DisableCompression: tr.DisableCompression,
|
||||
MaxIdleConns: tr.MaxIdleConns,
|
||||
MaxIdleConnsPerHost: tr.MaxIdleConnsPerHost,
|
||||
MaxConnsPerHost: tr.MaxConnsPerHost,
|
||||
IdleConnTimeout: tr.IdleConnTimeout,
|
||||
ResponseHeaderTimeout: tr.ResponseHeaderTimeout,
|
||||
ExpectContinueTimeout: tr.ExpectContinueTimeout,
|
||||
TLSNextProto: tr.TLSNextProto,
|
||||
ProxyConnectHeader: tr.ProxyConnectHeader,
|
||||
MaxResponseHeaderBytes: tr.MaxResponseHeaderBytes,
|
||||
}
|
||||
|
||||
// apply timeout
|
||||
ntr.DialContext = (&net.Dialer{
|
||||
Timeout: t,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).DialContext
|
||||
|
||||
// clone http client with new transport
|
||||
nc := *httpClient
|
||||
nc.Transport = ntr
|
||||
return &nc, nil
|
||||
}
|
||||
|
||||
// ConfigureTLS applies a set of TLS configurations to the HTTP client.
|
||||
func (c *Config) ConfigureTLS() error {
|
||||
if c.TLSConfig == nil {
|
||||
func ConfigureTLS(httpClient *http.Client, tlsConfig *TLSConfig) error {
|
||||
if tlsConfig == nil {
|
||||
return nil
|
||||
}
|
||||
if c.httpClient == nil {
|
||||
if httpClient == nil {
|
||||
return fmt.Errorf("config HTTP Client must be set")
|
||||
}
|
||||
|
||||
var clientCert tls.Certificate
|
||||
foundClientCert := false
|
||||
if c.TLSConfig.ClientCert != "" || c.TLSConfig.ClientKey != "" {
|
||||
if c.TLSConfig.ClientCert != "" && c.TLSConfig.ClientKey != "" {
|
||||
if tlsConfig.ClientCert != "" || tlsConfig.ClientKey != "" {
|
||||
if tlsConfig.ClientCert != "" && tlsConfig.ClientKey != "" {
|
||||
var err error
|
||||
clientCert, err = tls.LoadX509KeyPair(c.TLSConfig.ClientCert, c.TLSConfig.ClientKey)
|
||||
clientCert, err = tls.LoadX509KeyPair(tlsConfig.ClientCert, tlsConfig.ClientKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
foundClientCert = true
|
||||
} else {
|
||||
return fmt.Errorf("Both client cert and client key must be provided")
|
||||
}
|
||||
} else if len(tlsConfig.ClientCertPEM) != 0 || len(tlsConfig.ClientKeyPEM) != 0 {
|
||||
if len(tlsConfig.ClientCertPEM) != 0 && len(tlsConfig.ClientKeyPEM) != 0 {
|
||||
var err error
|
||||
clientCert, err = tls.X509KeyPair(tlsConfig.ClientCertPEM, tlsConfig.ClientKeyPEM)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -311,22 +367,23 @@ func (c *Config) ConfigureTLS() error {
|
|||
}
|
||||
}
|
||||
|
||||
clientTLSConfig := c.httpClient.Transport.(*http.Transport).TLSClientConfig
|
||||
clientTLSConfig := httpClient.Transport.(*http.Transport).TLSClientConfig
|
||||
rootConfig := &rootcerts.Config{
|
||||
CAFile: c.TLSConfig.CACert,
|
||||
CAPath: c.TLSConfig.CAPath,
|
||||
CAFile: tlsConfig.CACert,
|
||||
CAPath: tlsConfig.CAPath,
|
||||
CACertificate: tlsConfig.CACertPEM,
|
||||
}
|
||||
if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
clientTLSConfig.InsecureSkipVerify = c.TLSConfig.Insecure
|
||||
clientTLSConfig.InsecureSkipVerify = tlsConfig.Insecure
|
||||
|
||||
if foundClientCert {
|
||||
clientTLSConfig.Certificates = []tls.Certificate{clientCert}
|
||||
}
|
||||
if c.TLSConfig.TLSServerName != "" {
|
||||
clientTLSConfig.ServerName = c.TLSConfig.TLSServerName
|
||||
if tlsConfig.TLSServerName != "" {
|
||||
clientTLSConfig.ServerName = tlsConfig.TLSServerName
|
||||
}
|
||||
|
||||
return nil
|
||||
|
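A minimal sketch of the now-exported ConfigureTLS used together with a caller-supplied HttpClient, assuming the crypto/tls, net/http, go-cleanhttp, and nomad/api imports; the helper name and CA path are illustrative. Note that Config.TLSConfig is ignored once HttpClient is set, so TLS has to be applied to that client directly:

// newClientWithOwnTransport is a hypothetical helper that owns the http.Client
// and applies TLS to it before handing it to the Nomad API client.
func newClientWithOwnTransport() (*api.Client, error) {
    httpClient := cleanhttp.DefaultClient()
    transport := httpClient.Transport.(*http.Transport)
    transport.TLSClientConfig = &tls.Config{MinVersion: tls.VersionTLS12}

    if err := api.ConfigureTLS(httpClient, &api.TLSConfig{CACert: "/etc/nomad.d/ca.pem"}); err != nil {
        return nil, err
    }

    conf := api.DefaultConfig()
    conf.HttpClient = httpClient // Config.TLSConfig is ignored when HttpClient is set
    return api.NewClient(conf)
}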
@ -334,7 +391,8 @@ func (c *Config) ConfigureTLS() error {
|
|||
|
||||
// Client provides a client to the Nomad API
|
||||
type Client struct {
|
||||
config Config
|
||||
httpClient *http.Client
|
||||
config Config
|
||||
}
|
||||
|
||||
// NewClient returns a new client
|
||||
|
@ -348,17 +406,17 @@ func NewClient(config *Config) (*Client, error) {
|
|||
return nil, fmt.Errorf("invalid address '%s': %v", config.Address, err)
|
||||
}
|
||||
|
||||
if config.httpClient == nil {
|
||||
config.httpClient = defConfig.httpClient
|
||||
}
|
||||
|
||||
// Configure the TLS configurations
|
||||
if err := config.ConfigureTLS(); err != nil {
|
||||
return nil, err
|
||||
httpClient := config.HttpClient
|
||||
if httpClient == nil {
|
||||
httpClient = defaultHttpClient()
|
||||
if err := ConfigureTLS(httpClient, config.TLSConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
client := &Client{
|
||||
config: *config,
|
||||
config: *config,
|
||||
httpClient: httpClient,
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
@ -420,15 +478,20 @@ func (c *Client) getNodeClientImpl(nodeID string, timeout time.Duration, q *Quer
|
|||
// If the client is configured for a particular region use that
|
||||
region = c.config.Region
|
||||
default:
|
||||
// No region information is given so use the default.
|
||||
region = "global"
|
||||
// No region information is given so use GlobalRegion as the default.
|
||||
region = GlobalRegion
|
||||
}
|
||||
|
||||
// Get an API client for the node
|
||||
conf := c.config.ClientConfig(region, node.HTTPAddr, node.TLSEnabled)
|
||||
|
||||
// Set the timeout
|
||||
conf.SetTimeout(timeout)
|
||||
// set the timeout - preserve the old behavior where errors are ignored and the untimed client is used
|
||||
httpClient, err := cloneWithTimeout(c.httpClient, timeout)
|
||||
// on error, fallback to using current http client
|
||||
if err != nil {
|
||||
httpClient = c.httpClient
|
||||
}
|
||||
conf.HttpClient = httpClient
|
||||
|
||||
return NewClient(conf)
|
||||
}
|
||||
|
@ -554,10 +617,11 @@ func (c *Client) newRequest(method, path string) (*request, error) {
|
|||
config: &c.config,
|
||||
method: method,
|
||||
url: &url.URL{
|
||||
Scheme: base.Scheme,
|
||||
User: base.User,
|
||||
Host: base.Host,
|
||||
Path: u.Path,
|
||||
Scheme: base.Scheme,
|
||||
User: base.User,
|
||||
Host: base.Host,
|
||||
Path: u.Path,
|
||||
RawPath: u.RawPath,
|
||||
},
|
||||
params: make(map[string][]string),
|
||||
}
|
||||
|
@ -611,7 +675,7 @@ func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
|
|||
return 0, nil, err
|
||||
}
|
||||
start := time.Now()
|
||||
resp, err := c.config.httpClient.Do(req)
|
||||
resp, err := c.httpClient.Do(req)
|
||||
diff := time.Now().Sub(start)
|
||||
|
||||
// If the response is compressed, we swap the body's reader.
|
||||
|
@ -655,6 +719,72 @@ func (c *Client) rawQuery(endpoint string, q *QueryOptions) (io.ReadCloser, erro
|
|||
return resp.Body, nil
|
||||
}
|
||||
|
||||
// websocket makes a websocket request to the specific endpoint
|
||||
func (c *Client) websocket(endpoint string, q *QueryOptions) (*websocket.Conn, *http.Response, error) {
|
||||
|
||||
transport, ok := c.httpClient.Transport.(*http.Transport)
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("unsupported transport")
|
||||
}
|
||||
dialer := websocket.Dialer{
|
||||
ReadBufferSize: 4096,
|
||||
WriteBufferSize: 4096,
|
||||
HandshakeTimeout: c.httpClient.Timeout,
|
||||
|
||||
// values to inherit from http client configuration
|
||||
NetDial: transport.Dial,
|
||||
NetDialContext: transport.DialContext,
|
||||
Proxy: transport.Proxy,
|
||||
TLSClientConfig: transport.TLSClientConfig,
|
||||
}
|
||||
|
||||
// build request object for header and parameters
|
||||
r, err := c.newRequest("GET", endpoint)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
r.setQueryOptions(q)
|
||||
|
||||
rhttp, err := r.toHTTP()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// convert scheme
|
||||
wsScheme := ""
|
||||
switch rhttp.URL.Scheme {
|
||||
case "http":
|
||||
wsScheme = "ws"
|
||||
case "https":
|
||||
wsScheme = "wss"
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("unsupported scheme: %v", rhttp.URL.Scheme)
|
||||
}
|
||||
rhttp.URL.Scheme = wsScheme
|
||||
|
||||
conn, resp, err := dialer.Dial(rhttp.URL.String(), rhttp.Header)
|
||||
|
||||
// check the response status code, as it's more informative than the handshake error we get from the websocket library
|
||||
if resp != nil && resp.StatusCode != 101 {
|
||||
var buf bytes.Buffer
|
||||
|
||||
if resp.Header.Get("Content-Encoding") == "gzip" {
|
||||
greader, err := gzip.NewReader(resp.Body)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
|
||||
}
|
||||
io.Copy(&buf, greader)
|
||||
} else {
|
||||
io.Copy(&buf, resp.Body)
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
return nil, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
|
||||
}
|
||||
|
||||
return conn, resp, err
|
||||
}
|
||||
|
||||
// query is used to do a GET request against an endpoint
|
||||
// and deserialize the response into an interface using
|
||||
// standard Nomad conventions.
|
||||
|
|
|
@ -1,5 +1,18 @@
|
|||
package api
|
||||
|
||||
const (
|
||||
ConstraintDistinctProperty = "distinct_property"
|
||||
ConstraintDistinctHosts = "distinct_hosts"
|
||||
ConstraintRegex = "regexp"
|
||||
ConstraintVersion = "version"
|
||||
ConstraintSemver = "semver"
|
||||
ConstraintSetContains = "set_contains"
|
||||
ConstraintSetContainsAll = "set_contains_all"
|
||||
ConstraintSetContainsAny = "set_contains_any"
|
||||
ConstraintAttributeIsSet = "is_set"
|
||||
ConstraintAttributeIsNotSet = "is_not_set"
|
||||
)
|
||||
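A minimal sketch of using one of the constants above (ConstraintSemver) when building a job programmatically, assuming the nomad/api import; the attribute and version range are illustrative:

// Constrain a task group to nodes whose Vault version satisfies a semver range.
constraint := api.NewConstraint("${attr.vault.version}", api.ConstraintSemver, ">= 1.3.0")
tg := api.NewTaskGroup("web", 1).Constrain(constraint)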
|
||||
// Constraint is used to serialize a job placement constraint.
|
||||
type Constraint struct {
|
||||
LTarget string
|
||||
|
|
|
@ -80,6 +80,8 @@ type Evaluation struct {
|
|||
SnapshotIndex uint64
|
||||
CreateIndex uint64
|
||||
ModifyIndex uint64
|
||||
CreateTime int64
|
||||
ModifyTime int64
|
||||
}
|
||||
|
||||
// EvalIndexSort is a wrapper to sort evaluations by CreateIndex.
|
||||
|
|
|
@ -20,11 +20,12 @@ const (
|
|||
|
||||
// AllocFileInfo holds information about a file inside the AllocDir
|
||||
type AllocFileInfo struct {
|
||||
Name string
|
||||
IsDir bool
|
||||
Size int64
|
||||
FileMode string
|
||||
ModTime time.Time
|
||||
Name string
|
||||
IsDir bool
|
||||
Size int64
|
||||
FileMode string
|
||||
ModTime time.Time
|
||||
ContentType string
|
||||
}
|
||||
|
||||
// StreamFrame is used to frame data of a file when streaming
|
||||
|
@ -91,72 +92,24 @@ func (a *AllocFS) Stat(alloc *Allocation, path string, q *QueryOptions) (*AllocF
|
|||
// ReadAt is used to read bytes at a given offset until limit at the given path
|
||||
// in an allocation directory. If limit is <= 0, there is no limit.
|
||||
func (a *AllocFS) ReadAt(alloc *Allocation, path string, offset int64, limit int64, q *QueryOptions) (io.ReadCloser, error) {
|
||||
nodeClient, err := a.client.GetNodeClientWithTimeout(alloc.NodeID, ClientConnTimeout, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if q == nil {
|
||||
q = &QueryOptions{}
|
||||
}
|
||||
if q.Params == nil {
|
||||
q.Params = make(map[string]string)
|
||||
}
|
||||
|
||||
q.Params["path"] = path
|
||||
q.Params["offset"] = strconv.FormatInt(offset, 10)
|
||||
q.Params["limit"] = strconv.FormatInt(limit, 10)
|
||||
|
||||
reqPath := fmt.Sprintf("/v1/client/fs/readat/%s", alloc.ID)
|
||||
r, err := nodeClient.rawQuery(reqPath, q)
|
||||
if err != nil {
|
||||
// There was a networking error when talking directly to the client.
|
||||
if _, ok := err.(net.Error); !ok {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Try via the server
|
||||
r, err = a.client.rawQuery(reqPath, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return r, nil
|
||||
return queryClientNode(a.client, alloc, reqPath, q,
|
||||
func(q *QueryOptions) {
|
||||
q.Params["path"] = path
|
||||
q.Params["offset"] = strconv.FormatInt(offset, 10)
|
||||
q.Params["limit"] = strconv.FormatInt(limit, 10)
|
||||
})
|
||||
}
|
||||
|
||||
// Cat is used to read contents of a file at the given path in an allocation
|
||||
// directory
|
||||
func (a *AllocFS) Cat(alloc *Allocation, path string, q *QueryOptions) (io.ReadCloser, error) {
|
||||
nodeClient, err := a.client.GetNodeClientWithTimeout(alloc.NodeID, ClientConnTimeout, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if q == nil {
|
||||
q = &QueryOptions{}
|
||||
}
|
||||
if q.Params == nil {
|
||||
q.Params = make(map[string]string)
|
||||
}
|
||||
|
||||
q.Params["path"] = path
|
||||
reqPath := fmt.Sprintf("/v1/client/fs/cat/%s", alloc.ID)
|
||||
r, err := nodeClient.rawQuery(reqPath, q)
|
||||
if err != nil {
|
||||
// There was a networking error when talking directly to the client.
|
||||
if _, ok := err.(net.Error); !ok {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Try via the server
|
||||
r, err = a.client.rawQuery(reqPath, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return r, nil
|
||||
return queryClientNode(a.client, alloc, reqPath, q,
|
||||
func(q *QueryOptions) {
|
||||
q.Params["path"] = path
|
||||
})
|
||||
}
|
||||
|
||||
// Stream streams the content of a file blocking on EOF.
|
||||
|
@ -171,40 +124,19 @@ func (a *AllocFS) Stream(alloc *Allocation, path, origin string, offset int64,
|
|||
cancel <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) {
|
||||
|
||||
errCh := make(chan error, 1)
|
||||
nodeClient, err := a.client.GetNodeClientWithTimeout(alloc.NodeID, ClientConnTimeout, q)
|
||||
|
||||
reqPath := fmt.Sprintf("/v1/client/fs/stream/%s", alloc.ID)
|
||||
r, err := queryClientNode(a.client, alloc, reqPath, q,
|
||||
func(q *QueryOptions) {
|
||||
q.Params["path"] = path
|
||||
q.Params["offset"] = strconv.FormatInt(offset, 10)
|
||||
q.Params["origin"] = origin
|
||||
})
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return nil, errCh
|
||||
}
|
||||
|
||||
if q == nil {
|
||||
q = &QueryOptions{}
|
||||
}
|
||||
if q.Params == nil {
|
||||
q.Params = make(map[string]string)
|
||||
}
|
||||
|
||||
q.Params["path"] = path
|
||||
q.Params["offset"] = strconv.FormatInt(offset, 10)
|
||||
q.Params["origin"] = origin
|
||||
|
||||
reqPath := fmt.Sprintf("/v1/client/fs/stream/%s", alloc.ID)
|
||||
r, err := nodeClient.rawQuery(reqPath, q)
|
||||
if err != nil {
|
||||
// There was a networking error when talking directly to the client.
|
||||
if _, ok := err.(net.Error); !ok {
|
||||
errCh <- err
|
||||
return nil, errCh
|
||||
}
|
||||
|
||||
// Try via the server
|
||||
r, err = a.client.rawQuery(reqPath, q)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return nil, errCh
|
||||
}
|
||||
}
|
||||
|
||||
// Create the output channel
|
||||
frames := make(chan *StreamFrame, 10)
|
||||
|
||||
|
@ -243,6 +175,40 @@ func (a *AllocFS) Stream(alloc *Allocation, path, origin string, offset int64,
|
|||
return frames, errCh
|
||||
}
|
||||
|
||||
func queryClientNode(c *Client, alloc *Allocation, reqPath string, q *QueryOptions, customizeQ func(*QueryOptions)) (io.ReadCloser, error) {
|
||||
nodeClient, _ := c.GetNodeClientWithTimeout(alloc.NodeID, ClientConnTimeout, q)
|
||||
|
||||
if q == nil {
|
||||
q = &QueryOptions{}
|
||||
}
|
||||
if q.Params == nil {
|
||||
q.Params = make(map[string]string)
|
||||
}
|
||||
if customizeQ != nil {
|
||||
customizeQ(q)
|
||||
}
|
||||
|
||||
var r io.ReadCloser
|
||||
var err error
|
||||
|
||||
if nodeClient != nil {
|
||||
r, err = nodeClient.rawQuery(reqPath, q)
|
||||
if _, ok := err.(net.Error); err != nil && !ok {
|
||||
// found a non-networking error talking to the client directly
|
||||
return nil, err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// we failed to get a node client, or hit a network error talking to it,
|
||||
// so fall back to querying through the server
|
||||
if r == nil {
|
||||
return c.rawQuery(reqPath, q)
|
||||
}
|
||||
|
||||
return r, err
|
||||
}
|
||||
|
||||
// Logs streams the content of a tasks logs blocking on EOF.
|
||||
// The parameters are:
|
||||
// * allocation: the allocation to stream from.
|
||||
|
@ -263,42 +229,20 @@ func (a *AllocFS) Logs(alloc *Allocation, follow bool, task, logType, origin str
|
|||
|
||||
errCh := make(chan error, 1)
|
||||
|
||||
nodeClient, err := a.client.GetNodeClientWithTimeout(alloc.NodeID, ClientConnTimeout, q)
|
||||
reqPath := fmt.Sprintf("/v1/client/fs/logs/%s", alloc.ID)
|
||||
r, err := queryClientNode(a.client, alloc, reqPath, q,
|
||||
func(q *QueryOptions) {
|
||||
q.Params["follow"] = strconv.FormatBool(follow)
|
||||
q.Params["task"] = task
|
||||
q.Params["type"] = logType
|
||||
q.Params["origin"] = origin
|
||||
q.Params["offset"] = strconv.FormatInt(offset, 10)
|
||||
})
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return nil, errCh
|
||||
}
|
||||
|
||||
if q == nil {
|
||||
q = &QueryOptions{}
|
||||
}
|
||||
if q.Params == nil {
|
||||
q.Params = make(map[string]string)
|
||||
}
|
||||
|
||||
q.Params["follow"] = strconv.FormatBool(follow)
|
||||
q.Params["task"] = task
|
||||
q.Params["type"] = logType
|
||||
q.Params["origin"] = origin
|
||||
q.Params["offset"] = strconv.FormatInt(offset, 10)
|
||||
|
||||
reqPath := fmt.Sprintf("/v1/client/fs/logs/%s", alloc.ID)
|
||||
r, err := nodeClient.rawQuery(reqPath, q)
|
||||
if err != nil {
|
||||
// There was a networking error when talking directly to the client.
|
||||
if _, ok := err.(net.Error); !ok {
|
||||
errCh <- err
|
||||
return nil, errCh
|
||||
}
|
||||
|
||||
// Try via the server
|
||||
r, err = a.client.rawQuery(reqPath, q)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return nil, errCh
|
||||
}
|
||||
}
|
||||
|
||||
// Create the output channel
|
||||
frames := make(chan *StreamFrame, 10)
|
||||
|
||||
|
|
|
@ -5,9 +5,9 @@ go 1.12
|
|||
require (
|
||||
github.com/docker/go-units v0.3.3
|
||||
github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75
|
||||
github.com/gorilla/websocket v1.4.1
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1
|
||||
github.com/hashicorp/go-rootcerts v1.0.0
|
||||
github.com/hashicorp/go-uuid v1.0.1
|
||||
github.com/hashicorp/go-rootcerts v1.0.2
|
||||
github.com/kr/pretty v0.1.0
|
||||
github.com/mitchellh/go-testing-interface v1.0.0
|
||||
github.com/stretchr/testify v1.3.0
|
||||
|
|
|
@ -4,12 +4,14 @@ github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk
|
|||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY=
|
||||
github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
|
||||
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
|
||||
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
|
@ -17,6 +19,8 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
|||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
|
|
|
@ -25,6 +25,13 @@ const (
|
|||
|
||||
// DefaultNamespace is the default namespace.
|
||||
DefaultNamespace = "default"
|
||||
|
||||
// For Job configuration, GlobalRegion is a sentinel region value
|
||||
// that users may specify to indicate the job should be run on
|
||||
// the region of the node that the job was submitted to.
|
||||
// For Client configuration, if no region information is given,
|
||||
// the client node will default to be part of the GlobalRegion.
|
||||
GlobalRegion = "global"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -139,7 +146,7 @@ func (j *Jobs) PrefixList(prefix string) ([]*JobListStub, *QueryMeta, error) {
|
|||
// job given its unique ID.
|
||||
func (j *Jobs) Info(jobID string, q *QueryOptions) (*Job, *QueryMeta, error) {
|
||||
var resp Job
|
||||
qm, err := j.client.query("/v1/job/"+jobID, &resp, q)
|
||||
qm, err := j.client.query("/v1/job/"+url.PathEscape(jobID), &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
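The escaping above matters because the job ID is used as a URL path segment and can contain reserved characters, for example in the IDs of dispatched instances of a parameterized job. A minimal sketch, assuming a configured api.Client in scope; the job ID is illustrative:

// Without url.PathEscape, the "/" in a dispatched job ID would split the URL path.
jobID := "backup/dispatch-1576604040-563cc1a2"
job, _, err := client.Jobs().Info(jobID, nil) // requests /v1/job/backup%2Fdispatch-1576604040-563cc1a2
if err != nil {
    return err
}
fmt.Println(*job.ID)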
@ -150,7 +157,7 @@ func (j *Jobs) Info(jobID string, q *QueryOptions) (*Job, *QueryMeta, error) {
|
|||
// unique ID.
|
||||
func (j *Jobs) Versions(jobID string, diffs bool, q *QueryOptions) ([]*Job, []*JobDiff, *QueryMeta, error) {
|
||||
var resp JobVersionsResponse
|
||||
qm, err := j.client.query(fmt.Sprintf("/v1/job/%s/versions?diffs=%v", jobID, diffs), &resp, q)
|
||||
qm, err := j.client.query(fmt.Sprintf("/v1/job/%s/versions?diffs=%v", url.PathEscape(jobID), diffs), &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
@ -160,7 +167,7 @@ func (j *Jobs) Versions(jobID string, diffs bool, q *QueryOptions) ([]*Job, []*J
|
|||
// Allocations is used to return the allocs for a given job ID.
|
||||
func (j *Jobs) Allocations(jobID string, allAllocs bool, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) {
|
||||
var resp []*AllocationListStub
|
||||
u, err := url.Parse("/v1/job/" + jobID + "/allocations")
|
||||
u, err := url.Parse("/v1/job/" + url.PathEscape(jobID) + "/allocations")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -179,9 +186,17 @@ func (j *Jobs) Allocations(jobID string, allAllocs bool, q *QueryOptions) ([]*Al
|
|||
|
||||
// Deployments is used to query the deployments associated with the given job
|
||||
// ID.
|
||||
func (j *Jobs) Deployments(jobID string, q *QueryOptions) ([]*Deployment, *QueryMeta, error) {
|
||||
func (j *Jobs) Deployments(jobID string, all bool, q *QueryOptions) ([]*Deployment, *QueryMeta, error) {
|
||||
var resp []*Deployment
|
||||
qm, err := j.client.query("/v1/job/"+jobID+"/deployments", &resp, q)
|
||||
u, err := url.Parse("/v1/job/" + url.PathEscape(jobID) + "/deployments")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
v := u.Query()
|
||||
v.Add("all", strconv.FormatBool(all))
|
||||
u.RawQuery = v.Encode()
|
||||
qm, err := j.client.query(u.String(), &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
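A minimal sketch of the updated Deployments call; the second argument is new and is forwarded as the ?all query parameter shown above. The job ID is illustrative and a configured api.Client is assumed:

deployments, _, err := client.Jobs().Deployments("example", true, nil)
if err != nil {
    return err
}
for _, d := range deployments {
    fmt.Println(d.ID, d.Status)
}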
@ -193,7 +208,7 @@ func (j *Jobs) Deployments(jobID string, q *QueryOptions) ([]*Deployment, *Query
|
|||
// the given job ID.
|
||||
func (j *Jobs) LatestDeployment(jobID string, q *QueryOptions) (*Deployment, *QueryMeta, error) {
|
||||
var resp *Deployment
|
||||
qm, err := j.client.query("/v1/job/"+jobID+"/deployment", &resp, q)
|
||||
qm, err := j.client.query("/v1/job/"+url.PathEscape(jobID)+"/deployment", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -204,7 +219,7 @@ func (j *Jobs) LatestDeployment(jobID string, q *QueryOptions) (*Deployment, *Qu
|
|||
// ID.
|
||||
func (j *Jobs) Evaluations(jobID string, q *QueryOptions) ([]*Evaluation, *QueryMeta, error) {
|
||||
var resp []*Evaluation
|
||||
qm, err := j.client.query("/v1/job/"+jobID+"/evaluations", &resp, q)
|
||||
qm, err := j.client.query("/v1/job/"+url.PathEscape(jobID)+"/evaluations", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -217,7 +232,7 @@ func (j *Jobs) Evaluations(jobID string, q *QueryOptions) ([]*Evaluation, *Query
|
|||
// eventually GC'ed from the system. Most callers should not specify purge.
|
||||
func (j *Jobs) Deregister(jobID string, purge bool, q *WriteOptions) (string, *WriteMeta, error) {
|
||||
var resp JobDeregisterResponse
|
||||
wm, err := j.client.delete(fmt.Sprintf("/v1/job/%v?purge=%t", jobID, purge), &resp, q)
|
||||
wm, err := j.client.delete(fmt.Sprintf("/v1/job/%v?purge=%t", url.PathEscape(jobID), purge), &resp, q)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
@ -227,7 +242,7 @@ func (j *Jobs) Deregister(jobID string, purge bool, q *WriteOptions) (string, *W
|
|||
// ForceEvaluate is used to force-evaluate an existing job.
|
||||
func (j *Jobs) ForceEvaluate(jobID string, q *WriteOptions) (string, *WriteMeta, error) {
|
||||
var resp JobRegisterResponse
|
||||
wm, err := j.client.write("/v1/job/"+jobID+"/evaluate", nil, &resp, q)
|
||||
wm, err := j.client.write("/v1/job/"+url.PathEscape(jobID)+"/evaluate", nil, &resp, q)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
@ -243,7 +258,7 @@ func (j *Jobs) EvaluateWithOpts(jobID string, opts EvalOptions, q *WriteOptions)
|
|||
}
|
||||
|
||||
var resp JobRegisterResponse
|
||||
wm, err := j.client.write("/v1/job/"+jobID+"/evaluate", req, &resp, q)
|
||||
wm, err := j.client.write("/v1/job/"+url.PathEscape(jobID)+"/evaluate", req, &resp, q)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
@ -253,7 +268,7 @@ func (j *Jobs) EvaluateWithOpts(jobID string, opts EvalOptions, q *WriteOptions)
|
|||
// PeriodicForce spawns a new instance of the periodic job and returns the eval ID
|
||||
func (j *Jobs) PeriodicForce(jobID string, q *WriteOptions) (string, *WriteMeta, error) {
|
||||
var resp periodicForceResponse
|
||||
wm, err := j.client.write("/v1/job/"+jobID+"/periodic/force", nil, &resp, q)
|
||||
wm, err := j.client.write("/v1/job/"+url.PathEscape(jobID)+"/periodic/force", nil, &resp, q)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
@ -286,7 +301,7 @@ func (j *Jobs) PlanOpts(job *Job, opts *PlanOptions, q *WriteOptions) (*JobPlanR
|
|||
}
|
||||
|
||||
var resp JobPlanResponse
|
||||
wm, err := j.client.write("/v1/job/"+*job.ID+"/plan", req, &resp, q)
|
||||
wm, err := j.client.write("/v1/job/"+url.PathEscape(*job.ID)+"/plan", req, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -295,7 +310,7 @@ func (j *Jobs) PlanOpts(job *Job, opts *PlanOptions, q *WriteOptions) (*JobPlanR
|
|||
|
||||
func (j *Jobs) Summary(jobID string, q *QueryOptions) (*JobSummary, *QueryMeta, error) {
|
||||
var resp JobSummary
|
||||
qm, err := j.client.query("/v1/job/"+jobID+"/summary", &resp, q)
|
||||
qm, err := j.client.query("/v1/job/"+url.PathEscape(jobID)+"/summary", &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -310,7 +325,7 @@ func (j *Jobs) Dispatch(jobID string, meta map[string]string,
|
|||
Meta: meta,
|
||||
Payload: payload,
|
||||
}
|
||||
wm, err := j.client.write("/v1/job/"+jobID+"/dispatch", req, &resp, q)
|
||||
wm, err := j.client.write("/v1/job/"+url.PathEscape(jobID)+"/dispatch", req, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -330,7 +345,7 @@ func (j *Jobs) Revert(jobID string, version uint64, enforcePriorVersion *uint64,
|
|||
EnforcePriorVersion: enforcePriorVersion,
|
||||
VaultToken: vaultToken,
|
||||
}
|
||||
wm, err := j.client.write("/v1/job/"+jobID+"/revert", req, &resp, q)
|
||||
wm, err := j.client.write("/v1/job/"+url.PathEscape(jobID)+"/revert", req, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -347,7 +362,7 @@ func (j *Jobs) Stable(jobID string, version uint64, stable bool,
|
|||
JobVersion: version,
|
||||
Stable: stable,
|
||||
}
|
||||
wm, err := j.client.write("/v1/job/"+jobID+"/stable", req, &resp, q)
|
||||
wm, err := j.client.write("/v1/job/"+url.PathEscape(jobID)+"/stable", req, &resp, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
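The escaping matters for job IDs that contain URL-reserved characters, most commonly children of parameterized or periodic jobs, whose IDs include a slash. A minimal stdlib-only sketch (the job ID below is made up for illustration):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Dispatched children of a parameterized job get IDs that contain a
	// slash; this one is hypothetical.
	jobID := "example/dispatch-1577813114-a4f4b2f0"

	// Raw concatenation turns the slash into an extra path segment, so the
	// request targets the wrong endpoint.
	fmt.Println("/v1/job/" + jobID + "/summary")
	// /v1/job/example/dispatch-1577813114-a4f4b2f0/summary

	// url.PathEscape keeps the whole ID inside a single path segment.
	fmt.Println("/v1/job/" + url.PathEscape(jobID) + "/summary")
	// /v1/job/example%2Fdispatch-1577813114-a4f4b2f0/summary
}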
@@ -367,8 +382,9 @@ type UpdateStrategy struct {
 	MinHealthyTime   *time.Duration `mapstructure:"min_healthy_time"`
 	HealthyDeadline  *time.Duration `mapstructure:"healthy_deadline"`
 	ProgressDeadline *time.Duration `mapstructure:"progress_deadline"`
-	AutoRevert       *bool          `mapstructure:"auto_revert"`
 	Canary           *int           `mapstructure:"canary"`
+	AutoRevert       *bool          `mapstructure:"auto_revert"`
+	AutoPromote      *bool          `mapstructure:"auto_promote"`
 }

 // DefaultUpdateStrategy provides a baseline that can be used to upgrade

@@ -383,6 +399,7 @@ func DefaultUpdateStrategy() *UpdateStrategy {
 		ProgressDeadline: timeToPtr(10 * time.Minute),
 		AutoRevert:       boolToPtr(false),
 		Canary:           intToPtr(0),
+		AutoPromote:      boolToPtr(false),
 	}
 }

@@ -425,6 +442,10 @@ func (u *UpdateStrategy) Copy() *UpdateStrategy {
 		copy.Canary = intToPtr(*u.Canary)
 	}

+	if u.AutoPromote != nil {
+		copy.AutoPromote = boolToPtr(*u.AutoPromote)
+	}
+
 	return copy
 }

@@ -464,6 +485,10 @@ func (u *UpdateStrategy) Merge(o *UpdateStrategy) {
 	if o.Canary != nil {
 		u.Canary = intToPtr(*o.Canary)
 	}
+
+	if o.AutoPromote != nil {
+		u.AutoPromote = boolToPtr(*o.AutoPromote)
+	}
 }

 func (u *UpdateStrategy) Canonicalize() {

@@ -500,6 +525,10 @@ func (u *UpdateStrategy) Canonicalize() {
 	if u.Canary == nil {
 		u.Canary = d.Canary
 	}
+
+	if u.AutoPromote == nil {
+		u.AutoPromote = d.AutoPromote
+	}
 }

 // Empty returns whether the UpdateStrategy is empty or has user defined values.

@@ -536,6 +565,10 @@ func (u *UpdateStrategy) Empty() bool {
 		return false
 	}

+	if u.AutoPromote != nil && *u.AutoPromote {
+		return false
+	}
+
 	if u.Canary != nil && *u.Canary != 0 {
 		return false
 	}

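A usage sketch for the new AutoPromote knob, assuming the surrounding job already defines canaried task groups; the helper and values below are illustrative, not part of this diff:

package main

import (
	"time"

	"github.com/hashicorp/nomad/api"
)

// canaryUpdate builds an UpdateStrategy that deploys two canaries and, with
// AutoPromote set, lets Nomad promote them automatically once healthy.
func canaryUpdate() *api.UpdateStrategy {
	canaries := 2
	autoPromote := true
	autoRevert := true
	minHealthy := 10 * time.Second

	u := &api.UpdateStrategy{
		Canary:         &canaries,
		AutoPromote:    &autoPromote,
		AutoRevert:     &autoRevert,
		MinHealthyTime: &minHealthy,
	}
	// Canonicalize fills the remaining fields from DefaultUpdateStrategy.
	u.Canonicalize()
	return u
}

func main() {
	_ = canaryUpdate()
}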
@@ -678,7 +711,7 @@ func (j *Job) Canonicalize() {
 		j.Stop = boolToPtr(false)
 	}
 	if j.Region == nil {
-		j.Region = stringToPtr("global")
+		j.Region = stringToPtr(GlobalRegion)
 	}
 	if j.Namespace == nil {
 		j.Namespace = stringToPtr("default")

@@ -718,6 +751,8 @@
 	}
 	if j.Update != nil {
 		j.Update.Canonicalize()
+	} else if *j.Type == JobTypeService {
+		j.Update = DefaultUpdateStrategy()
 	}

 	for _, tg := range j.TaskGroups {

@@ -126,7 +126,7 @@ func (m *MonitorMessage) String() string {

 // MonitorDrain emits drain related events on the returned string channel. The
 // channel will be closed when all allocations on the draining node have
-// stopped or the context is canceled.
+// stopped, when an error occurs, or if the context is canceled.
 func (n *Nodes) MonitorDrain(ctx context.Context, nodeID string, index uint64, ignoreSys bool) <-chan *MonitorMessage {
 	outCh := make(chan *MonitorMessage, 8)
 	nodeCh := make(chan *MonitorMessage, 1)

@@ -335,7 +335,7 @@ func (n *Nodes) monitorDrainAllocs(ctx context.Context, nodeID string, ignoreSys

 		// Exit if all allocs are terminal
 		if runningAllocs == 0 {
-			msg := Messagef(MonitorMsgLevelInfo, "All allocations on node %q have stopped.", nodeID)
+			msg := Messagef(MonitorMsgLevelInfo, "All allocations on node %q have stopped", nodeID)
 			select {
 			case allocCh <- msg:
 			case <-ctx.Done():

@@ -436,6 +442,12 @@ type DriverInfo struct {
 	UpdateTime time.Time
 }

+// HostVolumeInfo is used to return metadata about a given HostVolume.
+type HostVolumeInfo struct {
+	Path     string
+	ReadOnly bool
+}
+
 // Node is used to deserialize a node entry.
 type Node struct {
 	ID string

@@ -459,6 +465,7 @@ type Node struct {
 	StatusUpdatedAt int64
 	Events          []*NodeEvent
 	Drivers         map[string]*DriverInfo
+	HostVolumes     map[string]*HostVolumeInfo
 	CreateIndex     uint64
 	ModifyIndex     uint64
 }

@@ -514,6 +521,9 @@ type DrainStrategy struct {
 	// ForceDeadline is the deadline time for the drain after which drains will
 	// be forced
 	ForceDeadline time.Time
+
+	// StartedAt is the time the drain process started
+	StartedAt time.Time
 }

 // DrainSpec describes a Node's drain behavior.

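A sketch of reading the new HostVolumes map from a client; it assumes a reachable Nomad agent, and the node ID is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder node ID; in practice it would come from client.Nodes().List().
	node, _, err := client.Nodes().Info("4b7e5e2a-example-node-id", nil)
	if err != nil {
		log.Fatal(err)
	}

	// HostVolumes is the new map added to the Node struct above.
	for name, vol := range node.HostVolumes {
		fmt.Printf("host volume %q: path=%s read_only=%t\n", name, vol.Path, vol.ReadOnly)
	}
}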
@@ -139,7 +139,7 @@ type SchedulerSetConfigurationResponse struct {

 // PreemptionConfig specifies whether preemption is enabled based on scheduler type
 type PreemptionConfig struct {
-	SystemSchedulerEnabled bool
+	SystemSchedulerEnabled  bool
 	BatchSchedulerEnabled   bool
 	ServiceSchedulerEnabled bool
 }

 // SchedulerGetConfiguration is used to query the current Scheduler configuration.

@@ -86,11 +86,13 @@ func (r *Resources) Merge(other *Resources) {
 type Port struct {
 	Label string
 	Value int `mapstructure:"static"`
+	To    int `mapstructure:"to"`
 }

 // NetworkResource is used to describe required network
 // resources of a given task.
 type NetworkResource struct {
+	Mode   string
 	Device string
 	CIDR   string
 	IP     string

@@ -105,6 +107,14 @@ func (n *NetworkResource) Canonicalize() {
 	}
 }

+func (n *NetworkResource) HasPorts() bool {
+	if n == nil {
+		return false
+	}
+
+	return len(n.ReservedPorts)+len(n.DynamicPorts) > 0
+}
+
 // NodeDeviceResource captures a set of devices sharing a common
 // vendor/type/device_name tuple.
 type NodeDeviceResource struct {

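A sketch of the new Port.To field and the HasPorts helper; the network mode, label and port number are illustrative:

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// A bridge-mode network that maps a dynamic host port to container
	// port 8080 via the new To field.
	net := &api.NetworkResource{
		Mode: "bridge",
		DynamicPorts: []api.Port{
			{Label: "http", To: 8080},
		},
	}

	// HasPorts reports whether any reserved or dynamic ports are requested.
	fmt.Println(net.HasPorts()) // true
}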
@@ -0,0 +1,179 @@
+package api
+
+import (
+	"fmt"
+	"time"
+)
+
+// CheckRestart describes if and when a task should be restarted based on
+// failing health checks.
+type CheckRestart struct {
+	Limit          int            `mapstructure:"limit"`
+	Grace          *time.Duration `mapstructure:"grace"`
+	IgnoreWarnings bool           `mapstructure:"ignore_warnings"`
+}
+
+// Canonicalize CheckRestart fields if not nil.
+func (c *CheckRestart) Canonicalize() {
+	if c == nil {
+		return
+	}
+
+	if c.Grace == nil {
+		c.Grace = timeToPtr(1 * time.Second)
+	}
+}
+
+// Copy returns a copy of CheckRestart or nil if unset.
+func (c *CheckRestart) Copy() *CheckRestart {
+	if c == nil {
+		return nil
+	}
+
+	nc := new(CheckRestart)
+	nc.Limit = c.Limit
+	if c.Grace != nil {
+		g := *c.Grace
+		nc.Grace = &g
+	}
+	nc.IgnoreWarnings = c.IgnoreWarnings
+	return nc
+}
+
+// Merge values from other CheckRestart over default values on this
+// CheckRestart and return merged copy.
+func (c *CheckRestart) Merge(o *CheckRestart) *CheckRestart {
+	if c == nil {
+		// Just return other
+		return o
+	}
+
+	nc := c.Copy()
+
+	if o == nil {
+		// Nothing to merge
+		return nc
+	}
+
+	if o.Limit > 0 {
+		nc.Limit = o.Limit
+	}
+
+	if o.Grace != nil {
+		nc.Grace = o.Grace
+	}
+
+	if o.IgnoreWarnings {
+		nc.IgnoreWarnings = o.IgnoreWarnings
+	}
+
+	return nc
+}
+
+// ServiceCheck represents the consul health check that Nomad registers.
+type ServiceCheck struct {
+	//FIXME Id is unused. Remove?
+	Id            string
+	Name          string
+	Type          string
+	Command       string
+	Args          []string
+	Path          string
+	Protocol      string
+	PortLabel     string `mapstructure:"port"`
+	AddressMode   string `mapstructure:"address_mode"`
+	Interval      time.Duration
+	Timeout       time.Duration
+	InitialStatus string `mapstructure:"initial_status"`
+	TLSSkipVerify bool   `mapstructure:"tls_skip_verify"`
+	Header        map[string][]string
+	Method        string
+	CheckRestart  *CheckRestart `mapstructure:"check_restart"`
+	GRPCService   string        `mapstructure:"grpc_service"`
+	GRPCUseTLS    bool          `mapstructure:"grpc_use_tls"`
+	TaskName      string        `mapstructure:"task"`
+}
+
+// Service represents a Consul service definition.
+type Service struct {
+	//FIXME Id is unused. Remove?
+	Id           string
+	Name         string
+	Tags         []string
+	CanaryTags   []string `mapstructure:"canary_tags"`
+	PortLabel    string   `mapstructure:"port"`
+	AddressMode  string   `mapstructure:"address_mode"`
+	Checks       []ServiceCheck
+	CheckRestart *CheckRestart `mapstructure:"check_restart"`
+	Connect      *ConsulConnect
+	Meta         map[string]string
+}
+
+// Canonicalize the Service by ensuring its name and address mode are set. Task
+// will be nil for group services.
+func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) {
+	if s.Name == "" {
+		if t != nil {
+			s.Name = fmt.Sprintf("%s-%s-%s", *job.Name, *tg.Name, t.Name)
+		} else {
+			s.Name = fmt.Sprintf("%s-%s", *job.Name, *tg.Name)
+		}
+	}
+
+	// Default to AddressModeAuto
+	if s.AddressMode == "" {
+		s.AddressMode = "auto"
+	}
+
+	// Canonicalize CheckRestart on Checks and merge Service.CheckRestart
+	// into each check.
+	for i, check := range s.Checks {
+		s.Checks[i].CheckRestart = s.CheckRestart.Merge(check.CheckRestart)
+		s.Checks[i].CheckRestart.Canonicalize()
+	}
+}
+
+// ConsulConnect represents a Consul Connect jobspec stanza.
+type ConsulConnect struct {
+	Native         bool
+	SidecarService *ConsulSidecarService `mapstructure:"sidecar_service"`
+	SidecarTask    *SidecarTask          `mapstructure:"sidecar_task"`
+}
+
+// ConsulSidecarService represents a Consul Connect SidecarService jobspec
+// stanza.
+type ConsulSidecarService struct {
+	Tags  []string
+	Port  string
+	Proxy *ConsulProxy
+}
+
+// SidecarTask represents a subset of Task fields that can be set to override
+// the fields of the Task generated for the sidecar
+type SidecarTask struct {
+	Name          string
+	Driver        string
+	User          string
+	Config        map[string]interface{}
+	Env           map[string]string
+	Resources     *Resources
+	Meta          map[string]string
+	KillTimeout   *time.Duration `mapstructure:"kill_timeout"`
+	LogConfig     *LogConfig     `mapstructure:"logs"`
+	ShutdownDelay *time.Duration `mapstructure:"shutdown_delay"`
+	KillSignal    string         `mapstructure:"kill_signal"`
+}
+
+// ConsulProxy represents a Consul Connect sidecar proxy jobspec stanza.
+type ConsulProxy struct {
+	LocalServiceAddress string `mapstructure:"local_service_address"`
+	LocalServicePort    int    `mapstructure:"local_service_port"`
+	Upstreams           []*ConsulUpstream
+	Config              map[string]interface{}
+}
+
+// ConsulUpstream represents a Consul Connect upstream jobspec stanza.
+type ConsulUpstream struct {
+	DestinationName string `mapstructure:"destination_name"`
+	LocalBindPort   int    `mapstructure:"local_bind_port"`
+}

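A sketch of how the new Service type behaves, in particular how a service-level check_restart is merged into each check by Canonicalize and where the new Connect field sits; names and timings are illustrative, and the job is built by hand rather than through a constructor:

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/nomad/api"
)

func main() {
	grace := 30 * time.Second

	svc := &api.Service{
		Name:      "web",
		PortLabel: "http",
		// Service-level restart policy, merged into every check below.
		CheckRestart: &api.CheckRestart{
			Limit: 3,
			Grace: &grace,
		},
		Checks: []api.ServiceCheck{
			{
				Type:     "http",
				Path:     "/health",
				Interval: 10 * time.Second,
				Timeout:  2 * time.Second,
			},
		},
		// The new Connect field registers a Consul Connect sidecar.
		Connect: &api.ConsulConnect{
			SidecarService: &api.ConsulSidecarService{},
		},
	}

	jobName := "example"
	job := &api.Job{Name: &jobName}
	tg := api.NewTaskGroup("web", 1)

	// nil task: this is a group service.
	svc.Canonicalize(nil, tg, job)

	fmt.Println(svc.Checks[0].CheckRestart.Limit) // 3, merged from the service level
}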
@@ -274,124 +274,6 @@ func (s *Spread) Canonicalize() {
 	}
 }

The 118 removed lines are the CheckRestart type (with its Canonicalize, Copy and Merge methods), the ServiceCheck type, the Service type and Service.Canonicalize, which have moved into the new services.go shown above. The only differences from the moved copy are the additions made there: ServiceCheck gains a TaskName field, Service gains Connect and Meta fields, and Service.Canonicalize now accepts a nil *Task for group services instead of always using t.Name.

 // EphemeralDisk is an ephemeral disk object
 type EphemeralDisk struct {
 	Sticky *bool

@@ -480,6 +362,38 @@ func (m *MigrateStrategy) Copy() *MigrateStrategy {
 	return nm
 }

+// VolumeRequest is a representation of a storage volume that a TaskGroup wishes to use.
+type VolumeRequest struct {
+	Name     string
+	Type     string
+	Source   string
+	ReadOnly bool `mapstructure:"read_only"`
+}
+
+const (
+	VolumeMountPropagationPrivate       = "private"
+	VolumeMountPropagationHostToTask    = "host-to-task"
+	VolumeMountPropagationBidirectional = "bidirectional"
+)
+
+// VolumeMount represents the relationship between a destination path in a task
+// and the task group volume that should be mounted there.
+type VolumeMount struct {
+	Volume          *string
+	Destination     *string
+	ReadOnly        *bool   `mapstructure:"read_only"`
+	PropagationMode *string `mapstructure:"propagation_mode"`
+}
+
+func (vm *VolumeMount) Canonicalize() {
+	if vm.PropagationMode == nil {
+		vm.PropagationMode = stringToPtr(VolumeMountPropagationPrivate)
+	}
+	if vm.ReadOnly == nil {
+		vm.ReadOnly = boolToPtr(false)
+	}
+}
+
 // TaskGroup is the unit of scheduling.
 type TaskGroup struct {
 	Name *string

@@ -488,12 +402,16 @@ type TaskGroup struct {
 	Affinities       []*Affinity
 	Tasks            []*Task
 	Spreads          []*Spread
+	Volumes          map[string]*VolumeRequest
 	RestartPolicy    *RestartPolicy
 	ReschedulePolicy *ReschedulePolicy
 	EphemeralDisk    *EphemeralDisk
 	Update           *UpdateStrategy
 	Migrate          *MigrateStrategy
+	Networks         []*NetworkResource
 	Meta             map[string]string
+	Services         []*Service
+	ShutdownDelay    *time.Duration `mapstructure:"shutdown_delay"`
 }

 // NewTaskGroup creates a new TaskGroup.

@@ -504,6 +422,7 @@ func NewTaskGroup(name string, count int) *TaskGroup {
 	}
 }

+// Canonicalize sets defaults and merges settings that should be inherited from the job
 func (g *TaskGroup) Canonicalize(job *Job) {
 	if g.Name == nil {
 		g.Name = stringToPtr("")

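A sketch wiring the new group-level Volumes map to a task-level VolumeMount (the Task fields involved appear further down in this file); volume names, paths and the driver are illustrative:

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// A group host-volume request and a task mount referring to it by name.
	tg := api.NewTaskGroup("db", 1)
	tg.Volumes = map[string]*api.VolumeRequest{
		"data": {
			Name:   "data",
			Type:   "host",
			Source: "postgres-data",
		},
	}

	volume := "data"
	dest := "/var/lib/postgresql"
	task := api.NewTask("postgres", "docker")
	task.VolumeMounts = []*api.VolumeMount{
		{Volume: &volume, Destination: &dest},
	}

	// Canonicalize fills PropagationMode ("private") and ReadOnly (false).
	for _, vm := range task.VolumeMounts {
		vm.Canonicalize()
	}
	fmt.Println(*task.VolumeMounts[0].PropagationMode, *task.VolumeMounts[0].ReadOnly)
}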
@@ -563,13 +482,11 @@ func (g *TaskGroup) Canonicalize(job *Job) {
 	}

 	// Merge with default reschedule policy
-	if *job.Type == "service" {
-		defaultMigrateStrategy := &MigrateStrategy{}
-		defaultMigrateStrategy.Canonicalize()
-		if g.Migrate != nil {
-			defaultMigrateStrategy.Merge(g.Migrate)
-		}
-		g.Migrate = defaultMigrateStrategy
+	if g.Migrate == nil && *job.Type == "service" {
+		g.Migrate = &MigrateStrategy{}
+	}
+	if g.Migrate != nil {
+		g.Migrate.Canonicalize()
 	}

 	var defaultRestartPolicy *RestartPolicy

@@ -605,6 +522,12 @@ func (g *TaskGroup) Canonicalize(job *Job) {
 	for _, a := range g.Affinities {
 		a.Canonicalize()
 	}
+	for _, n := range g.Networks {
+		n.Canonicalize()
+	}
+	for _, s := range g.Services {
+		s.Canonicalize(nil, g, job)
+	}
 }

 // Constrain is used to add a constraint to a task group.

@@ -691,9 +614,11 @@ type Task struct {
 	Vault           *Vault
 	Templates       []*Template
 	DispatchPayload *DispatchPayloadConfig
+	VolumeMounts    []*VolumeMount
 	Leader          bool
 	ShutdownDelay   time.Duration `mapstructure:"shutdown_delay"`
 	KillSignal      string        `mapstructure:"kill_signal"`
+	Kind            string
 }

 func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {

@@ -724,6 +649,9 @@ func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {
 	for _, a := range t.Affinities {
 		a.Canonicalize()
 	}
+	for _, vm := range t.VolumeMounts {
+		vm.Canonicalize()
+	}
 }

 // TaskArtifact is used to download artifacts before running a task.

The diff then adds a vendored, build-ignored generator from lib/pq (gen.go for the oid package, 93 lines): it connects to Postgres via the PG* environment variables (defaulting PGDATABASE=pqgotest and PGSSLMODE=disable), reads typname/oid pairs from pg_type, and writes a gofmt-ed types.go containing T_<typname> constants and a TypeName map; it is run with 'go run gen.go'.
97
vendor/github.com/ory/dockertest/docker/pkg/archive/example_changes.go
generated
vendored
Normal file
97
vendor/github.com/ory/dockertest/docker/pkg/archive/example_changes.go
generated
vendored
Normal file
|
@ -0,0 +1,97 @@
|
|||
// +build ignore
|
||||
|
||||
// Simple tool to create an archive stream from an old and new directory
|
||||
//
|
||||
// By default it will stream the comparison of two temporary directories with junk files
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/ory/dockertest/docker/pkg/archive"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
flDebug = flag.Bool("D", false, "debugging output")
|
||||
flNewDir = flag.String("newdir", "", "")
|
||||
flOldDir = flag.String("olddir", "", "")
|
||||
log = logrus.New()
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Usage = func() {
|
||||
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
|
||||
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
flag.Parse()
|
||||
log.Out = os.Stderr
|
||||
if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
var newDir, oldDir string
|
||||
|
||||
if len(*flNewDir) == 0 {
|
||||
var err error
|
||||
newDir, err = ioutil.TempDir("", "docker-test-newDir")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(newDir)
|
||||
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
newDir = *flNewDir
|
||||
}
|
||||
|
||||
if len(*flOldDir) == 0 {
|
||||
oldDir, err := ioutil.TempDir("", "docker-test-oldDir")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(oldDir)
|
||||
} else {
|
||||
oldDir = *flOldDir
|
||||
}
|
||||
|
||||
changes, err := archive.ChangesDirs(newDir, oldDir)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
a, err := archive.ExportChanges(newDir, changes)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer a.Close()
|
||||
|
||||
i, err := io.Copy(os.Stdout, a)
|
||||
if err != nil && err != io.EOF {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
|
||||
}
|
||||
|
||||
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
|
||||
fileData := []byte("fooo")
|
||||
for n := 0; n < numberOfFiles; n++ {
|
||||
fileName := fmt.Sprintf("file-%d", n)
|
||||
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if makeLinks {
|
||||
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
totalSize := numberOfFiles * len(fileData)
|
||||
return totalSize, nil
|
||||
}
|
|
@ -0,0 +1,88 @@
|
|||
// +build ignore
|
||||
// Hand writing: _Ctype_struct___0
|
||||
|
||||
/*
|
||||
Input to cgo -godefs.
|
||||
|
||||
*/
|
||||
|
||||
package disk
|
||||
|
||||
/*
|
||||
#include <sys/types.h>
|
||||
#include <sys/mount.h>
|
||||
#include <devstat.h>
|
||||
|
||||
enum {
|
||||
sizeofPtr = sizeof(void*),
|
||||
};
|
||||
|
||||
// because statinfo has long double snap_time, redefine with changing long long
|
||||
struct statinfo2 {
|
||||
long cp_time[CPUSTATES];
|
||||
long tk_nin;
|
||||
long tk_nout;
|
||||
struct devinfo *dinfo;
|
||||
long long snap_time;
|
||||
};
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics; for internal use.
|
||||
|
||||
const (
|
||||
sizeofPtr = C.sizeofPtr
|
||||
sizeofShort = C.sizeof_short
|
||||
sizeofInt = C.sizeof_int
|
||||
sizeofLong = C.sizeof_long
|
||||
sizeofLongLong = C.sizeof_longlong
|
||||
sizeofLongDouble = C.sizeof_longlong
|
||||
|
||||
DEVSTAT_NO_DATA = 0x00
|
||||
DEVSTAT_READ = 0x01
|
||||
DEVSTAT_WRITE = 0x02
|
||||
DEVSTAT_FREE = 0x03
|
||||
|
||||
// from sys/mount.h
|
||||
MNT_RDONLY = 0x00000001 /* read only filesystem */
|
||||
MNT_SYNCHRONOUS = 0x00000002 /* filesystem written synchronously */
|
||||
MNT_NOEXEC = 0x00000004 /* can't exec from filesystem */
|
||||
MNT_NOSUID = 0x00000008 /* don't honor setuid bits on fs */
|
||||
MNT_UNION = 0x00000020 /* union with underlying filesystem */
|
||||
MNT_ASYNC = 0x00000040 /* filesystem written asynchronously */
|
||||
MNT_SUIDDIR = 0x00100000 /* special handling of SUID on dirs */
|
||||
MNT_SOFTDEP = 0x00200000 /* soft updates being done */
|
||||
MNT_NOSYMFOLLOW = 0x00400000 /* do not follow symlinks */
|
||||
MNT_GJOURNAL = 0x02000000 /* GEOM journal support enabled */
|
||||
MNT_MULTILABEL = 0x04000000 /* MAC support for individual objects */
|
||||
MNT_ACLS = 0x08000000 /* ACL support enabled */
|
||||
MNT_NOATIME = 0x10000000 /* disable update of file access time */
|
||||
MNT_NOCLUSTERR = 0x40000000 /* disable cluster read */
|
||||
MNT_NOCLUSTERW = 0x80000000 /* disable cluster write */
|
||||
MNT_NFS4ACLS = 0x00000010
|
||||
|
||||
MNT_WAIT = 1 /* synchronously wait for I/O to complete */
|
||||
MNT_NOWAIT = 2 /* start all I/O, but do not wait for it */
|
||||
MNT_LAZY = 3 /* push data not written by filesystem syncer */
|
||||
MNT_SUSPEND = 4 /* Suspend file system after sync */
|
||||
)
|
||||
|
||||
const (
|
||||
sizeOfDevstat = C.sizeof_struct_devstat
|
||||
)
|
||||
|
||||
// Basic types
|
||||
|
||||
type (
|
||||
_C_short C.short
|
||||
_C_int C.int
|
||||
_C_long C.long
|
||||
_C_long_long C.longlong
|
||||
_C_long_double C.longlong
|
||||
)
|
||||
|
||||
type Statfs C.struct_statfs
|
||||
type Fsid C.struct_fsid
|
||||
|
||||
type Devstat C.struct_devstat
|
||||
type Bintime C.struct_bintime
|
|
@ -0,0 +1,70 @@
|
|||
// +build ignore
|
||||
// Hand writing: _Ctype_struct___0
|
||||
|
||||
/*
|
||||
Input to cgo -godefs.
|
||||
*/
|
||||
|
||||
package disk
|
||||
|
||||
/*
|
||||
#include <sys/types.h>
|
||||
#include <sys/disk.h>
|
||||
#include <sys/mount.h>
|
||||
|
||||
enum {
|
||||
sizeofPtr = sizeof(void*),
|
||||
};
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics; for internal use.
|
||||
|
||||
const (
|
||||
sizeofPtr = C.sizeofPtr
|
||||
sizeofShort = C.sizeof_short
|
||||
sizeofInt = C.sizeof_int
|
||||
sizeofLong = C.sizeof_long
|
||||
sizeofLongLong = C.sizeof_longlong
|
||||
sizeofLongDouble = C.sizeof_longlong
|
||||
|
||||
DEVSTAT_NO_DATA = 0x00
|
||||
DEVSTAT_READ = 0x01
|
||||
DEVSTAT_WRITE = 0x02
|
||||
DEVSTAT_FREE = 0x03
|
||||
|
||||
// from sys/mount.h
|
||||
MNT_RDONLY = 0x00000001 /* read only filesystem */
|
||||
MNT_SYNCHRONOUS = 0x00000002 /* filesystem written synchronously */
|
||||
MNT_NOEXEC = 0x00000004 /* can't exec from filesystem */
|
||||
MNT_NOSUID = 0x00000008 /* don't honor setuid bits on fs */
|
||||
MNT_NODEV = 0x00000010 /* don't interpret special files */
|
||||
MNT_ASYNC = 0x00000040 /* filesystem written asynchronously */
|
||||
|
||||
MNT_WAIT = 1 /* synchronously wait for I/O to complete */
|
||||
MNT_NOWAIT = 2 /* start all I/O, but do not wait for it */
|
||||
MNT_LAZY = 3 /* push data not written by filesystem syncer */
|
||||
)
|
||||
|
||||
const (
|
||||
sizeOfDiskstats = C.sizeof_struct_diskstats
|
||||
)
|
||||
|
||||
// Basic types
|
||||
|
||||
type (
|
||||
_C_short C.short
|
||||
_C_int C.int
|
||||
_C_long C.long
|
||||
_C_long_long C.longlong
|
||||
_C_long_double C.longlong
|
||||
)
|
||||
|
||||
type Statfs C.struct_statfs
|
||||
type Diskstats C.struct_diskstats
|
||||
type Fsid C.fsid_t
|
||||
type Timeval C.struct_timeval
|
||||
|
||||
type Diskstat C.struct_diskstat
|
||||
type Bintime C.struct_bintime
|
|
@ -0,0 +1,17 @@
|
|||
// +build ignore
|
||||
// plus hand editing about timeval
|
||||
|
||||
/*
|
||||
Input to cgo -godefs.
|
||||
*/
|
||||
|
||||
package host
|
||||
|
||||
/*
|
||||
#include <sys/time.h>
|
||||
#include <utmpx.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
type Utmpx C.struct_utmpx
|
||||
type Timeval C.struct_timeval
|
|
@ -0,0 +1,44 @@
|
|||
// +build ignore
|
||||
|
||||
/*
|
||||
Input to cgo -godefs.
|
||||
*/
|
||||
|
||||
package host
|
||||
|
||||
/*
|
||||
#define KERNEL
|
||||
#include <sys/types.h>
|
||||
#include <sys/time.h>
|
||||
#include <utmpx.h>
|
||||
#include "freebsd_headers/utxdb.h"
|
||||
|
||||
enum {
|
||||
sizeofPtr = sizeof(void*),
|
||||
};
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics; for internal use.
|
||||
|
||||
const (
|
||||
sizeofPtr = C.sizeofPtr
|
||||
sizeofShort = C.sizeof_short
|
||||
sizeofInt = C.sizeof_int
|
||||
sizeofLong = C.sizeof_long
|
||||
sizeofLongLong = C.sizeof_longlong
|
||||
sizeOfUtmpx = C.sizeof_struct_futx
|
||||
)
|
||||
|
||||
// Basic types
|
||||
|
||||
type (
|
||||
_C_short C.short
|
||||
_C_int C.int
|
||||
_C_long C.long
|
||||
_C_long_long C.longlong
|
||||
)
|
||||
|
||||
type Utmp C.struct_utmp // for FreeBSD 9.0 compatibility
|
||||
type Utmpx C.struct_futx
|
|
@ -0,0 +1,42 @@
|
|||
// +build ignore
|
||||
|
||||
/*
|
||||
Input to cgo -godefs.
|
||||
*/
|
||||
|
||||
package host
|
||||
|
||||
/*
|
||||
#include <sys/types.h>
|
||||
#include <utmp.h>
|
||||
|
||||
enum {
|
||||
sizeofPtr = sizeof(void*),
|
||||
};
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics; for internal use.
|
||||
|
||||
const (
|
||||
sizeofPtr = C.sizeofPtr
|
||||
sizeofShort = C.sizeof_short
|
||||
sizeofInt = C.sizeof_int
|
||||
sizeofLong = C.sizeof_long
|
||||
sizeofLongLong = C.sizeof_longlong
|
||||
sizeOfUtmp = C.sizeof_struct_utmp
|
||||
)
|
||||
|
||||
// Basic types
|
||||
|
||||
type (
|
||||
_C_short C.short
|
||||
_C_int C.int
|
||||
_C_long C.long
|
||||
_C_long_long C.longlong
|
||||
)
|
||||
|
||||
type utmp C.struct_utmp
|
||||
type exit_status C.struct_exit_status
|
||||
type timeval C.struct_timeval
|
|
@ -0,0 +1,43 @@
|
|||
// +build ignore
|
||||
|
||||
/*
|
||||
Input to cgo -godefs.
|
||||
*/
|
||||
|
||||
package host
|
||||
|
||||
/*
|
||||
#define KERNEL
|
||||
#include <sys/types.h>
|
||||
#include <sys/time.h>
|
||||
#include <utmp.h>
|
||||
|
||||
enum {
|
||||
sizeofPtr = sizeof(void*),
|
||||
};
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics; for internal use.
|
||||
|
||||
const (
|
||||
sizeofPtr = C.sizeofPtr
|
||||
sizeofShort = C.sizeof_short
|
||||
sizeofInt = C.sizeof_int
|
||||
sizeofLong = C.sizeof_long
|
||||
sizeofLongLong = C.sizeof_longlong
|
||||
sizeOfUtmp = C.sizeof_struct_utmp
|
||||
)
|
||||
|
||||
// Basic types
|
||||
|
||||
type (
|
||||
_C_short C.short
|
||||
_C_int C.int
|
||||
_C_long C.long
|
||||
_C_long_long C.longlong
|
||||
)
|
||||
|
||||
type Utmp C.struct_utmp
|
||||
type Timeval C.struct_timeval
|
|
@ -0,0 +1,34 @@
|
|||
// +build ignore
|
||||
|
||||
/*
|
||||
Input to cgo -godefs.
|
||||
*/
|
||||
|
||||
package mem
|
||||
|
||||
/*
|
||||
#include <sys/types.h>
|
||||
#include <sys/mount.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <uvm/uvmexp.h>
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics; for internal use.
|
||||
|
||||
const (
|
||||
CTLVm = 2
|
||||
CTLVfs = 10
|
||||
VmUvmexp = 4 // get uvmexp
|
||||
VfsGeneric = 0
|
||||
VfsBcacheStat = 3
|
||||
)
|
||||
|
||||
const (
|
||||
sizeOfUvmexp = C.sizeof_struct_uvmexp
|
||||
sizeOfBcachestats = C.sizeof_struct_bcachestats
|
||||
)
|
||||
|
||||
type Uvmexp C.struct_uvmexp
|
||||
type Bcachestats C.struct_bcachestats
|
|
@ -0,0 +1,160 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Hand Writing
|
||||
// - all pointer in ExternProc to uint64
|
||||
|
||||
// +build ignore
|
||||
|
||||
/*
|
||||
Input to cgo -godefs.
|
||||
*/
|
||||
|
||||
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||
// +godefs map struct_ [16]byte /* in6_addr */
|
||||
|
||||
package process
|
||||
|
||||
/*
|
||||
#define __DARWIN_UNIX03 0
|
||||
#define KERNEL
|
||||
#define _DARWIN_USE_64_BIT_INODE
|
||||
#include <dirent.h>
|
||||
#include <fcntl.h>
|
||||
#include <signal.h>
|
||||
#include <termios.h>
|
||||
#include <unistd.h>
|
||||
#include <mach/mach.h>
|
||||
#include <mach/message.h>
|
||||
#include <sys/event.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/mount.h>
|
||||
#include <sys/param.h>
|
||||
#include <sys/ptrace.h>
|
||||
#include <sys/resource.h>
|
||||
#include <sys/select.h>
|
||||
#include <sys/signal.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/uio.h>
|
||||
#include <sys/un.h>
|
||||
#include <net/bpf.h>
|
||||
#include <net/if_dl.h>
|
||||
#include <net/if_var.h>
|
||||
#include <net/route.h>
|
||||
#include <netinet/in.h>
|
||||
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/ucred.h>
|
||||
#include <sys/proc.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/_types/_timeval.h>
|
||||
#include <sys/appleapiopts.h>
|
||||
#include <sys/cdefs.h>
|
||||
#include <sys/param.h>
|
||||
#include <bsm/audit.h>
|
||||
#include <sys/queue.h>
|
||||
|
||||
enum {
|
||||
sizeofPtr = sizeof(void*),
|
||||
};
|
||||
|
||||
union sockaddr_all {
|
||||
struct sockaddr s1; // this one gets used for fields
|
||||
struct sockaddr_in s2; // these pad it out
|
||||
struct sockaddr_in6 s3;
|
||||
struct sockaddr_un s4;
|
||||
struct sockaddr_dl s5;
|
||||
};
|
||||
|
||||
struct sockaddr_any {
|
||||
struct sockaddr addr;
|
||||
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
|
||||
};
|
||||
|
||||
struct ucred_queue {
|
||||
struct ucred *tqe_next;
|
||||
struct ucred **tqe_prev;
|
||||
TRACEBUF
|
||||
};
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics; for internal use.
|
||||
|
||||
const (
|
||||
sizeofPtr = C.sizeofPtr
|
||||
sizeofShort = C.sizeof_short
|
||||
sizeofInt = C.sizeof_int
|
||||
sizeofLong = C.sizeof_long
|
||||
sizeofLongLong = C.sizeof_longlong
|
||||
)
|
||||
|
||||
// Basic types
|
||||
|
||||
type (
|
||||
_C_short C.short
|
||||
_C_int C.int
|
||||
_C_long C.long
|
||||
_C_long_long C.longlong
|
||||
)
|
||||
|
||||
// Time
|
||||
|
||||
type Timespec C.struct_timespec
|
||||
|
||||
type Timeval C.struct_timeval
|
||||
|
||||
// Processes
|
||||
|
||||
type Rusage C.struct_rusage
|
||||
|
||||
type Rlimit C.struct_rlimit
|
||||
|
||||
type UGid_t C.gid_t
|
||||
|
||||
type KinfoProc C.struct_kinfo_proc
|
||||
|
||||
type Eproc C.struct_eproc
|
||||
|
||||
type Proc C.struct_proc
|
||||
|
||||
type Session C.struct_session
|
||||
|
||||
type ucred C.struct_ucred
|
||||
|
||||
type Uucred C.struct__ucred
|
||||
|
||||
type Upcred C.struct__pcred
|
||||
|
||||
type Vmspace C.struct_vmspace
|
||||
|
||||
type Sigacts C.struct_sigacts
|
||||
|
||||
type ExternProc C.struct_extern_proc
|
||||
|
||||
type Itimerval C.struct_itimerval
|
||||
|
||||
type Vnode C.struct_vnode
|
||||
|
||||
type Pgrp C.struct_pgrp
|
||||
|
||||
type UserStruct C.struct_user
|
||||
|
||||
type Au_session C.struct_au_session
|
||||
|
||||
type Posix_cred C.struct_posix_cred
|
||||
|
||||
type Label C.struct_label
|
||||
|
||||
type AuditinfoAddr C.struct_auditinfo_addr
|
||||
type AuMask C.struct_au_mask
|
||||
type AuTidAddr C.struct_au_tid_addr
|
||||
|
||||
// TAILQ(ucred)
|
||||
type UcredQueue C.struct_ucred_queue
|
|
@ -0,0 +1,95 @@
|
|||
// +build ignore
|
||||
|
||||
// We still need editing by hands.
|
||||
// go tool cgo -godefs types_freebsd.go | sed 's/\*int64/int64/' | sed 's/\*byte/int64/' > process_freebsd_amd64.go
|
||||
|
||||
/*
|
||||
Input to cgo -godefs.
|
||||
*/
|
||||
|
||||
// +godefs map struct_pargs int64 /* pargs */
|
||||
// +godefs map struct_proc int64 /* proc */
|
||||
// +godefs map struct_user int64 /* user */
|
||||
// +godefs map struct_vnode int64 /* vnode */
|
||||
// +godefs map struct_vnode int64 /* vnode */
|
||||
// +godefs map struct_filedesc int64 /* filedesc */
|
||||
// +godefs map struct_vmspace int64 /* vmspace */
|
||||
// +godefs map struct_pcb int64 /* pcb */
|
||||
// +godefs map struct_thread int64 /* thread */
|
||||
// +godefs map struct___sigset [16]byte /* sigset */
|
||||
|
||||
package process
|
||||
|
||||
/*
|
||||
#include <sys/types.h>
|
||||
#include <sys/user.h>
|
||||
|
||||
enum {
|
||||
sizeofPtr = sizeof(void*),
|
||||
};
|
||||
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics; for internal use.
|
||||
|
||||
const (
|
||||
CTLKern = 1 // "high kernel": proc, limits
|
||||
KernProc = 14 // struct: process entries
|
||||
KernProcPID = 1 // by process id
|
||||
KernProcProc = 8 // only return procs
|
||||
KernProcPathname = 12 // path to executable
|
||||
KernProcArgs = 7 // get/set arguments/proctitle
|
||||
)
|
||||
|
||||
const (
|
||||
sizeofPtr = C.sizeofPtr
|
||||
sizeofShort = C.sizeof_short
|
||||
sizeofInt = C.sizeof_int
|
||||
sizeofLong = C.sizeof_long
|
||||
sizeofLongLong = C.sizeof_longlong
|
||||
)
|
||||
|
||||
const (
|
||||
sizeOfKinfoVmentry = C.sizeof_struct_kinfo_vmentry
|
||||
sizeOfKinfoProc = C.sizeof_struct_kinfo_proc
|
||||
)
|
||||
|
||||
// from sys/proc.h
|
||||
const (
|
||||
SIDL = 1 /* Process being created by fork. */
|
||||
SRUN = 2 /* Currently runnable. */
|
||||
SSLEEP = 3 /* Sleeping on an address. */
|
||||
SSTOP = 4 /* Process debugging or suspension. */
|
||||
SZOMB = 5 /* Awaiting collection by parent. */
|
||||
SWAIT = 6 /* Waiting for interrupt. */
|
||||
SLOCK = 7 /* Blocked on a lock. */
|
||||
)
|
||||
|
||||
// Basic types
|
||||
|
||||
type (
|
||||
_C_short C.short
|
||||
_C_int C.int
|
||||
_C_long C.long
|
||||
_C_long_long C.longlong
|
||||
)
|
||||
|
||||
// Time
|
||||
|
||||
type Timespec C.struct_timespec
|
||||
|
||||
type Timeval C.struct_timeval
|
||||
|
||||
// Processes
|
||||
|
||||
type Rusage C.struct_rusage
|
||||
|
||||
type Rlimit C.struct_rlimit
|
||||
|
||||
type KinfoProc C.struct_kinfo_proc
|
||||
|
||||
type Priority C.struct_priority
|
||||
|
||||
type KinfoVmentry C.struct_kinfo_vmentry
|
|
@ -0,0 +1,103 @@
|
|||
// +build ignore
|
||||
|
||||
// We still need editing by hands.
|
||||
// go tool cgo -godefs types_openbsd.go | sed 's/\*int64/int64/' | sed 's/\*byte/int64/' > process_openbsd_amd64.go
|
||||
|
||||
/*
|
||||
Input to cgo -godefs.
|
||||
*/
|
||||
|
||||
// +godefs map struct_pargs int64 /* pargs */
|
||||
// +godefs map struct_proc int64 /* proc */
|
||||
// +godefs map struct_user int64 /* user */
|
||||
// +godefs map struct_vnode int64 /* vnode */
|
||||
// +godefs map struct_vnode int64 /* vnode */
|
||||
// +godefs map struct_filedesc int64 /* filedesc */
|
||||
// +godefs map struct_vmspace int64 /* vmspace */
|
||||
// +godefs map struct_pcb int64 /* pcb */
|
||||
// +godefs map struct_thread int64 /* thread */
|
||||
// +godefs map struct___sigset [16]byte /* sigset */
|
||||
|
||||
package process
|
||||
|
||||
/*
|
||||
#include <sys/types.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/user.h>
|
||||
|
||||
enum {
|
||||
sizeofPtr = sizeof(void*),
|
||||
};
|
||||
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics; for internal use.
|
||||
|
||||
const (
|
||||
CTLKern = 1 // "high kernel": proc, limits
|
||||
KernProc = 66 // struct: process entries
|
||||
KernProcAll = 0
|
||||
KernProcPID = 1 // by process id
|
||||
KernProcProc = 8 // only return procs
|
||||
KernProcPathname = 12 // path to executable
|
||||
KernProcArgs = 55 // get/set arguments/proctitle
|
||||
KernProcArgv = 1
|
||||
KernProcEnv = 3
|
||||
)
|
||||
|
||||
const (
|
||||
ArgMax = 256 * 1024 // sys/syslimits.h:#define ARG_MAX
|
||||
)
|
||||
|
||||
const (
|
||||
sizeofPtr = C.sizeofPtr
|
||||
sizeofShort = C.sizeof_short
|
||||
sizeofInt = C.sizeof_int
|
||||
sizeofLong = C.sizeof_long
|
||||
sizeofLongLong = C.sizeof_longlong
|
||||
)
|
||||
|
||||
const (
|
||||
sizeOfKinfoVmentry = C.sizeof_struct_kinfo_vmentry
|
||||
sizeOfKinfoProc = C.sizeof_struct_kinfo_proc
|
||||
)
|
||||
|
||||
// from sys/proc.h
|
||||
const (
|
||||
SIDL = 1 /* Process being created by fork. */
|
||||
SRUN = 2 /* Currently runnable. */
|
||||
SSLEEP = 3 /* Sleeping on an address. */
|
||||
SSTOP = 4 /* Process debugging or suspension. */
|
||||
SZOMB = 5 /* Awaiting collection by parent. */
|
||||
SDEAD = 6 /* Thread is almost gone */
|
||||
SONPROC = 7 /* Thread is currently on a CPU. */
|
||||
)
|
||||
|
||||
// Basic types
|
||||
|
||||
type (
|
||||
_C_short C.short
|
||||
_C_int C.int
|
||||
_C_long C.long
|
||||
_C_long_long C.longlong
|
||||
)
|
||||
|
||||
// Time
|
||||
|
||||
type Timespec C.struct_timespec
|
||||
|
||||
type Timeval C.struct_timeval
|
||||
|
||||
// Processes
|
||||
|
||||
type Rusage C.struct_rusage
|
||||
|
||||
type Rlimit C.struct_rlimit
|
||||
|
||||
type KinfoProc C.struct_kinfo_proc
|
||||
|
||||
type Priority C.struct_priority
|
||||
|
||||
type KinfoVmentry C.struct_kinfo_vmentry
|
|
@ -0,0 +1,508 @@
|
|||
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package codec
|
||||
|
||||
import "reflect"
|
||||
|
||||
/*
|
||||
|
||||
A strict Non-validating namespace-aware XML 1.0 parser and (en|de)coder.
|
||||
|
||||
We are attempting this due to perceived issues with encoding/xml:
|
||||
- Complicated. It tried to do too much, and is not as simple to use as json.
|
||||
- Due to over-engineering, reflection is over-used AND performance suffers:
|
||||
java is 6X faster:http://fabsk.eu/blog/category/informatique/dev/golang/
|
||||
even PYTHON performs better: http://outgoing.typepad.com/outgoing/2014/07/exploring-golang.html
|
||||
|
||||
codec framework will offer the following benefits
|
||||
- VASTLY improved performance (when using reflection-mode or codecgen)
|
||||
- simplicity and consistency: with the rest of the supported formats
|
||||
- all other benefits of codec framework (streaming, codegeneration, etc)
|
||||
|
||||
codec is not a drop-in replacement for encoding/xml.
|
||||
It is a replacement, based on the simplicity and performance of codec.
|
||||
Look at it like JAXB for Go.
|
||||
|
||||
Challenges:
|
||||
- Need to output XML preamble, with all namespaces at the right location in the output.
|
||||
- Each "end" block is dynamic, so we need to maintain a context-aware stack
|
||||
- How to decide when to use an attribute VS an element
|
||||
- How to handle chardata, attr, comment EXPLICITLY.
|
||||
- Should it output fragments?
|
||||
e.g. encoding a bool should just output true OR false, which is not well-formed XML.
|
||||
|
||||
Extend the struct tag. See representative example:
|
||||
type X struct {
|
||||
ID uint8 `codec:"http://ugorji.net/x-namespace xid id,omitempty,toarray,attr,cdata"`
|
||||
// format: [namespace-uri ][namespace-prefix ]local-name, ...
|
||||
}
|
||||
|
||||
Based on this, we encode
|
||||
- fields as elements, BUT
|
||||
encode as attributes if struct tag contains ",attr" and is a scalar (bool, number or string)
|
||||
- text as entity-escaped text, BUT encode as CDATA if struct tag contains ",cdata".
|
||||
|
||||
To handle namespaces:
|
||||
- XMLHandle is denoted as being namespace-aware.
|
||||
Consequently, we WILL use the ns:name pair to encode and decode if defined, else use the plain name.
|
||||
- *Encoder and *Decoder know whether the Handle "prefers" namespaces.
|
||||
- add *Encoder.getEncName(*structFieldInfo).
|
||||
No one calls *structFieldInfo.indexForEncName directly anymore
|
||||
- OR better yet: indexForEncName is namespace-aware, and helper.go is all namespace-aware
|
||||
indexForEncName takes a parameter of the form namespace:local-name OR local-name
|
||||
- add *Decoder.getStructFieldInfo(encName string) // encName here is either like abc, or h1:nsabc
|
||||
by being a method on *Decoder, or maybe a method on the Handle itself.
|
||||
No one accesses .encName anymore
|
||||
- let encode.go and decode.go use these (for consistency)
|
||||
- only problem exists for gen.go, where we create a big switch on encName.
|
||||
Now, we also have to add a switch on strings.endsWith(kName, encNsName)
|
||||
- gen.go will need to have many more methods, and then double-on the 2 switch loops like:
|
||||
switch k {
|
||||
case "abc" : x.abc()
|
||||
case "def" : x.def()
|
||||
default {
|
||||
switch {
|
||||
case !nsAware: panic(...)
|
||||
case strings.endsWith(":abc"): x.abc()
|
||||
case strings.endsWith(":def"): x.def()
|
||||
default: panic(...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
The structure below accommodates this:
|
||||
|
||||
type typeInfo struct {
|
||||
sfi []*structFieldInfo // sorted by encName
|
||||
sfins // sorted by namespace
|
||||
sfia // sorted, to have those with attributes at the top. Needed to write XML appropriately.
|
||||
sfip // unsorted
|
||||
}
|
||||
type structFieldInfo struct {
|
||||
encName
|
||||
nsEncName
|
||||
ns string
|
||||
attr bool
|
||||
cdata bool
|
||||
}
|
||||
|
||||
indexForEncName is now an internal helper function that takes a sorted array
|
||||
(one of ti.sfins or ti.sfi). It is only used by *Encoder.getStructFieldInfo(...)
|
||||
|
||||
There will be a separate parser from the builder.
|
||||
The parser will have a method: next() xmlToken method. It has lookahead support,
|
||||
so you can pop multiple tokens, make a determination, and push them back in the order popped.
|
||||
This will be needed to determine whether we are "nakedly" decoding a container or not.
|
||||
The stack will be implemented using a slice and push/pop happens at the [0] element.
|
||||
|
||||
xmlToken has fields:
|
||||
- type uint8: 0 | ElementStart | ElementEnd | AttrKey | AttrVal | Text
|
||||
- value string
|
||||
- ns string
|
||||
|
||||
SEE: http://www.xml.com/pub/a/98/10/guide0.html?page=3#ENTDECL
|
||||
|
||||
The following are skipped when parsing:
|
||||
- External Entities (from external file)
|
||||
- Notation Declaration e.g. <!NOTATION GIF87A SYSTEM "GIF">
|
||||
- Entity Declarations & References
|
||||
- XML Declaration (assume UTF-8)
|
||||
- XML Directive i.e. <! ... >
|
||||
- Other Declarations: Notation, etc.
|
||||
- Comment
|
||||
- Processing Instruction
|
||||
- schema / DTD for validation:
|
||||
We are not a VALIDATING parser. Validation is done elsewhere.
|
||||
However, some parts of the DTD internal subset are used (SEE BELOW).
|
||||
For Attribute List Declarations e.g.
|
||||
<!ATTLIST foo:oldjoke name ID #REQUIRED label CDATA #IMPLIED status ( funny | notfunny ) 'funny' >
|
||||
We considered using the ATTLIST to get "default" value, but not to validate the contents. (VETOED)
|
||||
|
||||
The following XML features are supported
|
||||
- Namespace
|
||||
- Element
|
||||
- Attribute
|
||||
- cdata
|
||||
- Unicode escape
|
||||
|
||||
The following DTD (when as an internal sub-set) features are supported:
|
||||
- Internal Entities e.g.
|
||||
<!ELEMENT burns "ugorji is cool" > AND entities for the set: [<>&"']
|
||||
- Parameter entities e.g.
|
||||
<!ENTITY % personcontent "ugorji is cool"> <!ELEMENT burns (%personcontent;)*>
|
||||
|
||||
At decode time, a structure containing the following is kept
|
||||
- namespace mapping
|
||||
- default attribute values
|
||||
- all internal entities (<>&"' and others written in the document)
|
||||
|
||||
When decode starts, it parses XML namespace declarations and creates a map in the
|
||||
xmlDecDriver. While parsing, that map continuously gets updated.
|
||||
The only problem happens when a namespace declaration happens on the node that it defines.
|
||||
e.g. <hn:name xmlns:hn="http://www.ugorji.net" >
|
||||
To handle this, each Element must be fully parsed at a time,
|
||||
even if it amounts to multiple tokens which are returned one at a time on request.
|
||||
|
||||
xmlns is a special attribute name.
|
||||
- It is used to define namespaces, including the default
|
||||
- It is never returned as an AttrKey or AttrVal.
|
||||
*We may decide later to allow user to use it e.g. you want to parse the xmlns mappings into a field.*
|
||||
|
||||
Number, bool, null, mapKey, etc can all be decoded from any xmlToken.
|
||||
This accommodates map[int]string for example.
|
||||
|
||||
It should be possible to create a schema from the types,
|
||||
or vice versa (generate types from schema with appropriate tags).
|
||||
This is however out-of-scope from this parsing project.
|
||||
|
||||
We should write all namespace information at the first point that it is referenced in the tree,
|
||||
and use the mapping for all child nodes and attributes. This means that state is maintained
|
||||
at a point in the tree. This also means that calls to Decode or MustDecode will reset some state.
|
||||
|
||||
When decoding, it is important to keep track of entity references and default attribute values.
|
||||
It seems these can only be stored in the DTD components. We should honor them when decoding.
|
||||
|
||||
Configuration for XMLHandle will look like this:
|
||||
|
||||
XMLHandle
|
||||
DefaultNS string
|
||||
// Encoding:
|
||||
NS map[string]string // ns URI to key, used for encoding
|
||||
// Decoding: in case ENTITY declared in external schema or dtd, store info needed here
|
||||
Entities map[string]string // map of entity rep to character
|
||||
|
||||
|
||||
During encode, if a namespace mapping is not defined for a namespace found on a struct,
|
||||
then we create a mapping for it using nsN (where N is 1..1000000, and doesn't conflict
|
||||
with any other namespace mapping).
|
||||
|
||||
Note that different fields in a struct can have different namespaces.
|
||||
However, all fields will default to the namespace on the _struct field (if defined).
|
||||
|
||||
An XML document is a name, a map of attributes and a list of children.
|
||||
Consequently, we cannot "DecodeNaked" into a map[string]interface{} (for example).
|
||||
We have to "DecodeNaked" into something that resembles XML data.
|
||||
|
||||
To support DecodeNaked (decode into nil interface{}), we have to define some "supporting" types:
|
||||
type Name struct { // Preferred. Less allocations due to conversions.
|
||||
Local string
|
||||
Space string
|
||||
}
|
||||
type Element struct {
|
||||
Name Name
|
||||
Attrs map[Name]string
|
||||
Children []interface{} // each child is either *Element or string
|
||||
}
|
||||
Only two "supporting" types are exposed for XML: Name and Element.
|
||||
|
||||
// ------------------
|
||||
|
||||
We considered 'type Name string' where Name is like "Space Local" (space-separated).
|
||||
We decided against it, because each creation of a name would lead to
|
||||
double allocation (first convert []byte to string, then concatenate them into a string).
|
||||
The benefit is that it is faster to read Attrs from a map. But given that Element is a value
|
||||
object, we want to eschew methods and have public exposed variables.
|
||||
|
||||
We also considered the following, where xml types were not value objects, and we used
|
||||
intelligent accessor methods to extract information and for performance.
|
||||
*** WE DECIDED AGAINST THIS. ***
|
||||
type Attr struct {
|
||||
Name Name
|
||||
Value string
|
||||
}
|
||||
// Element is a ValueObject: There are no accessor methods.
|
||||
// Make element self-contained.
|
||||
type Element struct {
|
||||
Name Name
|
||||
attrsMap map[string]string // where key is "Space Local"
|
||||
attrs []Attr
|
||||
childrenT []string
|
||||
childrenE []Element
|
||||
childrenI []int // each child is a index into T or E.
|
||||
}
|
||||
func (x *Element) child(i) interface{} // returns string or *Element
|
||||
|
||||
// ------------------
|
||||
|
||||
Per XML spec and our default handling, white space is always treated as
|
||||
insignificant between elements, except in a text node. The xml:space='preserve'
|
||||
attribute is ignored.
|
||||
|
||||
**Note: there is no xml: namespace. The xml: attributes were defined before namespaces.**
|
||||
**So treat them as just "directives" that should be interpreted to mean something**.
|
||||
|
||||
On encoding, we support indenting aka prettifying markup in the same way we support it for json.
|
||||
|
||||
A document or element can only be encoded/decoded from/to a struct. In this mode:
|
||||
- struct name maps to element name (or tag-info from _struct field)
|
||||
- fields are mapped to child elements or attributes
|
||||
|
||||
A map is either encoded as attributes on current element, or as a set of child elements.
|
||||
Maps are encoded as attributes iff their keys and values are primitives (number, bool, string).
|
||||
|
||||
A list is encoded as a set of child elements.
|
||||
|
||||
Primitives (number, bool, string) are encoded as an element, attribute or text
|
||||
depending on the context.
|
||||
|
||||
Extensions must encode themselves as a text string.
|
||||
|
||||
Encoding is tough, specifically when encoding mappings, because we need to encode
|
||||
as either attribute or element. To do this, we need to default to encoding as attributes,
|
||||
and then let Encoder inform the Handle when to start encoding as nodes.
|
||||
i.e. Encoder does something like:
|
||||
|
||||
h.EncodeMapStart()
|
||||
h.Encode(), h.Encode(), ...
|
||||
h.EncodeMapNotAttrSignal() // this is not a bool, because it's a signal
|
||||
h.Encode(), h.Encode(), ...
|
||||
h.EncodeEnd()
|
||||
|
||||
Only XMLHandle understands this, and will set itself to start encoding as elements.
|
||||
|
||||
This support extends to maps. For example, if a struct field is a map, and it has
|
||||
the struct tag signifying it should be attr, then all its fields are encoded as attributes.
|
||||
e.g.
|
||||
|
||||
type X struct {
|
||||
M map[string]int `codec:"m,attr"` // encode keys as attributes named
|
||||
}
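So a value like X{M: map[string]int{"a": 1, "b": 2}} would render its map entries as
attributes on the enclosing element, roughly <X a="1" b="2"/> (sketch; the exact
attribute naming scheme is not pinned down here).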
|
||||
|
||||
Question:
|
||||
- if encoding a map, what if map keys have spaces in them???
|
||||
Then they cannot be attributes or child elements. Error.
|
||||
|
||||
Options to consider adding later:
|
||||
- For attribute values, normalize by trimming beginning and ending white space,
|
||||
and converting every white space sequence to a single space.
|
||||
- ATTLIST restrictions are enforced.
|
||||
e.g. default value of xml:space, skipping xml:XYZ style attributes, etc.
|
||||
- Consider supporting NON-STRICT mode (e.g. to handle HTML parsing).
|
||||
Some elements e.g. br, hr, etc need not close and should be auto-closed
|
||||
... (see http://www.w3.org/TR/html4/loose.dtd)
|
||||
An expansive set of entities is pre-defined.
|
||||
- Have an easy way to create an HTML parser:
|
||||
add a HTML() method to XMLHandle, that will set Strict=false, specify AutoClose,
|
||||
and add HTML Entities to the list.
|
||||
- Support validating element/attribute XMLName before writing it.
|
||||
Keep this behind a flag, which is set to false by default (for performance).
|
||||
type XMLHandle struct {
|
||||
CheckName bool
|
||||
}
|
||||
|
||||
Misc:
|
||||
|
||||
ROADMAP (1 week):
|
||||
- build encoder (1 day)
|
||||
- build decoder (based off xmlParser) (1 day)
|
||||
- implement xmlParser (2 days).
|
||||
Look at encoding/xml for inspiration.
|
||||
- integrate and TEST (1 days)
|
||||
- write article and post it (1 day)
|
||||
|
||||
// ---------- MORE NOTES FROM 2017-11-30 ------------
|
||||
|
||||
when parsing
|
||||
- parse the attributes first
|
||||
- then parse the nodes
|
||||
|
||||
basically:
|
||||
- if encoding a field: we use the field name for the wrapper
|
||||
- if encoding a non-field, then just use the element type name
|
||||
|
||||
map[string]string ==> <map><key>abc</key><value>val</value></map>... or
|
||||
<map key="abc">val</map>... OR
|
||||
<key1>val1</key1><key2>val2</key2>... <- PREFERRED
|
||||
[]string ==> <string>v1</string><string>v2</string>...
|
||||
string v1 ==> <string>v1</string>
|
||||
bool true ==> <bool>true</bool>
|
||||
float 1.0 ==> <float>1.0</float>
|
||||
...
|
||||
|
||||
F1 map[string]string ==> <F1><key>abc</key><value>val</value></F1>... OR
|
||||
<F1 key="abc">val</F1>... OR
|
||||
<F1><abc>val</abc>...</F1> <- PREFERRED
|
||||
F2 []string ==> <F2>v1</F2><F2>v2</F2>...
|
||||
F3 bool ==> <F3>true</F3>
|
||||
...
|
||||
|
||||
- a scalar is encoded as:
|
||||
(value) of type T ==> <T><value/></T>
|
||||
(value) of field F ==> <F><value/></F>
|
||||
- A kv-pair is encoded as:
|
||||
(key,value) ==> <map><key><value/></key></map> OR <map key="value">
|
||||
(key,value) of field F ==> <F><key><value/></key></F> OR <F key="value">
|
||||
- A map or struct is just a list of kv-pairs
|
||||
- A list is encoded as sequences of same node e.g.
|
||||
<F1 key1="value11">
|
||||
<F1 key2="value12">
|
||||
<F2>value21</F2>
|
||||
<F2>value22</F2>
|
||||
- we may have to singularize the field name, when entering into xml,
|
||||
and pluralize them when decoding.
|
||||
- bi-directional encode->decode->encode is not a MUST.
|
||||
even encoding/xml cannot decode correctly what was encoded:
|
||||
|
||||
see https://play.golang.org/p/224V_nyhMS
|
||||
func main() {
|
||||
fmt.Println("Hello, playground")
|
||||
v := []interface{}{"hello", 1, true, nil, time.Now()}
|
||||
s, err := xml.Marshal(v)
|
||||
fmt.Printf("err: %v, \ns: %s\n", err, s)
|
||||
var v2 []interface{}
|
||||
err = xml.Unmarshal(s, &v2)
|
||||
fmt.Printf("err: %v, \nv2: %v\n", err, v2)
|
||||
type T struct {
|
||||
V []interface{}
|
||||
}
|
||||
v3 := T{V: v}
|
||||
s, err = xml.Marshal(v3)
|
||||
fmt.Printf("err: %v, \ns: %s\n", err, s)
|
||||
var v4 T
|
||||
err = xml.Unmarshal(s, &v4)
|
||||
fmt.Printf("err: %v, \nv4: %v\n", err, v4)
|
||||
}
|
||||
Output:
|
||||
err: <nil>,
|
||||
s: <string>hello</string><int>1</int><bool>true</bool><Time>2009-11-10T23:00:00Z</Time>
|
||||
err: <nil>,
|
||||
v2: [<nil>]
|
||||
err: <nil>,
|
||||
s: <T><V>hello</V><V>1</V><V>true</V><V>2009-11-10T23:00:00Z</V></T>
|
||||
err: <nil>,
|
||||
v4: {[<nil> <nil> <nil> <nil>]}
|
||||
-
|
||||
*/
|
||||
|
||||
// ----------- PARSER -------------------
|
||||
|
||||
type xmlTokenType uint8
|
||||
|
||||
const (
|
||||
_ xmlTokenType = iota << 1
|
||||
xmlTokenElemStart
|
||||
xmlTokenElemEnd
|
||||
xmlTokenAttrKey
|
||||
xmlTokenAttrVal
|
||||
xmlTokenText
|
||||
)
|
||||
|
||||
type xmlToken struct {
|
||||
Type xmlTokenType
|
||||
Value string
|
||||
Namespace string // blank for AttrVal and Text
|
||||
}
|
||||
|
||||
type xmlParser struct {
|
||||
r decReader
|
||||
toks []xmlToken // list of tokens.
|
||||
ptr int // ptr into the toks slice
|
||||
done bool // nothing else to parse. r now returns EOF.
|
||||
}
|
||||
|
||||
func (x *xmlParser) next() (t *xmlToken) {
|
||||
// once x.done, or x.ptr == len(x.toks) == 0, then return nil (to signify finish)
|
||||
if !x.done && len(x.toks) == 0 {
|
||||
x.nextTag()
|
||||
}
|
||||
// parses one element at a time (into possibly many tokens)
|
||||
if x.ptr < len(x.toks) {
|
||||
t = &(x.toks[x.ptr])
|
||||
x.ptr++
|
||||
if x.ptr == len(x.toks) {
|
||||
x.ptr = 0
|
||||
x.toks = x.toks[:0]
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// nextTag parses the next element and fills up toks.
|
||||
// It sets the done flag if/once EOF is reached.
|
||||
func (x *xmlParser) nextTag() {
|
||||
// TODO: implement.
|
||||
}
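// A decode driver draining the parser might look like this (sketch; nextTag above
// is still unimplemented, and p is a *xmlParser):
//
//	for t := p.next(); t != nil; t = p.next() {
//		switch t.Type {
//		case xmlTokenElemStart:
//			// open element t.Value (namespace in t.Namespace)
//		case xmlTokenText:
//			// append character data from t.Value
//		}
//	}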
|
||||
|
||||
// ----------- ENCODER -------------------
|
||||
|
||||
type xmlEncDriver struct {
|
||||
e *Encoder
|
||||
w encWriter
|
||||
h *XMLHandle
|
||||
b [64]byte // scratch
|
||||
bs []byte // scratch
|
||||
// s jsonStack
|
||||
noBuiltInTypes
|
||||
}
|
||||
|
||||
// ----------- DECODER -------------------
|
||||
|
||||
type xmlDecDriver struct {
|
||||
d *Decoder
|
||||
h *XMLHandle
|
||||
r decReader // *bytesDecReader decReader
|
||||
ct valueType // container type. one of unset, array or map.
|
||||
bstr [8]byte // scratch used for string \UXXX parsing
|
||||
b [64]byte // scratch
|
||||
|
||||
// wsSkipped bool // whitespace skipped
|
||||
|
||||
// s jsonStack
|
||||
|
||||
noBuiltInTypes
|
||||
}
|
||||
|
||||
// DecodeNaked will decode into an XMLNode
|
||||
|
||||
// XMLName is a value object representing a namespace-aware NAME
|
||||
type XMLName struct {
|
||||
Local string
|
||||
Space string
|
||||
}
|
||||
|
||||
// XMLNode represents a "union" of the different types of XML Nodes.
|
||||
// Only one of the fields (Text or Element) is set.
|
||||
type XMLNode struct {
|
||||
Element *Element
|
||||
Text string
|
||||
}
|
||||
|
||||
// XMLElement is a value object representing a fully-parsed XML element.
|
||||
type XMLElement struct {
|
||||
Name XMLName
|
||||
Attrs map[XMLName]string
|
||||
// Children is a list of child nodes, each an XMLNode wrapping either an element or text
|
||||
Children []XMLNode
|
||||
}
|
||||
|
||||
// ----------- HANDLE -------------------
|
||||
|
||||
type XMLHandle struct {
|
||||
BasicHandle
|
||||
textEncodingType
|
||||
|
||||
DefaultNS string
|
||||
NS map[string]string // ns URI to key, for encoding
|
||||
Entities map[string]string // entity representation to string, for encoding.
|
||||
}
|
||||
|
||||
func (h *XMLHandle) newEncDriver(e *Encoder) encDriver {
|
||||
return &xmlEncDriver{e: e, w: e.w, h: h}
|
||||
}
|
||||
|
||||
func (h *XMLHandle) newDecDriver(d *Decoder) decDriver {
|
||||
// d := xmlDecDriver{r: r.(*bytesDecReader), h: h}
|
||||
hd := xmlDecDriver{d: d, r: d.r, h: h}
|
||||
hd.n.bytes = d.b[:]
|
||||
return &hd
|
||||
}
|
||||
|
||||
func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
|
||||
return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext})
|
||||
}
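// Once the encoder/decoder drivers above are implemented, usage would follow the
// same pattern as the other handles in this package (sketch):
//
//	var h XMLHandle
//	enc := NewEncoder(w, &h) // w is an io.Writer
//	err := enc.Encode(v)
//
//	dec := NewDecoder(r, &h) // r is an io.Reader
//	err = dec.Decode(&v)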
|
||||
|
||||
var _ decDriver = (*xmlDecDriver)(nil)
|
||||
var _ encDriver = (*xmlEncDriver)(nil)
|
|
@ -0,0 +1,40 @@
|
|||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/ulikunitz/xz"
|
||||
)
|
||||
|
||||
func main() {
|
||||
const text = "The quick brown fox jumps over the lazy dog.\n"
|
||||
var buf bytes.Buffer
|
||||
// compress text
|
||||
w, err := xz.NewWriter(&buf)
|
||||
if err != nil {
|
||||
log.Fatalf("xz.NewWriter error %s", err)
|
||||
}
|
||||
if _, err := io.WriteString(w, text); err != nil {
|
||||
log.Fatalf("WriteString error %s", err)
|
||||
}
|
||||
if err := w.Close(); err != nil {
|
||||
log.Fatalf("w.Close error %s", err)
|
||||
}
|
||||
// decompress buffer and write output to stdout
|
||||
r, err := xz.NewReader(&buf)
|
||||
if err != nil {
|
||||
log.Fatalf("NewReader error %s", err)
|
||||
}
|
||||
if _, err = io.Copy(os.Stdout, r); err != nil {
|
||||
log.Fatalf("io.Copy error %s", err)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,78 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go.
|
||||
// This program must be run after mksyscall.go.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func writeASMFile(in string, fileName string, buildTags string) {
|
||||
trampolines := map[string]bool{}
|
||||
|
||||
var out bytes.Buffer
|
||||
|
||||
fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " "))
|
||||
fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n")
|
||||
fmt.Fprintf(&out, "\n")
|
||||
fmt.Fprintf(&out, "// +build %s\n", buildTags)
|
||||
fmt.Fprintf(&out, "\n")
|
||||
fmt.Fprintf(&out, "#include \"textflag.h\"\n")
|
||||
for _, line := range strings.Split(in, "\n") {
|
||||
if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") {
|
||||
continue
|
||||
}
|
||||
fn := line[5 : len(line)-13]
|
||||
if !trampolines[fn] {
|
||||
trampolines[fn] = true
|
||||
fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn)
|
||||
fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn)
|
||||
}
|
||||
}
|
||||
err := ioutil.WriteFile(fileName, out.Bytes(), 0644)
|
||||
if err != nil {
|
||||
log.Fatalf("can't write %s: %s", fileName, err)
|
||||
}
|
||||
}
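// For a scanned prototype such as "func libc_getpid_trampoline()", the loop above
// emits roughly the following assembly (illustrative; actual names come from the
// input files):
//
//	TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0
//		JMP	libc_getpid(SB)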
|
||||
|
||||
func main() {
|
||||
in1, err := ioutil.ReadFile("syscall_darwin.go")
|
||||
if err != nil {
|
||||
log.Fatalf("can't open syscall_darwin.go: %s", err)
|
||||
}
|
||||
arch := os.Args[1]
|
||||
in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch))
|
||||
if err != nil {
|
||||
log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err)
|
||||
}
|
||||
in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch))
|
||||
if err != nil {
|
||||
log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err)
|
||||
}
|
||||
in := string(in1) + string(in2) + string(in3)
|
||||
|
||||
writeASMFile(in, fmt.Sprintf("zsyscall_darwin_%s.s", arch), "go1.12")
|
||||
|
||||
in1, err = ioutil.ReadFile("syscall_darwin.1_13.go")
|
||||
if err != nil {
|
||||
log.Fatalf("can't open syscall_darwin.1_13.go: %s", err)
|
||||
}
|
||||
in2, err = ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.1_13.go", arch))
|
||||
if err != nil {
|
||||
log.Fatalf("can't open zsyscall_darwin_%s.1_13.go: %s", arch, err)
|
||||
}
|
||||
|
||||
in = string(in1) + string(in2)
|
||||
|
||||
writeASMFile(in, fmt.Sprintf("zsyscall_darwin_%s.1_13.s", arch), "go1.13")
|
||||
}
|
|
@ -0,0 +1,122 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// mkpost processes the output of cgo -godefs to
|
||||
// modify the generated types. It is used to clean up
|
||||
// the sys API in an architecture-specific manner.
|
||||
//
|
||||
// mkpost is run after cgo -godefs; see README.md.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Get the OS and architecture (using GOARCH_TARGET if it exists)
|
||||
goos := os.Getenv("GOOS")
|
||||
goarch := os.Getenv("GOARCH_TARGET")
|
||||
if goarch == "" {
|
||||
goarch = os.Getenv("GOARCH")
|
||||
}
|
||||
// Check that we are using the Docker-based build system if we should be.
|
||||
if goos == "linux" {
|
||||
if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
|
||||
os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n")
|
||||
os.Stderr.WriteString("See README.md\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
b, err := ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if goos == "aix" {
|
||||
// Replace type of Atim, Mtim and Ctim by Timespec in Stat_t
|
||||
// to avoid having both StTimespec and Timespec.
|
||||
sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`)
|
||||
b = sttimespec.ReplaceAll(b, []byte("Timespec"))
|
||||
}
|
||||
|
||||
// Intentionally export __val fields in Fsid and Sigset_t
|
||||
valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`)
|
||||
b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}"))
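// For example, the field "X__val [4]int32" inside Fsid becomes "Val [4]int32" (illustrative input).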
|
||||
|
||||
// Intentionally export __fds_bits field in FdSet
|
||||
fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`)
|
||||
b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}"))
|
||||
|
||||
// If we have empty Ptrace structs, we should delete them. Only s390x emits
|
||||
// nonempty Ptrace structs.
|
||||
ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`)
|
||||
b = ptraceRexexp.ReplaceAll(b, nil)
|
||||
|
||||
// Replace the control_regs union with a blank identifier for now.
|
||||
controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`)
|
||||
b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64"))
|
||||
|
||||
// Remove fields that are added by glibc
|
||||
// Note that this is unstable as the identifiers are private.
|
||||
removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`)
|
||||
b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
|
||||
|
||||
// Convert [65]int8 to [65]byte in Utsname members to simplify
|
||||
// conversion to string; see golang.org/issue/20753
|
||||
convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`)
|
||||
b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte"))
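// For example, "Nodename [65]int8" becomes "Nodename [65]byte" (illustrative input).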
|
||||
|
||||
// Convert [1024]int8 to [1024]byte in Ptmget members
|
||||
convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`)
|
||||
b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte"))
|
||||
|
||||
// Remove spare fields (e.g. in Statx_t)
|
||||
spareFieldsRegex := regexp.MustCompile(`X__spare\S*`)
|
||||
b = spareFieldsRegex.ReplaceAll(b, []byte("_"))
|
||||
|
||||
// Remove cgo padding fields
|
||||
removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`)
|
||||
b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_"))
|
||||
|
||||
// Remove padding, hidden, or unused fields
|
||||
removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`)
|
||||
b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
|
||||
|
||||
// Remove the first line of warning from cgo
|
||||
b = b[bytes.IndexByte(b, '\n')+1:]
|
||||
// Modify the command in the header to include:
|
||||
// mkpost, our own warning, and a build tag.
|
||||
replacement := fmt.Sprintf(`$1 | go run mkpost.go
|
||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
||||
|
||||
// +build %s,%s`, goarch, goos)
|
||||
cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`)
|
||||
b = cgoCommandRegex.ReplaceAll(b, []byte(replacement))
|
||||
|
||||
// Rename Stat_t time fields
|
||||
if goos == "freebsd" && goarch == "386" {
|
||||
// Hide Stat_t.[AMCB]tim_ext fields
|
||||
renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`)
|
||||
b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_"))
|
||||
}
|
||||
renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`)
|
||||
b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}"))
|
||||
|
||||
// gofmt
|
||||
b, err = format.Source(b)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
os.Stdout.Write(b)
|
||||
}
|
|
@ -0,0 +1,402 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
/*
|
||||
This program reads a file containing function prototypes
|
||||
(like syscall_darwin.go) and generates system call bodies.
|
||||
The prototypes are marked by lines beginning with "//sys"
|
||||
and read like func declarations if //sys is replaced by func, but:
|
||||
* The parameter lists must give a name for each argument.
|
||||
This includes return parameters.
|
||||
* The parameter lists must give a type for each argument:
|
||||
the (x, y, z int) shorthand is not allowed.
|
||||
* If the return parameter is an error number, it must be named errno.
|
||||
|
||||
A line beginning with //sysnb is like //sys, except that the
|
||||
goroutine will not be suspended during the execution of the system
|
||||
call. This must only be used for system calls which can never
|
||||
block, as otherwise the system call could cause all goroutines to
|
||||
hang.
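For example, a prototype like

	//sys	Mkdir(path string, mode uint32) (err error)

generates a Mkdir wrapper that converts its arguments to uintptrs and invokes
Syscall with the matching SYS_MKDIR number (sketch; the exact output is written
to the generated zsyscall_*.go files).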
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
b32 = flag.Bool("b32", false, "32bit big-endian")
|
||||
l32 = flag.Bool("l32", false, "32bit little-endian")
|
||||
plan9 = flag.Bool("plan9", false, "plan9")
|
||||
openbsd = flag.Bool("openbsd", false, "openbsd")
|
||||
netbsd = flag.Bool("netbsd", false, "netbsd")
|
||||
dragonfly = flag.Bool("dragonfly", false, "dragonfly")
|
||||
arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair
|
||||
tags = flag.String("tags", "", "build tags")
|
||||
filename = flag.String("output", "", "output file name (standard output if omitted)")
|
||||
)
|
||||
|
||||
// cmdLine returns this program's command-line arguments
|
||||
func cmdLine() string {
|
||||
return "go run mksyscall.go " + strings.Join(os.Args[1:], " ")
|
||||
}
|
||||
|
||||
// buildTags returns build tags
|
||||
func buildTags() string {
|
||||
return *tags
|
||||
}
|
||||
|
||||
// Param is function parameter
|
||||
type Param struct {
|
||||
Name string
|
||||
Type string
|
||||
}
|
||||
|
||||
// usage prints the program usage
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// parseParamList parses parameter list and returns a slice of parameters
|
||||
func parseParamList(list string) []string {
|
||||
list = strings.TrimSpace(list)
|
||||
if list == "" {
|
||||
return []string{}
|
||||
}
|
||||
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
|
||||
}
|
||||
|
||||
// parseParam splits a parameter into name and type
|
||||
func parseParam(p string) Param {
|
||||
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
|
||||
if ps == nil {
|
||||
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
|
||||
os.Exit(1)
|
||||
}
|
||||
return Param{ps[1], ps[2]}
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Get the OS and architecture (using GOARCH_TARGET if it exists)
|
||||
goos := os.Getenv("GOOS")
|
||||
if goos == "" {
|
||||
fmt.Fprintln(os.Stderr, "GOOS not defined in environment")
|
||||
os.Exit(1)
|
||||
}
|
||||
goarch := os.Getenv("GOARCH_TARGET")
|
||||
if goarch == "" {
|
||||
goarch = os.Getenv("GOARCH")
|
||||
}
|
||||
|
||||
// Check that we are using the Docker-based build system if we should
|
||||
if goos == "linux" {
|
||||
if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
|
||||
fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n")
|
||||
fmt.Fprintf(os.Stderr, "See README.md\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
if len(flag.Args()) <= 0 {
|
||||
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
|
||||
usage()
|
||||
}
|
||||
|
||||
endianness := ""
|
||||
if *b32 {
|
||||
endianness = "big-endian"
|
||||
} else if *l32 {
|
||||
endianness = "little-endian"
|
||||
}
|
||||
|
||||
libc := false
|
||||
if goos == "darwin" && (strings.Contains(buildTags(), ",go1.12") || strings.Contains(buildTags(), ",go1.13")) {
|
||||
libc = true
|
||||
}
|
||||
trampolines := map[string]bool{}
|
||||
|
||||
text := ""
|
||||
for _, path := range flag.Args() {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
s := bufio.NewScanner(file)
|
||||
for s.Scan() {
|
||||
t := s.Text()
|
||||
t = strings.TrimSpace(t)
|
||||
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
|
||||
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
|
||||
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Line must be of the form
|
||||
// func Open(path string, mode int, perm int) (fd int, errno error)
|
||||
// Split into name, in params, out params.
|
||||
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t)
|
||||
if f == nil {
|
||||
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
|
||||
os.Exit(1)
|
||||
}
|
||||
funct, inps, outps, sysname := f[2], f[3], f[4], f[5]
|
||||
|
||||
// ClockGettime doesn't have a syscall number on Darwin; only generate libc wrappers.
|
||||
if goos == "darwin" && !libc && funct == "ClockGettime" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Split argument lists on comma.
|
||||
in := parseParamList(inps)
|
||||
out := parseParamList(outps)
|
||||
|
||||
// Try in vain to keep people from editing this file.
|
||||
// The theory is that they jump into the middle of the file
|
||||
// without reading the header.
|
||||
text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
|
||||
|
||||
// Go function header.
|
||||
outDecl := ""
|
||||
if len(out) > 0 {
|
||||
outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", "))
|
||||
}
|
||||
text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl)
|
||||
|
||||
// Check if err return available
|
||||
errvar := ""
|
||||
for _, param := range out {
|
||||
p := parseParam(param)
|
||||
if p.Type == "error" {
|
||||
errvar = p.Name
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Prepare arguments to Syscall.
|
||||
var args []string
|
||||
n := 0
|
||||
for _, param := range in {
|
||||
p := parseParam(param)
|
||||
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
|
||||
args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
|
||||
} else if p.Type == "string" && errvar != "" {
|
||||
text += fmt.Sprintf("\tvar _p%d *byte\n", n)
|
||||
text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name)
|
||||
text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
|
||||
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
|
||||
n++
|
||||
} else if p.Type == "string" {
|
||||
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
|
||||
text += fmt.Sprintf("\tvar _p%d *byte\n", n)
|
||||
text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name)
|
||||
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
|
||||
n++
|
||||
} else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
|
||||
// Convert slice into pointer, length.
|
||||
// Have to be careful not to take address of &a[0] if len == 0:
|
||||
// pass dummy pointer in that case.
|
||||
// Used to pass nil, but some OSes or simulators reject write(fd, nil, 0).
|
||||
text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n)
|
||||
text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name)
|
||||
text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n)
|
||||
args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
|
||||
n++
|
||||
} else if p.Type == "int64" && (*openbsd || *netbsd) {
|
||||
args = append(args, "0")
|
||||
if endianness == "big-endian" {
|
||||
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
} else if endianness == "little-endian" {
|
||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
|
||||
} else {
|
||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
}
|
||||
} else if p.Type == "int64" && *dragonfly {
|
||||
if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil {
|
||||
args = append(args, "0")
|
||||
}
|
||||
if endianness == "big-endian" {
|
||||
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
} else if endianness == "little-endian" {
|
||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
|
||||
} else {
|
||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
}
|
||||
} else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" {
|
||||
if len(args)%2 == 1 && *arm {
|
||||
// arm abi specifies 64-bit argument uses
|
||||
// (even, odd) pair
|
||||
args = append(args, "0")
|
||||
}
|
||||
if endianness == "big-endian" {
|
||||
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
} else {
|
||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
|
||||
}
|
||||
} else {
|
||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
}
|
||||
}
|
||||
|
||||
// Determine which form to use; pad args with zeros.
|
||||
asm := "Syscall"
|
||||
if nonblock != nil {
|
||||
if errvar == "" && goos == "linux" {
|
||||
asm = "RawSyscallNoError"
|
||||
} else {
|
||||
asm = "RawSyscall"
|
||||
}
|
||||
} else {
|
||||
if errvar == "" && goos == "linux" {
|
||||
asm = "SyscallNoError"
|
||||
}
|
||||
}
|
||||
if len(args) <= 3 {
|
||||
for len(args) < 3 {
|
||||
args = append(args, "0")
|
||||
}
|
||||
} else if len(args) <= 6 {
|
||||
asm += "6"
|
||||
for len(args) < 6 {
|
||||
args = append(args, "0")
|
||||
}
|
||||
} else if len(args) <= 9 {
|
||||
asm += "9"
|
||||
for len(args) < 9 {
|
||||
args = append(args, "0")
|
||||
}
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct)
|
||||
}
|
||||
|
||||
// System call number.
|
||||
if sysname == "" {
|
||||
sysname = "SYS_" + funct
|
||||
sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
|
||||
sysname = strings.ToUpper(sysname)
|
||||
}
|
||||
|
||||
var libcFn string
|
||||
if libc {
|
||||
asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call
|
||||
sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_
|
||||
sysname = strings.ToLower(sysname) // lowercase
|
||||
libcFn = sysname
|
||||
sysname = "funcPC(libc_" + sysname + "_trampoline)"
|
||||
}
|
||||
|
||||
// Actual call.
|
||||
arglist := strings.Join(args, ", ")
|
||||
call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist)
|
||||
|
||||
// Assign return values.
|
||||
body := ""
|
||||
ret := []string{"_", "_", "_"}
|
||||
doErrno := false
|
||||
for i := 0; i < len(out); i++ {
|
||||
p := parseParam(out[i])
|
||||
reg := ""
|
||||
if p.Name == "err" && !*plan9 {
|
||||
reg = "e1"
|
||||
ret[2] = reg
|
||||
doErrno = true
|
||||
} else if p.Name == "err" && *plan9 {
|
||||
ret[0] = "r0"
|
||||
ret[2] = "e1"
|
||||
break
|
||||
} else {
|
||||
reg = fmt.Sprintf("r%d", i)
|
||||
ret[i] = reg
|
||||
}
|
||||
if p.Type == "bool" {
|
||||
reg = fmt.Sprintf("%s != 0", reg)
|
||||
}
|
||||
if p.Type == "int64" && endianness != "" {
|
||||
// 64-bit number in r1:r0 or r0:r1.
|
||||
if i+2 > len(out) {
|
||||
fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct)
|
||||
}
|
||||
if endianness == "big-endian" {
|
||||
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
|
||||
} else {
|
||||
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
|
||||
}
|
||||
ret[i] = fmt.Sprintf("r%d", i)
|
||||
ret[i+1] = fmt.Sprintf("r%d", i+1)
|
||||
}
|
||||
if reg != "e1" || *plan9 {
|
||||
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
|
||||
}
|
||||
}
|
||||
if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
|
||||
text += fmt.Sprintf("\t%s\n", call)
|
||||
} else {
|
||||
if errvar == "" && goos == "linux" {
|
||||
// raw syscall without error on Linux, see golang.org/issue/22924
|
||||
text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call)
|
||||
} else {
|
||||
text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
|
||||
}
|
||||
}
|
||||
text += body
|
||||
|
||||
if *plan9 && ret[2] == "e1" {
|
||||
text += "\tif int32(r0) == -1 {\n"
|
||||
text += "\t\terr = e1\n"
|
||||
text += "\t}\n"
|
||||
} else if doErrno {
|
||||
text += "\tif e1 != 0 {\n"
|
||||
text += "\t\terr = errnoErr(e1)\n"
|
||||
text += "\t}\n"
|
||||
}
|
||||
text += "\treturn\n"
|
||||
text += "}\n\n"
|
||||
|
||||
if libc && !trampolines[libcFn] {
|
||||
// some system calls share a trampoline, like read and readlen.
|
||||
trampolines[libcFn] = true
|
||||
// Declare assembly trampoline.
|
||||
text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn)
|
||||
// Assembly trampoline calls the libc_* function, which this magic
|
||||
// redirects to use the function from libSystem.
|
||||
text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn)
|
||||
text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn)
|
||||
text += "\n"
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
file.Close()
|
||||
}
|
||||
fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
|
||||
}
|
||||
|
||||
const srcTemplate = `// %s
|
||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
||||
|
||||
// +build %s
|
||||
|
||||
package unix
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var _ syscall.Errno
|
||||
|
||||
%s
|
||||
`
|
|
@ -0,0 +1,415 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
/*
|
||||
This program reads a file containing function prototypes
|
||||
(like syscall_aix.go) and generates system call bodies.
|
||||
The prototypes are marked by lines beginning with "//sys"
|
||||
and read like func declarations if //sys is replaced by func, but:
|
||||
* The parameter lists must give a name for each argument.
|
||||
This includes return parameters.
|
||||
* The parameter lists must give a type for each argument:
|
||||
the (x, y, z int) shorthand is not allowed.
|
||||
* If the return parameter is an error number, it must be named err.
|
||||
* If the Go func name needs to be different from its libc name,
|
||||
* or the function is not in libc, the name can be specified
|
||||
* at the end, after the "=" sign, like
|
||||
//sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
b32 = flag.Bool("b32", false, "32bit big-endian")
|
||||
l32 = flag.Bool("l32", false, "32bit little-endian")
|
||||
aix = flag.Bool("aix", false, "aix")
|
||||
tags = flag.String("tags", "", "build tags")
|
||||
)
|
||||
|
||||
// cmdLine returns this program's command-line arguments
|
||||
func cmdLine() string {
|
||||
return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ")
|
||||
}
|
||||
|
||||
// buildTags returns build tags
|
||||
func buildTags() string {
|
||||
return *tags
|
||||
}
|
||||
|
||||
// Param is function parameter
|
||||
type Param struct {
|
||||
Name string
|
||||
Type string
|
||||
}
|
||||
|
||||
// usage prints the program usage
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// parseParamList parses parameter list and returns a slice of parameters
|
||||
func parseParamList(list string) []string {
|
||||
list = strings.TrimSpace(list)
|
||||
if list == "" {
|
||||
return []string{}
|
||||
}
|
||||
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
|
||||
}
|
||||
|
||||
// parseParam splits a parameter into name and type
|
||||
func parseParam(p string) Param {
|
||||
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
|
||||
if ps == nil {
|
||||
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
|
||||
os.Exit(1)
|
||||
}
|
||||
return Param{ps[1], ps[2]}
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
if len(flag.Args()) <= 0 {
|
||||
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
|
||||
usage()
|
||||
}
|
||||
|
||||
endianness := ""
|
||||
if *b32 {
|
||||
endianness = "big-endian"
|
||||
} else if *l32 {
|
||||
endianness = "little-endian"
|
||||
}
|
||||
|
||||
pack := ""
|
||||
text := ""
|
||||
cExtern := "/*\n#include <stdint.h>\n#include <stddef.h>\n"
|
||||
for _, path := range flag.Args() {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
s := bufio.NewScanner(file)
|
||||
for s.Scan() {
|
||||
t := s.Text()
|
||||
t = strings.TrimSpace(t)
|
||||
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
|
||||
if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
|
||||
pack = p[1]
|
||||
}
|
||||
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
|
||||
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Line must be of the form
|
||||
// func Open(path string, mode int, perm int) (fd int, err error)
|
||||
// Split into name, in params, out params.
|
||||
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
|
||||
if f == nil {
|
||||
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
|
||||
os.Exit(1)
|
||||
}
|
||||
funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
|
||||
|
||||
// Split argument lists on comma.
|
||||
in := parseParamList(inps)
|
||||
out := parseParamList(outps)
|
||||
|
||||
inps = strings.Join(in, ", ")
|
||||
outps = strings.Join(out, ", ")
|
||||
|
||||
// Try in vain to keep people from editing this file.
|
||||
// The theory is that they jump into the middle of the file
|
||||
// without reading the header.
|
||||
text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
|
||||
|
||||
// Check if value return, err return available
|
||||
errvar := ""
|
||||
retvar := ""
|
||||
rettype := ""
|
||||
for _, param := range out {
|
||||
p := parseParam(param)
|
||||
if p.Type == "error" {
|
||||
errvar = p.Name
|
||||
} else {
|
||||
retvar = p.Name
|
||||
rettype = p.Type
|
||||
}
|
||||
}
|
||||
|
||||
// System call name.
|
||||
if sysname == "" {
|
||||
sysname = funct
|
||||
}
|
||||
sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
|
||||
sysname = strings.ToLower(sysname) // All libc functions are lowercase.
|
||||
|
||||
cRettype := ""
|
||||
if rettype == "unsafe.Pointer" {
|
||||
cRettype = "uintptr_t"
|
||||
} else if rettype == "uintptr" {
|
||||
cRettype = "uintptr_t"
|
||||
} else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
|
||||
cRettype = "uintptr_t"
|
||||
} else if rettype == "int" {
|
||||
cRettype = "int"
|
||||
} else if rettype == "int32" {
|
||||
cRettype = "int"
|
||||
} else if rettype == "int64" {
|
||||
cRettype = "long long"
|
||||
} else if rettype == "uint32" {
|
||||
cRettype = "unsigned int"
|
||||
} else if rettype == "uint64" {
|
||||
cRettype = "unsigned long long"
|
||||
} else {
|
||||
cRettype = "int"
|
||||
}
|
||||
if sysname == "exit" {
|
||||
cRettype = "void"
|
||||
}
|
||||
|
||||
// Change p.Types to c
|
||||
var cIn []string
|
||||
for _, param := range in {
|
||||
p := parseParam(param)
|
||||
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
|
||||
cIn = append(cIn, "uintptr_t")
|
||||
} else if p.Type == "string" {
|
||||
cIn = append(cIn, "uintptr_t")
|
||||
} else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
|
||||
cIn = append(cIn, "uintptr_t", "size_t")
|
||||
} else if p.Type == "unsafe.Pointer" {
|
||||
cIn = append(cIn, "uintptr_t")
|
||||
} else if p.Type == "uintptr" {
|
||||
cIn = append(cIn, "uintptr_t")
|
||||
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
|
||||
cIn = append(cIn, "uintptr_t")
|
||||
} else if p.Type == "int" {
|
||||
cIn = append(cIn, "int")
|
||||
} else if p.Type == "int32" {
|
||||
cIn = append(cIn, "int")
|
||||
} else if p.Type == "int64" {
|
||||
cIn = append(cIn, "long long")
|
||||
} else if p.Type == "uint32" {
|
||||
cIn = append(cIn, "unsigned int")
|
||||
} else if p.Type == "uint64" {
|
||||
cIn = append(cIn, "unsigned long long")
|
||||
} else {
|
||||
cIn = append(cIn, "int")
|
||||
}
|
||||
}
|
||||
|
||||
if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" {
|
||||
if sysname == "select" {
|
||||
// select is a keyword of Go. Its name is
|
||||
// changed to c_select.
|
||||
cExtern += "#define c_select select\n"
|
||||
}
|
||||
// Imports of system calls from libc
|
||||
cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
|
||||
cIn := strings.Join(cIn, ", ")
|
||||
cExtern += fmt.Sprintf("(%s);\n", cIn)
|
||||
}
|
||||
|
||||
// Shared object file name.
|
||||
if *aix {
|
||||
if modname == "" {
|
||||
modname = "libc.a/shr_64.o"
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
strconvfunc := "C.CString"
|
||||
|
||||
// Go function header.
|
||||
if outps != "" {
|
||||
outps = fmt.Sprintf(" (%s)", outps)
|
||||
}
|
||||
if text != "" {
|
||||
text += "\n"
|
||||
}
|
||||
|
||||
text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
|
||||
|
||||
// Prepare arguments to Syscall.
|
||||
var args []string
|
||||
n := 0
|
||||
argN := 0
|
||||
for _, param := range in {
|
||||
p := parseParam(param)
|
||||
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
|
||||
args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))")
|
||||
} else if p.Type == "string" && errvar != "" {
|
||||
text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
|
||||
args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
|
||||
n++
|
||||
} else if p.Type == "string" {
|
||||
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
|
||||
text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
|
||||
args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
|
||||
n++
|
||||
} else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
|
||||
// Convert slice into pointer, length.
|
||||
// Have to be careful not to take address of &a[0] if len == 0:
|
||||
// pass nil in that case.
|
||||
text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
|
||||
text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
|
||||
args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n))
|
||||
n++
|
||||
text += fmt.Sprintf("\tvar _p%d int\n", n)
|
||||
text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name)
|
||||
args = append(args, fmt.Sprintf("C.size_t(_p%d)", n))
|
||||
n++
|
||||
} else if p.Type == "int64" && endianness != "" {
|
||||
if endianness == "big-endian" {
|
||||
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
} else {
|
||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
|
||||
}
|
||||
n++
|
||||
} else if p.Type == "bool" {
|
||||
text += fmt.Sprintf("\tvar _p%d uint32\n", n)
|
||||
text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
|
||||
args = append(args, fmt.Sprintf("_p%d", n))
|
||||
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
|
||||
args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
|
||||
} else if p.Type == "unsafe.Pointer" {
|
||||
args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
|
||||
} else if p.Type == "int" {
|
||||
if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) {
|
||||
args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name))
|
||||
} else if argN == 0 && funct == "fcntl" {
|
||||
args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
|
||||
} else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) {
|
||||
args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
|
||||
} else {
|
||||
args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
|
||||
}
|
||||
} else if p.Type == "int32" {
|
||||
args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
|
||||
} else if p.Type == "int64" {
|
||||
args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name))
|
||||
} else if p.Type == "uint32" {
|
||||
args = append(args, fmt.Sprintf("C.uint(%s)", p.Name))
|
||||
} else if p.Type == "uint64" {
|
||||
args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name))
|
||||
} else if p.Type == "uintptr" {
|
||||
args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
|
||||
} else {
|
||||
args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
|
||||
}
|
||||
argN++
|
||||
}
|
||||
|
||||
// Actual call.
|
||||
arglist := strings.Join(args, ", ")
|
||||
call := ""
|
||||
if sysname == "exit" {
|
||||
if errvar != "" {
|
||||
call += "er :="
|
||||
} else {
|
||||
call += ""
|
||||
}
|
||||
} else if errvar != "" {
|
||||
call += "r0,er :="
|
||||
} else if retvar != "" {
|
||||
call += "r0,_ :="
|
||||
} else {
|
||||
call += ""
|
||||
}
|
||||
if sysname == "select" {
|
||||
// select is a keyword of Go. Its name is
|
||||
// changed to c_select.
|
||||
call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist)
|
||||
} else {
|
||||
call += fmt.Sprintf("C.%s(%s)", sysname, arglist)
|
||||
}
|
||||
|
||||
// Assign return values.
|
||||
body := ""
|
||||
for i := 0; i < len(out); i++ {
|
||||
p := parseParam(out[i])
|
||||
reg := ""
|
||||
if p.Name == "err" {
|
||||
reg = "e1"
|
||||
} else {
|
||||
reg = "r0"
|
||||
}
|
||||
if reg != "e1" {
|
||||
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
|
||||
}
|
||||
}
|
||||
|
||||
// verify return
|
||||
if sysname != "exit" && errvar != "" {
|
||||
if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil {
|
||||
body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n"
|
||||
body += fmt.Sprintf("\t\t%s = er\n", errvar)
|
||||
body += "\t}\n"
|
||||
} else {
|
||||
body += "\tif (r0 ==-1 && er != nil) {\n"
|
||||
body += fmt.Sprintf("\t\t%s = er\n", errvar)
|
||||
body += "\t}\n"
|
||||
}
|
||||
} else if errvar != "" {
|
||||
body += "\tif (er != nil) {\n"
|
||||
body += fmt.Sprintf("\t\t%s = er\n", errvar)
|
||||
body += "\t}\n"
|
||||
}
|
||||
|
||||
text += fmt.Sprintf("\t%s\n", call)
|
||||
text += body
|
||||
|
||||
text += "\treturn\n"
|
||||
text += "}\n"
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
file.Close()
|
||||
}
|
||||
imp := ""
|
||||
if pack != "unix" {
|
||||
imp = "import \"golang.org/x/sys/unix\"\n"
|
||||
|
||||
}
|
||||
fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text)
|
||||
}
|
||||
|
||||
const srcTemplate = `// %s
|
||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
||||
|
||||
// +build %s
|
||||
|
||||
package %s
|
||||
|
||||
|
||||
%s
|
||||
*/
|
||||
import "C"
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
|
||||
%s
|
||||
|
||||
%s
|
||||
`
|
|
@ -0,0 +1,614 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
/*
|
||||
This program reads a file containing function prototypes
|
||||
(like syscall_aix.go) and generates system call bodies.
|
||||
The prototypes are marked by lines beginning with "//sys"
|
||||
and read like func declarations if //sys is replaced by func, but:
|
||||
* The parameter lists must give a name for each argument.
|
||||
This includes return parameters.
|
||||
* The parameter lists must give a type for each argument:
|
||||
the (x, y, z int) shorthand is not allowed.
|
||||
* If the return parameter is an error number, it must be named err.
|
||||
* If the Go func name needs to be different from its libc name,
|
||||
* or the function is not in libc, the name can be specified
|
||||
* at the end, after the "=" sign, like
|
||||
//sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
|
||||
|
||||
|
||||
This program will generate three files and handle both gc and gccgo implementations:
|
||||
- zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation)
|
||||
- zsyscall_aix_ppc64_gc.go: gc part with //go:cgo_import_dynamic and a call to syscall6
|
||||
- zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type.
|
||||
|
||||
The generated code looks like this
|
||||
|
||||
zsyscall_aix_ppc64.go
|
||||
func asyscall(...) (n int, err error) {
|
||||
// Pointer Creation
|
||||
r1, e1 := callasyscall(...)
|
||||
// Type Conversion
|
||||
// Error Handler
|
||||
return
|
||||
}
|
||||
|
||||
zsyscall_aix_ppc64_gc.go
|
||||
//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o"
|
||||
//go:linkname libc_asyscall libc_asyscall
|
||||
var asyscall syscallFunc
|
||||
|
||||
func callasyscall(...) (r1 uintptr, e1 Errno) {
|
||||
r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... )
|
||||
return
|
||||
}
|
||||
|
||||
zsyscall_aix_ppc64_gccgo.go
|
||||
|
||||
// int asyscall(...)
|
||||
|
||||
import "C"
|
||||
|
||||
func callasyscall(...) (r1 uintptr, e1 Errno) {
|
||||
r1 = uintptr(C.asyscall(...))
|
||||
e1 = syscall.GetErrno()
|
||||
return
|
||||
}
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
b32 = flag.Bool("b32", false, "32bit big-endian")
|
||||
l32 = flag.Bool("l32", false, "32bit little-endian")
|
||||
aix = flag.Bool("aix", false, "aix")
|
||||
tags = flag.String("tags", "", "build tags")
|
||||
)
|
||||
|
||||
// cmdLine returns this program's command-line arguments
|
||||
func cmdLine() string {
|
||||
return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ")
|
||||
}
|
||||
|
||||
// buildTags returns build tags
|
||||
func buildTags() string {
|
||||
return *tags
|
||||
}
|
||||
|
||||
// Param is function parameter
|
||||
type Param struct {
|
||||
Name string
|
||||
Type string
|
||||
}
|
||||
|
||||
// usage prints the program usage
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// parseParamList parses parameter list and returns a slice of parameters
|
||||
func parseParamList(list string) []string {
|
||||
list = strings.TrimSpace(list)
|
||||
if list == "" {
|
||||
return []string{}
|
||||
}
|
||||
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
|
||||
}
|
||||
|
||||
// parseParam splits a parameter into name and type
|
||||
func parseParam(p string) Param {
|
||||
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
|
||||
if ps == nil {
|
||||
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
|
||||
os.Exit(1)
|
||||
}
|
||||
return Param{ps[1], ps[2]}
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
if len(flag.Args()) <= 0 {
|
||||
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
|
||||
usage()
|
||||
}
|
||||
|
||||
endianness := ""
|
||||
if *b32 {
|
||||
endianness = "big-endian"
|
||||
} else if *l32 {
|
||||
endianness = "little-endian"
|
||||
}
|
||||
|
||||
pack := ""
|
||||
// GCCGO
|
||||
textgccgo := ""
|
||||
cExtern := "/*\n#include <stdint.h>\n"
|
||||
// GC
|
||||
textgc := ""
|
||||
dynimports := ""
|
||||
linknames := ""
|
||||
var vars []string
|
||||
// COMMON
|
||||
textcommon := ""
|
||||
for _, path := range flag.Args() {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
s := bufio.NewScanner(file)
|
||||
for s.Scan() {
|
||||
t := s.Text()
|
||||
t = strings.TrimSpace(t)
|
||||
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
|
||||
if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
|
||||
pack = p[1]
|
||||
}
|
||||
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
|
||||
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Line must be of the form
|
||||
// func Open(path string, mode int, perm int) (fd int, err error)
|
||||
// Split into name, in params, out params.
|
||||
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
|
||||
if f == nil {
|
||||
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
|
||||
os.Exit(1)
|
||||
}
|
||||
funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
|
||||
|
||||
// Split argument lists on comma.
|
||||
in := parseParamList(inps)
|
||||
out := parseParamList(outps)
|
||||
|
||||
inps = strings.Join(in, ", ")
|
||||
outps = strings.Join(out, ", ")
|
||||
|
||||
if sysname == "" {
|
||||
sysname = funct
|
||||
}
|
||||
|
||||
onlyCommon := false
|
||||
if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" {
|
||||
// This function calls another syscall which is already implemented.
|
||||
// Therefore, the gc and gccgo parts must not be generated.
|
||||
onlyCommon = true
|
||||
}
|
||||
|
||||
// Try in vain to keep people from editing this file.
|
||||
// The theory is that they jump into the middle of the file
|
||||
// without reading the header.
|
||||
|
||||
textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
|
||||
if !onlyCommon {
|
||||
textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
|
||||
textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
|
||||
}
|
||||
|
||||
// Check if value return, err return available
|
||||
errvar := ""
|
||||
rettype := ""
|
||||
for _, param := range out {
|
||||
p := parseParam(param)
|
||||
if p.Type == "error" {
|
||||
errvar = p.Name
|
||||
} else {
|
||||
rettype = p.Type
|
||||
}
|
||||
}
|
||||
|
||||
sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
|
||||
sysname = strings.ToLower(sysname) // All libc functions are lowercase.
|
||||
|
||||
// GCCGO Prototype return type
|
||||
cRettype := ""
|
||||
if rettype == "unsafe.Pointer" {
|
||||
cRettype = "uintptr_t"
|
||||
} else if rettype == "uintptr" {
|
||||
cRettype = "uintptr_t"
|
||||
} else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
|
||||
cRettype = "uintptr_t"
|
||||
} else if rettype == "int" {
|
||||
cRettype = "int"
|
||||
} else if rettype == "int32" {
|
||||
cRettype = "int"
|
||||
} else if rettype == "int64" {
|
||||
cRettype = "long long"
|
||||
} else if rettype == "uint32" {
|
||||
cRettype = "unsigned int"
|
||||
} else if rettype == "uint64" {
|
||||
cRettype = "unsigned long long"
|
||||
} else {
|
||||
cRettype = "int"
|
||||
}
|
||||
if sysname == "exit" {
|
||||
cRettype = "void"
|
||||
}
|
||||
|
||||
// GCCGO Prototype arguments type
|
||||
var cIn []string
|
||||
for i, param := range in {
|
||||
p := parseParam(param)
|
||||
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
|
||||
cIn = append(cIn, "uintptr_t")
|
||||
} else if p.Type == "string" {
|
||||
cIn = append(cIn, "uintptr_t")
|
||||
} else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
|
||||
cIn = append(cIn, "uintptr_t", "size_t")
|
||||
} else if p.Type == "unsafe.Pointer" {
|
||||
cIn = append(cIn, "uintptr_t")
|
||||
} else if p.Type == "uintptr" {
|
||||
cIn = append(cIn, "uintptr_t")
|
||||
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
|
||||
cIn = append(cIn, "uintptr_t")
|
||||
} else if p.Type == "int" {
|
||||
if (i == 0 || i == 2) && funct == "fcntl" {
|
||||
// These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
|
||||
cIn = append(cIn, "uintptr_t")
|
||||
} else {
|
||||
cIn = append(cIn, "int")
|
||||
}
|
||||
|
||||
} else if p.Type == "int32" {
|
||||
cIn = append(cIn, "int")
|
||||
} else if p.Type == "int64" {
|
||||
cIn = append(cIn, "long long")
|
||||
} else if p.Type == "uint32" {
|
||||
cIn = append(cIn, "unsigned int")
|
||||
} else if p.Type == "uint64" {
|
||||
cIn = append(cIn, "unsigned long long")
|
||||
} else {
|
||||
cIn = append(cIn, "int")
|
||||
}
|
||||
}
|
||||
|
||||
if !onlyCommon {
|
||||
// GCCGO Prototype Generation
|
||||
// Imports of system calls from libc
|
||||
if sysname == "select" {
|
||||
// select is a keyword of Go. Its name is
|
||||
// changed to c_select.
|
||||
cExtern += "#define c_select select\n"
|
||||
}
|
||||
cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
|
||||
cIn := strings.Join(cIn, ", ")
|
||||
cExtern += fmt.Sprintf("(%s);\n", cIn)
|
||||
}
|
||||
// GC Library name
|
||||
if modname == "" {
|
||||
modname = "libc.a/shr_64.o"
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct)
|
||||
os.Exit(1)
|
||||
}
|
||||
sysvarname := fmt.Sprintf("libc_%s", sysname)
|
||||
|
||||
if !onlyCommon {
|
||||
// GC Runtime import of function to allow cross-platform builds.
|
||||
dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname)
|
||||
// GC Link symbol to proc address variable.
|
||||
linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname)
|
||||
// GC Library proc address variable.
|
||||
vars = append(vars, sysvarname)
|
||||
}
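// (Illustrative sketch, assuming a hypothetical //sys entry named "open": the
// block above emits one pair of directives per system call, of the form
//    //go:cgo_import_dynamic libc_open open "libc.a/shr_64.o"
//    //go:linkname libc_open libc_open
// so the gc toolchain can resolve the libc symbol when linking.)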
|
||||
|
||||
strconvfunc := "BytePtrFromString"
|
||||
strconvtype := "*byte"
|
||||
|
||||
// Go function header.
|
||||
if outps != "" {
|
||||
outps = fmt.Sprintf(" (%s)", outps)
|
||||
}
|
||||
if textcommon != "" {
|
||||
textcommon += "\n"
|
||||
}
|
||||
|
||||
textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
|
||||
|
||||
// Prepare arguments to call.
|
||||
var argscommon []string // Arguments in the common part
|
||||
var argscall []string // Arguments for call prototype
|
||||
var argsgc []string // Arguments for gc call (with syscall6)
|
||||
var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall)
|
||||
n := 0
|
||||
argN := 0
|
||||
for _, param := range in {
|
||||
p := parseParam(param)
|
||||
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
|
||||
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name))
|
||||
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
|
||||
argsgc = append(argsgc, p.Name)
|
||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
|
||||
} else if p.Type == "string" && errvar != "" {
|
||||
textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
|
||||
textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
|
||||
textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
|
||||
|
||||
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
|
||||
argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n))
|
||||
argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
|
||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
|
||||
n++
|
||||
} else if p.Type == "string" {
|
||||
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
|
||||
textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
|
||||
textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
|
||||
textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
|
||||
|
||||
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
|
||||
argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n))
|
||||
argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
|
||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
|
||||
n++
|
||||
} else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
|
||||
// Convert slice into pointer, length.
|
||||
// Have to be careful not to take address of &a[0] if len == 0:
|
||||
// pass nil in that case.
|
||||
textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
|
||||
textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
|
||||
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name))
|
||||
argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n))
|
||||
argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n))
|
||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n))
|
||||
n++
|
||||
} else if p.Type == "int64" && endianness != "" {
|
||||
fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n")
|
||||
} else if p.Type == "bool" {
|
||||
fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. Case not yet implemented\n")
|
||||
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" {
|
||||
argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
|
||||
argsgc = append(argsgc, p.Name)
|
||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
|
||||
} else if p.Type == "int" {
|
||||
if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) {
|
||||
// These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
|
||||
argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
|
||||
argsgc = append(argsgc, p.Name)
|
||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
|
||||
|
||||
} else {
|
||||
argscommon = append(argscommon, p.Name)
|
||||
argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
|
||||
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
|
||||
}
|
||||
} else if p.Type == "int32" {
|
||||
argscommon = append(argscommon, p.Name)
|
||||
argscall = append(argscall, fmt.Sprintf("%s int32", p.Name))
|
||||
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
|
||||
} else if p.Type == "int64" {
|
||||
argscommon = append(argscommon, p.Name)
|
||||
argscall = append(argscall, fmt.Sprintf("%s int64", p.Name))
|
||||
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name))
|
||||
} else if p.Type == "uint32" {
|
||||
argscommon = append(argscommon, p.Name)
|
||||
argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name))
|
||||
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name))
|
||||
} else if p.Type == "uint64" {
|
||||
argscommon = append(argscommon, p.Name)
|
||||
argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name))
|
||||
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name))
|
||||
} else if p.Type == "uintptr" {
|
||||
argscommon = append(argscommon, p.Name)
|
||||
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
|
||||
argsgc = append(argsgc, p.Name)
|
||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
|
||||
} else {
|
||||
argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name))
|
||||
argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
|
||||
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
|
||||
}
|
||||
argN++
|
||||
}
|
||||
nargs := len(argsgc)
|
||||
|
||||
// COMMON function generation
|
||||
argscommonlist := strings.Join(argscommon, ", ")
|
||||
callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist)
|
||||
ret := []string{"_", "_"}
|
||||
body := ""
|
||||
doErrno := false
|
||||
for i := 0; i < len(out); i++ {
|
||||
p := parseParam(out[i])
|
||||
reg := ""
|
||||
if p.Name == "err" {
|
||||
reg = "e1"
|
||||
ret[1] = reg
|
||||
doErrno = true
|
||||
} else {
|
||||
reg = "r0"
|
||||
ret[0] = reg
|
||||
}
|
||||
if p.Type == "bool" {
|
||||
reg = fmt.Sprintf("%s != 0", reg)
|
||||
}
|
||||
if reg != "e1" {
|
||||
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
|
||||
}
|
||||
}
|
||||
if ret[0] == "_" && ret[1] == "_" {
|
||||
textcommon += fmt.Sprintf("\t%s\n", callcommon)
|
||||
} else {
|
||||
textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon)
|
||||
}
|
||||
textcommon += body
|
||||
|
||||
if doErrno {
|
||||
textcommon += "\tif e1 != 0 {\n"
|
||||
textcommon += "\t\terr = errnoErr(e1)\n"
|
||||
textcommon += "\t}\n"
|
||||
}
|
||||
textcommon += "\treturn\n"
|
||||
textcommon += "}\n"
|
||||
|
||||
if onlyCommon {
|
||||
continue
|
||||
}
|
||||
|
||||
// CALL Prototype
|
||||
callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", "))
|
||||
|
||||
// GC function generation
|
||||
asm := "syscall6"
|
||||
if nonblock != nil {
|
||||
asm = "rawSyscall6"
|
||||
}
|
||||
|
||||
if len(argsgc) <= 6 {
|
||||
for len(argsgc) < 6 {
|
||||
argsgc = append(argsgc, "0")
|
||||
}
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct)
|
||||
os.Exit(1)
|
||||
}
|
||||
argsgclist := strings.Join(argsgc, ", ")
|
||||
callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist)
|
||||
|
||||
textgc += callProto
|
||||
textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc)
|
||||
textgc += "\treturn\n}\n"
|
||||
|
||||
// GCCGO function generation
|
||||
argsgccgolist := strings.Join(argsgccgo, ", ")
|
||||
var callgccgo string
|
||||
if sysname == "select" {
|
||||
// select is a keyword of Go. Its name is
|
||||
// changed to c_select.
|
||||
callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist)
|
||||
} else {
|
||||
callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist)
|
||||
}
|
||||
textgccgo += callProto
|
||||
textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo)
|
||||
textgccgo += "\te1 = syscall.GetErrno()\n"
|
||||
textgccgo += "\treturn\n}\n"
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
file.Close()
|
||||
}
|
||||
imp := ""
|
||||
if pack != "unix" {
|
||||
imp = "import \"golang.org/x/sys/unix\"\n"
|
||||
|
||||
}
|
||||
|
||||
// Print zsyscall_aix_ppc64.go
|
||||
err := ioutil.WriteFile("zsyscall_aix_ppc64.go",
|
||||
[]byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)),
|
||||
0644)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Print zsyscall_aix_ppc64_gc.go
|
||||
vardecls := "\t" + strings.Join(vars, ",\n\t")
|
||||
vardecls += " syscallFunc"
|
||||
err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go",
|
||||
[]byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)),
|
||||
0644)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Print zsyscall_aix_ppc64_gccgo.go
|
||||
err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go",
|
||||
[]byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)),
|
||||
0644)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
const srcTemplate1 = `// %s
|
||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
||||
|
||||
// +build %s
|
||||
|
||||
package %s
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
|
||||
%s
|
||||
|
||||
%s
|
||||
`
|
||||
const srcTemplate2 = `// %s
|
||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
||||
|
||||
// +build %s
|
||||
// +build !gccgo
|
||||
|
||||
package %s
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
%s
|
||||
%s
|
||||
%s
|
||||
type syscallFunc uintptr
|
||||
|
||||
var (
|
||||
%s
|
||||
)
|
||||
|
||||
// Implemented in runtime/syscall_aix.go.
|
||||
func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
|
||||
func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
|
||||
|
||||
%s
|
||||
`
|
||||
const srcTemplate3 = `// %s
|
||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
||||
|
||||
// +build %s
|
||||
// +build gccgo
|
||||
|
||||
package %s
|
||||
|
||||
%s
|
||||
*/
|
||||
import "C"
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
|
||||
%s
|
||||
|
||||
%s
|
||||
`
@@ -0,0 +1,335 @@
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
/*
|
||||
This program reads a file containing function prototypes
|
||||
(like syscall_solaris.go) and generates system call bodies.
|
||||
The prototypes are marked by lines beginning with "//sys"
|
||||
and read like func declarations if //sys is replaced by func, but:
|
||||
* The parameter lists must give a name for each argument.
|
||||
This includes return parameters.
|
||||
* The parameter lists must give a type for each argument:
|
||||
the (x, y, z int) shorthand is not allowed.
|
||||
* If the return parameter is an error number, it must be named err.
|
||||
* If go func name needs to be different than its libc name,
|
||||
* or the function is not in libc, name could be specified
|
||||
* at the end, after "=" sign, like
|
||||
//sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
|
||||
*/
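// For illustration only (hypothetical declaration, not part of this program's
// input): a line such as
//    //sys	Getcwd(buf []byte) (err error)
// makes the generator emit a Getcwd wrapper that passes buf as a pointer/length
// pair to the libc symbol loaded through procGetcwd via sysvicall6, and assigns
// any non-zero errno to err.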
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
b32 = flag.Bool("b32", false, "32bit big-endian")
|
||||
l32 = flag.Bool("l32", false, "32bit little-endian")
|
||||
tags = flag.String("tags", "", "build tags")
|
||||
)
|
||||
|
||||
// cmdLine returns this program's command-line arguments
|
||||
func cmdLine() string {
|
||||
return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ")
|
||||
}
|
||||
|
||||
// buildTags returns build tags
|
||||
func buildTags() string {
|
||||
return *tags
|
||||
}
|
||||
|
||||
// Param is function parameter
|
||||
type Param struct {
|
||||
Name string
|
||||
Type string
|
||||
}
|
||||
|
||||
// usage prints the program usage
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// parseParamList parses parameter list and returns a slice of parameters
|
||||
func parseParamList(list string) []string {
|
||||
list = strings.TrimSpace(list)
|
||||
if list == "" {
|
||||
return []string{}
|
||||
}
|
||||
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
|
||||
}
|
||||
|
||||
// parseParam splits a parameter into name and type
|
||||
func parseParam(p string) Param {
|
||||
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
|
||||
if ps == nil {
|
||||
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
|
||||
os.Exit(1)
|
||||
}
|
||||
return Param{ps[1], ps[2]}
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
if len(flag.Args()) <= 0 {
|
||||
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
|
||||
usage()
|
||||
}
|
||||
|
||||
endianness := ""
|
||||
if *b32 {
|
||||
endianness = "big-endian"
|
||||
} else if *l32 {
|
||||
endianness = "little-endian"
|
||||
}
|
||||
|
||||
pack := ""
|
||||
text := ""
|
||||
dynimports := ""
|
||||
linknames := ""
|
||||
var vars []string
|
||||
for _, path := range flag.Args() {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
s := bufio.NewScanner(file)
|
||||
for s.Scan() {
|
||||
t := s.Text()
|
||||
t = strings.TrimSpace(t)
|
||||
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
|
||||
if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
|
||||
pack = p[1]
|
||||
}
|
||||
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
|
||||
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Line must be of the form
|
||||
// func Open(path string, mode int, perm int) (fd int, err error)
|
||||
// Split into name, in params, out params.
|
||||
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
|
||||
if f == nil {
|
||||
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
|
||||
os.Exit(1)
|
||||
}
|
||||
funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
|
||||
|
||||
// Split argument lists on comma.
|
||||
in := parseParamList(inps)
|
||||
out := parseParamList(outps)
|
||||
|
||||
inps = strings.Join(in, ", ")
|
||||
outps = strings.Join(out, ", ")
|
||||
|
||||
// Try in vain to keep people from editing this file.
|
||||
// The theory is that they jump into the middle of the file
|
||||
// without reading the header.
|
||||
text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
|
||||
|
||||
// Shared object (.so) file name.
|
||||
if modname == "" {
|
||||
modname = "libc"
|
||||
}
|
||||
|
||||
// System call name.
|
||||
if sysname == "" {
|
||||
sysname = funct
|
||||
}
|
||||
|
||||
// System call pointer variable name.
|
||||
sysvarname := fmt.Sprintf("proc%s", sysname)
|
||||
|
||||
strconvfunc := "BytePtrFromString"
|
||||
strconvtype := "*byte"
|
||||
|
||||
sysname = strings.ToLower(sysname) // All libc functions are lowercase.
|
||||
|
||||
// Runtime import of function to allow cross-platform builds.
|
||||
dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname)
|
||||
// Link symbol to proc address variable.
|
||||
linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname)
|
||||
// Library proc address variable.
|
||||
vars = append(vars, sysvarname)
|
||||
|
||||
// Go function header.
|
||||
outlist := strings.Join(out, ", ")
|
||||
if outlist != "" {
|
||||
outlist = fmt.Sprintf(" (%s)", outlist)
|
||||
}
|
||||
if text != "" {
|
||||
text += "\n"
|
||||
}
|
||||
text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist)
|
||||
|
||||
// Check if err return available
|
||||
errvar := ""
|
||||
for _, param := range out {
|
||||
p := parseParam(param)
|
||||
if p.Type == "error" {
|
||||
errvar = p.Name
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Prepare arguments to Syscall.
|
||||
var args []string
|
||||
n := 0
|
||||
for _, param := range in {
|
||||
p := parseParam(param)
|
||||
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
|
||||
args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
|
||||
} else if p.Type == "string" && errvar != "" {
|
||||
text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
|
||||
text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
|
||||
text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
|
||||
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
|
||||
n++
|
||||
} else if p.Type == "string" {
|
||||
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
|
||||
text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
|
||||
text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name)
|
||||
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
|
||||
n++
|
||||
} else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil {
|
||||
// Convert slice into pointer, length.
|
||||
// Have to be careful not to take address of &a[0] if len == 0:
|
||||
// pass nil in that case.
|
||||
text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1])
|
||||
text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
|
||||
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
|
||||
n++
|
||||
} else if p.Type == "int64" && endianness != "" {
|
||||
if endianness == "big-endian" {
|
||||
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
} else {
|
||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
|
||||
}
|
||||
} else if p.Type == "bool" {
|
||||
text += fmt.Sprintf("\tvar _p%d uint32\n", n)
|
||||
text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
|
||||
args = append(args, fmt.Sprintf("uintptr(_p%d)", n))
|
||||
n++
|
||||
} else {
|
||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
|
||||
}
|
||||
}
|
||||
nargs := len(args)
|
||||
|
||||
// Determine which form to use; pad args with zeros.
|
||||
asm := "sysvicall6"
|
||||
if nonblock != nil {
|
||||
asm = "rawSysvicall6"
|
||||
}
|
||||
if len(args) <= 6 {
|
||||
for len(args) < 6 {
|
||||
args = append(args, "0")
|
||||
}
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Actual call.
|
||||
arglist := strings.Join(args, ", ")
|
||||
call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist)
|
||||
|
||||
// Assign return values.
|
||||
body := ""
|
||||
ret := []string{"_", "_", "_"}
|
||||
doErrno := false
|
||||
for i := 0; i < len(out); i++ {
|
||||
p := parseParam(out[i])
|
||||
reg := ""
|
||||
if p.Name == "err" {
|
||||
reg = "e1"
|
||||
ret[2] = reg
|
||||
doErrno = true
|
||||
} else {
|
||||
reg = fmt.Sprintf("r%d", i)
|
||||
ret[i] = reg
|
||||
}
|
||||
if p.Type == "bool" {
|
||||
reg = fmt.Sprintf("%d != 0", reg)
|
||||
}
|
||||
if p.Type == "int64" && endianness != "" {
|
||||
// 64-bit number in r1:r0 or r0:r1.
|
||||
if i+2 > len(out) {
|
||||
fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path)
|
||||
os.Exit(1)
|
||||
}
|
||||
if endianness == "big-endian" {
|
||||
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
|
||||
} else {
|
||||
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
|
||||
}
|
||||
ret[i] = fmt.Sprintf("r%d", i)
|
||||
ret[i+1] = fmt.Sprintf("r%d", i+1)
|
||||
}
|
||||
if reg != "e1" {
|
||||
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
|
||||
}
|
||||
}
|
||||
if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
|
||||
text += fmt.Sprintf("\t%s\n", call)
|
||||
} else {
|
||||
text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
|
||||
}
|
||||
text += body
|
||||
|
||||
if doErrno {
|
||||
text += "\tif e1 != 0 {\n"
|
||||
text += "\t\terr = e1\n"
|
||||
text += "\t}\n"
|
||||
}
|
||||
text += "\treturn\n"
|
||||
text += "}\n"
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
file.Close()
|
||||
}
|
||||
imp := ""
|
||||
if pack != "unix" {
|
||||
imp = "import \"golang.org/x/sys/unix\"\n"
|
||||
|
||||
}
|
||||
vardecls := "\t" + strings.Join(vars, ",\n\t")
|
||||
vardecls += " syscallFunc"
|
||||
fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text)
|
||||
}
|
||||
|
||||
const srcTemplate = `// %s
|
||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
||||
|
||||
// +build %s
|
||||
|
||||
package %s
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
%s
|
||||
%s
|
||||
%s
|
||||
var (
|
||||
%s
|
||||
)
|
||||
|
||||
%s
|
||||
`
@@ -0,0 +1,355 @@
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// Parse the header files for OpenBSD and generate a Go-usable sysctl MIB.
//
// Build a MIB in which each entry holds the level, the type, and
// a map of additional entries if the current entry is a node.
// We then walk this MIB and create a flattened sysctl-name-to-OID map.
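// As a sketch of the result (entry shown for illustration only), each element of
// the generated sysctlMib table pairs a flattened name with its OID path, e.g.
//    { "kern.hostname", []_C_int{1, 10} },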
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
goos, goarch string
|
||||
)
|
||||
|
||||
// cmdLine returns this program's command-line arguments.
|
||||
func cmdLine() string {
|
||||
return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ")
|
||||
}
|
||||
|
||||
// buildTags returns build tags.
|
||||
func buildTags() string {
|
||||
return fmt.Sprintf("%s,%s", goarch, goos)
|
||||
}
|
||||
|
||||
// reMatch performs a regular expression match and stores the submatch slice in the value pointed to by m.
|
||||
func reMatch(re *regexp.Regexp, str string, m *[]string) bool {
|
||||
*m = re.FindStringSubmatch(str)
|
||||
if *m != nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type nodeElement struct {
|
||||
n int
|
||||
t string
|
||||
pE *map[string]nodeElement
|
||||
}
|
||||
|
||||
var (
|
||||
debugEnabled bool
|
||||
mib map[string]nodeElement
|
||||
node *map[string]nodeElement
|
||||
nodeMap map[string]string
|
||||
sysCtl []string
|
||||
)
|
||||
|
||||
var (
|
||||
ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`)
|
||||
ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`)
|
||||
ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`)
|
||||
netInetRE = regexp.MustCompile(`^netinet/`)
|
||||
netInet6RE = regexp.MustCompile(`^netinet6/`)
|
||||
netRE = regexp.MustCompile(`^net/`)
|
||||
bracesRE = regexp.MustCompile(`{.*}`)
|
||||
ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`)
|
||||
fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`)
|
||||
)
|
||||
|
||||
func debug(s string) {
|
||||
if debugEnabled {
|
||||
fmt.Fprintln(os.Stderr, s)
|
||||
}
|
||||
}
|
||||
|
||||
// Walk the MIB and build a sysctl name to OID mapping.
|
||||
func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) {
|
||||
lNode := pNode // local copy of pointer to node
|
||||
var keys []string
|
||||
for k := range *lNode {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, key := range keys {
|
||||
nodename := name
|
||||
if name != "" {
|
||||
nodename += "."
|
||||
}
|
||||
nodename += key
|
||||
|
||||
nodeoid := append(oid, (*pNode)[key].n)
|
||||
|
||||
if (*pNode)[key].t == `CTLTYPE_NODE` {
|
||||
if _, ok := nodeMap[nodename]; ok {
|
||||
lNode = &mib
|
||||
ctlName := nodeMap[nodename]
|
||||
for _, part := range strings.Split(ctlName, ".") {
|
||||
lNode = ((*lNode)[part]).pE
|
||||
}
|
||||
} else {
|
||||
lNode = (*pNode)[key].pE
|
||||
}
|
||||
buildSysctl(lNode, nodename, nodeoid)
|
||||
} else if (*pNode)[key].t != "" {
|
||||
oidStr := []string{}
|
||||
for j := range nodeoid {
|
||||
oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j]))
|
||||
}
|
||||
text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n"
|
||||
sysCtl = append(sysCtl, text)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Get the OS (using GOOS_TARGET if it exists)
|
||||
goos = os.Getenv("GOOS_TARGET")
|
||||
if goos == "" {
|
||||
goos = os.Getenv("GOOS")
|
||||
}
|
||||
// Get the architecture (using GOARCH_TARGET if it exists)
|
||||
goarch = os.Getenv("GOARCH_TARGET")
|
||||
if goarch == "" {
|
||||
goarch = os.Getenv("GOARCH")
|
||||
}
|
||||
// Check if GOOS and GOARCH environment variables are defined
|
||||
if goarch == "" || goos == "" {
|
||||
fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
mib = make(map[string]nodeElement)
|
||||
headers := [...]string{
|
||||
`sys/sysctl.h`,
|
||||
`sys/socket.h`,
|
||||
`sys/tty.h`,
|
||||
`sys/malloc.h`,
|
||||
`sys/mount.h`,
|
||||
`sys/namei.h`,
|
||||
`sys/sem.h`,
|
||||
`sys/shm.h`,
|
||||
`sys/vmmeter.h`,
|
||||
`uvm/uvmexp.h`,
|
||||
`uvm/uvm_param.h`,
|
||||
`uvm/uvm_swap_encrypt.h`,
|
||||
`ddb/db_var.h`,
|
||||
`net/if.h`,
|
||||
`net/if_pfsync.h`,
|
||||
`net/pipex.h`,
|
||||
`netinet/in.h`,
|
||||
`netinet/icmp_var.h`,
|
||||
`netinet/igmp_var.h`,
|
||||
`netinet/ip_ah.h`,
|
||||
`netinet/ip_carp.h`,
|
||||
`netinet/ip_divert.h`,
|
||||
`netinet/ip_esp.h`,
|
||||
`netinet/ip_ether.h`,
|
||||
`netinet/ip_gre.h`,
|
||||
`netinet/ip_ipcomp.h`,
|
||||
`netinet/ip_ipip.h`,
|
||||
`netinet/pim_var.h`,
|
||||
`netinet/tcp_var.h`,
|
||||
`netinet/udp_var.h`,
|
||||
`netinet6/in6.h`,
|
||||
`netinet6/ip6_divert.h`,
|
||||
`netinet6/pim6_var.h`,
|
||||
`netinet/icmp6.h`,
|
||||
`netmpls/mpls.h`,
|
||||
}
|
||||
|
||||
ctls := [...]string{
|
||||
`kern`,
|
||||
`vm`,
|
||||
`fs`,
|
||||
`net`,
|
||||
//debug /* Special handling required */
|
||||
`hw`,
|
||||
//machdep /* Arch specific */
|
||||
`user`,
|
||||
`ddb`,
|
||||
//vfs /* Special handling required */
|
||||
`fs.posix`,
|
||||
`kern.forkstat`,
|
||||
`kern.intrcnt`,
|
||||
`kern.malloc`,
|
||||
`kern.nchstats`,
|
||||
`kern.seminfo`,
|
||||
`kern.shminfo`,
|
||||
`kern.timecounter`,
|
||||
`kern.tty`,
|
||||
`kern.watchdog`,
|
||||
`net.bpf`,
|
||||
`net.ifq`,
|
||||
`net.inet`,
|
||||
`net.inet.ah`,
|
||||
`net.inet.carp`,
|
||||
`net.inet.divert`,
|
||||
`net.inet.esp`,
|
||||
`net.inet.etherip`,
|
||||
`net.inet.gre`,
|
||||
`net.inet.icmp`,
|
||||
`net.inet.igmp`,
|
||||
`net.inet.ip`,
|
||||
`net.inet.ip.ifq`,
|
||||
`net.inet.ipcomp`,
|
||||
`net.inet.ipip`,
|
||||
`net.inet.mobileip`,
|
||||
`net.inet.pfsync`,
|
||||
`net.inet.pim`,
|
||||
`net.inet.tcp`,
|
||||
`net.inet.udp`,
|
||||
`net.inet6`,
|
||||
`net.inet6.divert`,
|
||||
`net.inet6.ip6`,
|
||||
`net.inet6.icmp6`,
|
||||
`net.inet6.pim6`,
|
||||
`net.inet6.tcp6`,
|
||||
`net.inet6.udp6`,
|
||||
`net.mpls`,
|
||||
`net.mpls.ifq`,
|
||||
`net.key`,
|
||||
`net.pflow`,
|
||||
`net.pfsync`,
|
||||
`net.pipex`,
|
||||
`net.rt`,
|
||||
`vm.swapencrypt`,
|
||||
//vfsgenctl /* Special handling required */
|
||||
}
|
||||
|
||||
// Node name "fixups"
|
||||
ctlMap := map[string]string{
|
||||
"ipproto": "net.inet",
|
||||
"net.inet.ipproto": "net.inet",
|
||||
"net.inet6.ipv6proto": "net.inet6",
|
||||
"net.inet6.ipv6": "net.inet6.ip6",
|
||||
"net.inet.icmpv6": "net.inet6.icmp6",
|
||||
"net.inet6.divert6": "net.inet6.divert",
|
||||
"net.inet6.tcp6": "net.inet.tcp",
|
||||
"net.inet6.udp6": "net.inet.udp",
|
||||
"mpls": "net.mpls",
|
||||
"swpenc": "vm.swapencrypt",
|
||||
}
|
||||
|
||||
// Node mappings
|
||||
nodeMap = map[string]string{
|
||||
"net.inet.ip.ifq": "net.ifq",
|
||||
"net.inet.pfsync": "net.pfsync",
|
||||
"net.mpls.ifq": "net.ifq",
|
||||
}
|
||||
|
||||
mCtls := make(map[string]bool)
|
||||
for _, ctl := range ctls {
|
||||
mCtls[ctl] = true
|
||||
}
|
||||
|
||||
for _, header := range headers {
|
||||
debug("Processing " + header)
|
||||
file, err := os.Open(filepath.Join("/usr/include", header))
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
s := bufio.NewScanner(file)
|
||||
for s.Scan() {
|
||||
var sub []string
|
||||
if reMatch(ctlNames1RE, s.Text(), &sub) ||
|
||||
reMatch(ctlNames2RE, s.Text(), &sub) ||
|
||||
reMatch(ctlNames3RE, s.Text(), &sub) {
|
||||
if sub[1] == `CTL_NAMES` {
|
||||
// Top level.
|
||||
node = &mib
|
||||
} else {
|
||||
// Node.
|
||||
nodename := strings.ToLower(sub[2])
|
||||
ctlName := ""
|
||||
if reMatch(netInetRE, header, &sub) {
|
||||
ctlName = "net.inet." + nodename
|
||||
} else if reMatch(netInet6RE, header, &sub) {
|
||||
ctlName = "net.inet6." + nodename
|
||||
} else if reMatch(netRE, header, &sub) {
|
||||
ctlName = "net." + nodename
|
||||
} else {
|
||||
ctlName = nodename
|
||||
ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`)
|
||||
}
|
||||
|
||||
if val, ok := ctlMap[ctlName]; ok {
|
||||
ctlName = val
|
||||
}
|
||||
if _, ok := mCtls[ctlName]; !ok {
|
||||
debug("Ignoring " + ctlName + "...")
|
||||
continue
|
||||
}
|
||||
|
||||
// Walk down from the top of the MIB.
|
||||
node = &mib
|
||||
for _, part := range strings.Split(ctlName, ".") {
|
||||
if _, ok := (*node)[part]; !ok {
|
||||
debug("Missing node " + part)
|
||||
(*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}}
|
||||
}
|
||||
node = (*node)[part].pE
|
||||
}
|
||||
}
|
||||
|
||||
// Populate current node with entries.
|
||||
i := -1
|
||||
for !strings.HasPrefix(s.Text(), "}") {
|
||||
s.Scan()
|
||||
if reMatch(bracesRE, s.Text(), &sub) {
|
||||
i++
|
||||
}
|
||||
if !reMatch(ctlTypeRE, s.Text(), &sub) {
|
||||
continue
|
||||
}
|
||||
(*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}}
|
||||
}
|
||||
}
|
||||
}
|
||||
err = s.Err()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
file.Close()
|
||||
}
|
||||
buildSysctl(&mib, "", []int{})
|
||||
|
||||
sort.Strings(sysCtl)
|
||||
text := strings.Join(sysCtl, "")
|
||||
|
||||
fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
|
||||
}
|
||||
|
||||
const srcTemplate = `// %s
|
||||
// Code generated by the command above; DO NOT EDIT.
|
||||
|
||||
// +build %s
|
||||
|
||||
package unix
|
||||
|
||||
type mibentry struct {
|
||||
ctlname string
|
||||
ctloid []_C_int
|
||||
}
|
||||
|
||||
var sysctlMib = []mibentry {
|
||||
%s
|
||||
}
|
||||
`
@@ -0,0 +1,190 @@
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// Generate system call table for DragonFly, NetBSD,
|
||||
// FreeBSD, OpenBSD or Darwin from master list
|
||||
// (for example, /usr/src/sys/kern/syscalls.master or
|
||||
// sys/syscall.h).
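// As a rough example (made-up entry, not copied from a real syscalls.master), an
// OpenBSD line such as
//    1	STD		{ void sys_exit(int rval); }
// is rendered in the generated const block as
//    SYS_EXIT = 1; // { void sys_exit(int rval); }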
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
goos, goarch string
|
||||
)
|
||||
|
||||
// cmdLine returns this program's command-line arguments
|
||||
func cmdLine() string {
|
||||
return "go run mksysnum.go " + strings.Join(os.Args[1:], " ")
|
||||
}
|
||||
|
||||
// buildTags returns build tags
|
||||
func buildTags() string {
|
||||
return fmt.Sprintf("%s,%s", goarch, goos)
|
||||
}
|
||||
|
||||
func checkErr(err error) {
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// source string and substring slice for regexp
|
||||
type re struct {
|
||||
str string // source string
|
||||
sub []string // matched sub-string
|
||||
}
|
||||
|
||||
// Match performs regular expression match
|
||||
func (r *re) Match(exp string) bool {
|
||||
r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str)
|
||||
if r.sub != nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// fetchFile fetches a text file from URL
|
||||
func fetchFile(URL string) io.Reader {
|
||||
resp, err := http.Get(URL)
|
||||
checkErr(err)
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
checkErr(err)
|
||||
return strings.NewReader(string(body))
|
||||
}
|
||||
|
||||
// readFile reads a text file from path
|
||||
func readFile(path string) io.Reader {
|
||||
file, err := os.Open(path)
|
||||
checkErr(err)
|
||||
return file
|
||||
}
|
||||
|
||||
func format(name, num, proto string) string {
|
||||
name = strings.ToUpper(name)
|
||||
// There are multiple entries for enosys and nosys, so comment them out.
|
||||
nm := re{str: name}
|
||||
if nm.Match(`^SYS_E?NOSYS$`) {
|
||||
name = fmt.Sprintf("// %s", name)
|
||||
}
|
||||
if name == `SYS_SYS_EXIT` {
|
||||
name = `SYS_EXIT`
|
||||
}
|
||||
return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Get the OS (using GOOS_TARGET if it exists)
|
||||
goos = os.Getenv("GOOS_TARGET")
|
||||
if goos == "" {
|
||||
goos = os.Getenv("GOOS")
|
||||
}
|
||||
// Get the architecture (using GOARCH_TARGET if it exists)
|
||||
goarch = os.Getenv("GOARCH_TARGET")
|
||||
if goarch == "" {
|
||||
goarch = os.Getenv("GOARCH")
|
||||
}
|
||||
// Check if GOOS and GOARCH environment variables are defined
|
||||
if goarch == "" || goos == "" {
|
||||
fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
file := strings.TrimSpace(os.Args[1])
|
||||
var syscalls io.Reader
|
||||
if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") {
|
||||
// Download syscalls.master file
|
||||
syscalls = fetchFile(file)
|
||||
} else {
|
||||
syscalls = readFile(file)
|
||||
}
|
||||
|
||||
var text, line string
|
||||
s := bufio.NewScanner(syscalls)
|
||||
for s.Scan() {
|
||||
t := re{str: line}
|
||||
if t.Match(`^(.*)\\$`) {
|
||||
// Handle continuation
|
||||
line = t.sub[1]
|
||||
line += strings.TrimLeft(s.Text(), " \t")
|
||||
} else {
|
||||
// New line
|
||||
line = s.Text()
|
||||
}
|
||||
t = re{str: line}
|
||||
if t.Match(`\\$`) {
|
||||
continue
|
||||
}
|
||||
t = re{str: line}
|
||||
|
||||
switch goos {
|
||||
case "dragonfly":
|
||||
if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) {
|
||||
num, proto := t.sub[1], t.sub[2]
|
||||
name := fmt.Sprintf("SYS_%s", t.sub[3])
|
||||
text += format(name, num, proto)
|
||||
}
|
||||
case "freebsd":
|
||||
if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) {
|
||||
num, proto := t.sub[1], t.sub[2]
|
||||
name := fmt.Sprintf("SYS_%s", t.sub[3])
|
||||
text += format(name, num, proto)
|
||||
}
|
||||
case "openbsd":
|
||||
if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) {
|
||||
num, proto, name := t.sub[1], t.sub[3], t.sub[4]
|
||||
text += format(name, num, proto)
|
||||
}
|
||||
case "netbsd":
|
||||
if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) {
|
||||
num, proto, compat := t.sub[1], t.sub[6], t.sub[8]
|
||||
name := t.sub[7] + "_" + t.sub[9]
|
||||
if t.sub[11] != "" {
|
||||
name = t.sub[7] + "_" + t.sub[11]
|
||||
}
|
||||
name = strings.ToUpper(name)
|
||||
if compat == "" || compat == "13" || compat == "30" || compat == "50" {
|
||||
text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
|
||||
}
|
||||
}
|
||||
case "darwin":
|
||||
if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) {
|
||||
name, num := t.sub[1], t.sub[2]
|
||||
name = strings.ToUpper(name)
|
||||
text += fmt.Sprintf(" SYS_%s = %s;\n", name, num)
|
||||
}
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos)
|
||||
os.Exit(1)
|
||||
|
||||
}
|
||||
}
|
||||
err := s.Err()
|
||||
checkErr(err)
|
||||
|
||||
fmt.Printf(template, cmdLine(), buildTags(), text)
|
||||
}
|
||||
|
||||
const template = `// %s
|
||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
||||
|
||||
// +build %s
|
||||
|
||||
package unix
|
||||
|
||||
const(
|
||||
%s)`
@@ -0,0 +1,237 @@
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
// +build aix
|
||||
|
||||
/*
|
||||
Input to cgo -godefs. See also mkerrors.sh and mkall.sh
|
||||
*/
|
||||
|
||||
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||
// +godefs map struct_in6_addr [16]byte /* in6_addr */
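// The +godefs map lines above ask cgo -godefs to substitute the listed C types
// with fixed-size byte arrays in the generated Go structs; the invocation that
// processes this file lives in the package's generation scripts.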
|
||||
|
||||
package unix
|
||||
|
||||
/*
|
||||
#include <sys/types.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/limits.h>
|
||||
#include <sys/un.h>
|
||||
#include <utime.h>
|
||||
#include <sys/utsname.h>
|
||||
#include <sys/poll.h>
|
||||
#include <sys/resource.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/statfs.h>
|
||||
#include <sys/termio.h>
|
||||
#include <sys/ioctl.h>
|
||||
|
||||
#include <termios.h>
|
||||
|
||||
#include <net/if.h>
|
||||
#include <net/if_dl.h>
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/icmp6.h>
|
||||
|
||||
|
||||
#include <dirent.h>
|
||||
#include <fcntl.h>
|
||||
|
||||
enum {
|
||||
sizeofPtr = sizeof(void*),
|
||||
};
|
||||
|
||||
union sockaddr_all {
|
||||
struct sockaddr s1; // this one gets used for fields
|
||||
struct sockaddr_in s2; // these pad it out
|
||||
struct sockaddr_in6 s3;
|
||||
struct sockaddr_un s4;
|
||||
struct sockaddr_dl s5;
|
||||
};
|
||||
|
||||
struct sockaddr_any {
|
||||
struct sockaddr addr;
|
||||
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
|
||||
};
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics
|
||||
|
||||
const (
|
||||
SizeofPtr = C.sizeofPtr
|
||||
SizeofShort = C.sizeof_short
|
||||
SizeofInt = C.sizeof_int
|
||||
SizeofLong = C.sizeof_long
|
||||
SizeofLongLong = C.sizeof_longlong
|
||||
PathMax = C.PATH_MAX
|
||||
)
|
||||
|
||||
// Basic types
|
||||
|
||||
type (
|
||||
_C_short C.short
|
||||
_C_int C.int
|
||||
_C_long C.long
|
||||
_C_long_long C.longlong
|
||||
)
|
||||
|
||||
type off64 C.off64_t
|
||||
type off C.off_t
|
||||
type Mode_t C.mode_t
|
||||
|
||||
// Time
|
||||
|
||||
type Timespec C.struct_timespec
|
||||
|
||||
type Timeval C.struct_timeval
|
||||
|
||||
type Timeval32 C.struct_timeval32
|
||||
|
||||
type Timex C.struct_timex
|
||||
|
||||
type Time_t C.time_t
|
||||
|
||||
type Tms C.struct_tms
|
||||
|
||||
type Utimbuf C.struct_utimbuf
|
||||
|
||||
type Timezone C.struct_timezone
|
||||
|
||||
// Processes
|
||||
|
||||
type Rusage C.struct_rusage
|
||||
|
||||
type Rlimit C.struct_rlimit64
|
||||
|
||||
type Pid_t C.pid_t
|
||||
|
||||
type _Gid_t C.gid_t
|
||||
|
||||
type dev_t C.dev_t
|
||||
|
||||
// Files
|
||||
|
||||
type Stat_t C.struct_stat
|
||||
|
||||
type StatxTimestamp C.struct_statx_timestamp
|
||||
|
||||
type Statx_t C.struct_statx
|
||||
|
||||
type Dirent C.struct_dirent
|
||||
|
||||
// Sockets
|
||||
|
||||
type RawSockaddrInet4 C.struct_sockaddr_in
|
||||
|
||||
type RawSockaddrInet6 C.struct_sockaddr_in6
|
||||
|
||||
type RawSockaddrUnix C.struct_sockaddr_un
|
||||
|
||||
type RawSockaddrDatalink C.struct_sockaddr_dl
|
||||
|
||||
type RawSockaddr C.struct_sockaddr
|
||||
|
||||
type RawSockaddrAny C.struct_sockaddr_any
|
||||
|
||||
type _Socklen C.socklen_t
|
||||
|
||||
type Cmsghdr C.struct_cmsghdr
|
||||
|
||||
type ICMPv6Filter C.struct_icmp6_filter
|
||||
|
||||
type Iovec C.struct_iovec
|
||||
|
||||
type IPMreq C.struct_ip_mreq
|
||||
|
||||
type IPv6Mreq C.struct_ipv6_mreq
|
||||
|
||||
type IPv6MTUInfo C.struct_ip6_mtuinfo
|
||||
|
||||
type Linger C.struct_linger
|
||||
|
||||
type Msghdr C.struct_msghdr
|
||||
|
||||
const (
|
||||
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
|
||||
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
|
||||
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
|
||||
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
|
||||
SizeofLinger = C.sizeof_struct_linger
|
||||
SizeofIPMreq = C.sizeof_struct_ip_mreq
|
||||
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
||||
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
|
||||
SizeofMsghdr = C.sizeof_struct_msghdr
|
||||
SizeofCmsghdr = C.sizeof_struct_cmsghdr
|
||||
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
||||
)
|
||||
|
||||
// Routing and interface messages
|
||||
|
||||
const (
|
||||
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
|
||||
)
|
||||
|
||||
type IfMsgHdr C.struct_if_msghdr
|
||||
|
||||
// Misc
|
||||
|
||||
type FdSet C.fd_set
|
||||
|
||||
type Utsname C.struct_utsname
|
||||
|
||||
type Ustat_t C.struct_ustat
|
||||
|
||||
type Sigset_t C.sigset_t
|
||||
|
||||
const (
|
||||
AT_FDCWD = C.AT_FDCWD
|
||||
AT_REMOVEDIR = C.AT_REMOVEDIR
|
||||
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
|
||||
)
|
||||
|
||||
// Terminal handling
|
||||
|
||||
type Termios C.struct_termios
|
||||
|
||||
type Termio C.struct_termio
|
||||
|
||||
type Winsize C.struct_winsize
|
||||
|
||||
// poll
|
||||
|
||||
type PollFd struct {
|
||||
Fd int32
|
||||
Events uint16
|
||||
Revents uint16
|
||||
}
|
||||
|
||||
const (
|
||||
POLLERR = C.POLLERR
|
||||
POLLHUP = C.POLLHUP
|
||||
POLLIN = C.POLLIN
|
||||
POLLNVAL = C.POLLNVAL
|
||||
POLLOUT = C.POLLOUT
|
||||
POLLPRI = C.POLLPRI
|
||||
POLLRDBAND = C.POLLRDBAND
|
||||
POLLRDNORM = C.POLLRDNORM
|
||||
POLLWRBAND = C.POLLWRBAND
|
||||
POLLWRNORM = C.POLLWRNORM
|
||||
)
|
||||
|
||||
// flock_t
|
||||
|
||||
type Flock_t C.struct_flock64
|
||||
|
||||
// Statfs
|
||||
|
||||
type Fsid_t C.struct_fsid_t
|
||||
type Fsid64_t C.struct_fsid64_t
|
||||
|
||||
type Statfs_t C.struct_statfs
|
||||
|
||||
const RNDGETENTCNT = 0x80045200
@@ -0,0 +1,283 @@
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
/*
|
||||
Input to cgo -godefs. See README.md
|
||||
*/
|
||||
|
||||
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||
|
||||
package unix
|
||||
|
||||
/*
|
||||
#define __DARWIN_UNIX03 0
|
||||
#define KERNEL
|
||||
#define _DARWIN_USE_64_BIT_INODE
|
||||
#include <dirent.h>
|
||||
#include <fcntl.h>
|
||||
#include <poll.h>
|
||||
#include <signal.h>
|
||||
#include <termios.h>
|
||||
#include <unistd.h>
|
||||
#include <mach/mach.h>
|
||||
#include <mach/message.h>
|
||||
#include <sys/event.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/mount.h>
|
||||
#include <sys/param.h>
|
||||
#include <sys/ptrace.h>
|
||||
#include <sys/resource.h>
|
||||
#include <sys/select.h>
|
||||
#include <sys/signal.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/uio.h>
|
||||
#include <sys/un.h>
|
||||
#include <sys/utsname.h>
|
||||
#include <sys/wait.h>
|
||||
#include <net/bpf.h>
|
||||
#include <net/if.h>
|
||||
#include <net/if_dl.h>
|
||||
#include <net/if_var.h>
|
||||
#include <net/route.h>
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/icmp6.h>
|
||||
#include <netinet/tcp.h>
|
||||
|
||||
enum {
|
||||
sizeofPtr = sizeof(void*),
|
||||
};
|
||||
|
||||
union sockaddr_all {
|
||||
struct sockaddr s1; // this one gets used for fields
|
||||
struct sockaddr_in s2; // these pad it out
|
||||
struct sockaddr_in6 s3;
|
||||
struct sockaddr_un s4;
|
||||
struct sockaddr_dl s5;
|
||||
};
|
||||
|
||||
struct sockaddr_any {
|
||||
struct sockaddr addr;
|
||||
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
|
||||
};
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics
|
||||
|
||||
const (
|
||||
SizeofPtr = C.sizeofPtr
|
||||
SizeofShort = C.sizeof_short
|
||||
SizeofInt = C.sizeof_int
|
||||
SizeofLong = C.sizeof_long
|
||||
SizeofLongLong = C.sizeof_longlong
|
||||
)
|
||||
|
||||
// Basic types
|
||||
|
||||
type (
|
||||
_C_short C.short
|
||||
_C_int C.int
|
||||
_C_long C.long
|
||||
_C_long_long C.longlong
|
||||
)
|
||||
|
||||
// Time
|
||||
|
||||
type Timespec C.struct_timespec
|
||||
|
||||
type Timeval C.struct_timeval
|
||||
|
||||
type Timeval32 C.struct_timeval32
|
||||
|
||||
// Processes
|
||||
|
||||
type Rusage C.struct_rusage
|
||||
|
||||
type Rlimit C.struct_rlimit
|
||||
|
||||
type _Gid_t C.gid_t
|
||||
|
||||
// Files
|
||||
|
||||
type Stat_t C.struct_stat64
|
||||
|
||||
type Statfs_t C.struct_statfs64
|
||||
|
||||
type Flock_t C.struct_flock
|
||||
|
||||
type Fstore_t C.struct_fstore
|
||||
|
||||
type Radvisory_t C.struct_radvisory
|
||||
|
||||
type Fbootstraptransfer_t C.struct_fbootstraptransfer
|
||||
|
||||
type Log2phys_t C.struct_log2phys
|
||||
|
||||
type Fsid C.struct_fsid
|
||||
|
||||
type Dirent C.struct_dirent
|
||||
|
||||
// Sockets
|
||||
|
||||
type RawSockaddrInet4 C.struct_sockaddr_in
|
||||
|
||||
type RawSockaddrInet6 C.struct_sockaddr_in6
|
||||
|
||||
type RawSockaddrUnix C.struct_sockaddr_un
|
||||
|
||||
type RawSockaddrDatalink C.struct_sockaddr_dl
|
||||
|
||||
type RawSockaddr C.struct_sockaddr
|
||||
|
||||
type RawSockaddrAny C.struct_sockaddr_any
|
||||
|
||||
type _Socklen C.socklen_t
|
||||
|
||||
type Linger C.struct_linger
|
||||
|
||||
type Iovec C.struct_iovec
|
||||
|
||||
type IPMreq C.struct_ip_mreq
|
||||
|
||||
type IPv6Mreq C.struct_ipv6_mreq
|
||||
|
||||
type Msghdr C.struct_msghdr
|
||||
|
||||
type Cmsghdr C.struct_cmsghdr
|
||||
|
||||
type Inet4Pktinfo C.struct_in_pktinfo
|
||||
|
||||
type Inet6Pktinfo C.struct_in6_pktinfo
|
||||
|
||||
type IPv6MTUInfo C.struct_ip6_mtuinfo
|
||||
|
||||
type ICMPv6Filter C.struct_icmp6_filter
|
||||
|
||||
const (
|
||||
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
|
||||
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
|
||||
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
|
||||
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
|
||||
SizeofLinger = C.sizeof_struct_linger
|
||||
SizeofIPMreq = C.sizeof_struct_ip_mreq
|
||||
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
||||
SizeofMsghdr = C.sizeof_struct_msghdr
|
||||
SizeofCmsghdr = C.sizeof_struct_cmsghdr
|
||||
SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo
|
||||
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
|
||||
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
|
||||
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
||||
)
|
||||
|
||||
// Ptrace requests
|
||||
|
||||
const (
|
||||
PTRACE_TRACEME = C.PT_TRACE_ME
|
||||
PTRACE_CONT = C.PT_CONTINUE
|
||||
PTRACE_KILL = C.PT_KILL
|
||||
)
|
||||
|
||||
// Events (kqueue, kevent)
|
||||
|
||||
type Kevent_t C.struct_kevent
|
||||
|
||||
// Select
|
||||
|
||||
type FdSet C.fd_set
|
||||
|
||||
// Routing and interface messages
|
||||
|
||||
const (
|
||||
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
|
||||
SizeofIfData = C.sizeof_struct_if_data
|
||||
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
|
||||
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
|
||||
SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2
|
||||
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
|
||||
SizeofRtMetrics = C.sizeof_struct_rt_metrics
|
||||
)
|
||||
|
||||
type IfMsghdr C.struct_if_msghdr
|
||||
|
||||
type IfData C.struct_if_data
|
||||
|
||||
type IfaMsghdr C.struct_ifa_msghdr
|
||||
|
||||
type IfmaMsghdr C.struct_ifma_msghdr
|
||||
|
||||
type IfmaMsghdr2 C.struct_ifma_msghdr2
|
||||
|
||||
type RtMsghdr C.struct_rt_msghdr
|
||||
|
||||
type RtMetrics C.struct_rt_metrics
|
||||
|
||||
// Berkeley packet filter
|
||||
|
||||
const (
|
||||
SizeofBpfVersion = C.sizeof_struct_bpf_version
|
||||
SizeofBpfStat = C.sizeof_struct_bpf_stat
|
||||
SizeofBpfProgram = C.sizeof_struct_bpf_program
|
||||
SizeofBpfInsn = C.sizeof_struct_bpf_insn
|
||||
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
|
||||
)
|
||||
|
||||
type BpfVersion C.struct_bpf_version
|
||||
|
||||
type BpfStat C.struct_bpf_stat
|
||||
|
||||
type BpfProgram C.struct_bpf_program
|
||||
|
||||
type BpfInsn C.struct_bpf_insn
|
||||
|
||||
type BpfHdr C.struct_bpf_hdr
|
||||
|
||||
// Terminal handling
|
||||
|
||||
type Termios C.struct_termios
|
||||
|
||||
type Winsize C.struct_winsize
|
||||
|
||||
// fchmodat-like syscalls.
|
||||
|
||||
const (
|
||||
AT_FDCWD = C.AT_FDCWD
|
||||
AT_REMOVEDIR = C.AT_REMOVEDIR
|
||||
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
|
||||
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
|
||||
)
|
||||
|
||||
// poll
|
||||
|
||||
type PollFd C.struct_pollfd
|
||||
|
||||
const (
|
||||
POLLERR = C.POLLERR
|
||||
POLLHUP = C.POLLHUP
|
||||
POLLIN = C.POLLIN
|
||||
POLLNVAL = C.POLLNVAL
|
||||
POLLOUT = C.POLLOUT
|
||||
POLLPRI = C.POLLPRI
|
||||
POLLRDBAND = C.POLLRDBAND
|
||||
POLLRDNORM = C.POLLRDNORM
|
||||
POLLWRBAND = C.POLLWRBAND
|
||||
POLLWRNORM = C.POLLWRNORM
|
||||
)
|
||||
|
||||
// uname
|
||||
|
||||
type Utsname C.struct_utsname
|
||||
|
||||
// Clockinfo
|
||||
|
||||
const SizeofClockinfo = C.sizeof_struct_clockinfo
|
||||
|
||||
type Clockinfo C.struct_clockinfo
@@ -0,0 +1,263 @@
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
/*
|
||||
Input to cgo -godefs. See README.md
|
||||
*/
|
||||
|
||||
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||
|
||||
package unix
|
||||
|
||||
/*
|
||||
#define KERNEL
|
||||
#include <dirent.h>
|
||||
#include <fcntl.h>
|
||||
#include <poll.h>
|
||||
#include <signal.h>
|
||||
#include <termios.h>
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/event.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/mount.h>
|
||||
#include <sys/param.h>
|
||||
#include <sys/ptrace.h>
|
||||
#include <sys/resource.h>
|
||||
#include <sys/select.h>
|
||||
#include <sys/signal.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/un.h>
|
||||
#include <sys/utsname.h>
|
||||
#include <sys/wait.h>
|
||||
#include <net/bpf.h>
|
||||
#include <net/if.h>
|
||||
#include <net/if_dl.h>
|
||||
#include <net/route.h>
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/icmp6.h>
|
||||
#include <netinet/tcp.h>
|
||||
|
||||
enum {
|
||||
sizeofPtr = sizeof(void*),
|
||||
};
|
||||
|
||||
union sockaddr_all {
|
||||
struct sockaddr s1; // this one gets used for fields
|
||||
struct sockaddr_in s2; // these pad it out
|
||||
struct sockaddr_in6 s3;
|
||||
struct sockaddr_un s4;
|
||||
struct sockaddr_dl s5;
|
||||
};
|
||||
|
||||
struct sockaddr_any {
|
||||
struct sockaddr addr;
|
||||
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
|
||||
};
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics
|
||||
|
||||
const (
|
||||
SizeofPtr = C.sizeofPtr
|
||||
SizeofShort = C.sizeof_short
|
||||
SizeofInt = C.sizeof_int
|
||||
SizeofLong = C.sizeof_long
|
||||
SizeofLongLong = C.sizeof_longlong
|
||||
)
|
||||
|
||||
// Basic types
|
||||
|
||||
type (
|
||||
_C_short C.short
|
||||
_C_int C.int
|
||||
_C_long C.long
|
||||
_C_long_long C.longlong
|
||||
)
|
||||
|
||||
// Time
|
||||
|
||||
type Timespec C.struct_timespec
|
||||
|
||||
type Timeval C.struct_timeval
|
||||
|
||||
// Processes
|
||||
|
||||
type Rusage C.struct_rusage
|
||||
|
||||
type Rlimit C.struct_rlimit
|
||||
|
||||
type _Gid_t C.gid_t
|
||||
|
||||
// Files
|
||||
|
||||
type Stat_t C.struct_stat
|
||||
|
||||
type Statfs_t C.struct_statfs
|
||||
|
||||
type Flock_t C.struct_flock
|
||||
|
||||
type Dirent C.struct_dirent
|
||||
|
||||
type Fsid C.struct_fsid
|
||||
|
||||
// File system limits
|
||||
|
||||
const (
|
||||
PathMax = C.PATH_MAX
|
||||
)
|
||||
|
||||
// Sockets
|
||||
|
||||
type RawSockaddrInet4 C.struct_sockaddr_in
|
||||
|
||||
type RawSockaddrInet6 C.struct_sockaddr_in6
|
||||
|
||||
type RawSockaddrUnix C.struct_sockaddr_un
|
||||
|
||||
type RawSockaddrDatalink C.struct_sockaddr_dl
|
||||
|
||||
type RawSockaddr C.struct_sockaddr
|
||||
|
||||
type RawSockaddrAny C.struct_sockaddr_any
|
||||
|
||||
type _Socklen C.socklen_t
|
||||
|
||||
type Linger C.struct_linger
|
||||
|
||||
type Iovec C.struct_iovec
|
||||
|
||||
type IPMreq C.struct_ip_mreq
|
||||
|
||||
type IPv6Mreq C.struct_ipv6_mreq
|
||||
|
||||
type Msghdr C.struct_msghdr
|
||||
|
||||
type Cmsghdr C.struct_cmsghdr
|
||||
|
||||
type Inet6Pktinfo C.struct_in6_pktinfo
|
||||
|
||||
type IPv6MTUInfo C.struct_ip6_mtuinfo
|
||||
|
||||
type ICMPv6Filter C.struct_icmp6_filter
|
||||
|
||||
const (
|
||||
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
|
||||
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
|
||||
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
|
||||
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
|
||||
SizeofLinger = C.sizeof_struct_linger
|
||||
SizeofIPMreq = C.sizeof_struct_ip_mreq
|
||||
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
||||
SizeofMsghdr = C.sizeof_struct_msghdr
|
||||
SizeofCmsghdr = C.sizeof_struct_cmsghdr
|
||||
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
|
||||
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
|
||||
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
||||
)
|
||||
|
||||
// Ptrace requests
|
||||
|
||||
const (
|
||||
PTRACE_TRACEME = C.PT_TRACE_ME
|
||||
PTRACE_CONT = C.PT_CONTINUE
|
||||
PTRACE_KILL = C.PT_KILL
|
||||
)
|
||||
|
||||
// Events (kqueue, kevent)
|
||||
|
||||
type Kevent_t C.struct_kevent
|
||||
|
||||
// Select
|
||||
|
||||
type FdSet C.fd_set
|
||||
|
||||
// Routing and interface messages
|
||||
|
||||
const (
|
||||
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
|
||||
SizeofIfData = C.sizeof_struct_if_data
|
||||
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
|
||||
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
|
||||
SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
|
||||
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
|
||||
SizeofRtMetrics = C.sizeof_struct_rt_metrics
|
||||
)
|
||||
|
||||
type IfMsghdr C.struct_if_msghdr
|
||||
|
||||
type IfData C.struct_if_data
|
||||
|
||||
type IfaMsghdr C.struct_ifa_msghdr
|
||||
|
||||
type IfmaMsghdr C.struct_ifma_msghdr
|
||||
|
||||
type IfAnnounceMsghdr C.struct_if_announcemsghdr
|
||||
|
||||
type RtMsghdr C.struct_rt_msghdr
|
||||
|
||||
type RtMetrics C.struct_rt_metrics
|
||||
|
||||
// Berkeley packet filter
|
||||
|
||||
const (
|
||||
SizeofBpfVersion = C.sizeof_struct_bpf_version
|
||||
SizeofBpfStat = C.sizeof_struct_bpf_stat
|
||||
SizeofBpfProgram = C.sizeof_struct_bpf_program
|
||||
SizeofBpfInsn = C.sizeof_struct_bpf_insn
|
||||
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
|
||||
)
|
||||
|
||||
type BpfVersion C.struct_bpf_version
|
||||
|
||||
type BpfStat C.struct_bpf_stat
|
||||
|
||||
type BpfProgram C.struct_bpf_program
|
||||
|
||||
type BpfInsn C.struct_bpf_insn
|
||||
|
||||
type BpfHdr C.struct_bpf_hdr
|
||||
|
||||
// Terminal handling
|
||||
|
||||
type Termios C.struct_termios
|
||||
|
||||
type Winsize C.struct_winsize
|
||||
|
||||
// fchmodat-like syscalls.
|
||||
|
||||
const (
|
||||
AT_FDCWD = C.AT_FDCWD
|
||||
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
|
||||
)
|
||||
|
||||
// poll
|
||||
|
||||
type PollFd C.struct_pollfd
|
||||
|
||||
const (
|
||||
POLLERR = C.POLLERR
|
||||
POLLHUP = C.POLLHUP
|
||||
POLLIN = C.POLLIN
|
||||
POLLNVAL = C.POLLNVAL
|
||||
POLLOUT = C.POLLOUT
|
||||
POLLPRI = C.POLLPRI
|
||||
POLLRDBAND = C.POLLRDBAND
|
||||
POLLRDNORM = C.POLLRDNORM
|
||||
POLLWRBAND = C.POLLWRBAND
|
||||
POLLWRNORM = C.POLLWRNORM
|
||||
)
|
||||
|
||||
// Uname
|
||||
|
||||
type Utsname C.struct_utsname
|
|
@ -0,0 +1,400 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
/*
|
||||
Input to cgo -godefs. See README.md
|
||||
*/
|
||||
|
||||
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||
|
||||
package unix
|
||||
|
||||
/*
|
||||
#define _WANT_FREEBSD11_STAT 1
|
||||
#define _WANT_FREEBSD11_STATFS 1
|
||||
#define _WANT_FREEBSD11_DIRENT 1
|
||||
#define _WANT_FREEBSD11_KEVENT 1
|
||||
|
||||
#include <dirent.h>
|
||||
#include <fcntl.h>
|
||||
#include <poll.h>
|
||||
#include <signal.h>
|
||||
#include <termios.h>
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/capsicum.h>
|
||||
#include <sys/event.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/mount.h>
|
||||
#include <sys/param.h>
|
||||
#include <sys/ptrace.h>
|
||||
#include <sys/resource.h>
|
||||
#include <sys/select.h>
|
||||
#include <sys/signal.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/un.h>
|
||||
#include <sys/utsname.h>
|
||||
#include <sys/wait.h>
|
||||
#include <net/bpf.h>
|
||||
#include <net/if.h>
|
||||
#include <net/if_dl.h>
|
||||
#include <net/route.h>
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/icmp6.h>
|
||||
#include <netinet/tcp.h>
|
||||
|
||||
enum {
|
||||
sizeofPtr = sizeof(void*),
|
||||
};
|
||||
|
||||
union sockaddr_all {
|
||||
struct sockaddr s1; // this one gets used for fields
|
||||
struct sockaddr_in s2; // these pad it out
|
||||
struct sockaddr_in6 s3;
|
||||
struct sockaddr_un s4;
|
||||
struct sockaddr_dl s5;
|
||||
};
|
||||
|
||||
struct sockaddr_any {
|
||||
struct sockaddr addr;
|
||||
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
|
||||
};
|
||||
|
||||
// This structure is a duplicate of if_data on FreeBSD 8-STABLE.
|
||||
// See /usr/include/net/if.h.
|
||||
struct if_data8 {
|
||||
u_char ifi_type;
|
||||
u_char ifi_physical;
|
||||
u_char ifi_addrlen;
|
||||
u_char ifi_hdrlen;
|
||||
u_char ifi_link_state;
|
||||
u_char ifi_spare_char1;
|
||||
u_char ifi_spare_char2;
|
||||
u_char ifi_datalen;
|
||||
u_long ifi_mtu;
|
||||
u_long ifi_metric;
|
||||
u_long ifi_baudrate;
|
||||
u_long ifi_ipackets;
|
||||
u_long ifi_ierrors;
|
||||
u_long ifi_opackets;
|
||||
u_long ifi_oerrors;
|
||||
u_long ifi_collisions;
|
||||
u_long ifi_ibytes;
|
||||
u_long ifi_obytes;
|
||||
u_long ifi_imcasts;
|
||||
u_long ifi_omcasts;
|
||||
u_long ifi_iqdrops;
|
||||
u_long ifi_noproto;
|
||||
u_long ifi_hwassist;
|
||||
// FIXME: these are now unions, so maybe need to change definitions?
|
||||
#undef ifi_epoch
|
||||
time_t ifi_epoch;
|
||||
#undef ifi_lastchange
|
||||
struct timeval ifi_lastchange;
|
||||
};
|
||||
|
||||
// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE.
|
||||
// See /usr/include/net/if.h.
|
||||
struct if_msghdr8 {
|
||||
u_short ifm_msglen;
|
||||
u_char ifm_version;
|
||||
u_char ifm_type;
|
||||
int ifm_addrs;
|
||||
int ifm_flags;
|
||||
u_short ifm_index;
|
||||
struct if_data8 ifm_data;
|
||||
};
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics
|
||||
|
||||
const (
|
||||
SizeofPtr = C.sizeofPtr
|
||||
SizeofShort = C.sizeof_short
|
||||
SizeofInt = C.sizeof_int
|
||||
SizeofLong = C.sizeof_long
|
||||
SizeofLongLong = C.sizeof_longlong
|
||||
)
|
||||
|
||||
// Basic types
|
||||
|
||||
type (
|
||||
_C_short C.short
|
||||
_C_int C.int
|
||||
_C_long C.long
|
||||
_C_long_long C.longlong
|
||||
)
|
||||
|
||||
// Time
|
||||
|
||||
type Timespec C.struct_timespec
|
||||
|
||||
type Timeval C.struct_timeval
|
||||
|
||||
// Processes
|
||||
|
||||
type Rusage C.struct_rusage
|
||||
|
||||
type Rlimit C.struct_rlimit
|
||||
|
||||
type _Gid_t C.gid_t
|
||||
|
||||
// Files
|
||||
|
||||
const (
|
||||
_statfsVersion = C.STATFS_VERSION
|
||||
_dirblksiz = C.DIRBLKSIZ
|
||||
)
|
||||
|
||||
type Stat_t C.struct_stat
|
||||
|
||||
type stat_freebsd11_t C.struct_freebsd11_stat
|
||||
|
||||
type Statfs_t C.struct_statfs
|
||||
|
||||
type statfs_freebsd11_t C.struct_freebsd11_statfs
|
||||
|
||||
type Flock_t C.struct_flock
|
||||
|
||||
type Dirent C.struct_dirent
|
||||
|
||||
type dirent_freebsd11 C.struct_freebsd11_dirent
|
||||
|
||||
type Fsid C.struct_fsid
|
||||
|
||||
// File system limits
|
||||
|
||||
const (
|
||||
PathMax = C.PATH_MAX
|
||||
)
|
||||
|
||||
// Advice to Fadvise
|
||||
|
||||
const (
|
||||
FADV_NORMAL = C.POSIX_FADV_NORMAL
|
||||
FADV_RANDOM = C.POSIX_FADV_RANDOM
|
||||
FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
|
||||
FADV_WILLNEED = C.POSIX_FADV_WILLNEED
|
||||
FADV_DONTNEED = C.POSIX_FADV_DONTNEED
|
||||
FADV_NOREUSE = C.POSIX_FADV_NOREUSE
|
||||
)
|
||||
|
||||
// Sockets
|
||||
|
||||
type RawSockaddrInet4 C.struct_sockaddr_in
|
||||
|
||||
type RawSockaddrInet6 C.struct_sockaddr_in6
|
||||
|
||||
type RawSockaddrUnix C.struct_sockaddr_un
|
||||
|
||||
type RawSockaddrDatalink C.struct_sockaddr_dl
|
||||
|
||||
type RawSockaddr C.struct_sockaddr
|
||||
|
||||
type RawSockaddrAny C.struct_sockaddr_any
|
||||
|
||||
type _Socklen C.socklen_t
|
||||
|
||||
type Linger C.struct_linger
|
||||
|
||||
type Iovec C.struct_iovec
|
||||
|
||||
type IPMreq C.struct_ip_mreq
|
||||
|
||||
type IPMreqn C.struct_ip_mreqn
|
||||
|
||||
type IPv6Mreq C.struct_ipv6_mreq
|
||||
|
||||
type Msghdr C.struct_msghdr
|
||||
|
||||
type Cmsghdr C.struct_cmsghdr
|
||||
|
||||
type Inet6Pktinfo C.struct_in6_pktinfo
|
||||
|
||||
type IPv6MTUInfo C.struct_ip6_mtuinfo
|
||||
|
||||
type ICMPv6Filter C.struct_icmp6_filter
|
||||
|
||||
const (
|
||||
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
|
||||
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
|
||||
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
|
||||
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
|
||||
SizeofLinger = C.sizeof_struct_linger
|
||||
SizeofIPMreq = C.sizeof_struct_ip_mreq
|
||||
SizeofIPMreqn = C.sizeof_struct_ip_mreqn
|
||||
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
||||
SizeofMsghdr = C.sizeof_struct_msghdr
|
||||
SizeofCmsghdr = C.sizeof_struct_cmsghdr
|
||||
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
|
||||
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
|
||||
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
||||
)
|
||||
|
||||
// Ptrace requests
|
||||
|
||||
const (
|
||||
PTRACE_ATTACH = C.PT_ATTACH
|
||||
PTRACE_CONT = C.PT_CONTINUE
|
||||
PTRACE_DETACH = C.PT_DETACH
|
||||
PTRACE_GETFPREGS = C.PT_GETFPREGS
|
||||
PTRACE_GETFSBASE = C.PT_GETFSBASE
|
||||
PTRACE_GETLWPLIST = C.PT_GETLWPLIST
|
||||
PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS
|
||||
PTRACE_GETREGS = C.PT_GETREGS
|
||||
PTRACE_GETXSTATE = C.PT_GETXSTATE
|
||||
PTRACE_IO = C.PT_IO
|
||||
PTRACE_KILL = C.PT_KILL
|
||||
PTRACE_LWPEVENTS = C.PT_LWP_EVENTS
|
||||
PTRACE_LWPINFO = C.PT_LWPINFO
|
||||
PTRACE_SETFPREGS = C.PT_SETFPREGS
|
||||
PTRACE_SETREGS = C.PT_SETREGS
|
||||
PTRACE_SINGLESTEP = C.PT_STEP
|
||||
PTRACE_TRACEME = C.PT_TRACE_ME
|
||||
)
|
||||
|
||||
const (
|
||||
PIOD_READ_D = C.PIOD_READ_D
|
||||
PIOD_WRITE_D = C.PIOD_WRITE_D
|
||||
PIOD_READ_I = C.PIOD_READ_I
|
||||
PIOD_WRITE_I = C.PIOD_WRITE_I
|
||||
)
|
||||
|
||||
const (
|
||||
PL_FLAG_BORN = C.PL_FLAG_BORN
|
||||
PL_FLAG_EXITED = C.PL_FLAG_EXITED
|
||||
PL_FLAG_SI = C.PL_FLAG_SI
|
||||
)
|
||||
|
||||
const (
|
||||
TRAP_BRKPT = C.TRAP_BRKPT
|
||||
TRAP_TRACE = C.TRAP_TRACE
|
||||
)
|
||||
|
||||
type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo
|
||||
|
||||
type __Siginfo C.struct___siginfo
|
||||
|
||||
type Sigset_t C.sigset_t
|
||||
|
||||
type Reg C.struct_reg
|
||||
|
||||
type FpReg C.struct_fpreg
|
||||
|
||||
type PtraceIoDesc C.struct_ptrace_io_desc
|
||||
|
||||
// Events (kqueue, kevent)
|
||||
|
||||
type Kevent_t C.struct_kevent_freebsd11
|
||||
|
||||
// Select
|
||||
|
||||
type FdSet C.fd_set
|
||||
|
||||
// Routing and interface messages
|
||||
|
||||
const (
|
||||
sizeofIfMsghdr = C.sizeof_struct_if_msghdr
|
||||
SizeofIfMsghdr = C.sizeof_struct_if_msghdr8
|
||||
sizeofIfData = C.sizeof_struct_if_data
|
||||
SizeofIfData = C.sizeof_struct_if_data8
|
||||
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
|
||||
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
|
||||
SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
|
||||
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
|
||||
SizeofRtMetrics = C.sizeof_struct_rt_metrics
|
||||
)
|
||||
|
||||
type ifMsghdr C.struct_if_msghdr
|
||||
|
||||
type IfMsghdr C.struct_if_msghdr8
|
||||
|
||||
type ifData C.struct_if_data
|
||||
|
||||
type IfData C.struct_if_data8
|
||||
|
||||
type IfaMsghdr C.struct_ifa_msghdr
|
||||
|
||||
type IfmaMsghdr C.struct_ifma_msghdr
|
||||
|
||||
type IfAnnounceMsghdr C.struct_if_announcemsghdr
|
||||
|
||||
type RtMsghdr C.struct_rt_msghdr
|
||||
|
||||
type RtMetrics C.struct_rt_metrics
|
||||
|
||||
// Berkeley packet filter
|
||||
|
||||
const (
|
||||
SizeofBpfVersion = C.sizeof_struct_bpf_version
|
||||
SizeofBpfStat = C.sizeof_struct_bpf_stat
|
||||
SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf
|
||||
SizeofBpfProgram = C.sizeof_struct_bpf_program
|
||||
SizeofBpfInsn = C.sizeof_struct_bpf_insn
|
||||
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
|
||||
SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header
|
||||
)
|
||||
|
||||
type BpfVersion C.struct_bpf_version
|
||||
|
||||
type BpfStat C.struct_bpf_stat
|
||||
|
||||
type BpfZbuf C.struct_bpf_zbuf
|
||||
|
||||
type BpfProgram C.struct_bpf_program
|
||||
|
||||
type BpfInsn C.struct_bpf_insn
|
||||
|
||||
type BpfHdr C.struct_bpf_hdr
|
||||
|
||||
type BpfZbufHeader C.struct_bpf_zbuf_header
|
||||
|
||||
// Terminal handling
|
||||
|
||||
type Termios C.struct_termios
|
||||
|
||||
type Winsize C.struct_winsize
|
||||
|
||||
// fchmodat-like syscalls.
|
||||
|
||||
const (
|
||||
AT_FDCWD = C.AT_FDCWD
|
||||
AT_REMOVEDIR = C.AT_REMOVEDIR
|
||||
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
|
||||
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
|
||||
)
|
||||
|
||||
// poll
|
||||
|
||||
type PollFd C.struct_pollfd
|
||||
|
||||
const (
|
||||
POLLERR = C.POLLERR
|
||||
POLLHUP = C.POLLHUP
|
||||
POLLIN = C.POLLIN
|
||||
POLLINIGNEOF = C.POLLINIGNEOF
|
||||
POLLNVAL = C.POLLNVAL
|
||||
POLLOUT = C.POLLOUT
|
||||
POLLPRI = C.POLLPRI
|
||||
POLLRDBAND = C.POLLRDBAND
|
||||
POLLRDNORM = C.POLLRDNORM
|
||||
POLLWRBAND = C.POLLWRBAND
|
||||
POLLWRNORM = C.POLLWRNORM
|
||||
)
|
||||
|
||||
// Capabilities
|
||||
|
||||
type CapRights C.struct_cap_rights
|
||||
|
||||
// Uname
|
||||
|
||||
type Utsname C.struct_utsname
|
|
@ -0,0 +1,290 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
/*
|
||||
Input to cgo -godefs. See README.md
|
||||
*/
|
||||
|
||||
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||
|
||||
package unix
|
||||
|
||||
/*
|
||||
#define KERNEL
|
||||
#include <dirent.h>
|
||||
#include <fcntl.h>
|
||||
#include <poll.h>
|
||||
#include <signal.h>
|
||||
#include <termios.h>
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/param.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/event.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/mount.h>
|
||||
#include <sys/ptrace.h>
|
||||
#include <sys/resource.h>
|
||||
#include <sys/select.h>
|
||||
#include <sys/signal.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/uio.h>
|
||||
#include <sys/un.h>
|
||||
#include <sys/utsname.h>
|
||||
#include <sys/wait.h>
|
||||
#include <net/bpf.h>
|
||||
#include <net/if.h>
|
||||
#include <net/if_dl.h>
|
||||
#include <net/route.h>
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/icmp6.h>
|
||||
#include <netinet/tcp.h>
|
||||
|
||||
enum {
|
||||
sizeofPtr = sizeof(void*),
|
||||
};
|
||||
|
||||
union sockaddr_all {
|
||||
struct sockaddr s1; // this one gets used for fields
|
||||
struct sockaddr_in s2; // these pad it out
|
||||
struct sockaddr_in6 s3;
|
||||
struct sockaddr_un s4;
|
||||
struct sockaddr_dl s5;
|
||||
};
|
||||
|
||||
struct sockaddr_any {
|
||||
struct sockaddr addr;
|
||||
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
|
||||
};
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics
|
||||
|
||||
const (
|
||||
SizeofPtr = C.sizeofPtr
|
||||
SizeofShort = C.sizeof_short
|
||||
SizeofInt = C.sizeof_int
|
||||
SizeofLong = C.sizeof_long
|
||||
SizeofLongLong = C.sizeof_longlong
|
||||
)
|
||||
|
||||
// Basic types
|
||||
|
||||
type (
|
||||
_C_short C.short
|
||||
_C_int C.int
|
||||
_C_long C.long
|
||||
_C_long_long C.longlong
|
||||
)
|
||||
|
||||
// Time
|
||||
|
||||
type Timespec C.struct_timespec
|
||||
|
||||
type Timeval C.struct_timeval
|
||||
|
||||
// Processes
|
||||
|
||||
type Rusage C.struct_rusage
|
||||
|
||||
type Rlimit C.struct_rlimit
|
||||
|
||||
type _Gid_t C.gid_t
|
||||
|
||||
// Files
|
||||
|
||||
type Stat_t C.struct_stat
|
||||
|
||||
type Statfs_t C.struct_statfs
|
||||
|
||||
type Flock_t C.struct_flock
|
||||
|
||||
type Dirent C.struct_dirent
|
||||
|
||||
type Fsid C.fsid_t
|
||||
|
||||
// File system limits
|
||||
|
||||
const (
|
||||
PathMax = C.PATH_MAX
|
||||
)
|
||||
|
||||
// Advice to Fadvise
|
||||
|
||||
const (
|
||||
FADV_NORMAL = C.POSIX_FADV_NORMAL
|
||||
FADV_RANDOM = C.POSIX_FADV_RANDOM
|
||||
FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
|
||||
FADV_WILLNEED = C.POSIX_FADV_WILLNEED
|
||||
FADV_DONTNEED = C.POSIX_FADV_DONTNEED
|
||||
FADV_NOREUSE = C.POSIX_FADV_NOREUSE
|
||||
)
|
||||
|
||||
// Sockets
|
||||
|
||||
type RawSockaddrInet4 C.struct_sockaddr_in
|
||||
|
||||
type RawSockaddrInet6 C.struct_sockaddr_in6
|
||||
|
||||
type RawSockaddrUnix C.struct_sockaddr_un
|
||||
|
||||
type RawSockaddrDatalink C.struct_sockaddr_dl
|
||||
|
||||
type RawSockaddr C.struct_sockaddr
|
||||
|
||||
type RawSockaddrAny C.struct_sockaddr_any
|
||||
|
||||
type _Socklen C.socklen_t
|
||||
|
||||
type Linger C.struct_linger
|
||||
|
||||
type Iovec C.struct_iovec
|
||||
|
||||
type IPMreq C.struct_ip_mreq
|
||||
|
||||
type IPv6Mreq C.struct_ipv6_mreq
|
||||
|
||||
type Msghdr C.struct_msghdr
|
||||
|
||||
type Cmsghdr C.struct_cmsghdr
|
||||
|
||||
type Inet6Pktinfo C.struct_in6_pktinfo
|
||||
|
||||
type IPv6MTUInfo C.struct_ip6_mtuinfo
|
||||
|
||||
type ICMPv6Filter C.struct_icmp6_filter
|
||||
|
||||
const (
|
||||
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
|
||||
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
|
||||
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
|
||||
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
|
||||
SizeofLinger = C.sizeof_struct_linger
|
||||
SizeofIPMreq = C.sizeof_struct_ip_mreq
|
||||
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
||||
SizeofMsghdr = C.sizeof_struct_msghdr
|
||||
SizeofCmsghdr = C.sizeof_struct_cmsghdr
|
||||
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
|
||||
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
|
||||
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
||||
)
|
||||
|
||||
// Ptrace requests
|
||||
|
||||
const (
|
||||
PTRACE_TRACEME = C.PT_TRACE_ME
|
||||
PTRACE_CONT = C.PT_CONTINUE
|
||||
PTRACE_KILL = C.PT_KILL
|
||||
)
|
||||
|
||||
// Events (kqueue, kevent)
|
||||
|
||||
type Kevent_t C.struct_kevent
|
||||
|
||||
// Select
|
||||
|
||||
type FdSet C.fd_set
|
||||
|
||||
// Routing and interface messages
|
||||
|
||||
const (
|
||||
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
|
||||
SizeofIfData = C.sizeof_struct_if_data
|
||||
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
|
||||
SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
|
||||
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
|
||||
SizeofRtMetrics = C.sizeof_struct_rt_metrics
|
||||
)
|
||||
|
||||
type IfMsghdr C.struct_if_msghdr
|
||||
|
||||
type IfData C.struct_if_data
|
||||
|
||||
type IfaMsghdr C.struct_ifa_msghdr
|
||||
|
||||
type IfAnnounceMsghdr C.struct_if_announcemsghdr
|
||||
|
||||
type RtMsghdr C.struct_rt_msghdr
|
||||
|
||||
type RtMetrics C.struct_rt_metrics
|
||||
|
||||
type Mclpool C.struct_mclpool
|
||||
|
||||
// Berkeley packet filter
|
||||
|
||||
const (
|
||||
SizeofBpfVersion = C.sizeof_struct_bpf_version
|
||||
SizeofBpfStat = C.sizeof_struct_bpf_stat
|
||||
SizeofBpfProgram = C.sizeof_struct_bpf_program
|
||||
SizeofBpfInsn = C.sizeof_struct_bpf_insn
|
||||
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
|
||||
)
|
||||
|
||||
type BpfVersion C.struct_bpf_version
|
||||
|
||||
type BpfStat C.struct_bpf_stat
|
||||
|
||||
type BpfProgram C.struct_bpf_program
|
||||
|
||||
type BpfInsn C.struct_bpf_insn
|
||||
|
||||
type BpfHdr C.struct_bpf_hdr
|
||||
|
||||
type BpfTimeval C.struct_bpf_timeval
|
||||
|
||||
// Terminal handling
|
||||
|
||||
type Termios C.struct_termios
|
||||
|
||||
type Winsize C.struct_winsize
|
||||
|
||||
type Ptmget C.struct_ptmget
|
||||
|
||||
// fchmodat-like syscalls.
|
||||
|
||||
const (
|
||||
AT_FDCWD = C.AT_FDCWD
|
||||
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
|
||||
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
|
||||
)
|
||||
|
||||
// poll
|
||||
|
||||
type PollFd C.struct_pollfd
|
||||
|
||||
const (
|
||||
POLLERR = C.POLLERR
|
||||
POLLHUP = C.POLLHUP
|
||||
POLLIN = C.POLLIN
|
||||
POLLNVAL = C.POLLNVAL
|
||||
POLLOUT = C.POLLOUT
|
||||
POLLPRI = C.POLLPRI
|
||||
POLLRDBAND = C.POLLRDBAND
|
||||
POLLRDNORM = C.POLLRDNORM
|
||||
POLLWRBAND = C.POLLWRBAND
|
||||
POLLWRNORM = C.POLLWRNORM
|
||||
)
|
||||
|
||||
// Sysctl
|
||||
|
||||
type Sysctlnode C.struct_sysctlnode
|
||||
|
||||
// Uname
|
||||
|
||||
type Utsname C.struct_utsname
|
||||
|
||||
// Clockinfo
|
||||
|
||||
const SizeofClockinfo = C.sizeof_struct_clockinfo
|
||||
|
||||
type Clockinfo C.struct_clockinfo
|
|
@ -0,0 +1,283 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
/*
|
||||
Input to cgo -godefs. See README.md
|
||||
*/
|
||||
|
||||
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||
|
||||
package unix
|
||||
|
||||
/*
|
||||
#define KERNEL
|
||||
#include <dirent.h>
|
||||
#include <fcntl.h>
|
||||
#include <poll.h>
|
||||
#include <signal.h>
|
||||
#include <termios.h>
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/param.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/event.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/mount.h>
|
||||
#include <sys/ptrace.h>
|
||||
#include <sys/resource.h>
|
||||
#include <sys/select.h>
|
||||
#include <sys/signal.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/uio.h>
|
||||
#include <sys/un.h>
|
||||
#include <sys/utsname.h>
|
||||
#include <sys/wait.h>
|
||||
#include <uvm/uvmexp.h>
|
||||
#include <net/bpf.h>
|
||||
#include <net/if.h>
|
||||
#include <net/if_dl.h>
|
||||
#include <net/route.h>
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/icmp6.h>
|
||||
#include <netinet/tcp.h>
|
||||
|
||||
enum {
|
||||
sizeofPtr = sizeof(void*),
|
||||
};
|
||||
|
||||
union sockaddr_all {
|
||||
struct sockaddr s1; // this one gets used for fields
|
||||
struct sockaddr_in s2; // these pad it out
|
||||
struct sockaddr_in6 s3;
|
||||
struct sockaddr_un s4;
|
||||
struct sockaddr_dl s5;
|
||||
};
|
||||
|
||||
struct sockaddr_any {
|
||||
struct sockaddr addr;
|
||||
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
|
||||
};
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics
|
||||
|
||||
const (
|
||||
SizeofPtr = C.sizeofPtr
|
||||
SizeofShort = C.sizeof_short
|
||||
SizeofInt = C.sizeof_int
|
||||
SizeofLong = C.sizeof_long
|
||||
SizeofLongLong = C.sizeof_longlong
|
||||
)
|
||||
|
||||
// Basic types
|
||||
|
||||
type (
|
||||
_C_short C.short
|
||||
_C_int C.int
|
||||
_C_long C.long
|
||||
_C_long_long C.longlong
|
||||
)
|
||||
|
||||
// Time
|
||||
|
||||
type Timespec C.struct_timespec
|
||||
|
||||
type Timeval C.struct_timeval
|
||||
|
||||
// Processes
|
||||
|
||||
type Rusage C.struct_rusage
|
||||
|
||||
type Rlimit C.struct_rlimit
|
||||
|
||||
type _Gid_t C.gid_t
|
||||
|
||||
// Files
|
||||
|
||||
type Stat_t C.struct_stat
|
||||
|
||||
type Statfs_t C.struct_statfs
|
||||
|
||||
type Flock_t C.struct_flock
|
||||
|
||||
type Dirent C.struct_dirent
|
||||
|
||||
type Fsid C.fsid_t
|
||||
|
||||
// File system limits
|
||||
|
||||
const (
|
||||
PathMax = C.PATH_MAX
|
||||
)
|
||||
|
||||
// Sockets
|
||||
|
||||
type RawSockaddrInet4 C.struct_sockaddr_in
|
||||
|
||||
type RawSockaddrInet6 C.struct_sockaddr_in6
|
||||
|
||||
type RawSockaddrUnix C.struct_sockaddr_un
|
||||
|
||||
type RawSockaddrDatalink C.struct_sockaddr_dl
|
||||
|
||||
type RawSockaddr C.struct_sockaddr
|
||||
|
||||
type RawSockaddrAny C.struct_sockaddr_any
|
||||
|
||||
type _Socklen C.socklen_t
|
||||
|
||||
type Linger C.struct_linger
|
||||
|
||||
type Iovec C.struct_iovec
|
||||
|
||||
type IPMreq C.struct_ip_mreq
|
||||
|
||||
type IPv6Mreq C.struct_ipv6_mreq
|
||||
|
||||
type Msghdr C.struct_msghdr
|
||||
|
||||
type Cmsghdr C.struct_cmsghdr
|
||||
|
||||
type Inet6Pktinfo C.struct_in6_pktinfo
|
||||
|
||||
type IPv6MTUInfo C.struct_ip6_mtuinfo
|
||||
|
||||
type ICMPv6Filter C.struct_icmp6_filter
|
||||
|
||||
const (
|
||||
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
|
||||
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
|
||||
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
|
||||
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
|
||||
SizeofLinger = C.sizeof_struct_linger
|
||||
SizeofIPMreq = C.sizeof_struct_ip_mreq
|
||||
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
||||
SizeofMsghdr = C.sizeof_struct_msghdr
|
||||
SizeofCmsghdr = C.sizeof_struct_cmsghdr
|
||||
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
|
||||
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
|
||||
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
||||
)
|
||||
|
||||
// Ptrace requests
|
||||
|
||||
const (
|
||||
PTRACE_TRACEME = C.PT_TRACE_ME
|
||||
PTRACE_CONT = C.PT_CONTINUE
|
||||
PTRACE_KILL = C.PT_KILL
|
||||
)
|
||||
|
||||
// Events (kqueue, kevent)
|
||||
|
||||
type Kevent_t C.struct_kevent
|
||||
|
||||
// Select
|
||||
|
||||
type FdSet C.fd_set
|
||||
|
||||
// Routing and interface messages
|
||||
|
||||
const (
|
||||
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
|
||||
SizeofIfData = C.sizeof_struct_if_data
|
||||
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
|
||||
SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
|
||||
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
|
||||
SizeofRtMetrics = C.sizeof_struct_rt_metrics
|
||||
)
|
||||
|
||||
type IfMsghdr C.struct_if_msghdr
|
||||
|
||||
type IfData C.struct_if_data
|
||||
|
||||
type IfaMsghdr C.struct_ifa_msghdr
|
||||
|
||||
type IfAnnounceMsghdr C.struct_if_announcemsghdr
|
||||
|
||||
type RtMsghdr C.struct_rt_msghdr
|
||||
|
||||
type RtMetrics C.struct_rt_metrics
|
||||
|
||||
type Mclpool C.struct_mclpool
|
||||
|
||||
// Berkeley packet filter
|
||||
|
||||
const (
|
||||
SizeofBpfVersion = C.sizeof_struct_bpf_version
|
||||
SizeofBpfStat = C.sizeof_struct_bpf_stat
|
||||
SizeofBpfProgram = C.sizeof_struct_bpf_program
|
||||
SizeofBpfInsn = C.sizeof_struct_bpf_insn
|
||||
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
|
||||
)
|
||||
|
||||
type BpfVersion C.struct_bpf_version
|
||||
|
||||
type BpfStat C.struct_bpf_stat
|
||||
|
||||
type BpfProgram C.struct_bpf_program
|
||||
|
||||
type BpfInsn C.struct_bpf_insn
|
||||
|
||||
type BpfHdr C.struct_bpf_hdr
|
||||
|
||||
type BpfTimeval C.struct_bpf_timeval
|
||||
|
||||
// Terminal handling
|
||||
|
||||
type Termios C.struct_termios
|
||||
|
||||
type Winsize C.struct_winsize
|
||||
|
||||
// fchmodat-like syscalls.
|
||||
|
||||
const (
|
||||
AT_FDCWD = C.AT_FDCWD
|
||||
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
|
||||
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
|
||||
)
|
||||
|
||||
// poll
|
||||
|
||||
type PollFd C.struct_pollfd
|
||||
|
||||
const (
|
||||
POLLERR = C.POLLERR
|
||||
POLLHUP = C.POLLHUP
|
||||
POLLIN = C.POLLIN
|
||||
POLLNVAL = C.POLLNVAL
|
||||
POLLOUT = C.POLLOUT
|
||||
POLLPRI = C.POLLPRI
|
||||
POLLRDBAND = C.POLLRDBAND
|
||||
POLLRDNORM = C.POLLRDNORM
|
||||
POLLWRBAND = C.POLLWRBAND
|
||||
POLLWRNORM = C.POLLWRNORM
|
||||
)
|
||||
|
||||
// Signal Sets
|
||||
|
||||
type Sigset_t C.sigset_t
|
||||
|
||||
// Uname
|
||||
|
||||
type Utsname C.struct_utsname
|
||||
|
||||
// Uvmexp
|
||||
|
||||
const SizeofUvmexp = C.sizeof_struct_uvmexp
|
||||
|
||||
type Uvmexp C.struct_uvmexp
|
||||
|
||||
// Clockinfo
|
||||
|
||||
const SizeofClockinfo = C.sizeof_struct_clockinfo
|
||||
|
||||
type Clockinfo C.struct_clockinfo
|
|
@ -0,0 +1,266 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
/*
|
||||
Input to cgo -godefs. See README.md
|
||||
*/
|
||||
|
||||
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||
|
||||
package unix
|
||||
|
||||
/*
|
||||
#define KERNEL
|
||||
// These defines ensure that builds done on newer versions of Solaris are
|
||||
// backwards-compatible with older versions of Solaris and
|
||||
// OpenSolaris-based derivatives.
|
||||
#define __USE_SUNOS_SOCKETS__ // msghdr
|
||||
#define __USE_LEGACY_PROTOTYPES__ // iovec
|
||||
#include <dirent.h>
|
||||
#include <fcntl.h>
|
||||
#include <netdb.h>
|
||||
#include <limits.h>
|
||||
#include <poll.h>
|
||||
#include <signal.h>
|
||||
#include <termios.h>
|
||||
#include <termio.h>
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/mount.h>
|
||||
#include <sys/param.h>
|
||||
#include <sys/resource.h>
|
||||
#include <sys/select.h>
|
||||
#include <sys/signal.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/statvfs.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/times.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/utsname.h>
|
||||
#include <sys/un.h>
|
||||
#include <sys/wait.h>
|
||||
#include <net/bpf.h>
|
||||
#include <net/if.h>
|
||||
#include <net/if_dl.h>
|
||||
#include <net/route.h>
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/icmp6.h>
|
||||
#include <netinet/tcp.h>
|
||||
#include <ustat.h>
|
||||
#include <utime.h>
|
||||
|
||||
enum {
|
||||
sizeofPtr = sizeof(void*),
|
||||
};
|
||||
|
||||
union sockaddr_all {
|
||||
struct sockaddr s1; // this one gets used for fields
|
||||
struct sockaddr_in s2; // these pad it out
|
||||
struct sockaddr_in6 s3;
|
||||
struct sockaddr_un s4;
|
||||
struct sockaddr_dl s5;
|
||||
};
|
||||
|
||||
struct sockaddr_any {
|
||||
struct sockaddr addr;
|
||||
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
|
||||
};
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Machine characteristics
|
||||
|
||||
const (
|
||||
SizeofPtr = C.sizeofPtr
|
||||
SizeofShort = C.sizeof_short
|
||||
SizeofInt = C.sizeof_int
|
||||
SizeofLong = C.sizeof_long
|
||||
SizeofLongLong = C.sizeof_longlong
|
||||
PathMax = C.PATH_MAX
|
||||
MaxHostNameLen = C.MAXHOSTNAMELEN
|
||||
)
|
||||
|
||||
// Basic types
|
||||
|
||||
type (
|
||||
_C_short C.short
|
||||
_C_int C.int
|
||||
_C_long C.long
|
||||
_C_long_long C.longlong
|
||||
)
|
||||
|
||||
// Time
|
||||
|
||||
type Timespec C.struct_timespec
|
||||
|
||||
type Timeval C.struct_timeval
|
||||
|
||||
type Timeval32 C.struct_timeval32
|
||||
|
||||
type Tms C.struct_tms
|
||||
|
||||
type Utimbuf C.struct_utimbuf
|
||||
|
||||
// Processes
|
||||
|
||||
type Rusage C.struct_rusage
|
||||
|
||||
type Rlimit C.struct_rlimit
|
||||
|
||||
type _Gid_t C.gid_t
|
||||
|
||||
// Files
|
||||
|
||||
type Stat_t C.struct_stat
|
||||
|
||||
type Flock_t C.struct_flock
|
||||
|
||||
type Dirent C.struct_dirent
|
||||
|
||||
// Filesystems
|
||||
|
||||
type _Fsblkcnt_t C.fsblkcnt_t
|
||||
|
||||
type Statvfs_t C.struct_statvfs
|
||||
|
||||
// Sockets
|
||||
|
||||
type RawSockaddrInet4 C.struct_sockaddr_in
|
||||
|
||||
type RawSockaddrInet6 C.struct_sockaddr_in6
|
||||
|
||||
type RawSockaddrUnix C.struct_sockaddr_un
|
||||
|
||||
type RawSockaddrDatalink C.struct_sockaddr_dl
|
||||
|
||||
type RawSockaddr C.struct_sockaddr
|
||||
|
||||
type RawSockaddrAny C.struct_sockaddr_any
|
||||
|
||||
type _Socklen C.socklen_t
|
||||
|
||||
type Linger C.struct_linger
|
||||
|
||||
type Iovec C.struct_iovec
|
||||
|
||||
type IPMreq C.struct_ip_mreq
|
||||
|
||||
type IPv6Mreq C.struct_ipv6_mreq
|
||||
|
||||
type Msghdr C.struct_msghdr
|
||||
|
||||
type Cmsghdr C.struct_cmsghdr
|
||||
|
||||
type Inet6Pktinfo C.struct_in6_pktinfo
|
||||
|
||||
type IPv6MTUInfo C.struct_ip6_mtuinfo
|
||||
|
||||
type ICMPv6Filter C.struct_icmp6_filter
|
||||
|
||||
const (
|
||||
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
|
||||
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
|
||||
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
|
||||
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
|
||||
SizeofLinger = C.sizeof_struct_linger
|
||||
SizeofIPMreq = C.sizeof_struct_ip_mreq
|
||||
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
||||
SizeofMsghdr = C.sizeof_struct_msghdr
|
||||
SizeofCmsghdr = C.sizeof_struct_cmsghdr
|
||||
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
|
||||
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
|
||||
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
||||
)
|
||||
|
||||
// Select
|
||||
|
||||
type FdSet C.fd_set
|
||||
|
||||
// Misc
|
||||
|
||||
type Utsname C.struct_utsname
|
||||
|
||||
type Ustat_t C.struct_ustat
|
||||
|
||||
const (
|
||||
AT_FDCWD = C.AT_FDCWD
|
||||
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
|
||||
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
|
||||
AT_REMOVEDIR = C.AT_REMOVEDIR
|
||||
AT_EACCESS = C.AT_EACCESS
|
||||
)
|
||||
|
||||
// Routing and interface messages
|
||||
|
||||
const (
|
||||
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
|
||||
SizeofIfData = C.sizeof_struct_if_data
|
||||
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
|
||||
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
|
||||
SizeofRtMetrics = C.sizeof_struct_rt_metrics
|
||||
)
|
||||
|
||||
type IfMsghdr C.struct_if_msghdr
|
||||
|
||||
type IfData C.struct_if_data
|
||||
|
||||
type IfaMsghdr C.struct_ifa_msghdr
|
||||
|
||||
type RtMsghdr C.struct_rt_msghdr
|
||||
|
||||
type RtMetrics C.struct_rt_metrics
|
||||
|
||||
// Berkeley packet filter
|
||||
|
||||
const (
|
||||
SizeofBpfVersion = C.sizeof_struct_bpf_version
|
||||
SizeofBpfStat = C.sizeof_struct_bpf_stat
|
||||
SizeofBpfProgram = C.sizeof_struct_bpf_program
|
||||
SizeofBpfInsn = C.sizeof_struct_bpf_insn
|
||||
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
|
||||
)
|
||||
|
||||
type BpfVersion C.struct_bpf_version
|
||||
|
||||
type BpfStat C.struct_bpf_stat
|
||||
|
||||
type BpfProgram C.struct_bpf_program
|
||||
|
||||
type BpfInsn C.struct_bpf_insn
|
||||
|
||||
type BpfTimeval C.struct_bpf_timeval
|
||||
|
||||
type BpfHdr C.struct_bpf_hdr
|
||||
|
||||
// Terminal handling
|
||||
|
||||
type Termios C.struct_termios
|
||||
|
||||
type Termio C.struct_termio
|
||||
|
||||
type Winsize C.struct_winsize
|
||||
|
||||
// poll
|
||||
|
||||
type PollFd C.struct_pollfd
|
||||
|
||||
const (
|
||||
POLLERR = C.POLLERR
|
||||
POLLHUP = C.POLLHUP
|
||||
POLLIN = C.POLLIN
|
||||
POLLNVAL = C.POLLNVAL
|
||||
POLLOUT = C.POLLOUT
|
||||
POLLPRI = C.POLLPRI
|
||||
POLLRDBAND = C.POLLRDBAND
|
||||
POLLRDNORM = C.POLLRDNORM
|
||||
POLLWRBAND = C.POLLWRBAND
|
||||
POLLWRNORM = C.POLLWRNORM
|
||||
)
|
|
@@ -0,0 +1,142 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"io"
	"log"
	"strings"

	"golang.org/x/text/internal/gen"
)

type registry struct {
	XMLName  xml.Name `xml:"registry"`
	Updated  string   `xml:"updated"`
	Registry []struct {
		ID     string `xml:"id,attr"`
		Record []struct {
			Name string `xml:"name"`
			Xref []struct {
				Type string `xml:"type,attr"`
				Data string `xml:"data,attr"`
			} `xml:"xref"`
			Desc struct {
				Data string `xml:",innerxml"`
				// Any []struct {
				// 	Data string `xml:",chardata"`
				// } `xml:",any"`
				// Data string `xml:",chardata"`
			} `xml:"description,"`
			MIB   string   `xml:"value"`
			Alias []string `xml:"alias"`
			MIME  string   `xml:"preferred_alias"`
		} `xml:"record"`
	} `xml:"registry"`
}

func main() {
	r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml")
	reg := &registry{}
	if err := xml.NewDecoder(r).Decode(&reg); err != nil && err != io.EOF {
		log.Fatalf("Error decoding charset registry: %v", err)
	}
	if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" {
		log.Fatalf("Unexpected ID %s", reg.Registry[0].ID)
	}

	w := &bytes.Buffer{}
	fmt.Fprintf(w, "const (\n")
	for _, rec := range reg.Registry[0].Record {
		constName := ""
		for _, a := range rec.Alias {
			if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 {
				// Some of the constant definitions have comments in them. Strip those.
				constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0])
			}
		}
		if constName == "" {
			switch rec.MIB {
			case "2085":
				constName = "HZGB2312" // Not listed as alias for some reason.
			default:
				log.Fatalf("No cs alias defined for %s.", rec.MIB)
			}
		}
		if rec.MIME != "" {
			rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME)
		}
		fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME)
		if len(rec.Desc.Data) > 0 {
			fmt.Fprint(w, "// ")
			d := xml.NewDecoder(strings.NewReader(rec.Desc.Data))
			inElem := true
			attr := ""
			for {
				t, err := d.Token()
				if err != nil {
					if err != io.EOF {
						log.Fatal(err)
					}
					break
				}
				switch x := t.(type) {
				case xml.CharData:
					attr = "" // Don't need attribute info.
					a := bytes.Split([]byte(x), []byte("\n"))
					for i, b := range a {
						if b = bytes.TrimSpace(b); len(b) != 0 {
							if !inElem && i > 0 {
								fmt.Fprint(w, "\n// ")
							}
							inElem = false
							fmt.Fprintf(w, "%s ", string(b))
						}
					}
				case xml.StartElement:
					if x.Name.Local == "xref" {
						inElem = true
						use := false
						for _, a := range x.Attr {
							if a.Name.Local == "type" {
								use = use || a.Value != "person"
							}
							if a.Name.Local == "data" && use {
								// Patch up URLs to use https. From some links, the
								// https version is different from the http one.
								s := a.Value
								s = strings.Replace(s, "http://", "https://", -1)
								s = strings.Replace(s, "/unicode/", "/", -1)
								attr = s + " "
							}
						}
					}
				case xml.EndElement:
					inElem = false
					fmt.Fprint(w, attr)
				}
			}
			fmt.Fprint(w, "\n")
		}
		for _, x := range rec.Xref {
			switch x.Type {
			case "rfc":
				fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data))
			case "uri":
				fmt.Fprintf(w, "// Reference: %s\n", x.Data)
			}
		}
		fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB)
		fmt.Fprintln(w)
	}
	fmt.Fprintln(w, ")")

	gen.WriteGoFile("mib.go", "identifier", w.Bytes())
}
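The generator above emits its output through a fixed set of Fprintf formats, so the generated mib.go always has the same shape. The block below is only an illustrative sketch of that shape and is not part of the vendored source; the constant name, doc comment, and MIBenum value shown here are examples, and the real entries are read from the IANA registry at generation time.

const (
	// UTF8 is the MIB identifier with IANA name UTF-8.
	//
	// Reference: RFC3629
	UTF8 MIB = 106
)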
@@ -0,0 +1,133 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

import (
	"flag"
	"log"

	"golang.org/x/text/internal/gen"
	"golang.org/x/text/internal/triegen"
	"golang.org/x/text/internal/ucd"
)

var outputFile = flag.String("out", "tables.go", "output file")

func main() {
	gen.Init()
	gen.Repackage("gen_trieval.go", "trieval.go", "bidi")
	gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi")

	genTables()
}

// bidiClass names and codes taken from class "bc" in
// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt
var bidiClass = map[string]Class{
	"AL":  AL,  // ArabicLetter
	"AN":  AN,  // ArabicNumber
	"B":   B,   // ParagraphSeparator
	"BN":  BN,  // BoundaryNeutral
	"CS":  CS,  // CommonSeparator
	"EN":  EN,  // EuropeanNumber
	"ES":  ES,  // EuropeanSeparator
	"ET":  ET,  // EuropeanTerminator
	"L":   L,   // LeftToRight
	"NSM": NSM, // NonspacingMark
	"ON":  ON,  // OtherNeutral
	"R":   R,   // RightToLeft
	"S":   S,   // SegmentSeparator
	"WS":  WS,  // WhiteSpace

	"FSI": Control,
	"PDF": Control,
	"PDI": Control,
	"LRE": Control,
	"LRI": Control,
	"LRO": Control,
	"RLE": Control,
	"RLI": Control,
	"RLO": Control,
}

func genTables() {
	if numClass > 0x0F {
		log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass)
	}
	w := gen.NewCodeWriter()
	defer w.WriteVersionedGoFile(*outputFile, "bidi")

	gen.WriteUnicodeVersion(w)

	t := triegen.NewTrie("bidi")

	// Build data about bracket mapping. These bits need to be or-ed with
	// any other bits.
	orMask := map[rune]uint64{}

	xorMap := map[rune]int{}
	xorMasks := []rune{0} // First value is no-op.

	ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) {
		r1 := p.Rune(0)
		r2 := p.Rune(1)
		xor := r1 ^ r2
		if _, ok := xorMap[xor]; !ok {
			xorMap[xor] = len(xorMasks)
			xorMasks = append(xorMasks, xor)
		}
		entry := uint64(xorMap[xor]) << xorMaskShift
		switch p.String(2) {
		case "o":
			entry |= openMask
		case "c", "n":
		default:
			log.Fatalf("Unknown bracket class %q.", p.String(2))
		}
		orMask[r1] = entry
	})

	w.WriteComment(`
	xorMasks contains masks to be xor-ed with brackets to get the reverse
	version.`)
	w.WriteVar("xorMasks", xorMasks)

	done := map[rune]bool{}

	insert := func(r rune, c Class) {
		if !done[r] {
			t.Insert(r, orMask[r]|uint64(c))
			done[r] = true
		}
	}

	// Insert the derived BiDi properties.
	ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) {
		r := p.Rune(0)
		class, ok := bidiClass[p.String(1)]
		if !ok {
			log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1))
		}
		insert(r, class)
	})
	visitDefaults(insert)

	// TODO: use sparse blocks. This would reduce table size considerably
	// from the looks of it.

	sz, err := t.Gen(w)
	if err != nil {
		log.Fatal(err)
	}
	w.Size += sz
}

// dummy values to make methods in gen_common compile. The real versions
// will be generated by this file to tables.go.
var (
	xorMasks []rune
)
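A note on the BidiBrackets pass above: paired brackets in Unicode differ only in a few low bits, so storing r1 ^ r2 once per pair is enough to map either bracket to its counterpart with a single XOR against the stored mask. The following stand-alone sketch (not part of the vendored file) illustrates the idea with the ASCII parentheses.

package main

import "fmt"

func main() {
	// '(' (U+0028) and ')' (U+0029) differ only in the lowest bit, so the
	// mask recorded for this pair is 0x0001; XOR-ing either bracket with
	// the mask yields its counterpart.
	opening, closing := '(', ')'
	mask := opening ^ closing
	fmt.Printf("mask=%#x, counterpart of %q is %q\n", mask, opening, opening^mask)
}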
@@ -0,0 +1,57 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

import (
	"unicode"

	"golang.org/x/text/internal/gen"
	"golang.org/x/text/internal/ucd"
	"golang.org/x/text/unicode/rangetable"
)

// These tables are hand-extracted from:
// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt
func visitDefaults(fn func(r rune, c Class)) {
	// first write default values for ranges listed above.
	visitRunes(fn, AL, []rune{
		0x0600, 0x07BF, // Arabic
		0x08A0, 0x08FF, // Arabic Extended-A
		0xFB50, 0xFDCF, // Arabic Presentation Forms
		0xFDF0, 0xFDFF,
		0xFE70, 0xFEFF,
		0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols
	})
	visitRunes(fn, R, []rune{
		0x0590, 0x05FF, // Hebrew
		0x07C0, 0x089F, // Nko et al.
		0xFB1D, 0xFB4F,
		0x00010800, 0x00010FFF, // Cypriot Syllabary et. al.
		0x0001E800, 0x0001EDFF,
		0x0001EF00, 0x0001EFFF,
	})
	visitRunes(fn, ET, []rune{ // European Terminator
		0x20A0, 0x20Cf, // Currency symbols
	})
	rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) {
		fn(r, BN) // Boundary Neutral
	})
	ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) {
		if p.String(1) == "Default_Ignorable_Code_Point" {
			fn(p.Rune(0), BN) // Boundary Neutral
		}
	})
}

func visitRunes(fn func(r rune, c Class), c Class, runes []rune) {
	for i := 0; i < len(runes); i += 2 {
		lo, hi := runes[i], runes[i+1]
		for j := lo; j <= hi; j++ {
			fn(j, c)
		}
	}
}
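visitRunes above reads its slice as flattened {lo, hi} pairs and invokes the callback once for every rune in each inclusive range. The small self-contained sketch below (not part of the vendored file; the countRange helper is hypothetical) shows the same expansion.

package main

import "fmt"

// countRange mirrors the pair-wise expansion used by visitRunes: the slice
// is read as {lo1, hi1, lo2, hi2, ...} and every rune in each inclusive
// range is visited once.
func countRange(runes []rune) int {
	n := 0
	for i := 0; i < len(runes); i += 2 {
		for r := runes[i]; r <= runes[i+1]; r++ {
			n++
		}
	}
	return n
}

func main() {
	// 0x0600-0x0604 (5 runes) and 0x20A0-0x20A2 (3 runes) -> 8 visits.
	fmt.Println(countRange([]rune{0x0600, 0x0604, 0x20A0, 0x20A2}))
}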
@@ -0,0 +1,64 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

// Class is the Unicode BiDi class. Each rune has a single class.
type Class uint

const (
	L       Class = iota // LeftToRight
	R                    // RightToLeft
	EN                   // EuropeanNumber
	ES                   // EuropeanSeparator
	ET                   // EuropeanTerminator
	AN                   // ArabicNumber
	CS                   // CommonSeparator
	B                    // ParagraphSeparator
	S                    // SegmentSeparator
	WS                   // WhiteSpace
	ON                   // OtherNeutral
	BN                   // BoundaryNeutral
	NSM                  // NonspacingMark
	AL                   // ArabicLetter
	Control              // Control LRO - PDI

	numClass

	LRO // LeftToRightOverride
	RLO // RightToLeftOverride
	LRE // LeftToRightEmbedding
	RLE // RightToLeftEmbedding
	PDF // PopDirectionalFormat
	LRI // LeftToRightIsolate
	RLI // RightToLeftIsolate
	FSI // FirstStrongIsolate
	PDI // PopDirectionalIsolate

	unknownClass = ^Class(0)
)

var controlToClass = map[rune]Class{
	0x202D: LRO, // LeftToRightOverride,
	0x202E: RLO, // RightToLeftOverride,
	0x202A: LRE, // LeftToRightEmbedding,
	0x202B: RLE, // RightToLeftEmbedding,
	0x202C: PDF, // PopDirectionalFormat,
	0x2066: LRI, // LeftToRightIsolate,
	0x2067: RLI, // RightToLeftIsolate,
	0x2068: FSI, // FirstStrongIsolate,
	0x2069: PDI, // PopDirectionalIsolate,
}

// A trie entry has the following bits:
// 7..5  XOR mask for brackets
// 4     1: Bracket open, 0: Bracket close
// 3..0  Class type

const (
	openMask     = 0x10
	xorMaskShift = 5
)
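gen_trieval.go above fixes the bit layout of a bidi trie entry: the class in bits 3..0, a bracket-open flag in bit 4 (openMask), and an index into xorMasks in bits 7..5 (xorMaskShift). The stand-alone sketch below is not part of the vendored source (the pack helper is hypothetical) and only shows how such an entry would be packed and inspected with those constants.

package main

import "fmt"

const (
	openMask     = 0x10
	xorMaskShift = 5
)

// pack builds a trie entry from a class (bits 3..0), a bracket-open flag
// (bit 4) and an index into xorMasks (bits 7..5), mirroring the layout
// documented in gen_trieval.go.
func pack(class uint8, open bool, xorIndex uint8) uint8 {
	e := class & 0x0F
	if open {
		e |= openMask
	}
	return e | xorIndex<<xorMaskShift
}

func main() {
	e := pack(0x0A /* ON */, true, 1)
	fmt.Printf("entry=%#02x class=%#x open=%v xorIndex=%d\n",
		e, e&0x0F, e&openMask != 0, e>>xorMaskShift)
}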
@ -0,0 +1,986 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// Normalization table generator.
|
||||
// Data read from the web.
|
||||
// See forminfo.go for a description of the trie values associated with each rune.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/internal/gen"
|
||||
"golang.org/x/text/internal/triegen"
|
||||
"golang.org/x/text/internal/ucd"
|
||||
)
|
||||
|
||||
func main() {
|
||||
gen.Init()
|
||||
loadUnicodeData()
|
||||
compactCCC()
|
||||
loadCompositionExclusions()
|
||||
completeCharFields(FCanonical)
|
||||
completeCharFields(FCompatibility)
|
||||
computeNonStarterCounts()
|
||||
verifyComputed()
|
||||
printChars()
|
||||
testDerived()
|
||||
printTestdata()
|
||||
makeTables()
|
||||
}
|
||||
|
||||
var (
|
||||
tablelist = flag.String("tables",
|
||||
"all",
|
||||
"comma-separated list of which tables to generate; "+
|
||||
"can be 'decomp', 'recomp', 'info' and 'all'")
|
||||
test = flag.Bool("test",
|
||||
false,
|
||||
"test existing tables against DerivedNormalizationProps and generate test data for regression testing")
|
||||
verbose = flag.Bool("verbose",
|
||||
false,
|
||||
"write data to stdout as it is parsed")
|
||||
)
|
||||
|
||||
const MaxChar = 0x10FFFF // anything above this shouldn't exist
|
||||
|
||||
// Quick Check properties of runes allow us to quickly
|
||||
// determine whether a rune may occur in a normal form.
|
||||
// For a given normal form, a rune may be guaranteed to occur
|
||||
// verbatim (QC=Yes), may or may not combine with another
|
||||
// rune (QC=Maybe), or may not occur (QC=No).
|
||||
type QCResult int
|
||||
|
||||
const (
|
||||
QCUnknown QCResult = iota
|
||||
QCYes
|
||||
QCNo
|
||||
QCMaybe
|
||||
)
|
||||
|
||||
func (r QCResult) String() string {
|
||||
switch r {
|
||||
case QCYes:
|
||||
return "Yes"
|
||||
case QCNo:
|
||||
return "No"
|
||||
case QCMaybe:
|
||||
return "Maybe"
|
||||
}
|
||||
return "***UNKNOWN***"
|
||||
}
|
||||
|
||||
const (
|
||||
FCanonical = iota // NFC or NFD
|
||||
FCompatibility // NFKC or NFKD
|
||||
FNumberOfFormTypes
|
||||
)
|
||||
|
||||
const (
|
||||
MComposed = iota // NFC or NFKC
|
||||
MDecomposed // NFD or NFKD
|
||||
MNumberOfModes
|
||||
)
|
||||
|
||||
// This contains only the properties we're interested in.
|
||||
type Char struct {
|
||||
name string
|
||||
codePoint rune // if zero, this index is not a valid code point.
|
||||
ccc uint8 // canonical combining class
|
||||
origCCC uint8
|
||||
excludeInComp bool // from CompositionExclusions.txt
|
||||
compatDecomp bool // it has a compatibility expansion
|
||||
|
||||
nTrailingNonStarters uint8
|
||||
nLeadingNonStarters uint8 // must be equal to trailing if non-zero
|
||||
|
||||
forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility
|
||||
|
||||
state State
|
||||
}
|
||||
|
||||
var chars = make([]Char, MaxChar+1)
|
||||
var cccMap = make(map[uint8]uint8)
|
||||
|
||||
func (c Char) String() string {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name)
|
||||
fmt.Fprintf(buf, " ccc: %v\n", c.ccc)
|
||||
fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp)
|
||||
fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp)
|
||||
fmt.Fprintf(buf, " state: %v\n", c.state)
|
||||
fmt.Fprintf(buf, " NFC:\n")
|
||||
fmt.Fprint(buf, c.forms[FCanonical])
|
||||
fmt.Fprintf(buf, " NFKC:\n")
|
||||
fmt.Fprint(buf, c.forms[FCompatibility])
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// In UnicodeData.txt, some ranges are marked like this:
|
||||
// 3400;<CJK Ideograph Extension A, First>;Lo;0;L;;;;;N;;;;;
|
||||
// 4DB5;<CJK Ideograph Extension A, Last>;Lo;0;L;;;;;N;;;;;
|
||||
// parseCharacter keeps a state variable indicating the weirdness.
|
||||
type State int
|
||||
|
||||
const (
|
||||
SNormal State = iota // known to be zero for the type
|
||||
SFirst
|
||||
SLast
|
||||
SMissing
|
||||
)
|
||||
|
||||
var lastChar = rune('\u0000')
|
||||
|
||||
func (c Char) isValid() bool {
|
||||
return c.codePoint != 0 && c.state != SMissing
|
||||
}
|
||||
|
||||
type FormInfo struct {
|
||||
quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed
|
||||
verified [MNumberOfModes]bool // index: MComposed or MDecomposed
|
||||
|
||||
combinesForward bool // May combine with rune on the right
|
||||
combinesBackward bool // May combine with rune on the left
|
||||
isOneWay bool // Never appears in result
|
||||
inDecomp bool // Some decompositions result in this char.
|
||||
decomp Decomposition
|
||||
expandedDecomp Decomposition
|
||||
}
|
||||
|
||||
func (f FormInfo) String() string {
|
||||
buf := bytes.NewBuffer(make([]byte, 0))
|
||||
|
||||
fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed])
|
||||
fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed])
|
||||
fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward)
|
||||
fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward)
|
||||
fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay)
|
||||
fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp)
|
||||
fmt.Fprintf(buf, " decomposition: %X\n", f.decomp)
|
||||
fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp)
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
type Decomposition []rune
|
||||
|
||||
func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
|
||||
decomp := strings.Split(s, " ")
|
||||
if len(decomp) > 0 && skipfirst {
|
||||
decomp = decomp[1:]
|
||||
}
|
||||
for _, d := range decomp {
|
||||
point, err := strconv.ParseUint(d, 16, 64)
|
||||
if err != nil {
|
||||
return a, err
|
||||
}
|
||||
a = append(a, rune(point))
|
||||
}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func loadUnicodeData() {
|
||||
f := gen.OpenUCDFile("UnicodeData.txt")
|
||||
defer f.Close()
|
||||
p := ucd.New(f)
|
||||
for p.Next() {
|
||||
r := p.Rune(ucd.CodePoint)
|
||||
char := &chars[r]
|
||||
|
||||
char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass))
|
||||
decmap := p.String(ucd.DecompMapping)
|
||||
|
||||
exp, err := parseDecomposition(decmap, false)
|
||||
isCompat := false
|
||||
if err != nil {
|
||||
if len(decmap) > 0 {
|
||||
exp, err = parseDecomposition(decmap, true)
|
||||
if err != nil {
|
||||
log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
|
||||
}
|
||||
isCompat = true
|
||||
}
|
||||
}
|
||||
|
||||
char.name = p.String(ucd.Name)
|
||||
char.codePoint = r
|
||||
char.forms[FCompatibility].decomp = exp
|
||||
if !isCompat {
|
||||
char.forms[FCanonical].decomp = exp
|
||||
} else {
|
||||
char.compatDecomp = true
|
||||
}
|
||||
if len(decmap) > 0 {
|
||||
char.forms[FCompatibility].decomp = exp
|
||||
}
|
||||
}
|
||||
if err := p.Err(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// compactCCC converts the sparse set of CCC values to a continguous one,
|
||||
// reducing the number of bits needed from 8 to 6.
|
||||
func compactCCC() {
|
||||
m := make(map[uint8]uint8)
|
||||
for i := range chars {
|
||||
c := &chars[i]
|
||||
m[c.ccc] = 0
|
||||
}
|
||||
cccs := []int{}
|
||||
for v, _ := range m {
|
||||
cccs = append(cccs, int(v))
|
||||
}
|
||||
sort.Ints(cccs)
|
||||
for i, c := range cccs {
|
||||
cccMap[uint8(i)] = uint8(c)
|
||||
m[uint8(c)] = uint8(i)
|
||||
}
|
||||
for i := range chars {
|
||||
c := &chars[i]
|
||||
c.origCCC = c.ccc
|
||||
c.ccc = m[c.ccc]
|
||||
}
|
||||
if len(m) >= 1<<6 {
|
||||
log.Fatalf("too many difference CCC values: %d >= 64", len(m))
|
||||
}
|
||||
}
|
||||
|
||||
// CompositionExclusions.txt has form:
|
||||
// 0958 # ...
|
||||
// See https://unicode.org/reports/tr44/ for full explanation
|
||||
func loadCompositionExclusions() {
|
||||
f := gen.OpenUCDFile("CompositionExclusions.txt")
|
||||
defer f.Close()
|
||||
p := ucd.New(f)
|
||||
for p.Next() {
|
||||
c := &chars[p.Rune(0)]
|
||||
if c.excludeInComp {
|
||||
log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
|
||||
}
|
||||
c.excludeInComp = true
|
||||
}
|
||||
if e := p.Err(); e != nil {
|
||||
log.Fatal(e)
|
||||
}
|
||||
}
|
||||
|
||||
// hasCompatDecomp returns true if any of the recursive
|
||||
// decompositions contains a compatibility expansion.
|
||||
// In this case, the character may not occur in NFK*.
|
||||
func hasCompatDecomp(r rune) bool {
|
||||
c := &chars[r]
|
||||
if c.compatDecomp {
|
||||
return true
|
||||
}
|
||||
for _, d := range c.forms[FCompatibility].decomp {
|
||||
if hasCompatDecomp(d) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Hangul related constants.
|
||||
const (
|
||||
HangulBase = 0xAC00
|
||||
HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28)
|
||||
|
||||
JamoLBase = 0x1100
|
||||
JamoLEnd = 0x1113
|
||||
JamoVBase = 0x1161
|
||||
JamoVEnd = 0x1176
|
||||
JamoTBase = 0x11A8
|
||||
JamoTEnd = 0x11C3
|
||||
|
||||
JamoLVTCount = 19 * 21 * 28
|
||||
JamoTCount = 28
|
||||
)
|
||||
|
||||
func isHangul(r rune) bool {
|
||||
return HangulBase <= r && r < HangulEnd
|
||||
}
|
||||
|
||||
func isHangulWithoutJamoT(r rune) bool {
|
||||
if !isHangul(r) {
|
||||
return false
|
||||
}
|
||||
r -= HangulBase
|
||||
return r < JamoLVTCount && r%JamoTCount == 0
|
||||
}
|
||||
|
||||
func ccc(r rune) uint8 {
|
||||
return chars[r].ccc
|
||||
}
|
||||
|
||||
// Insert a rune in a buffer, ordered by Canonical Combining Class.
|
||||
func insertOrdered(b Decomposition, r rune) Decomposition {
|
||||
n := len(b)
|
||||
b = append(b, 0)
|
||||
cc := ccc(r)
|
||||
if cc > 0 {
|
||||
// Use bubble sort.
|
||||
for ; n > 0; n-- {
|
||||
if ccc(b[n-1]) <= cc {
|
||||
break
|
||||
}
|
||||
b[n] = b[n-1]
|
||||
}
|
||||
}
|
||||
b[n] = r
|
||||
return b
|
||||
}
|
||||
|
||||
// Recursively decompose.
|
||||
func decomposeRecursive(form int, r rune, d Decomposition) Decomposition {
|
||||
dcomp := chars[r].forms[form].decomp
|
||||
if len(dcomp) == 0 {
|
||||
return insertOrdered(d, r)
|
||||
}
|
||||
for _, c := range dcomp {
|
||||
d = decomposeRecursive(form, c, d)
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func completeCharFields(form int) {
|
||||
// Phase 0: pre-expand decomposition.
|
||||
for i := range chars {
|
||||
f := &chars[i].forms[form]
|
||||
if len(f.decomp) == 0 {
|
||||
continue
|
||||
}
|
||||
exp := make(Decomposition, 0)
|
||||
for _, c := range f.decomp {
|
||||
exp = decomposeRecursive(form, c, exp)
|
||||
}
|
||||
f.expandedDecomp = exp
|
||||
}
|
||||
|
||||
// Phase 1: composition exclusion, mark decomposition.
|
||||
for i := range chars {
|
||||
c := &chars[i]
|
||||
f := &c.forms[form]
|
||||
|
||||
// Marks script-specific exclusions and version-restricted characters.
|
||||
f.isOneWay = c.excludeInComp
|
||||
|
||||
// Singletons
|
||||
f.isOneWay = f.isOneWay || len(f.decomp) == 1
|
||||
|
||||
// Non-starter decompositions
|
||||
if len(f.decomp) > 1 {
|
||||
chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0
|
||||
f.isOneWay = f.isOneWay || chk
|
||||
}
|
||||
|
||||
// Runes that decompose into more than two runes.
|
||||
f.isOneWay = f.isOneWay || len(f.decomp) > 2
|
||||
|
||||
if form == FCompatibility {
|
||||
f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint)
|
||||
}
|
||||
|
||||
for _, r := range f.decomp {
|
||||
chars[r].forms[form].inDecomp = true
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 2: forward and backward combining.
|
||||
for i := range chars {
|
||||
c := &chars[i]
|
||||
f := &c.forms[form]
|
||||
|
||||
if !f.isOneWay && len(f.decomp) == 2 {
|
||||
f0 := &chars[f.decomp[0]].forms[form]
|
||||
f1 := &chars[f.decomp[1]].forms[form]
|
||||
if !f0.isOneWay {
|
||||
f0.combinesForward = true
|
||||
}
|
||||
if !f1.isOneWay {
|
||||
f1.combinesBackward = true
|
||||
}
|
||||
}
|
||||
if isHangulWithoutJamoT(rune(i)) {
|
||||
f.combinesForward = true
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 3: quick check values.
|
||||
for i := range chars {
|
||||
c := &chars[i]
|
||||
f := &c.forms[form]
|
||||
|
||||
switch {
|
||||
case len(f.decomp) > 0:
|
||||
f.quickCheck[MDecomposed] = QCNo
|
||||
case isHangul(rune(i)):
|
||||
f.quickCheck[MDecomposed] = QCNo
|
||||
default:
|
||||
f.quickCheck[MDecomposed] = QCYes
|
||||
}
|
||||
switch {
|
||||
case f.isOneWay:
|
||||
f.quickCheck[MComposed] = QCNo
|
||||
case (i & 0xffff00) == JamoLBase:
|
||||
f.quickCheck[MComposed] = QCYes
|
||||
if JamoLBase <= i && i < JamoLEnd {
|
||||
f.combinesForward = true
|
||||
}
|
||||
if JamoVBase <= i && i < JamoVEnd {
|
||||
f.quickCheck[MComposed] = QCMaybe
|
||||
f.combinesBackward = true
|
||||
f.combinesForward = true
|
||||
}
|
||||
if JamoTBase <= i && i < JamoTEnd {
|
||||
f.quickCheck[MComposed] = QCMaybe
|
||||
f.combinesBackward = true
|
||||
}
|
||||
case !f.combinesBackward:
|
||||
f.quickCheck[MComposed] = QCYes
|
||||
default:
|
||||
f.quickCheck[MComposed] = QCMaybe
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func computeNonStarterCounts() {
|
||||
// Phase 4: leading and trailing non-starter count
|
||||
for i := range chars {
|
||||
c := &chars[i]
|
||||
|
||||
runes := []rune{rune(i)}
|
||||
// We always use FCompatibility so that the CGJ insertion points do not
|
||||
// change for repeated normalizations with different forms.
|
||||
if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 {
|
||||
runes = exp
|
||||
}
|
||||
// We consider runes that combine backwards to be non-starters for the
|
||||
// purpose of Stream-Safe Text Processing.
|
||||
for _, r := range runes {
|
||||
if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
|
||||
break
|
||||
}
|
||||
c.nLeadingNonStarters++
|
||||
}
|
||||
for i := len(runes) - 1; i >= 0; i-- {
|
||||
if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
|
||||
break
|
||||
}
|
||||
c.nTrailingNonStarters++
|
||||
}
|
||||
if c.nTrailingNonStarters > 3 {
|
||||
log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes)
|
||||
}
|
||||
|
||||
if isHangul(rune(i)) {
|
||||
c.nTrailingNonStarters = 2
|
||||
if isHangulWithoutJamoT(rune(i)) {
|
||||
c.nTrailingNonStarters = 1
|
||||
}
|
||||
}
|
||||
|
||||
if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t {
|
||||
log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t)
|
||||
}
|
||||
if t := c.nTrailingNonStarters; t > 3 {
|
||||
log.Fatalf("%U: number of trailing non-starters is %d > 3", i, t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func printBytes(w io.Writer, b []byte, name string) {
|
||||
fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b))
|
||||
fmt.Fprintf(w, "var %s = [...]byte {", name)
|
||||
for i, c := range b {
|
||||
switch {
|
||||
case i%64 == 0:
|
||||
fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63)
|
||||
case i%8 == 0:
|
||||
fmt.Fprintf(w, "\n")
|
||||
}
|
||||
fmt.Fprintf(w, "0x%.2X, ", c)
|
||||
}
|
||||
fmt.Fprint(w, "\n}\n\n")
|
||||
}
|
||||
|
||||
// See forminfo.go for format.
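// For reference, the packed layout used below is: bit 6 (0x40) Hangul,
// bit 5 (0x20) combinesForward, bits 3-4 the NFC quick check (0x00 Yes,
// 0x10 No, 0x18 Maybe), bit 2 (0x04) an NFD quick check of No, and the
// low two bits the trailing non-starter count; a plain starter that only
// combines forward therefore packs to 0x20.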
|
||||
func makeEntry(f *FormInfo, c *Char) uint16 {
|
||||
e := uint16(0)
|
||||
if r := c.codePoint; HangulBase <= r && r < HangulEnd {
|
||||
e |= 0x40
|
||||
}
|
||||
if f.combinesForward {
|
||||
e |= 0x20
|
||||
}
|
||||
if f.quickCheck[MDecomposed] == QCNo {
|
||||
e |= 0x4
|
||||
}
|
||||
switch f.quickCheck[MComposed] {
|
||||
case QCYes:
|
||||
case QCNo:
|
||||
e |= 0x10
|
||||
case QCMaybe:
|
||||
e |= 0x18
|
||||
default:
|
||||
log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed])
|
||||
}
|
||||
e |= uint16(c.nTrailingNonStarters)
|
||||
return e
|
||||
}
|
||||
|
||||
// decompSet keeps track of unique decompositions, grouped by whether
|
||||
// the decomposition is followed by a trailing and/or leading CCC.
|
||||
type decompSet [7]map[string]bool
|
||||
|
||||
const (
|
||||
normalDecomp = iota
|
||||
firstMulti
|
||||
firstCCC
|
||||
endMulti
|
||||
firstLeadingCCC
|
||||
firstCCCZeroExcept
|
||||
firstStarterWithNLead
|
||||
lastDecomp
|
||||
)
|
||||
|
||||
var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"}
|
||||
|
||||
func makeDecompSet() decompSet {
|
||||
m := decompSet{}
|
||||
for i := range m {
|
||||
m[i] = make(map[string]bool)
|
||||
}
|
||||
return m
|
||||
}
|
||||
func (m *decompSet) insert(key int, s string) {
|
||||
m[key][s] = true
|
||||
}
|
||||
|
||||
func printCharInfoTables(w io.Writer) int {
|
||||
mkstr := func(r rune, f *FormInfo) (int, string) {
|
||||
d := f.expandedDecomp
|
||||
s := string([]rune(d))
|
||||
if max := 1 << 6; len(s) >= max {
|
||||
const msg = "%U: too many bytes in decomposition: %d >= %d"
|
||||
log.Fatalf(msg, r, len(s), max)
|
||||
}
|
||||
head := uint8(len(s))
|
||||
if f.quickCheck[MComposed] != QCYes {
|
||||
head |= 0x40
|
||||
}
|
||||
if f.combinesForward {
|
||||
head |= 0x80
|
||||
}
|
||||
s = string([]byte{head}) + s
|
||||
|
||||
lccc := ccc(d[0])
|
||||
tccc := ccc(d[len(d)-1])
|
||||
cc := ccc(r)
|
||||
if cc != 0 && lccc == 0 && tccc == 0 {
|
||||
log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
|
||||
}
|
||||
if tccc < lccc && lccc != 0 {
|
||||
const msg = "%U: lccc (%d) must be <= tccc (%d)"
|
||||
log.Fatalf(msg, r, lccc, tccc)
|
||||
}
|
||||
index := normalDecomp
|
||||
nTrail := chars[r].nTrailingNonStarters
|
||||
nLead := chars[r].nLeadingNonStarters
|
||||
if tccc > 0 || lccc > 0 || nTrail > 0 {
|
||||
tccc <<= 2
|
||||
tccc |= nTrail
|
||||
s += string([]byte{tccc})
|
||||
index = endMulti
|
||||
for _, r := range d[1:] {
|
||||
if ccc(r) == 0 {
|
||||
index = firstCCC
|
||||
}
|
||||
}
|
||||
if lccc > 0 || nLead > 0 {
|
||||
s += string([]byte{lccc})
|
||||
if index == firstCCC {
|
||||
log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
|
||||
}
|
||||
index = firstLeadingCCC
|
||||
}
|
||||
if cc != lccc {
|
||||
if cc != 0 {
|
||||
log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
|
||||
}
|
||||
index = firstCCCZeroExcept
|
||||
}
|
||||
} else if len(d) > 1 {
|
||||
index = firstMulti
|
||||
}
|
||||
return index, s
|
||||
}
|
||||
|
||||
decompSet := makeDecompSet()
|
||||
const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail.
|
||||
decompSet.insert(firstStarterWithNLead, nLeadStr)
|
||||
|
||||
// Store the uniqued decompositions in a byte buffer,
|
||||
// preceded by their byte length.
|
||||
for _, c := range chars {
|
||||
for _, f := range c.forms {
|
||||
if len(f.expandedDecomp) == 0 {
|
||||
continue
|
||||
}
|
||||
if f.combinesBackward {
|
||||
log.Fatalf("%U: combinesBackward and decompose", c.codePoint)
|
||||
}
|
||||
index, s := mkstr(c.codePoint, &f)
|
||||
decompSet.insert(index, s)
|
||||
}
|
||||
}
|
||||
|
||||
decompositions := bytes.NewBuffer(make([]byte, 0, 10000))
|
||||
size := 0
|
||||
positionMap := make(map[string]uint16)
|
||||
decompositions.WriteString("\000")
|
||||
fmt.Fprintln(w, "const (")
|
||||
for i, m := range decompSet {
|
||||
sa := []string{}
|
||||
for s := range m {
|
||||
sa = append(sa, s)
|
||||
}
|
||||
sort.Strings(sa)
|
||||
for _, s := range sa {
|
||||
p := decompositions.Len()
|
||||
decompositions.WriteString(s)
|
||||
positionMap[s] = uint16(p)
|
||||
}
|
||||
if cname[i] != "" {
|
||||
fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len())
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(w, "maxDecomp = 0x8000")
|
||||
fmt.Fprintln(w, ")")
|
||||
b := decompositions.Bytes()
|
||||
printBytes(w, b, "decomps")
|
||||
size += len(b)
|
||||
|
||||
varnames := []string{"nfc", "nfkc"}
|
||||
for i := 0; i < FNumberOfFormTypes; i++ {
|
||||
trie := triegen.NewTrie(varnames[i])
|
||||
|
||||
for r, c := range chars {
|
||||
f := c.forms[i]
|
||||
d := f.expandedDecomp
|
||||
if len(d) != 0 {
|
||||
_, key := mkstr(c.codePoint, &f)
|
||||
trie.Insert(rune(r), uint64(positionMap[key]))
|
||||
if c.ccc != ccc(d[0]) {
|
||||
// We assume the leading ccc of a decomposition is != 0 in this case.
|
||||
if ccc(d[0]) == 0 {
|
||||
log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
|
||||
}
|
||||
}
|
||||
} else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward {
|
||||
// Handle cases where it can't be detected that the nLead should be equal
|
||||
// to nTrail.
|
||||
trie.Insert(c.codePoint, uint64(positionMap[nLeadStr]))
|
||||
} else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 {
|
||||
trie.Insert(c.codePoint, uint64(0x8000|v))
|
||||
}
|
||||
}
|
||||
sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]}))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
size += sz
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
func contains(sa []string, s string) bool {
|
||||
for _, a := range sa {
|
||||
if a == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func makeTables() {
|
||||
w := &bytes.Buffer{}
|
||||
|
||||
size := 0
|
||||
if *tablelist == "" {
|
||||
return
|
||||
}
|
||||
list := strings.Split(*tablelist, ",")
|
||||
if *tablelist == "all" {
|
||||
list = []string{"recomp", "info"}
|
||||
}
|
||||
|
||||
// Compute maximum decomposition size.
|
||||
max := 0
|
||||
for _, c := range chars {
|
||||
if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max {
|
||||
max = n
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(w, `import "sync"`)
|
||||
fmt.Fprintln(w)
|
||||
|
||||
fmt.Fprintln(w, "const (")
|
||||
fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.")
|
||||
fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion())
|
||||
fmt.Fprintln(w)
|
||||
fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
|
||||
fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at")
|
||||
fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that")
|
||||
fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.")
|
||||
fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
|
||||
fmt.Fprint(w, ")\n\n")
|
||||
|
||||
// Print the CCC remap table.
|
||||
size += len(cccMap)
|
||||
fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap))
|
||||
for i := 0; i < len(cccMap); i++ {
|
||||
if i%8 == 0 {
|
||||
fmt.Fprintln(w)
|
||||
}
|
||||
fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)])
|
||||
}
|
||||
fmt.Fprint(w, "\n}\n\n")
|
||||
|
||||
if contains(list, "info") {
|
||||
size += printCharInfoTables(w)
|
||||
}
|
||||
|
||||
if contains(list, "recomp") {
|
||||
// Note that we use 32-bit keys, instead of 64-bit.
// This clips the bits of three entries, but we know
// this won't cause a collision. The compiler will catch
// any changes made to UnicodeData.txt that introduce
// a collision.
// Note that the recomposition maps for NFC and NFKC
// are identical.
|
||||
|
||||
// Recomposition map
|
||||
nrentries := 0
|
||||
for _, c := range chars {
|
||||
f := c.forms[FCanonical]
|
||||
if !f.isOneWay && len(f.decomp) > 0 {
|
||||
nrentries++
|
||||
}
|
||||
}
|
||||
sz := nrentries * 8
|
||||
size += sz
|
||||
fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz)
|
||||
fmt.Fprintln(w, "var recompMap map[uint32]rune")
|
||||
fmt.Fprint(w, "var recompMapOnce sync.Once\n\n")
|
||||
fmt.Fprintln(w, `const recompMapPacked = "" +`)
|
||||
var buf [8]byte
|
||||
for i, c := range chars {
|
||||
f := c.forms[FCanonical]
|
||||
d := f.decomp
|
||||
if !f.isOneWay && len(d) > 0 {
|
||||
key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1]))
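// e.g. 'A' (U+0041) + COMBINING GRAVE ACCENT (U+0300) recompose to
// U+00C0, stored under key 0x00410300.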
|
||||
binary.BigEndian.PutUint32(buf[:4], key)
|
||||
binary.BigEndian.PutUint32(buf[4:], uint32(i))
|
||||
fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i))
|
||||
}
|
||||
}
|
||||
// hack so we don't have to special case the trailing plus sign
|
||||
fmt.Fprintf(w, ` ""`)
|
||||
fmt.Fprintln(w)
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
|
||||
gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes())
|
||||
}
|
||||
|
||||
func printChars() {
|
||||
if *verbose {
|
||||
for _, c := range chars {
|
||||
if !c.isValid() || c.state == SMissing {
|
||||
continue
|
||||
}
|
||||
fmt.Println(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// verifyComputed does various consistency tests.
|
||||
func verifyComputed() {
|
||||
for i, c := range chars {
|
||||
for _, f := range c.forms {
|
||||
isNo := (f.quickCheck[MDecomposed] == QCNo)
|
||||
if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) {
|
||||
log.Fatalf("%U: NF*D QC must be No if rune decomposes", i)
|
||||
}
|
||||
|
||||
isMaybe := f.quickCheck[MComposed] == QCMaybe
|
||||
if f.combinesBackward != isMaybe {
|
||||
log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i)
|
||||
}
|
||||
if len(f.decomp) > 0 && f.combinesForward && isMaybe {
|
||||
log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i)
|
||||
}
|
||||
|
||||
if len(f.expandedDecomp) != 0 {
|
||||
continue
|
||||
}
|
||||
if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b {
|
||||
// We accept these runes to be treated differently (it only affects
|
||||
// segment breaking in iteration, most likely on improper use), but
|
||||
// reconsider if more characters are added.
|
||||
// U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L;<narrow> 3099;;;;N;;;;;
|
||||
// U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L;<narrow> 309A;;;;N;;;;;
|
||||
// U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;;
|
||||
// U+318E HANGUL LETTER ARAEAE;Lo;0;L;<compat> 11A1;;;;N;HANGUL LETTER ALAE AE;;;;
|
||||
// U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<narrow> 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;;
|
||||
// U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L;<narrow> 3163;;;;N;;;;;
|
||||
if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) {
|
||||
log.Fatalf("%U: nLead was %v; want %v", i, a, b)
|
||||
}
|
||||
}
|
||||
}
|
||||
nfc := c.forms[FCanonical]
|
||||
nfkc := c.forms[FCompatibility]
|
||||
if nfc.combinesBackward != nfkc.combinesBackward {
|
||||
log.Fatalf("%U: combinesBackward differs between NFC and NFKC", c.codePoint)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Use values in DerivedNormalizationProps.txt to compare against the
|
||||
// values we computed.
|
||||
// DerivedNormalizationProps.txt has form:
|
||||
// 00C0..00C5 ; NFD_QC; N # ...
|
||||
// 0374 ; NFD_QC; N # ...
|
||||
// See https://unicode.org/reports/tr44/ for full explanation
|
||||
func testDerived() {
|
||||
f := gen.OpenUCDFile("DerivedNormalizationProps.txt")
|
||||
defer f.Close()
|
||||
p := ucd.New(f)
|
||||
for p.Next() {
|
||||
r := p.Rune(0)
|
||||
c := &chars[r]
|
||||
|
||||
var ftype, mode int
|
||||
qt := p.String(1)
|
||||
switch qt {
|
||||
case "NFC_QC":
|
||||
ftype, mode = FCanonical, MComposed
|
||||
case "NFD_QC":
|
||||
ftype, mode = FCanonical, MDecomposed
|
||||
case "NFKC_QC":
|
||||
ftype, mode = FCompatibility, MComposed
|
||||
case "NFKD_QC":
|
||||
ftype, mode = FCompatibility, MDecomposed
|
||||
default:
|
||||
continue
|
||||
}
|
||||
var qr QCResult
|
||||
switch p.String(2) {
|
||||
case "Y":
|
||||
qr = QCYes
|
||||
case "N":
|
||||
qr = QCNo
|
||||
case "M":
|
||||
qr = QCMaybe
|
||||
default:
|
||||
log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
|
||||
}
|
||||
if got := c.forms[ftype].quickCheck[mode]; got != qr {
|
||||
log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
|
||||
}
|
||||
c.forms[ftype].verified[mode] = true
|
||||
}
|
||||
if err := p.Err(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// Any unspecified value must be QCYes. Verify this.
|
||||
for i, c := range chars {
|
||||
for j, fd := range c.forms {
|
||||
for k, qr := range fd.quickCheck {
|
||||
if !fd.verified[k] && qr != QCYes {
|
||||
m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n"
|
||||
log.Printf(m, i, j, k, qr, c.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var testHeader = `const (
|
||||
Yes = iota
|
||||
No
|
||||
Maybe
|
||||
)
|
||||
|
||||
type formData struct {
|
||||
qc uint8
|
||||
combinesForward bool
|
||||
decomposition string
|
||||
}
|
||||
|
||||
type runeData struct {
|
||||
r rune
|
||||
ccc uint8
|
||||
nLead uint8
|
||||
nTrail uint8
|
||||
f [2]formData // 0: canonical; 1: compatibility
|
||||
}
|
||||
|
||||
func f(qc uint8, cf bool, dec string) [2]formData {
|
||||
return [2]formData{{qc, cf, dec}, {qc, cf, dec}}
|
||||
}
|
||||
|
||||
func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData {
|
||||
return [2]formData{{qc, cf, d}, {qck, cfk, dk}}
|
||||
}
|
||||
|
||||
var testData = []runeData{
|
||||
`
|
||||
|
||||
func printTestdata() {
|
||||
type lastInfo struct {
|
||||
ccc uint8
|
||||
nLead uint8
|
||||
nTrail uint8
|
||||
f string
|
||||
}
|
||||
|
||||
last := lastInfo{}
|
||||
w := &bytes.Buffer{}
|
||||
fmt.Fprint(w, testHeader)
|
||||
for r, c := range chars {
|
||||
f := c.forms[FCanonical]
|
||||
qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
|
||||
f = c.forms[FCompatibility]
|
||||
qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
|
||||
s := ""
|
||||
if d == dk && qc == qck && cf == cfk {
|
||||
s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d)
|
||||
} else {
|
||||
s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk)
|
||||
}
|
||||
current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s}
|
||||
if last != current {
|
||||
fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
|
||||
last = current
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(w, "}")
|
||||
gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes())
|
||||
}
@ -0,0 +1,117 @@
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// Trie table generator.
|
||||
// Used by make*tables tools to generate a go file with trie data structures
|
||||
// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte
|
||||
// sequence are used to lookup offsets in the index table to be used for the
|
||||
// next byte. The last byte is used to index into a table with 16-bit values.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
const maxSparseEntries = 16
|
||||
|
||||
type normCompacter struct {
|
||||
sparseBlocks [][]uint64
|
||||
sparseOffset []uint16
|
||||
sparseCount int
|
||||
name string
|
||||
}
|
||||
|
||||
func mostFrequentStride(a []uint64) int {
|
||||
counts := make(map[int]int)
|
||||
var v int
|
||||
for _, x := range a {
|
||||
if stride := int(x) - v; v != 0 && stride >= 0 {
|
||||
counts[stride]++
|
||||
}
|
||||
v = int(x)
|
||||
}
|
||||
var maxs, maxc int
|
||||
for stride, cnt := range counts {
|
||||
if cnt > maxc || (cnt == maxc && stride < maxs) {
|
||||
maxs, maxc = stride, cnt
|
||||
}
|
||||
}
|
||||
return maxs
|
||||
}
|
||||
|
||||
func countSparseEntries(a []uint64) int {
|
||||
stride := mostFrequentStride(a)
|
||||
var v, count int
|
||||
for _, tv := range a {
|
||||
if int(tv)-v != stride {
|
||||
if tv != 0 {
|
||||
count++
|
||||
}
|
||||
}
|
||||
v = int(tv)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func (c *normCompacter) Size(v []uint64) (sz int, ok bool) {
|
||||
if n := countSparseEntries(v); n <= maxSparseEntries {
|
||||
return (n+1)*4 + 2, true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func (c *normCompacter) Store(v []uint64) uint32 {
|
||||
h := uint32(len(c.sparseOffset))
|
||||
c.sparseBlocks = append(c.sparseBlocks, v)
|
||||
c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount))
|
||||
c.sparseCount += countSparseEntries(v) + 1
|
||||
return h
|
||||
}
|
||||
|
||||
func (c *normCompacter) Handler() string {
|
||||
return c.name + "Sparse.lookup"
|
||||
}
|
||||
|
||||
func (c *normCompacter) Print(w io.Writer) (retErr error) {
|
||||
p := func(f string, x ...interface{}) {
|
||||
if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil {
|
||||
retErr = err
|
||||
}
|
||||
}
|
||||
|
||||
ls := len(c.sparseBlocks)
|
||||
p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2)
|
||||
p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset)
|
||||
|
||||
ns := c.sparseCount
|
||||
p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4)
|
||||
p("var %sSparseValues = [%d]valueRange {", c.name, ns)
|
||||
for i, b := range c.sparseBlocks {
|
||||
p("\n// Block %#x, offset %#x", i, c.sparseOffset[i])
|
||||
var v int
|
||||
stride := mostFrequentStride(b)
|
||||
n := countSparseEntries(b)
|
||||
p("\n{value:%#04x,lo:%#02x},", stride, uint8(n))
|
||||
for i, nv := range b {
|
||||
if int(nv)-v != stride {
|
||||
if v != 0 {
|
||||
p(",hi:%#02x},", 0x80+i-1)
|
||||
}
|
||||
if nv != 0 {
|
||||
p("\n{value:%#04x,lo:%#02x", nv, 0x80+i)
|
||||
}
|
||||
}
|
||||
v = int(nv)
|
||||
}
|
||||
if v != 0 {
|
||||
p(",hi:%#02x},", 0x80+len(b)-1)
|
||||
}
|
||||
}
|
||||
p("\n}\n\n")
|
||||
return
|
||||
}
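The compacter above only lays out the sparse blocks; the walk itself is described in the header comment. The following is a rough sketch of that scheme under simplifying assumptions: the names lookupSketch, index and values are hypothetical, and the generated tables additionally special-case ASCII and the first byte of a sequence.

func lookupSketch(index, values []uint16, s []byte) uint16 {
var block uint16
// Each leading byte selects the next 64-entry block via the index table.
for _, b := range s[:len(s)-1] {
block = index[int(block)*64+int(b&0x3f)]
}
// The final byte picks the 16-bit value out of the block reached.
return values[int(block)*64+int(s[len(s)-1]&0x3f)]
}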
@ -1,29 +1,33 @@
# cloud.google.com/go v0.39.0
|
||||
cloud.google.com/go/monitoring/apiv3
|
||||
cloud.google.com/go/storage
|
||||
cloud.google.com/go/spanner
|
||||
cloud.google.com/go/kms/apiv1
|
||||
cloud.google.com/go/civil
|
||||
cloud.google.com/go/compute/metadata
|
||||
cloud.google.com/go/iam
|
||||
cloud.google.com/go/internal
|
||||
cloud.google.com/go/internal/fields
|
||||
cloud.google.com/go/internal/optional
|
||||
cloud.google.com/go/internal/protostruct
|
||||
cloud.google.com/go/internal/trace
|
||||
cloud.google.com/go/internal/version
|
||||
cloud.google.com/go/kms/apiv1
|
||||
cloud.google.com/go/monitoring/apiv3
|
||||
cloud.google.com/go/spanner
|
||||
cloud.google.com/go/internal/fields
|
||||
cloud.google.com/go/internal/protostruct
|
||||
cloud.google.com/go/spanner/internal/backoff
|
||||
cloud.google.com/go/storage
|
||||
# code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f
|
||||
code.cloudfoundry.org/gofileutils/fileutils
|
||||
# github.com/Azure/azure-sdk-for-go v36.2.0+incompatible
|
||||
github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute
|
||||
github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac
|
||||
github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault
|
||||
github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization
|
||||
# contrib.go.opencensus.io/exporter/ocagent v0.4.12
|
||||
contrib.go.opencensus.io/exporter/ocagent
|
||||
# github.com/Azure/azure-sdk-for-go v29.0.0+incompatible
|
||||
github.com/Azure/azure-sdk-for-go/storage
|
||||
github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault
|
||||
github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute
|
||||
github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac
|
||||
github.com/Azure/azure-sdk-for-go/version
|
||||
# github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
|
||||
github.com/Azure/go-ansiterm
|
||||
github.com/Azure/go-ansiterm/winterm
|
||||
# github.com/Azure/go-autorest/autorest v0.9.2
|
||||
github.com/Azure/go-autorest/autorest
|
||||
|
@ -44,6 +48,20 @@ github.com/Azure/go-autorest/autorest/validation
|
|||
github.com/Azure/go-autorest/logger
|
||||
# github.com/Azure/go-autorest/tracing v0.5.0
|
||||
github.com/Azure/go-autorest/tracing
|
||||
=======
|
||||
github.com/Azure/go-ansiterm
|
||||
# github.com/Azure/go-autorest v11.7.1+incompatible
|
||||
github.com/Azure/go-autorest/autorest/azure
|
||||
github.com/Azure/go-autorest/autorest
|
||||
github.com/Azure/go-autorest/autorest/azure/auth
|
||||
github.com/Azure/go-autorest/autorest/to
|
||||
github.com/Azure/go-autorest/autorest/date
|
||||
github.com/Azure/go-autorest/tracing
|
||||
github.com/Azure/go-autorest/autorest/validation
|
||||
github.com/Azure/go-autorest/autorest/adal
|
||||
github.com/Azure/go-autorest/logger
|
||||
github.com/Azure/go-autorest/autorest/azure/cli
|
||||
>>>>>>> Add TLS options per Nomad backend
|
||||
# github.com/BurntSushi/toml v0.3.1
|
||||
github.com/BurntSushi/toml
|
||||
# github.com/DataDog/datadog-go v3.2.0+incompatible
|
||||
|
@ -62,27 +80,27 @@ github.com/Nvveen/Gotty
|
|||
# github.com/SAP/go-hdb v0.14.1
|
||||
github.com/SAP/go-hdb/driver
|
||||
github.com/SAP/go-hdb/driver/sqltrace
|
||||
github.com/SAP/go-hdb/internal/bufio
|
||||
github.com/SAP/go-hdb/internal/protocol
|
||||
github.com/SAP/go-hdb/internal/bufio
|
||||
github.com/SAP/go-hdb/internal/unicode
|
||||
github.com/SAP/go-hdb/internal/unicode/cesu8
|
||||
# github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6
|
||||
github.com/StackExchange/wmi
|
||||
# github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/sdk
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/sdk
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/services/kms
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/services/sts
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/services/kms
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/services/ram
|
||||
github.com/aliyun/alibaba-cloud-sdk-go/services/sts
|
||||
# github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5
|
||||
github.com/aliyun/aliyun-oss-go-sdk/oss
|
||||
# github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2
|
||||
|
@ -103,53 +121,53 @@ github.com/armon/go-radix
|
|||
github.com/asaskevich/govalidator
|
||||
# github.com/aws/aws-sdk-go v1.25.41
|
||||
github.com/aws/aws-sdk-go/aws
|
||||
github.com/aws/aws-sdk-go/aws/credentials
|
||||
github.com/aws/aws-sdk-go/aws/credentials/stscreds
|
||||
github.com/aws/aws-sdk-go/aws/endpoints
|
||||
github.com/aws/aws-sdk-go/aws/session
|
||||
github.com/aws/aws-sdk-go/service/ec2
|
||||
github.com/aws/aws-sdk-go/service/iam
|
||||
github.com/aws/aws-sdk-go/service/sts
|
||||
github.com/aws/aws-sdk-go/aws/arn
|
||||
github.com/aws/aws-sdk-go/aws/awserr
|
||||
github.com/aws/aws-sdk-go/aws/awsutil
|
||||
github.com/aws/aws-sdk-go/aws/client
|
||||
github.com/aws/aws-sdk-go/aws/client/metadata
|
||||
github.com/aws/aws-sdk-go/aws/corehandlers
|
||||
github.com/aws/aws-sdk-go/aws/credentials
|
||||
github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds
|
||||
github.com/aws/aws-sdk-go/aws/credentials/endpointcreds
|
||||
github.com/aws/aws-sdk-go/aws/credentials/processcreds
|
||||
github.com/aws/aws-sdk-go/aws/credentials/stscreds
|
||||
github.com/aws/aws-sdk-go/aws/crr
|
||||
github.com/aws/aws-sdk-go/aws/csm
|
||||
github.com/aws/aws-sdk-go/service/iam/iamiface
|
||||
github.com/aws/aws-sdk-go/service/sts/stsiface
|
||||
github.com/aws/aws-sdk-go/aws/defaults
|
||||
github.com/aws/aws-sdk-go/aws/ec2metadata
|
||||
github.com/aws/aws-sdk-go/aws/endpoints
|
||||
github.com/aws/aws-sdk-go/aws/request
|
||||
github.com/aws/aws-sdk-go/aws/session
|
||||
github.com/aws/aws-sdk-go/aws/signer/v4
|
||||
github.com/aws/aws-sdk-go/internal/ini
|
||||
github.com/aws/aws-sdk-go/internal/s3err
|
||||
github.com/aws/aws-sdk-go/service/dynamodb
|
||||
github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute
|
||||
github.com/aws/aws-sdk-go/service/s3
|
||||
github.com/aws/aws-sdk-go/service/kms
|
||||
github.com/aws/aws-sdk-go/service/kms/kmsiface
|
||||
github.com/aws/aws-sdk-go/internal/sdkio
|
||||
github.com/aws/aws-sdk-go/internal/sdkmath
|
||||
github.com/aws/aws-sdk-go/internal/sdkrand
|
||||
github.com/aws/aws-sdk-go/internal/sdkuri
|
||||
github.com/aws/aws-sdk-go/internal/ini
|
||||
github.com/aws/aws-sdk-go/internal/shareddefaults
|
||||
github.com/aws/aws-sdk-go/aws/client
|
||||
github.com/aws/aws-sdk-go/internal/sdkrand
|
||||
github.com/aws/aws-sdk-go/aws/corehandlers
|
||||
github.com/aws/aws-sdk-go/aws/credentials/processcreds
|
||||
github.com/aws/aws-sdk-go/aws/csm
|
||||
github.com/aws/aws-sdk-go/aws/awsutil
|
||||
github.com/aws/aws-sdk-go/aws/client/metadata
|
||||
github.com/aws/aws-sdk-go/aws/signer/v4
|
||||
github.com/aws/aws-sdk-go/private/protocol
|
||||
github.com/aws/aws-sdk-go/private/protocol/ec2query
|
||||
github.com/aws/aws-sdk-go/private/protocol/query
|
||||
github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds
|
||||
github.com/aws/aws-sdk-go/aws/credentials/endpointcreds
|
||||
github.com/aws/aws-sdk-go/internal/sdkuri
|
||||
github.com/aws/aws-sdk-go/aws/crr
|
||||
github.com/aws/aws-sdk-go/private/protocol/jsonrpc
|
||||
github.com/aws/aws-sdk-go/internal/s3err
|
||||
github.com/aws/aws-sdk-go/private/protocol/eventstream
|
||||
github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi
|
||||
github.com/aws/aws-sdk-go/private/protocol/json/jsonutil
|
||||
github.com/aws/aws-sdk-go/private/protocol/jsonrpc
|
||||
github.com/aws/aws-sdk-go/private/protocol/query
|
||||
github.com/aws/aws-sdk-go/private/protocol/query/queryutil
|
||||
github.com/aws/aws-sdk-go/private/protocol/rest
|
||||
github.com/aws/aws-sdk-go/private/protocol/restxml
|
||||
github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
|
||||
github.com/aws/aws-sdk-go/service/dynamodb
|
||||
github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute
|
||||
github.com/aws/aws-sdk-go/service/ec2
|
||||
github.com/aws/aws-sdk-go/service/iam
|
||||
github.com/aws/aws-sdk-go/service/iam/iamiface
|
||||
github.com/aws/aws-sdk-go/service/kms
|
||||
github.com/aws/aws-sdk-go/service/kms/kmsiface
|
||||
github.com/aws/aws-sdk-go/service/s3
|
||||
github.com/aws/aws-sdk-go/service/sts
|
||||
github.com/aws/aws-sdk-go/service/sts/stsiface
|
||||
github.com/aws/aws-sdk-go/internal/sdkmath
|
||||
github.com/aws/aws-sdk-go/private/protocol/query/queryutil
|
||||
github.com/aws/aws-sdk-go/private/protocol/json/jsonutil
|
||||
# github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973
|
||||
github.com/beorn7/perks/quantile
|
||||
# github.com/bgentry/speakeasy v0.1.0
|
||||
|
@ -159,10 +177,10 @@ github.com/boombuler/barcode
|
|||
github.com/boombuler/barcode/qr
|
||||
github.com/boombuler/barcode/utils
|
||||
# github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f
|
||||
github.com/briankassouf/jose
|
||||
github.com/briankassouf/jose/crypto
|
||||
github.com/briankassouf/jose/jws
|
||||
github.com/briankassouf/jose/jwt
|
||||
github.com/briankassouf/jose
|
||||
# github.com/cenkalti/backoff v2.2.1+incompatible
|
||||
github.com/cenkalti/backoff
|
||||
# github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f
|
||||
|
@ -173,8 +191,8 @@ github.com/chrismalek/oktasdk-go/okta
|
|||
# github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible
|
||||
github.com/circonus-labs/circonus-gometrics
|
||||
github.com/circonus-labs/circonus-gometrics/api
|
||||
github.com/circonus-labs/circonus-gometrics/api/config
|
||||
github.com/circonus-labs/circonus-gometrics/checkmgr
|
||||
github.com/circonus-labs/circonus-gometrics/api/config
|
||||
# github.com/circonus-labs/circonusllhist v0.1.3
|
||||
github.com/circonus-labs/circonusllhist
|
||||
# github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381
|
||||
|
@ -205,12 +223,12 @@ github.com/docker/go-connections/nat
|
|||
# github.com/docker/go-units v0.4.0
|
||||
github.com/docker/go-units
|
||||
# github.com/dsnet/compress v0.0.1
|
||||
github.com/dsnet/compress
|
||||
github.com/dsnet/compress/bzip2
|
||||
github.com/dsnet/compress/bzip2/internal/sais
|
||||
github.com/dsnet/compress/internal
|
||||
github.com/dsnet/compress/internal/errors
|
||||
github.com/dsnet/compress/internal/prefix
|
||||
github.com/dsnet/compress
|
||||
# github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74
|
||||
github.com/duosecurity/duo_api_golang
|
||||
github.com/duosecurity/duo_api_golang/authapi
|
||||
|
@ -247,21 +265,29 @@ github.com/gocql/gocql/internal/lru
|
|||
github.com/gocql/gocql/internal/murmur
|
||||
github.com/gocql/gocql/internal/streams
|
||||
# github.com/gogo/protobuf v1.2.1
|
||||
github.com/gogo/protobuf/gogoproto
|
||||
github.com/gogo/protobuf/io
|
||||
github.com/gogo/protobuf/proto
|
||||
github.com/gogo/protobuf/protoc-gen-gogo/descriptor
|
||||
github.com/gogo/protobuf/sortkeys
|
||||
github.com/gogo/protobuf/gogoproto
|
||||
github.com/gogo/protobuf/protoc-gen-gogo/descriptor
|
||||
# github.com/golang/protobuf v1.3.2
|
||||
github.com/golang/protobuf/proto
|
||||
<<<<<<< HEAD
|
||||
github.com/golang/protobuf/protoc-gen-go/descriptor
|
||||
=======
|
||||
>>>>>>> Add TLS options per Nomad backend
|
||||
github.com/golang/protobuf/ptypes
|
||||
github.com/golang/protobuf/ptypes/timestamp
|
||||
github.com/golang/protobuf/ptypes/any
|
||||
github.com/golang/protobuf/ptypes/duration
|
||||
github.com/golang/protobuf/ptypes/empty
|
||||
github.com/golang/protobuf/ptypes/struct
|
||||
github.com/golang/protobuf/ptypes/timestamp
|
||||
github.com/golang/protobuf/ptypes/empty
|
||||
github.com/golang/protobuf/ptypes/wrappers
|
||||
github.com/golang/protobuf/protoc-gen-go/descriptor
|
||||
github.com/golang/protobuf/jsonpb
|
||||
github.com/golang/protobuf/protoc-gen-go/generator
|
||||
github.com/golang/protobuf/protoc-gen-go/generator/internal/remap
|
||||
github.com/golang/protobuf/protoc-gen-go/plugin
|
||||
# github.com/golang/snappy v0.0.1
|
||||
github.com/golang/snappy
|
||||
# github.com/google/go-github v17.0.0+incompatible
|
||||
|
@ -278,16 +304,25 @@ github.com/google/uuid
|
|||
github.com/googleapis/gax-go/v2
|
||||
# github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75
|
||||
github.com/gorhill/cronexpr
|
||||
<<<<<<< HEAD
|
||||
=======
|
||||
# github.com/gorilla/websocket v1.4.1
|
||||
github.com/gorilla/websocket
|
||||
# github.com/grpc-ecosystem/grpc-gateway v1.8.5
|
||||
github.com/grpc-ecosystem/grpc-gateway/runtime
|
||||
github.com/grpc-ecosystem/grpc-gateway/utilities
|
||||
github.com/grpc-ecosystem/grpc-gateway/internal
|
||||
>>>>>>> Add TLS options per Nomad backend
|
||||
# github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed
|
||||
github.com/hailocab/go-hostpool
|
||||
# github.com/hashicorp/consul-template v0.22.0
|
||||
github.com/hashicorp/consul-template/child
|
||||
github.com/hashicorp/consul-template/config
|
||||
github.com/hashicorp/consul-template/dependency
|
||||
github.com/hashicorp/consul-template/logging
|
||||
github.com/hashicorp/consul-template/manager
|
||||
github.com/hashicorp/consul-template/renderer
|
||||
github.com/hashicorp/consul-template/signals
|
||||
github.com/hashicorp/consul-template/child
|
||||
github.com/hashicorp/consul-template/dependency
|
||||
github.com/hashicorp/consul-template/renderer
|
||||
github.com/hashicorp/consul-template/template
|
||||
github.com/hashicorp/consul-template/version
|
||||
github.com/hashicorp/consul-template/watch
|
||||
|
@ -328,7 +363,7 @@ github.com/hashicorp/go-raftchunking
|
|||
github.com/hashicorp/go-raftchunking/types
|
||||
# github.com/hashicorp/go-retryablehttp v0.6.2
|
||||
github.com/hashicorp/go-retryablehttp
|
||||
# github.com/hashicorp/go-rootcerts v1.0.1
|
||||
# github.com/hashicorp/go-rootcerts v1.0.2
|
||||
github.com/hashicorp/go-rootcerts
|
||||
# github.com/hashicorp/go-sockaddr v1.0.2
|
||||
github.com/hashicorp/go-sockaddr
|
||||
|
@ -340,55 +375,55 @@ github.com/hashicorp/go-uuid
|
|||
# github.com/hashicorp/go-version v1.2.0
|
||||
github.com/hashicorp/go-version
|
||||
# github.com/hashicorp/gokrb5 v7.3.1-0.20191209171754-1a6fa9886ec3+incompatible
|
||||
github.com/hashicorp/gokrb5/asn1tools
|
||||
github.com/hashicorp/gokrb5/client
|
||||
github.com/hashicorp/gokrb5/config
|
||||
github.com/hashicorp/gokrb5/keytab
|
||||
github.com/hashicorp/gokrb5/service
|
||||
github.com/hashicorp/gokrb5/spnego
|
||||
github.com/hashicorp/gokrb5/credentials
|
||||
github.com/hashicorp/gokrb5/crypto
|
||||
github.com/hashicorp/gokrb5/crypto/common
|
||||
github.com/hashicorp/gokrb5/crypto/etype
|
||||
github.com/hashicorp/gokrb5/iana/errorcode
|
||||
github.com/hashicorp/gokrb5/iana/flags
|
||||
github.com/hashicorp/gokrb5/iana/keyusage
|
||||
github.com/hashicorp/gokrb5/iana/nametype
|
||||
github.com/hashicorp/gokrb5/iana/patype
|
||||
github.com/hashicorp/gokrb5/kadmin
|
||||
github.com/hashicorp/gokrb5/krberror
|
||||
github.com/hashicorp/gokrb5/messages
|
||||
github.com/hashicorp/gokrb5/types
|
||||
github.com/hashicorp/gokrb5/iana/etypeID
|
||||
github.com/hashicorp/gokrb5/asn1tools
|
||||
github.com/hashicorp/gokrb5/gssapi
|
||||
github.com/hashicorp/gokrb5/iana/chksumtype
|
||||
github.com/hashicorp/gokrb5/iana/msgtype
|
||||
github.com/hashicorp/gokrb5/crypto/common
|
||||
github.com/hashicorp/gokrb5/crypto/rfc3961
|
||||
github.com/hashicorp/gokrb5/crypto/rfc3962
|
||||
github.com/hashicorp/gokrb5/crypto/rfc4757
|
||||
github.com/hashicorp/gokrb5/crypto/rfc8009
|
||||
github.com/hashicorp/gokrb5/gssapi
|
||||
github.com/hashicorp/gokrb5/iana
|
||||
github.com/hashicorp/gokrb5/iana/addrtype
|
||||
github.com/hashicorp/gokrb5/iana/adtype
|
||||
github.com/hashicorp/gokrb5/iana/asnAppTag
|
||||
github.com/hashicorp/gokrb5/iana/chksumtype
|
||||
github.com/hashicorp/gokrb5/iana/errorcode
|
||||
github.com/hashicorp/gokrb5/iana/etypeID
|
||||
github.com/hashicorp/gokrb5/iana/flags
|
||||
github.com/hashicorp/gokrb5/iana/keyusage
|
||||
github.com/hashicorp/gokrb5/iana/msgtype
|
||||
github.com/hashicorp/gokrb5/iana/nametype
|
||||
github.com/hashicorp/gokrb5/iana/patype
|
||||
github.com/hashicorp/gokrb5/kadmin
|
||||
github.com/hashicorp/gokrb5/keytab
|
||||
github.com/hashicorp/gokrb5/krberror
|
||||
github.com/hashicorp/gokrb5/messages
|
||||
github.com/hashicorp/gokrb5/pac
|
||||
github.com/hashicorp/gokrb5/service
|
||||
github.com/hashicorp/gokrb5/spnego
|
||||
github.com/hashicorp/gokrb5/types
|
||||
github.com/hashicorp/gokrb5/iana/addrtype
|
||||
# github.com/hashicorp/golang-lru v0.5.3
|
||||
github.com/hashicorp/golang-lru
|
||||
github.com/hashicorp/golang-lru/simplelru
|
||||
# github.com/hashicorp/hcl v1.0.0
|
||||
github.com/hashicorp/hcl
|
||||
github.com/hashicorp/hcl/hcl/ast
|
||||
github.com/hashicorp/hcl/hcl/parser
|
||||
github.com/hashicorp/hcl/hcl/printer
|
||||
github.com/hashicorp/hcl/hcl/scanner
|
||||
github.com/hashicorp/hcl/hcl/strconv
|
||||
github.com/hashicorp/hcl/hcl/parser
|
||||
github.com/hashicorp/hcl/hcl/token
|
||||
github.com/hashicorp/hcl/json/parser
|
||||
github.com/hashicorp/hcl/hcl/scanner
|
||||
github.com/hashicorp/hcl/hcl/strconv
|
||||
github.com/hashicorp/hcl/json/scanner
|
||||
github.com/hashicorp/hcl/json/token
|
||||
# github.com/hashicorp/logutils v1.0.0
|
||||
github.com/hashicorp/logutils
|
||||
# github.com/hashicorp/nomad/api v0.0.0-20190412184103-1c38ced33adf
|
||||
# github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d
|
||||
github.com/hashicorp/nomad/api
|
||||
github.com/hashicorp/nomad/api/contexts
|
||||
# github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17
|
||||
|
@ -406,11 +441,11 @@ github.com/hashicorp/vault-plugin-auth-azure
|
|||
github.com/hashicorp/vault-plugin-auth-centrify
|
||||
# github.com/hashicorp/vault-plugin-auth-cf v0.0.0-20190821162840-1c2205826fee
|
||||
github.com/hashicorp/vault-plugin-auth-cf
|
||||
github.com/hashicorp/vault-plugin-auth-cf/models
|
||||
github.com/hashicorp/vault-plugin-auth-cf/signatures
|
||||
github.com/hashicorp/vault-plugin-auth-cf/models
|
||||
github.com/hashicorp/vault-plugin-auth-cf/util
|
||||
github.com/hashicorp/vault-plugin-auth-cf/testing/certificates
|
||||
github.com/hashicorp/vault-plugin-auth-cf/testing/cf
|
||||
github.com/hashicorp/vault-plugin-auth-cf/util
|
||||
# github.com/hashicorp/vault-plugin-auth-gcp v0.5.2-0.20190930204802-acfd134850c2
|
||||
github.com/hashicorp/vault-plugin-auth-gcp/plugin
|
||||
github.com/hashicorp/vault-plugin-auth-gcp/plugin/cache
|
||||
|
@ -443,6 +478,7 @@ github.com/hashicorp/vault-plugin-secrets-gcpkms
|
|||
github.com/hashicorp/vault-plugin-secrets-kv
|
||||
# github.com/hashicorp/vault/api v1.0.5-0.20200111014044-ba76c080ad1f => ./api
|
||||
github.com/hashicorp/vault/api
|
||||
<<<<<<< HEAD
|
||||
# github.com/hashicorp/vault/sdk v0.1.14-0.20200111013952-157e805b97be => ./sdk
|
||||
github.com/hashicorp/vault/sdk/database/dbplugin
|
||||
github.com/hashicorp/vault/sdk/database/helper/connutil
|
||||
|
@ -452,44 +488,65 @@ github.com/hashicorp/vault/sdk/framework
|
|||
github.com/hashicorp/vault/sdk/helper/awsutil
|
||||
github.com/hashicorp/vault/sdk/helper/base62
|
||||
github.com/hashicorp/vault/sdk/helper/certutil
|
||||
=======
|
||||
# github.com/hashicorp/vault/sdk v0.1.14-0.20191218213202-9caafff72a1f => ./sdk
|
||||
github.com/hashicorp/vault/sdk/helper/salt
|
||||
github.com/hashicorp/vault/sdk/helper/strutil
|
||||
github.com/hashicorp/vault/sdk/helper/wrapping
|
||||
github.com/hashicorp/vault/sdk/logical
|
||||
github.com/hashicorp/vault/sdk/helper/parseutil
|
||||
github.com/hashicorp/vault/sdk/framework
|
||||
github.com/hashicorp/vault/sdk/helper/policyutil
|
||||
github.com/hashicorp/vault/sdk/plugin
|
||||
>>>>>>> Add TLS options per Nomad backend
|
||||
github.com/hashicorp/vault/sdk/helper/cidrutil
|
||||
github.com/hashicorp/vault/sdk/helper/compressutil
|
||||
github.com/hashicorp/vault/sdk/helper/consts
|
||||
github.com/hashicorp/vault/sdk/helper/cryptoutil
|
||||
github.com/hashicorp/vault/sdk/helper/locksutil
|
||||
github.com/hashicorp/vault/sdk/helper/tokenutil
|
||||
github.com/hashicorp/vault/sdk/helper/jsonutil
|
||||
github.com/hashicorp/vault/sdk/helper/certutil
|
||||
github.com/hashicorp/vault/sdk/helper/password
|
||||
github.com/hashicorp/vault/sdk/helper/ldaputil
|
||||
github.com/hashicorp/vault/sdk/helper/tlsutil
|
||||
github.com/hashicorp/vault/sdk/database/dbplugin
|
||||
github.com/hashicorp/vault/sdk/database/helper/dbutil
|
||||
github.com/hashicorp/vault/sdk/queue
|
||||
github.com/hashicorp/vault/sdk/helper/dbtxn
|
||||
github.com/hashicorp/vault/sdk/helper/errutil
|
||||
<<<<<<< HEAD
|
||||
github.com/hashicorp/vault/sdk/helper/hclutil
|
||||
github.com/hashicorp/vault/sdk/helper/identitytpl
|
||||
github.com/hashicorp/vault/sdk/helper/jsonutil
|
||||
github.com/hashicorp/vault/sdk/helper/kdf
|
||||
=======
|
||||
>>>>>>> Add TLS options per Nomad backend
|
||||
github.com/hashicorp/vault/sdk/helper/keysutil
|
||||
github.com/hashicorp/vault/sdk/helper/ldaputil
|
||||
github.com/hashicorp/vault/sdk/helper/license
|
||||
github.com/hashicorp/vault/sdk/helper/locksutil
|
||||
github.com/hashicorp/vault/sdk/helper/base62
|
||||
github.com/hashicorp/vault/sdk/helper/logging
|
||||
github.com/hashicorp/vault/sdk/helper/mlock
|
||||
github.com/hashicorp/vault/sdk/helper/parseutil
|
||||
github.com/hashicorp/vault/sdk/helper/password
|
||||
github.com/hashicorp/vault/sdk/helper/pathmanager
|
||||
github.com/hashicorp/vault/sdk/helper/pluginutil
|
||||
github.com/hashicorp/vault/sdk/helper/pointerutil
|
||||
github.com/hashicorp/vault/sdk/helper/policyutil
|
||||
github.com/hashicorp/vault/sdk/helper/salt
|
||||
github.com/hashicorp/vault/sdk/helper/strutil
|
||||
github.com/hashicorp/vault/sdk/helper/tlsutil
|
||||
github.com/hashicorp/vault/sdk/helper/tokenutil
|
||||
github.com/hashicorp/vault/sdk/helper/useragent
|
||||
github.com/hashicorp/vault/sdk/helper/wrapping
|
||||
github.com/hashicorp/vault/sdk/logical
|
||||
github.com/hashicorp/vault/sdk/physical
|
||||
github.com/hashicorp/vault/sdk/physical/file
|
||||
github.com/hashicorp/vault/sdk/physical/inmem
|
||||
github.com/hashicorp/vault/sdk/plugin
|
||||
github.com/hashicorp/vault/sdk/plugin/mock
|
||||
github.com/hashicorp/vault/sdk/plugin/pb
|
||||
github.com/hashicorp/vault/sdk/queue
|
||||
github.com/hashicorp/vault/sdk/version
|
||||
<<<<<<< HEAD
|
||||
# github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb
|
||||
=======
|
||||
github.com/hashicorp/vault/sdk/helper/cryptoutil
|
||||
github.com/hashicorp/vault/sdk/helper/pointerutil
|
||||
github.com/hashicorp/vault/sdk/helper/hclutil
|
||||
github.com/hashicorp/vault/sdk/database/helper/credsutil
|
||||
github.com/hashicorp/vault/sdk/helper/compressutil
|
||||
github.com/hashicorp/vault/sdk/helper/pathmanager
|
||||
github.com/hashicorp/vault/sdk/plugin/pb
|
||||
github.com/hashicorp/vault/sdk/database/helper/connutil
|
||||
github.com/hashicorp/vault/sdk/helper/license
|
||||
github.com/hashicorp/vault/sdk/helper/pluginutil
|
||||
github.com/hashicorp/vault/sdk/helper/entropy
|
||||
github.com/hashicorp/vault/sdk/helper/kdf
|
||||
github.com/hashicorp/vault/sdk/plugin/mock
|
||||
# github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d
|
||||
>>>>>>> Add TLS options per Nomad backend
|
||||
github.com/hashicorp/yamux
|
||||
# github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4
|
||||
github.com/influxdata/influxdb/client/v2
|
||||
|
@ -497,11 +554,11 @@ github.com/influxdata/influxdb/models
|
|||
github.com/influxdata/influxdb/pkg/escape
|
||||
# github.com/jackc/pgx v3.3.0+incompatible
|
||||
github.com/jackc/pgx
|
||||
github.com/jackc/pgx/chunkreader
|
||||
github.com/jackc/pgx/internal/sanitize
|
||||
github.com/jackc/pgx/pgio
|
||||
github.com/jackc/pgx/pgproto3
|
||||
github.com/jackc/pgx/pgtype
|
||||
github.com/jackc/pgx/chunkreader
|
||||
# github.com/jcmturner/gofork v1.0.0
|
||||
github.com/jcmturner/gofork/encoding/asn1
|
||||
github.com/jcmturner/gofork/x/crypto/pbkdf2
|
||||
|
@ -516,25 +573,25 @@ github.com/jmespath/go-jmespath
|
|||
# github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869
|
||||
github.com/joyent/triton-go
|
||||
github.com/joyent/triton-go/authentication
|
||||
github.com/joyent/triton-go/client
|
||||
github.com/joyent/triton-go/errors
|
||||
github.com/joyent/triton-go/storage
|
||||
github.com/joyent/triton-go/client
|
||||
# github.com/json-iterator/go v1.1.6
|
||||
github.com/json-iterator/go
|
||||
# github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f
|
||||
github.com/keybase/go-crypto/openpgp
|
||||
github.com/keybase/go-crypto/openpgp/packet
|
||||
github.com/keybase/go-crypto/openpgp/armor
|
||||
github.com/keybase/go-crypto/openpgp/errors
|
||||
github.com/keybase/go-crypto/openpgp/s2k
|
||||
github.com/keybase/go-crypto/rsa
|
||||
github.com/keybase/go-crypto/brainpool
|
||||
github.com/keybase/go-crypto/cast5
|
||||
github.com/keybase/go-crypto/curve25519
|
||||
github.com/keybase/go-crypto/ed25519
|
||||
github.com/keybase/go-crypto/ed25519/internal/edwards25519
|
||||
github.com/keybase/go-crypto/openpgp
|
||||
github.com/keybase/go-crypto/openpgp/armor
|
||||
github.com/keybase/go-crypto/openpgp/ecdh
|
||||
github.com/keybase/go-crypto/openpgp/elgamal
|
||||
github.com/keybase/go-crypto/openpgp/errors
|
||||
github.com/keybase/go-crypto/openpgp/packet
|
||||
github.com/keybase/go-crypto/openpgp/s2k
|
||||
github.com/keybase/go-crypto/rsa
|
||||
github.com/keybase/go-crypto/ed25519/internal/edwards25519
|
||||
# github.com/konsorten/go-windows-terminal-sequences v1.0.1
|
||||
github.com/konsorten/go-windows-terminal-sequences
|
||||
# github.com/kr/pretty v0.1.0
|
||||
|
@ -586,15 +643,15 @@ github.com/oklog/run
|
|||
# github.com/opencontainers/go-digest v1.0.0-rc1
|
||||
github.com/opencontainers/go-digest
|
||||
# github.com/opencontainers/image-spec v1.0.1
|
||||
github.com/opencontainers/image-spec/specs-go
|
||||
github.com/opencontainers/image-spec/specs-go/v1
|
||||
github.com/opencontainers/image-spec/specs-go
|
||||
# github.com/opencontainers/runc v0.1.1
|
||||
github.com/opencontainers/runc/libcontainer/user
|
||||
# github.com/oracle/oci-go-sdk v12.5.0+incompatible
|
||||
github.com/oracle/oci-go-sdk/common
|
||||
github.com/oracle/oci-go-sdk/common/auth
|
||||
github.com/oracle/oci-go-sdk/keymanagement
|
||||
github.com/oracle/oci-go-sdk/objectstorage
|
||||
github.com/oracle/oci-go-sdk/keymanagement
|
||||
# github.com/ory/dockertest v3.3.5+incompatible
|
||||
github.com/ory/dockertest
|
||||
github.com/ory/dockertest/docker
|
||||
|
@ -602,23 +659,23 @@ github.com/ory/dockertest/docker/opts
|
|||
github.com/ory/dockertest/docker/pkg/archive
|
||||
github.com/ory/dockertest/docker/pkg/fileutils
|
||||
github.com/ory/dockertest/docker/pkg/homedir
|
||||
github.com/ory/dockertest/docker/pkg/jsonmessage
|
||||
github.com/ory/dockertest/docker/pkg/stdcopy
|
||||
github.com/ory/dockertest/docker/types/registry
|
||||
github.com/ory/dockertest/docker/types
|
||||
github.com/ory/dockertest/docker/pkg/idtools
|
||||
github.com/ory/dockertest/docker/pkg/ioutils
|
||||
github.com/ory/dockertest/docker/pkg/jsonmessage
|
||||
github.com/ory/dockertest/docker/pkg/longpath
|
||||
github.com/ory/dockertest/docker/pkg/mount
|
||||
github.com/ory/dockertest/docker/pkg/pools
|
||||
github.com/ory/dockertest/docker/pkg/stdcopy
|
||||
github.com/ory/dockertest/docker/pkg/system
|
||||
github.com/ory/dockertest/docker/pkg/term
|
||||
github.com/ory/dockertest/docker/pkg/term/windows
|
||||
github.com/ory/dockertest/docker/types
|
||||
github.com/ory/dockertest/docker/types/blkiodev
|
||||
github.com/ory/dockertest/docker/types/container
|
||||
github.com/ory/dockertest/docker/types/filters
|
||||
github.com/ory/dockertest/docker/types/mount
|
||||
github.com/ory/dockertest/docker/types/network
|
||||
github.com/ory/dockertest/docker/types/registry
|
||||
github.com/ory/dockertest/docker/pkg/mount
|
||||
github.com/ory/dockertest/docker/pkg/term/windows
|
||||
github.com/ory/dockertest/docker/types/blkiodev
|
||||
github.com/ory/dockertest/docker/types/strslice
|
||||
github.com/ory/dockertest/docker/types/versions
|
||||
# github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
|
@ -632,31 +689,31 @@ github.com/pkg/errors
|
|||
github.com/pmezard/go-difflib/difflib
|
||||
# github.com/posener/complete v1.2.1
|
||||
github.com/posener/complete
|
||||
github.com/posener/complete/cmd
|
||||
github.com/posener/complete/cmd/install
|
||||
github.com/posener/complete/cmd
|
||||
github.com/posener/complete/match
|
||||
# github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35
|
||||
github.com/pquerna/cachecontrol
|
||||
github.com/pquerna/cachecontrol/cacheobject
|
||||
# github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d
|
||||
github.com/pquerna/otp
|
||||
github.com/pquerna/otp/hotp
|
||||
github.com/pquerna/otp/totp
|
||||
github.com/pquerna/otp/hotp
|
||||
# github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829
|
||||
github.com/prometheus/client_golang/prometheus
|
||||
github.com/prometheus/client_golang/prometheus/internal
|
||||
github.com/prometheus/client_golang/prometheus/push
|
||||
github.com/prometheus/client_golang/prometheus/internal
|
||||
# github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f
|
||||
github.com/prometheus/client_model/go
|
||||
# github.com/prometheus/common v0.2.0
|
||||
github.com/prometheus/common/expfmt
|
||||
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
|
||||
github.com/prometheus/common/model
|
||||
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
|
||||
# github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1
|
||||
github.com/prometheus/procfs
|
||||
github.com/prometheus/procfs/internal/util
|
||||
github.com/prometheus/procfs/nfs
|
||||
github.com/prometheus/procfs/xfs
|
||||
github.com/prometheus/procfs/internal/util
|
||||
# github.com/ryanuber/columnize v2.1.0+incompatible
|
||||
github.com/ryanuber/columnize
|
||||
# github.com/ryanuber/go-glob v1.0.0
|
||||
|
@ -669,10 +726,10 @@ github.com/satori/go.uuid
|
|||
github.com/shirou/gopsutil/cpu
|
||||
github.com/shirou/gopsutil/disk
|
||||
github.com/shirou/gopsutil/host
|
||||
github.com/shirou/gopsutil/internal/common
|
||||
github.com/shirou/gopsutil/mem
|
||||
github.com/shirou/gopsutil/net
|
||||
github.com/shirou/gopsutil/internal/common
|
||||
github.com/shirou/gopsutil/process
|
||||
github.com/shirou/gopsutil/net
|
||||
# github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4
|
||||
github.com/shirou/w32
|
||||
# github.com/sirupsen/logrus v1.4.2
|
||||
|
@ -686,118 +743,135 @@ github.com/tv42/httpunix
github.com/ugorji/go/codec
# github.com/ulikunitz/xz v0.5.6
github.com/ulikunitz/xz
github.com/ulikunitz/xz/internal/hash
github.com/ulikunitz/xz/internal/xlog
github.com/ulikunitz/xz/lzma
github.com/ulikunitz/xz/internal/hash
# github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8
github.com/xi2/xz
# go.etcd.io/bbolt v1.3.2
go.etcd.io/bbolt
# go.etcd.io/etcd v0.0.0-20190412021913-f29b1ada1971
go.etcd.io/etcd/auth/authpb
go.etcd.io/etcd/client
go.etcd.io/etcd/clientv3
go.etcd.io/etcd/clientv3/concurrency
go.etcd.io/etcd/pkg/transport
go.etcd.io/etcd/pkg/pathutil
go.etcd.io/etcd/pkg/srv
go.etcd.io/etcd/pkg/types
go.etcd.io/etcd/version
go.etcd.io/etcd/auth/authpb
go.etcd.io/etcd/clientv3/balancer
go.etcd.io/etcd/clientv3/balancer/picker
go.etcd.io/etcd/clientv3/balancer/resolver/endpoint
go.etcd.io/etcd/clientv3/concurrency
go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes
go.etcd.io/etcd/etcdserver/etcdserverpb
go.etcd.io/etcd/mvcc/mvccpb
go.etcd.io/etcd/pkg/logutil
go.etcd.io/etcd/pkg/pathutil
go.etcd.io/etcd/pkg/srv
go.etcd.io/etcd/pkg/systemd
go.etcd.io/etcd/pkg/tlsutil
go.etcd.io/etcd/pkg/transport
go.etcd.io/etcd/pkg/types
go.etcd.io/etcd/pkg/systemd
go.etcd.io/etcd/raft
go.etcd.io/etcd/raft/raftpb
go.etcd.io/etcd/version
# go.opencensus.io v0.21.0
go.opencensus.io
go.opencensus.io/internal
go.opencensus.io/internal/tagencoding
go.opencensus.io/metric/metricdata
go.opencensus.io/metric/metricproducer
go.opencensus.io/plugin/ocgrpc
go.opencensus.io/stats
go.opencensus.io/stats/view
go.opencensus.io/plugin/ochttp
<<<<<<< HEAD
go.opencensus.io/plugin/ochttp/propagation/b3
go.opencensus.io/resource
go.opencensus.io/stats
go.opencensus.io/stats/internal
go.opencensus.io/stats/view
go.opencensus.io/tag
=======
go.opencensus.io/plugin/ochttp/propagation/tracecontext
go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/metric/metricdata
>>>>>>> Add TLS options per Nomad backend
go.opencensus.io/stats/internal
go.opencensus.io/tag
go.opencensus.io/internal/tagencoding
go.opencensus.io/metric/metricproducer
go.opencensus.io/plugin/ocgrpc
go.opencensus.io/plugin/ochttp/propagation/b3
go.opencensus.io/trace/propagation
go.opencensus.io
go.opencensus.io/resource
go.opencensus.io/trace/tracestate
go.opencensus.io/internal
go.opencensus.io/trace/internal
# go.uber.org/atomic v1.4.0
go.uber.org/atomic
# go.uber.org/multierr v1.1.0
go.uber.org/multierr
# go.uber.org/zap v1.9.1
go.uber.org/zap
go.uber.org/zap/buffer
go.uber.org/zap/zapcore
go.uber.org/zap/internal/bufferpool
go.uber.org/zap/buffer
go.uber.org/zap/internal/color
go.uber.org/zap/internal/exit
go.uber.org/zap/zapcore
# golang.org/x/crypto v0.0.0-20191106202628-ed6320f186d4
golang.org/x/crypto/bcrypt
golang.org/x/crypto/blake2b
golang.org/x/crypto/blowfish
golang.org/x/crypto/chacha20poly1305
golang.org/x/crypto/cryptobyte
golang.org/x/crypto/cryptobyte/asn1
golang.org/x/crypto/curve25519
golang.org/x/crypto/ed25519
golang.org/x/crypto/ed25519/internal/edwards25519
golang.org/x/crypto/hkdf
golang.org/x/crypto/internal/chacha20
golang.org/x/crypto/internal/subtle
golang.org/x/crypto/md4
golang.org/x/crypto/pbkdf2
golang.org/x/crypto/pkcs12
golang.org/x/crypto/pkcs12/internal/rc2
golang.org/x/crypto/poly1305
golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/curve25519
golang.org/x/crypto/ssh/terminal
golang.org/x/crypto/blowfish
golang.org/x/crypto/md4
golang.org/x/crypto/ed25519/internal/edwards25519
golang.org/x/crypto/internal/chacha20
golang.org/x/crypto/poly1305
golang.org/x/crypto/chacha20poly1305
golang.org/x/crypto/hkdf
golang.org/x/crypto/pbkdf2
golang.org/x/crypto/blake2b
golang.org/x/crypto/pkcs12
golang.org/x/crypto/internal/subtle
golang.org/x/crypto/pkcs12/internal/rc2
# golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
golang.org/x/net/http/httpguts
golang.org/x/net/http/httpproxy
golang.org/x/net/http2
golang.org/x/net/http2/hpack
golang.org/x/net/idna
golang.org/x/net/internal/timeseries
golang.org/x/net/http/httpproxy
golang.org/x/net/context
golang.org/x/net/http2
golang.org/x/net/http/httpguts
golang.org/x/net/http2/hpack
golang.org/x/net/trace
<<<<<<< HEAD
# golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
=======
golang.org/x/net/context/ctxhttp
golang.org/x/net/internal/timeseries
# golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a
>>>>>>> Add TLS options per Nomad backend
golang.org/x/oauth2
golang.org/x/oauth2/clientcredentials
golang.org/x/oauth2/google
golang.org/x/oauth2/internal
golang.org/x/oauth2/jws
golang.org/x/oauth2/google
golang.org/x/oauth2/jwt
<<<<<<< HEAD
=======
golang.org/x/oauth2/jws
golang.org/x/oauth2/clientcredentials
# golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sync/semaphore
>>>>>>> Add TLS options per Nomad backend
# golang.org/x/sys v0.0.0-20191008105621-543471e840be
golang.org/x/sys/cpu
golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/cpu
# golang.org/x/text v0.3.2
golang.org/x/text/secure/bidirule
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
golang.org/x/text/transform
golang.org/x/text/encoding/unicode
golang.org/x/text/encoding
golang.org/x/text/encoding/internal
golang.org/x/text/encoding/internal/identifier
golang.org/x/text/encoding/unicode
golang.org/x/text/internal/utf8internal
golang.org/x/text/runes
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
golang.org/x/time/rate
<<<<<<< HEAD
# google.golang.org/api v0.14.0
google.golang.org/api/cloudresourcemanager/v1
google.golang.org/api/compute/v1
@ -808,53 +882,86 @@ google.golang.org/api/internal
google.golang.org/api/internal/gensupport
google.golang.org/api/internal/third_party/uritemplates
google.golang.org/api/iterator
google.golang.org/api/oauth2/v2
=======
# google.golang.org/api v0.5.0
google.golang.org/api/option
google.golang.org/api/storage/v1
google.golang.org/api/iam/v1
google.golang.org/api/googleapi
google.golang.org/api/iterator
google.golang.org/api/transport
google.golang.org/api/transport/grpc
google.golang.org/api/cloudresourcemanager/v1
google.golang.org/api/compute/v1
google.golang.org/api/internal
>>>>>>> Add TLS options per Nomad backend
google.golang.org/api/oauth2/v2
google.golang.org/api/gensupport
google.golang.org/api/transport/http
google.golang.org/api/storage/v1
<<<<<<< HEAD
google.golang.org/api/transport
=======
google.golang.org/api/googleapi/internal/uritemplates
>>>>>>> Add TLS options per Nomad backend
google.golang.org/api/transport/grpc
google.golang.org/api/googleapi/transport
google.golang.org/api/transport/http/internal/propagation
google.golang.org/api/support/bundler
# google.golang.org/appengine v1.6.0
google.golang.org/appengine
google.golang.org/appengine/cloudsql
google.golang.org/appengine/urlfetch
google.golang.org/appengine
google.golang.org/appengine/socket
google.golang.org/appengine/internal
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/internal/app_identity
google.golang.org/appengine/internal/modules
google.golang.org/appengine/internal/socket
google.golang.org/appengine/internal/base
google.golang.org/appengine/internal/datastore
google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/modules
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/socket
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/socket
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64
<<<<<<< HEAD
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
google.golang.org/genproto/googleapis/api/distribution
google.golang.org/genproto/googleapis/api/label
=======
google.golang.org/genproto/googleapis/cloud/kms/v1
>>>>>>> Add TLS options per Nomad backend
google.golang.org/genproto/googleapis/api/metric
google.golang.org/genproto/googleapis/api/monitoredres
google.golang.org/genproto/googleapis/cloud/kms/v1
google.golang.org/genproto/googleapis/iam/v1
google.golang.org/genproto/googleapis/monitoring/v3
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
google.golang.org/genproto/googleapis/spanner/v1
google.golang.org/genproto/googleapis/type/expr
google.golang.org/genproto/googleapis/api/distribution
google.golang.org/genproto/protobuf/field_mask
google.golang.org/genproto/googleapis/iam/v1
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/spanner/v1
google.golang.org/genproto/googleapis/rpc/status
google.golang.org/genproto/googleapis/api/annotations
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/label
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/type/expr
google.golang.org/genproto/googleapis/api/httpbody
# google.golang.org/grpc v1.22.0
google.golang.org/grpc/grpclog
google.golang.org/grpc/codes
google.golang.org/grpc
google.golang.org/grpc/keepalive
google.golang.org/grpc/status
google.golang.org/grpc/metadata
google.golang.org/grpc/credentials
google.golang.org/grpc/balancer
<<<<<<< HEAD
google.golang.org/grpc/balancer/base
google.golang.org/grpc/balancer/grpclb
google.golang.org/grpc/balancer/grpclb/grpc_lb_v1
=======
>>>>>>> Add TLS options per Nomad backend
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/binarylog/grpc_binarylog_v1
google.golang.org/grpc/codes
google.golang.org/grpc/connectivity
<<<<<<< HEAD
google.golang.org/grpc/credentials
google.golang.org/grpc/credentials/alts
google.golang.org/grpc/credentials/alts/internal
@ -866,11 +973,10 @@ google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp
google.golang.org/grpc/credentials/google
google.golang.org/grpc/credentials/internal
google.golang.org/grpc/credentials/oauth
=======
>>>>>>> Add TLS options per Nomad backend
google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/proto
google.golang.org/grpc/grpclog
google.golang.org/grpc/health
google.golang.org/grpc/health/grpc_health_v1
google.golang.org/grpc/internal
google.golang.org/grpc/internal/backoff
google.golang.org/grpc/internal/balancerload
@ -879,10 +985,7 @@ google.golang.org/grpc/internal/channelz
google.golang.org/grpc/internal/envconfig
google.golang.org/grpc/internal/grpcrand
google.golang.org/grpc/internal/grpcsync
google.golang.org/grpc/internal/syscall
google.golang.org/grpc/internal/transport
google.golang.org/grpc/keepalive
google.golang.org/grpc/metadata
google.golang.org/grpc/naming
google.golang.org/grpc/peer
google.golang.org/grpc/resolver
@ -890,8 +993,14 @@ google.golang.org/grpc/resolver/dns
google.golang.org/grpc/resolver/passthrough
google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status
google.golang.org/grpc/tap
google.golang.org/grpc/health
google.golang.org/grpc/health/grpc_health_v1
google.golang.org/grpc/credentials/internal
google.golang.org/grpc/credentials/oauth
google.golang.org/grpc/balancer/base
google.golang.org/grpc/binarylog/grpc_binarylog_v1
google.golang.org/grpc/internal/syscall
# gopkg.in/inf.v0 v0.9.1
gopkg.in/inf.v0
# gopkg.in/ini.v1 v1.42.0
@ -908,43 +1017,43 @@ gopkg.in/jcmturner/rpc.v1/ndr
# gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce
gopkg.in/mgo.v2
gopkg.in/mgo.v2/bson
gopkg.in/mgo.v2/internal/json
gopkg.in/mgo.v2/internal/sasl
gopkg.in/mgo.v2/internal/scram
gopkg.in/mgo.v2/internal/json
# gopkg.in/ory-am/dockertest.v3 v3.3.4
gopkg.in/ory-am/dockertest.v3
# gopkg.in/square/go-jose.v2 v2.3.1
gopkg.in/square/go-jose.v2
gopkg.in/square/go-jose.v2/cipher
gopkg.in/square/go-jose.v2/json
gopkg.in/square/go-jose.v2/jwt
gopkg.in/square/go-jose.v2
gopkg.in/square/go-jose.v2/json
gopkg.in/square/go-jose.v2/cipher
# gopkg.in/yaml.v2 v2.2.2
gopkg.in/yaml.v2
# k8s.io/api v0.0.0-20190409092523-d687e77c8ae9
k8s.io/api/authentication/v1
# k8s.io/apimachinery v0.0.0-20190409092423-760d1845f48b
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/apis/meta/v1
k8s.io/apimachinery/pkg/conversion
k8s.io/apimachinery/pkg/conversion/queryparams
k8s.io/apimachinery/pkg/fields
k8s.io/apimachinery/pkg/labels
k8s.io/apimachinery/pkg/runtime
k8s.io/apimachinery/pkg/runtime/schema
k8s.io/apimachinery/pkg/selection
k8s.io/apimachinery/pkg/types
k8s.io/apimachinery/pkg/util/errors
k8s.io/apimachinery/pkg/util/validation/field
k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/conversion
k8s.io/apimachinery/pkg/fields
k8s.io/apimachinery/pkg/labels
k8s.io/apimachinery/pkg/selection
k8s.io/apimachinery/pkg/util/intstr
k8s.io/apimachinery/pkg/util/runtime
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/pkg/conversion/queryparams
k8s.io/apimachinery/pkg/util/errors
k8s.io/apimachinery/pkg/util/json
k8s.io/apimachinery/pkg/util/naming
k8s.io/apimachinery/pkg/util/net
k8s.io/apimachinery/pkg/util/runtime
k8s.io/apimachinery/pkg/util/sets
k8s.io/apimachinery/pkg/util/validation
k8s.io/apimachinery/pkg/util/validation/field
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/reflect
k8s.io/apimachinery/pkg/util/validation
k8s.io/apimachinery/pkg/util/net
# k8s.io/klog v0.0.0-20190306015804-8e90cee79f82
k8s.io/klog
# layeh.com/radius v0.0.0-20190322222518-890bc1058917
@ -45,6 +45,15 @@ Nomad tokens.
0.8.3 and earlier, the default is `64`. For Nomad version 0.8.4 and later, the default is
`256`.

- `ca_cert` `(string: "")` - CA certificate to use when verifying the Nomad server
  certificate; must be x509 PEM encoded.

- `client_cert` `(string: "")` - Client certificate used for Nomad's TLS communication;
  must be x509 PEM encoded. If this is set, `client_key` must also be set.

- `client_key` `(string: "")` - Client key used for Nomad's TLS communication;
  must be x509 PEM encoded. If this is set, `client_cert` must also be set.
### Sample Payload
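A minimal sketch of a payload for this endpoint, built from the parameters described above; the address, token, and PEM bodies are placeholders and would be replaced with your own values:

```json
{
  "address": "https://nomad.example.com:4646",
  "token": "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
  "ca_cert": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
  "client_cert": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
  "client_key": "-----BEGIN RSA PRIVATE KEY-----\n...\n-----END RSA PRIVATE KEY-----"
}
```

When configuring through the CLI, the PEM material can typically be read from files rather than pasted inline, for example `vault write nomad/config/access ca_cert=@ca.crt ...` (assuming the secrets engine is mounted at `nomad/`).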