Add couchbase, elasticsearch, and mongodbatlas back (#10222)

Updated the `Serve` function so these can be added back into Vault
This commit is contained in:
Michael Golowka 2020-10-22 17:20:17 -06:00 committed by GitHub
parent e6c8ee24ea
commit bd79fbafb3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
256 changed files with 48577 additions and 66 deletions

View File

@ -10,9 +10,8 @@ import (
"testing"
"time"
// mongodbatlas "github.com/hashicorp/vault-plugin-database-mongodbatlas"
"github.com/go-test/deep"
mongodbatlas "github.com/hashicorp/vault-plugin-database-mongodbatlas"
"github.com/hashicorp/vault/helper/namespace"
postgreshelper "github.com/hashicorp/vault/helper/testhelpers/postgresql"
vaulthttp "github.com/hashicorp/vault/http"
@ -49,7 +48,7 @@ func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) {
sys := vault.TestDynamicSystemView(cores[0].Core)
vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", consts.PluginTypeDatabase, "TestBackend_PluginMain_Postgres", []string{}, "")
vault.TestAddTestPlugin(t, cores[0].Core, "mongodb-database-plugin", consts.PluginTypeDatabase, "TestBackend_PluginMain_Mongo", []string{}, "")
// vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin", consts.PluginTypeDatabase, "TestBackend_PluginMain_MongoAtlas", []string{}, "")
vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin", consts.PluginTypeDatabase, "TestBackend_PluginMain_MongoAtlas", []string{}, "")
return cluster, sys
}
@ -80,27 +79,18 @@ func TestBackend_PluginMain_Mongo(t *testing.T) {
v5.Serve(dbType.(v5.Database))
}
// func TestBackend_PluginMain_MongoAtlas(t *testing.T) {
// if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" {
// return
// }
//
// caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv)
// if caPEM == "" {
// t.Fatal("CA cert not passed in")
// }
//
// args := []string{"--ca-cert=" + caPEM}
//
// apiClientMeta := &api.PluginAPIClientMeta{}
// flags := apiClientMeta.FlagSet()
// flags.Parse(args)
//
// err := mongodbatlas.Run(apiClientMeta.GetTLSConfig())
// if err != nil {
// t.Fatal(err)
// }
// }
// TestBackend_PluginMain_MongoAtlas is the plugin entry point used by the
// test cluster: it is a no-op under a normal `go test` run, and only acts
// when Vault has launched the test binary as a database plugin subprocess
// (signalled by the plugin unwrap-token environment variable), in which
// case it serves the MongoDB Atlas database plugin over the v5 interface.
func TestBackend_PluginMain_MongoAtlas(t *testing.T) {
	// Not running as a plugin subprocess — bail out silently.
	if token := os.Getenv(pluginutil.PluginUnwrapTokenEnv); token == "" {
		return
	}

	db, err := mongodbatlas.New()
	if err != nil {
		t.Fatalf("Failed to initialize mongodbatlas: %s", err)
	}
	v5.Serve(db.(v5.Database))
}
func TestBackend_RoleUpgrade(t *testing.T) {

View File

@ -353,8 +353,8 @@ func TestPredict_Plugins(t *testing.T) {
"cert",
"cf",
"consul",
// "couchbase-database-plugin",
// "elasticsearch-database-plugin",
"couchbase-database-plugin",
"elasticsearch-database-plugin",
"gcp",
"gcpkms",
"github",
@ -369,7 +369,7 @@ func TestPredict_Plugins(t *testing.T) {
"mongodb",
"mongodb-database-plugin",
"mongodbatlas",
// "mongodbatlas-database-plugin",
"mongodbatlas-database-plugin",
"mssql",
"mssql-database-plugin",
"mysql",

9
go.mod
View File

@ -29,7 +29,6 @@ require (
github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0
github.com/client9/misspell v0.3.4
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c
github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe // indirect
github.com/coreos/go-semver v0.2.0
github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc
github.com/docker/docker v17.12.0-ce-rc1.0.20200309214505-aa6a9891b09c+incompatible
@ -45,7 +44,7 @@ require (
github.com/go-ldap/ldap/v3 v3.1.10
github.com/go-ole/go-ole v1.2.4 // indirect
github.com/go-sql-driver/mysql v1.5.0
github.com/go-test/deep v1.0.3
github.com/go-test/deep v1.0.7
github.com/gocql/gocql v0.0.0-20200624222514-34081eda590e
github.com/golang/protobuf v1.4.2
github.com/google/go-github v17.0.0+incompatible
@ -68,7 +67,6 @@ require (
github.com/hashicorp/go-sockaddr v1.0.2
github.com/hashicorp/go-syslog v1.0.0
github.com/hashicorp/go-uuid v1.0.2
github.com/hashicorp/go-version v1.2.1 // indirect
github.com/hashicorp/golang-lru v0.5.3
github.com/hashicorp/hcl v1.0.0
github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d
@ -83,6 +81,9 @@ require (
github.com/hashicorp/vault-plugin-auth-kerberos v0.1.6
github.com/hashicorp/vault-plugin-auth-kubernetes v0.7.1-0.20200921171209-a8c355e565cb
github.com/hashicorp/vault-plugin-auth-oci v0.5.5
github.com/hashicorp/vault-plugin-database-couchbase v0.1.1-0.20201022222321-52b89dc4ff04
github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.6-0.20201022222154-56454997629c
github.com/hashicorp/vault-plugin-database-mongodbatlas v0.1.3-0.20201022222205-830fcbc5fdd2
github.com/hashicorp/vault-plugin-mock v0.16.1
github.com/hashicorp/vault-plugin-secrets-ad v0.7.1-0.20201009192637-c613b2a27345
github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.5
@ -93,7 +94,7 @@ require (
github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.1.2
github.com/hashicorp/vault-plugin-secrets-openldap v0.1.5
github.com/hashicorp/vault/api v1.0.5-0.20201001211907-38d91b749c77
github.com/hashicorp/vault/sdk v0.1.14-0.20201015192012-a69ee0f65a28
github.com/hashicorp/vault/sdk v0.1.14-0.20201022214319-d87657199d4b
github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4
github.com/jcmturner/gokrb5/v8 v8.0.0
github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f

20
go.sum
View File

@ -249,6 +249,10 @@ github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQa
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/couchbase/gocb/v2 v2.1.4 h1:HRuVhqZpVNIck3FwzTxWh5TnmGXeTmSfjhxkjeradLg=
github.com/couchbase/gocb/v2 v2.1.4/go.mod h1:lESKM6wCEajrFVSZUewYuRzNtuNtnRey5wOfcZZsH90=
github.com/couchbase/gocbcore/v9 v9.0.4 h1:VM7IiKoK25mq9CdFLLchJMzmHa5Grkn+94pQNaG3oc8=
github.com/couchbase/gocbcore/v9 v9.0.4/go.mod h1:jOSQeBSECyNvD7aS4lfuaw+pD5t6ciTOf8hrDP/4Nus=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
@ -360,8 +364,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M=
github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8=
github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o=
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
@ -562,6 +566,7 @@ github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER
github.com/hashicorp/go-retryablehttp v0.6.7 h1:8/CAEZt/+F7kR7GevNHulKkUjLht3CPmn7egmhieNKo=
github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
@ -623,6 +628,12 @@ github.com/hashicorp/vault-plugin-auth-kubernetes v0.7.1-0.20200921171209-a8c355
github.com/hashicorp/vault-plugin-auth-kubernetes v0.7.1-0.20200921171209-a8c355e565cb/go.mod h1:2c/k3nsoGPKV+zpAWCiajt4e66vncEq8Li/eKLqErAc=
github.com/hashicorp/vault-plugin-auth-oci v0.5.5 h1:nIP8g+VZd2V+LY/D5omWhLSnhHuogIJx7Bz6JyLt628=
github.com/hashicorp/vault-plugin-auth-oci v0.5.5/go.mod h1:Cn5cjR279Y+snw8LTaiLTko3KGrbigRbsQPOd2D5xDw=
github.com/hashicorp/vault-plugin-database-couchbase v0.1.1-0.20201022222321-52b89dc4ff04 h1:JyOcql4dEOtWYTAgFiWg66XsT8FpIkl3FXRodUUopXI=
github.com/hashicorp/vault-plugin-database-couchbase v0.1.1-0.20201022222321-52b89dc4ff04/go.mod h1:/746Pabh8/0b/4vEcJWYYVgiCaGgM4ntk1ULuxk9Uuw=
github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.6-0.20201022222154-56454997629c h1:ESVB07rRLx4mk5PA43kXfPFiykQe5UbE0S+BC9ffQqQ=
github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.6-0.20201022222154-56454997629c/go.mod h1:813Nvr1IQqAKdlk3yIY97M5WyxMhWOrXtYioPf9PqJg=
github.com/hashicorp/vault-plugin-database-mongodbatlas v0.1.3-0.20201022222205-830fcbc5fdd2 h1:IXU1Pf3rmqmrNizUinVBD9zgcbFhSxqFLUaHxn6swNI=
github.com/hashicorp/vault-plugin-database-mongodbatlas v0.1.3-0.20201022222205-830fcbc5fdd2/go.mod h1:2So20ndRRsDAMDyG52az6nd7NwFOZTQER9EsrgPCgVg=
github.com/hashicorp/vault-plugin-mock v0.16.1 h1:5QQvSUHxDjEEbrd2REOeacqyJnCLPD51IQzy71hx8P0=
github.com/hashicorp/vault-plugin-mock v0.16.1/go.mod h1:83G4JKlOwUtxVourn5euQfze3ZWyXcUiLj2wqrKSDIM=
github.com/hashicorp/vault-plugin-secrets-ad v0.7.1-0.20201009192637-c613b2a27345 h1:1/kWUsS8mE2GUsNYTC8XjKDzVf+hrL1K5HZw5/tJJ4Q=
@ -879,6 +890,8 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/openlyinc/pointy v1.1.2 h1:LywVV2BWC5Sp5v7FoP4bUD+2Yn5k0VNeRbU5vq9jUMY=
github.com/openlyinc/pointy v1.1.2/go.mod h1:w2Sytx+0FVuMKn37xpXIAyBNhFNBIJGR/v2m7ik1WtM=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@ -891,6 +904,7 @@ github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh
github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
github.com/oracle/oci-go-sdk v12.5.0+incompatible h1:pr08ECoaDKHWO9tnzJB1YqClEs7ZK1CFOez2DQocH14=
github.com/oracle/oci-go-sdk v12.5.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA=
github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso=
@ -1098,6 +1112,8 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547 h1:s71VGheLtWmCYsnNjf+s7XE8HsrZnd3EYGrLGWVm7nY=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547/go.mod h1:YoUyTScD3Vcv2RBm3eGVOq7i1ULiz3OuXoQFWOirmAM=
go.mongodb.org/atlas v0.5.0 h1:WoeXdXSLCyquhSqSqTa0PwpjxWZbm70E1Gkbx+w3Sco=
go.mongodb.org/atlas v0.5.0/go.mod h1:CIaBeO8GLHhtYLw7xSSXsw7N90Z4MFY87Oy9qcPyuEs=
go.mongodb.org/mongo-driver v1.4.2 h1:WlnEglfTg/PfPq4WXs2Vkl/5ICC6hoG8+r+LraPmGk4=
go.mongodb.org/mongo-driver v1.4.2/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A=

View File

@ -11,9 +11,9 @@ import (
credKube "github.com/hashicorp/vault-plugin-auth-kubernetes"
credOCI "github.com/hashicorp/vault-plugin-auth-oci"
// dbCouchbase "github.com/hashicorp/vault-plugin-database-couchbase"
// dbElastic "github.com/hashicorp/vault-plugin-database-elasticsearch"
// dbMongoAtlas "github.com/hashicorp/vault-plugin-database-mongodbatlas"
dbCouchbase "github.com/hashicorp/vault-plugin-database-couchbase"
dbElastic "github.com/hashicorp/vault-plugin-database-elasticsearch"
dbMongoAtlas "github.com/hashicorp/vault-plugin-database-mongodbatlas"
credAppId "github.com/hashicorp/vault/builtin/credential/app-id"
credAppRole "github.com/hashicorp/vault/builtin/credential/approle"
credAws "github.com/hashicorp/vault/builtin/credential/aws"
@ -99,16 +99,16 @@ func newRegistry() *registry {
"mysql-rds-database-plugin": dbMysql.New(true),
"mysql-legacy-database-plugin": dbMysql.New(true),
"cassandra-database-plugin": dbCass.New,
// "couchbase-database-plugin": dbCouchbase.New,
// "elasticsearch-database-plugin": dbElastic.New,
"hana-database-plugin": dbHana.New,
"influxdb-database-plugin": dbInflux.New,
"mongodb-database-plugin": dbMongo.New,
// "mongodbatlas-database-plugin": dbMongoAtlas.New,
"mssql-database-plugin": dbMssql.New,
"postgresql-database-plugin": dbPostgres.New,
"redshift-database-plugin": dbRedshift.New(true),
"cassandra-database-plugin": dbCass.New,
"couchbase-database-plugin": dbCouchbase.New,
"elasticsearch-database-plugin": dbElastic.New,
"hana-database-plugin": dbHana.New,
"influxdb-database-plugin": dbInflux.New,
"mongodb-database-plugin": dbMongo.New,
"mongodbatlas-database-plugin": dbMongoAtlas.New,
"mssql-database-plugin": dbMssql.New,
"postgresql-database-plugin": dbPostgres.New,
"redshift-database-plugin": dbRedshift.New(true),
},
logicalBackends: map[string]logical.Factory{
"ad": logicalAd.Factory,

View File

@ -2045,12 +2045,12 @@ func (m *mockBuiltinRegistry) Keys(pluginType consts.PluginType) []string {
"mysql-legacy-database-plugin",
"cassandra-database-plugin",
// "couchbase-database-plugin",
// "elasticsearch-database-plugin",
"couchbase-database-plugin",
"elasticsearch-database-plugin",
"hana-database-plugin",
"influxdb-database-plugin",
"mongodb-database-plugin",
// "mongodbatlas-database-plugin",
"mongodbatlas-database-plugin",
"mssql-database-plugin",
"postgresql-database-plugin",
"redshift-database-plugin",

3
vendor/github.com/couchbase/gocb/v2/.gitignore generated vendored Normal file
View File

@ -0,0 +1,3 @@
*~
.project

3
vendor/github.com/couchbase/gocb/v2/.gitmodules generated vendored Normal file
View File

@ -0,0 +1,3 @@
[submodule "testdata/sdk-testcases"]
path = testdata/sdk-testcases
url = https://github.com/couchbaselabs/sdk-testcases

18
vendor/github.com/couchbase/gocb/v2/.golangci.yml generated vendored Normal file
View File

@ -0,0 +1,18 @@
run:
modules-download-mode: readonly
tests: false
skip-files:
- logging.go # Logging has some utility functions that are useful to have around which get flagged up
linters:
enable:
- bodyclose
- golint
- gosec
- unconvert
linters-settings:
golint:
set-exit-status: true
min-confidence: 0.81
errcheck:
check-type-assertions: true
check-blank: true

202
vendor/github.com/couchbase/gocb/v2/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

39
vendor/github.com/couchbase/gocb/v2/Makefile generated vendored Normal file
View File

@ -0,0 +1,39 @@
# Install developer tooling (golangci-lint, mockery) and pull the shared
# test-case submodule.
devsetup:
	go get github.com/golangci/golangci-lint/cmd/golangci-lint
	go get github.com/vektra/mockery/.../
	git submodule update --remote --init --recursive

# Run the full test suite.
test:
	go test ./

# Short mode skips the slower tests.
fasttest:
	go test -short ./

# Produce a coverage profile in cover.out.
cover:
	go test -coverprofile=cover.out ./

lint:
	golangci-lint run -v

# Lint plus a quick race-enabled, coverage-enabled test pass.
check: lint
	go test -short -cover -race ./

bench:
	go test -bench=. -run=none --disable-logger=true

# Refresh the sdk-testcases submodule (see .gitmodules).
updatetestcases:
	git submodule update --remote --init --recursive

# Regenerate the in-package test mocks for each internal provider interface.
updatemocks:
	mockery -name connectionManager -output . -testonly -inpkg
	mockery -name kvProvider -output . -testonly -inpkg
	mockery -name httpProvider -output . -testonly -inpkg
	mockery -name diagnosticsProvider -output . -testonly -inpkg
	mockery -name mgmtProvider -output . -testonly -inpkg
	mockery -name analyticsProvider -output . -testonly -inpkg
	mockery -name queryProvider -output . -testonly -inpkg
	mockery -name searchProvider -output . -testonly -inpkg
	mockery -name viewProvider -output . -testonly -inpkg
	mockery -name waitUntilReadyProvider -output . -testonly -inpkg
	# pendingOp is manually mocked

.PHONY: all test devsetup fasttest lint cover check bench updatetestcases updatemocks

53
vendor/github.com/couchbase/gocb/v2/README.md generated vendored Normal file
View File

@ -0,0 +1,53 @@
[![GoDoc](https://godoc.org/github.com/couchbase/gocb?status.png)](https://godoc.org/github.com/couchbase/gocb)
# Couchbase Go Client
This is the official Couchbase Go SDK. If you are looking for our
previous unofficial prototype Go client library, please see:
[http://www.github.com/couchbase/go-couchbase](http://www.github.com/couchbase/go-couchbase).
The Go SDK library allows you to connect to a Couchbase cluster from
Go. It is written in pure Go and uses the included gocbcore library to
handle communication with the cluster over the Couchbase binary
protocol.
## Useful Links
### Source
The project source is hosted at [http://github.com/couchbase/gocb](http://github.com/couchbase/gocb).
### Documentation
You can explore our API reference through godoc at [https://godoc.org/github.com/couchbase/gocb](https://godoc.org/github.com/couchbase/gocb).
You can also find documentation for the Go SDK at the Couchbase [Developer Portal](https://developer.couchbase.com/documentation/server/current/sdk/go/start-using-sdk.html).
### Bug Tracker
Issues are tracked on Couchbase's public [issues.couchbase.com](http://www.couchbase.com/issues/browse/GOCBC).
Contact [the site admins](https://issues.couchbase.com/secure/ContactAdministrators!default.jspa)
regarding login or other problems at issues.couchbase.com (officially) or ask
around in [couchbase/discuss on gitter.im](https://gitter.im/couchbase/discuss)
(unofficially).
## Installing
To install the latest stable version, run:
```bash
go get github.com/couchbase/gocb/v2
```
To install the latest developer version, run:
```bash
go get github.com/couchbase/gocb
```
## License
Copyright 2016 Couchbase Inc.
Licensed under the Apache License, Version 2.0.
See
[LICENSE](https://github.com/couchbase/gocb/blob/master/LICENSE)
for further details.

View File

@ -0,0 +1,89 @@
package gocb

import (
	"strings"
	"time"

	"github.com/google/uuid"
)

// AnalyticsScanConsistency indicates the level of data consistency desired for an analytics query.
type AnalyticsScanConsistency uint

const (
	// AnalyticsScanConsistencyNotBounded indicates no data consistency is required.
	AnalyticsScanConsistencyNotBounded AnalyticsScanConsistency = iota + 1

	// AnalyticsScanConsistencyRequestPlus indicates that request-level data consistency is required.
	AnalyticsScanConsistencyRequestPlus
)

// AnalyticsOptions is the set of options available to an Analytics query.
type AnalyticsOptions struct {
	// ClientContextID provides a unique ID for this query which can be used matching up requests between connectionManager and
	// server. If not provided will be assigned a uuid value.
	ClientContextID string

	// Priority sets whether this query should be assigned as high priority by the analytics engine.
	Priority bool

	// PositionalParameters are sent as the "args" array of the request body;
	// mutually exclusive with NamedParameters (toMap rejects using both).
	PositionalParameters []interface{}
	// NamedParameters are sent as individual request-body keys, each prefixed
	// with "$" when not already; mutually exclusive with PositionalParameters.
	NamedParameters map[string]interface{}
	// Readonly, when true, sets the "readonly" flag in the request body.
	Readonly bool
	// ScanConsistency selects the consistency level; the zero value omits the
	// "scan_consistency" field from the request entirely.
	ScanConsistency AnalyticsScanConsistency

	// Raw provides a way to provide extra parameters in the request body for the query.
	Raw map[string]interface{}

	// Timeout bounds the query — NOTE(review): not referenced by toMap in this
	// file; presumably applied by the execution layer, confirm against callers.
	Timeout time.Duration
	// RetryStrategy controls retries for this query (project-declared type).
	RetryStrategy RetryStrategy
	// parentSpan carries the tracing context of the enclosing operation —
	// NOTE(review): not used within this file, presumably read by the executor.
	parentSpan requestSpanContext
}
// toMap converts the options into the key/value form placed in the analytics
// request body. It validates the scan-consistency value and rejects the
// simultaneous use of positional and named parameters.
func (opts *AnalyticsOptions) toMap() (map[string]interface{}, error) {
	body := make(map[string]interface{})

	// Every request carries a client context ID; generate one when absent.
	contextID := opts.ClientContextID
	if contextID == "" {
		contextID = uuid.New().String()
	}
	body["client_context_id"] = contextID

	switch opts.ScanConsistency {
	case 0:
		// Unset: omit the field and let the server apply its default.
	case AnalyticsScanConsistencyNotBounded:
		body["scan_consistency"] = "not_bounded"
	case AnalyticsScanConsistencyRequestPlus:
		body["scan_consistency"] = "request_plus"
	default:
		return nil, makeInvalidArgumentsError("unexpected consistency option")
	}

	if opts.PositionalParameters != nil && opts.NamedParameters != nil {
		return nil, makeInvalidArgumentsError("positional and named parameters must be used exclusively")
	}
	if opts.PositionalParameters != nil {
		body["args"] = opts.PositionalParameters
	}
	// Named parameters are addressed server-side with a "$" prefix; add it
	// when the caller omitted it. (Ranging over a nil map is a no-op.)
	for key, value := range opts.NamedParameters {
		if !strings.HasPrefix(key, "$") {
			key = "$" + key
		}
		body[key] = value
	}

	if opts.Readonly {
		body["readonly"] = true
	}
	// Raw entries are merged last and may overwrite anything set above.
	for k, v := range opts.Raw {
		body[k] = v
	}

	return body, nil
}

36
vendor/github.com/couchbase/gocb/v2/asyncopmanager.go generated vendored Normal file
View File

@ -0,0 +1,36 @@
package gocb
import (
gocbcore "github.com/couchbase/gocbcore/v9"
)
// asyncOpManager bridges a callback-style operation to a blocking call:
// the completion side fires Resolve or Reject, and the caller blocks in
// Wait until the signal arrives.
type asyncOpManager struct {
	// signal is buffered with capacity 1 (see newAsyncOpManager) so the
	// completing side never blocks even if Wait has not started receiving.
	signal chan struct{}
	// wasResolved records whether the operation completed via Resolve
	// (true) as opposed to Reject (false).
	wasResolved bool
}
// Reject signals completion without marking the operation as resolved;
// wasResolved remains false.
func (m *asyncOpManager) Reject() {
	m.signal <- struct{}{}
}
// Resolve marks the operation as successfully resolved and signals any
// waiter blocked in Wait.
func (m *asyncOpManager) Resolve() {
	m.wasResolved = true
	m.signal <- struct{}{}
}
func (m *asyncOpManager) Wait(op gocbcore.PendingOp, err error) error {
if err != nil {
return err
}
<-m.signal
return nil
}
func newAsyncOpManager() *asyncOpManager {
return &asyncOpManager{
signal: make(chan struct{}, 1),
}
}

143
vendor/github.com/couchbase/gocb/v2/auth.go generated vendored Normal file
View File

@ -0,0 +1,143 @@
package gocb
import (
"crypto/tls"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// UserPassPair represents a username and password pair.
// VOLATILE: This API is subject to change at any time.
type UserPassPair gocbcore.UserPassPair

// AuthCredsRequest encapsulates the data for a credential request
// from the new Authenticator interface.
// VOLATILE: This API is subject to change at any time.
type AuthCredsRequest struct {
	// Service is the service the credentials will be used against.
	Service ServiceType
	// Endpoint is the specific endpoint being authenticated to.
	Endpoint string
}

// AuthCertRequest encapsulates the data for a certificate request
// from the new Authenticator interface.
// VOLATILE: This API is subject to change at any time.
type AuthCertRequest struct {
	// Service is the service the certificate will be used against.
	Service ServiceType
	// Endpoint is the specific endpoint being authenticated to.
	Endpoint string
}

// Authenticator provides an interface to authenticate to each service. Note that
// only authenticators implemented via the SDK are stable.
type Authenticator interface {
	// SupportsTLS reports whether this authenticator can authenticate a TLS connection.
	// VOLATILE: This API is subject to change at any time.
	SupportsTLS() bool
	// SupportsNonTLS reports whether this authenticator can authenticate a non-TLS connection.
	// VOLATILE: This API is subject to change at any time.
	SupportsNonTLS() bool
	// Certificate returns the client certificate to present, if any.
	// VOLATILE: This API is subject to change at any time.
	Certificate(req AuthCertRequest) (*tls.Certificate, error)
	// Credentials returns the username/password pairs for a service endpoint.
	// VOLATILE: This API is subject to change at any time.
	Credentials(req AuthCredsRequest) ([]UserPassPair, error)
}
// PasswordAuthenticator implements an Authenticator which uses an RBAC username and password.
type PasswordAuthenticator struct {
	Username string
	Password string
}

// SupportsTLS returns whether this authenticator can authenticate a TLS connection.
// VOLATILE: This API is subject to change at any time.
func (ra PasswordAuthenticator) SupportsTLS() bool {
	return true
}

// SupportsNonTLS returns whether this authenticator can authenticate a non-TLS connection.
// VOLATILE: This API is subject to change at any time.
func (ra PasswordAuthenticator) SupportsNonTLS() bool {
	return true
}

// Certificate returns the certificate to use when connecting to a specified server.
// Password authentication presents no client certificate, so this is always nil.
// VOLATILE: This API is subject to change at any time.
func (ra PasswordAuthenticator) Certificate(req AuthCertRequest) (*tls.Certificate, error) {
	return nil, nil
}

// Credentials returns the credentials for a particular service.
// The same username/password pair is returned regardless of service or endpoint.
// VOLATILE: This API is subject to change at any time.
func (ra PasswordAuthenticator) Credentials(req AuthCredsRequest) ([]UserPassPair, error) {
	return []UserPassPair{{
		Username: ra.Username,
		Password: ra.Password,
	}}, nil
}
// CertificateAuthenticator implements an Authenticator which can be used with certificate authentication.
type CertificateAuthenticator struct {
	ClientCertificate *tls.Certificate
}

// SupportsTLS returns whether this authenticator can authenticate a TLS connection.
// VOLATILE: This API is subject to change at any time.
func (ca CertificateAuthenticator) SupportsTLS() bool {
	return true
}

// SupportsNonTLS returns whether this authenticator can authenticate a non-TLS connection.
// Certificate authentication requires TLS, so this is always false.
// VOLATILE: This API is subject to change at any time.
func (ca CertificateAuthenticator) SupportsNonTLS() bool {
	return false
}

// Certificate returns the certificate to use when connecting to a specified server.
// VOLATILE: This API is subject to change at any time.
func (ca CertificateAuthenticator) Certificate(req AuthCertRequest) (*tls.Certificate, error) {
	return ca.ClientCertificate, nil
}

// Credentials returns the credentials for a particular service.
// An empty pair is returned: the certificate itself carries the identity.
// VOLATILE: This API is subject to change at any time.
func (ca CertificateAuthenticator) Credentials(req AuthCredsRequest) ([]UserPassPair, error) {
	return []UserPassPair{{
		Username: "",
		Password: "",
	}}, nil
}
// coreAuthWrapper adapts a gocb Authenticator to the interface gocbcore
// expects, translating request and response types in both directions.
type coreAuthWrapper struct {
	auth Authenticator
}

// SupportsTLS delegates to the wrapped authenticator.
func (auth *coreAuthWrapper) SupportsTLS() bool {
	return auth.auth.SupportsTLS()
}

// SupportsNonTLS delegates to the wrapped authenticator.
func (auth *coreAuthWrapper) SupportsNonTLS() bool {
	return auth.auth.SupportsNonTLS()
}

// Certificate translates the core certificate request and delegates it.
func (auth *coreAuthWrapper) Certificate(req gocbcore.AuthCertRequest) (*tls.Certificate, error) {
	gocbReq := AuthCertRequest{
		Service:  ServiceType(req.Service),
		Endpoint: req.Endpoint,
	}
	return auth.auth.Certificate(gocbReq)
}

// Credentials translates the core credentials request, delegates it, and
// converts each returned pair back into the core representation.
func (auth *coreAuthWrapper) Credentials(req gocbcore.AuthCredsRequest) ([]gocbcore.UserPassPair, error) {
	gocbReq := AuthCredsRequest{
		Service:  ServiceType(req.Service),
		Endpoint: req.Endpoint,
	}
	creds, err := auth.auth.Credentials(gocbReq)
	if err != nil {
		return nil, err
	}

	coreCreds := make([]gocbcore.UserPassPair, 0, len(creds))
	for _, userPass := range creds {
		coreCreds = append(coreCreds, gocbcore.UserPassPair(userPass))
	}
	return coreCreds, nil
}

153
vendor/github.com/couchbase/gocb/v2/bucket.go generated vendored Normal file
View File

@ -0,0 +1,153 @@
package gocb
import (
"time"
"github.com/couchbase/gocbcore/v9"
)
// Bucket represents a single bucket within a cluster.
type Bucket struct {
	// bucketName is the name of the bucket on the server.
	bucketName string

	// Configuration copied from the owning Cluster at creation time (see newBucket).
	timeoutsConfig       TimeoutsConfig
	transcoder           Transcoder
	retryStrategyWrapper *retryStrategyWrapper
	tracer               requestTracer
	useServerDurations   bool
	useMutationTokens    bool

	// bootstrapError, when set, is returned instead of attempting operations.
	bootstrapError    error
	connectionManager connectionManager
}

// newBucket creates a Bucket bound to the given cluster, copying the
// cluster-level configuration it needs.
func newBucket(c *Cluster, bucketName string) *Bucket {
	return &Bucket{
		bucketName:           bucketName,
		timeoutsConfig:       c.timeoutsConfig,
		transcoder:           c.transcoder,
		retryStrategyWrapper: c.retryStrategyWrapper,
		tracer:               c.tracer,
		useServerDurations:   c.useServerDurations,
		useMutationTokens:    c.useMutationTokens,
		connectionManager:    c.connectionManager,
	}
}

// setBootstrapError records an error that will fail subsequent operations.
func (b *Bucket) setBootstrapError(err error) {
	b.bootstrapError = err
}

// getKvProvider returns the key-value provider for this bucket, or the
// recorded bootstrap error if the bucket failed to initialize.
func (b *Bucket) getKvProvider() (kvProvider, error) {
	if b.bootstrapError != nil {
		return nil, b.bootstrapError
	}

	agent, err := b.connectionManager.getKvProvider(b.bucketName)
	if err != nil {
		return nil, err
	}

	return agent, nil
}
// Name returns the name of the bucket.
func (b *Bucket) Name() string {
	return b.bucketName
}

// Scope returns an instance of a Scope.
// VOLATILE: This API is subject to change at any time.
func (b *Bucket) Scope(scopeName string) *Scope {
	return newScope(b, scopeName)
}

// DefaultScope returns an instance of the default scope ("_default").
// VOLATILE: This API is subject to change at any time.
func (b *Bucket) DefaultScope() *Scope {
	return b.Scope("_default")
}

// Collection returns an instance of a collection from within the default scope.
// VOLATILE: This API is subject to change at any time.
func (b *Bucket) Collection(collectionName string) *Collection {
	return b.DefaultScope().Collection(collectionName)
}

// DefaultCollection returns an instance of the default collection
// ("_default._default").
func (b *Bucket) DefaultCollection() *Collection {
	return b.DefaultScope().Collection("_default")
}

// ViewIndexes returns a ViewIndexManager instance for managing views.
func (b *Bucket) ViewIndexes() *ViewIndexManager {
	return &ViewIndexManager{
		mgmtProvider: b,
		bucketName:   b.Name(),
		tracer:       b.tracer,
	}
}

// Collections provides functions for managing collections.
func (b *Bucket) Collections() *CollectionManager {
	// TODO: return error for unsupported collections
	return &CollectionManager{
		mgmtProvider: b,
		bucketName:   b.Name(),
		tracer:       b.tracer,
	}
}
// WaitUntilReady will wait for the bucket object to be ready for use.
// At present this will wait until memd connections have been established with the server and are ready
// to be used before performing a ping against the specified services (except KeyValue) which also
// exist in the cluster map.
// If no services are specified then will wait until KeyValue is ready.
// Valid service types are: ServiceTypeKeyValue, ServiceTypeManagement, ServiceTypeQuery, ServiceTypeSearch,
// ServiceTypeAnalytics, ServiceTypeViews.
func (b *Bucket) WaitUntilReady(timeout time.Duration, opts *WaitUntilReadyOptions) error {
	if opts == nil {
		opts = &WaitUntilReadyOptions{}
	}
	if b.bootstrapError != nil {
		return b.bootstrapError
	}

	provider, err := b.connectionManager.getWaitUntilReadyProvider(b.bucketName)
	if err != nil {
		return err
	}

	// A zero desired state means the caller wants the default: fully online.
	desiredState := opts.DesiredState
	if desiredState == 0 {
		desiredState = ClusterStateOnline
	}

	// Convert the SDK-level service types into their core equivalents.
	coreServices := make([]gocbcore.ServiceType, 0, len(opts.ServiceTypes))
	for _, svc := range opts.ServiceTypes {
		coreServices = append(coreServices, gocbcore.ServiceType(svc))
	}

	return provider.WaitUntilReady(
		time.Now().Add(timeout),
		gocbcore.WaitUntilReadyOptions{
			DesiredState: gocbcore.ClusterState(desiredState),
			ServiceTypes: coreServices,
		},
	)
}

View File

@ -0,0 +1,389 @@
package gocb
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"strings"
"time"
"github.com/google/uuid"
"github.com/pkg/errors"
"github.com/couchbase/gocbcore/v9"
)
// CollectionSpec describes the specification of a collection.
type CollectionSpec struct {
	Name      string
	ScopeName string
	// MaxExpiry is the maximum document TTL for the collection; it is sent to
	// the server as whole seconds (see CreateCollection).
	MaxExpiry time.Duration
}

// ScopeSpec describes the specification of a scope.
type ScopeSpec struct {
	Name        string
	Collections []CollectionSpec
}

// These 3 types are temporary. They are necessary for now as the server beta was released with ns_server returning
// a different jsonManifest format to what it will return in the future.
type jsonManifest struct {
	UID    uint64                       `json:"uid"`
	Scopes map[string]jsonManifestScope `json:"scopes"`
}

// jsonManifestScope is the legacy wire form of a scope, keyed by scope name
// in jsonManifest.Scopes.
type jsonManifestScope struct {
	UID         uint32                            `json:"uid"`
	Collections map[string]jsonManifestCollection `json:"collections"`
}

// jsonManifestCollection is the legacy wire form of a collection.
type jsonManifestCollection struct {
	UID uint32 `json:"uid"`
}

// CollectionManager provides methods for performing collections management.
type CollectionManager struct {
	mgmtProvider mgmtProvider
	bucketName   string
	tracer       requestTracer
}
// tryParseErrorMessage inspects a management response body and maps known
// server error messages onto typed sentinel errors. It returns nil when the
// body cannot be read, letting callers fall back to a generic status error.
func (cm *CollectionManager) tryParseErrorMessage(req *mgmtRequest, resp *mgmtResponse) error {
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		logDebugf("failed to read http body: %s", err)
		return nil
	}

	errText := strings.ToLower(string(b))

	if resp.StatusCode == 404 {
		// Bug fix: the second branch previously duplicated the "scope" check,
		// making the collection-not-found case unreachable. Check collections
		// first since "collection" messages may also mention the scope.
		if strings.Contains(errText, "not found") && strings.Contains(errText, "collection") {
			return makeGenericMgmtError(ErrCollectionNotFound, req, resp)
		} else if strings.Contains(errText, "not found") && strings.Contains(errText, "scope") {
			return makeGenericMgmtError(ErrScopeNotFound, req, resp)
		}
	}

	if strings.Contains(errText, "already exists") && strings.Contains(errText, "collection") {
		return makeGenericMgmtError(ErrCollectionExists, req, resp)
	} else if strings.Contains(errText, "already exists") && strings.Contains(errText, "scope") {
		return makeGenericMgmtError(ErrScopeExists, req, resp)
	}

	return makeGenericMgmtError(errors.New(errText), req, resp)
}
// GetAllScopesOptions is the set of options available to the GetAllScopes operation.
type GetAllScopesOptions struct {
	// Timeout bounds the management request; zero uses the client default.
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}
// GetAllScopes gets all scopes (and their collections) from the bucket.
func (cm *CollectionManager) GetAllScopes(opts *GetAllScopesOptions) ([]ScopeSpec, error) {
	if opts == nil {
		opts = &GetAllScopesOptions{}
	}

	span := cm.tracer.StartSpan("GetAllScopes", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s/collections", cm.bucketName),
		Method:        "GET",
		RetryStrategy: opts.RetryStrategy,
		IsIdempotent:  true,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := cm.mgmtProvider.executeMgmtRequest(req)
	if err != nil {
		// Bug fix: on transport failure resp may be nil, so parsing its body
		// here (as the original did) would panic. Wrap the error instead, as
		// every sibling method does.
		return nil, makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		colErr := cm.tryParseErrorMessage(&req, resp)
		if colErr != nil {
			return nil, colErr
		}
		return nil, makeMgmtBadStatusError("failed to get all scopes", &req, resp)
	}

	// Bug fix: read the body once so both decode attempts see the full
	// payload. The original re-decoded from the already-consumed resp.Body,
	// so the legacy-manifest fallback could never succeed.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	var scopes []ScopeSpec
	var mfest gocbcore.Manifest
	if err := json.Unmarshal(body, &mfest); err == nil {
		for _, scope := range mfest.Scopes {
			var collections []CollectionSpec
			for _, col := range scope.Collections {
				collections = append(collections, CollectionSpec{
					Name:      col.Name,
					ScopeName: scope.Name,
				})
			}
			scopes = append(scopes, ScopeSpec{
				Name:        scope.Name,
				Collections: collections,
			})
		}
	} else {
		// Temporary support for older server versions which return the
		// pre-GA manifest format (see jsonManifest).
		var oldMfest jsonManifest
		if err := json.Unmarshal(body, &oldMfest); err != nil {
			return nil, err
		}

		for scopeName, scope := range oldMfest.Scopes {
			var collections []CollectionSpec
			for colName := range scope.Collections {
				collections = append(collections, CollectionSpec{
					Name:      colName,
					ScopeName: scopeName,
				})
			}
			scopes = append(scopes, ScopeSpec{
				Name:        scopeName,
				Collections: collections,
			})
		}
	}

	return scopes, nil
}
// CreateCollectionOptions is the set of options available to the CreateCollection operation.
type CreateCollectionOptions struct {
	// Timeout bounds the management request; zero uses the client default.
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}
// CreateCollection creates a new collection on the bucket. Both the
// collection name and its scope name must be non-empty.
func (cm *CollectionManager) CreateCollection(spec CollectionSpec, opts *CreateCollectionOptions) error {
	if spec.Name == "" {
		return makeInvalidArgumentsError("collection name cannot be empty")
	}
	if spec.ScopeName == "" {
		return makeInvalidArgumentsError("scope name cannot be empty")
	}
	if opts == nil {
		opts = &CreateCollectionOptions{}
	}

	span := cm.tracer.StartSpan("CreateCollection", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	posts := url.Values{}
	posts.Add("name", spec.Name)
	// MaxExpiry is sent as whole seconds under the server's "maxTTL" key.
	if spec.MaxExpiry > 0 {
		posts.Add("maxTTL", fmt.Sprintf("%d", int(spec.MaxExpiry.Seconds())))
	}

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s/collections/%s", cm.bucketName, spec.ScopeName),
		Method:        "POST",
		Body:          []byte(posts.Encode()),
		ContentType:   "application/x-www-form-urlencoded",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := cm.mgmtProvider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	// The deferred close handles all exit paths; the original additionally
	// closed the body explicitly on success, double-closing it.
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		colErr := cm.tryParseErrorMessage(&req, resp)
		if colErr != nil {
			return colErr
		}
		return makeMgmtBadStatusError("failed to create collection", &req, resp)
	}

	return nil
}
// DropCollectionOptions is the set of options available to the DropCollection operation.
type DropCollectionOptions struct {
	// Timeout bounds the management request; zero uses the client default.
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}
// DropCollection removes a collection. Both the collection name and its
// scope name must be non-empty.
func (cm *CollectionManager) DropCollection(spec CollectionSpec, opts *DropCollectionOptions) error {
	if spec.Name == "" {
		return makeInvalidArgumentsError("collection name cannot be empty")
	}
	if spec.ScopeName == "" {
		return makeInvalidArgumentsError("scope name cannot be empty")
	}
	if opts == nil {
		opts = &DropCollectionOptions{}
	}

	span := cm.tracer.StartSpan("DropCollection", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s/collections/%s/%s", cm.bucketName, spec.ScopeName, spec.Name),
		Method:        "DELETE",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := cm.mgmtProvider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	// The deferred close handles all exit paths; the original additionally
	// closed the body explicitly on success, double-closing it.
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		colErr := cm.tryParseErrorMessage(&req, resp)
		if colErr != nil {
			return colErr
		}
		return makeMgmtBadStatusError("failed to drop collection", &req, resp)
	}

	return nil
}
// CreateScopeOptions is the set of options available to the CreateScope operation.
type CreateScopeOptions struct {
	// Timeout bounds the management request; zero uses the client default.
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}
// CreateScope creates a new scope on the bucket. The scope name must be
// non-empty.
func (cm *CollectionManager) CreateScope(scopeName string, opts *CreateScopeOptions) error {
	if scopeName == "" {
		return makeInvalidArgumentsError("scope name cannot be empty")
	}
	if opts == nil {
		opts = &CreateScopeOptions{}
	}

	span := cm.tracer.StartSpan("CreateScope", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	posts := url.Values{}
	posts.Add("name", scopeName)

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s/collections", cm.bucketName),
		Method:        "POST",
		Body:          []byte(posts.Encode()),
		ContentType:   "application/x-www-form-urlencoded",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := cm.mgmtProvider.executeMgmtRequest(req)
	if err != nil {
		// Consistency fix: wrap the transport error like every sibling method
		// does, instead of returning it bare.
		return makeGenericMgmtError(err, &req, resp)
	}
	// The deferred close handles all exit paths; the original additionally
	// closed the body explicitly on success, double-closing it.
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		colErr := cm.tryParseErrorMessage(&req, resp)
		if colErr != nil {
			return colErr
		}
		return makeMgmtBadStatusError("failed to create scope", &req, resp)
	}

	return nil
}
// DropScopeOptions is the set of options available to the DropScope operation.
type DropScopeOptions struct {
	// Timeout bounds the management request; zero uses the client default.
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}
// DropScope removes a scope. The scope name must be non-empty.
func (cm *CollectionManager) DropScope(scopeName string, opts *DropScopeOptions) error {
	// Robustness/consistency fix: validate the name like CreateScope does;
	// the original would issue a DELETE against the collections root.
	if scopeName == "" {
		return makeInvalidArgumentsError("scope name cannot be empty")
	}
	if opts == nil {
		opts = &DropScopeOptions{}
	}

	span := cm.tracer.StartSpan("DropScope", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s/collections/%s", cm.bucketName, scopeName),
		Method:        "DELETE",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := cm.mgmtProvider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	// The deferred close handles all exit paths; the original additionally
	// closed the body explicitly on success, double-closing it.
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		colErr := cm.tryParseErrorMessage(&req, resp)
		if colErr != nil {
			return colErr
		}
		return makeMgmtBadStatusError("failed to drop scope", &req, resp)
	}

	return nil
}

20
vendor/github.com/couchbase/gocb/v2/bucket_internal.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
package gocb
import gocbcore "github.com/couchbase/gocbcore/v9"
// InternalBucket is used for internal functionality.
// Internal: This should never be used and is not supported.
type InternalBucket struct {
	bucket *Bucket
}

// Internal returns an InternalBucket wrapping this bucket.
// Internal: This should never be used and is not supported.
func (b *Bucket) Internal() *InternalBucket {
	return &InternalBucket{bucket: b}
}

// IORouter returns the bucket's internal core router (the gocbcore agent).
func (ib *InternalBucket) IORouter() (*gocbcore.Agent, error) {
	return ib.bucket.connectionManager.connection(ib.bucket.Name())
}

95
vendor/github.com/couchbase/gocb/v2/bucket_ping.go generated vendored Normal file
View File

@ -0,0 +1,95 @@
package gocb
import (
"encoding/json"
"time"
)
// EndpointPingReport represents a single entry in a ping report.
type EndpointPingReport struct {
	ID        string
	Local     string
	Remote    string
	State     PingState
	// Error holds the failure text for this endpoint, if any.
	Error     string
	Namespace string
	Latency   time.Duration
}

// PingResult encapsulates the details from a executed ping operation.
type PingResult struct {
	ID       string
	Services map[ServiceType][]EndpointPingReport

	// sdk identifies the SDK that produced the report; serialized by MarshalJSON.
	sdk string
}

// jsonEndpointPingReport is the wire form of EndpointPingReport.
type jsonEndpointPingReport struct {
	ID        string `json:"id,omitempty"`
	Local     string `json:"local,omitempty"`
	Remote    string `json:"remote,omitempty"`
	State     string `json:"state,omitempty"`
	Error     string `json:"error,omitempty"`
	Namespace string `json:"namespace,omitempty"`
	LatencyUs uint64 `json:"latency_us"`
}

// jsonPingReport is the wire form of PingResult.
type jsonPingReport struct {
	Version  uint16                              `json:"version"`
	SDK      string                              `json:"sdk,omitempty"`
	ID       string                              `json:"id,omitempty"`
	Services map[string][]jsonEndpointPingReport `json:"services,omitempty"`
}
// MarshalJSON generates a JSON representation of this ping report in the
// version-2 wire format.
func (report *PingResult) MarshalJSON() ([]byte, error) {
	jsonReport := jsonPingReport{
		Version:  2,
		SDK:      report.sdk,
		ID:       report.ID,
		Services: make(map[string][]jsonEndpointPingReport),
	}

	for serviceType, serviceInfo := range report.Services {
		serviceStr := serviceTypeToString(serviceType)
		if _, ok := jsonReport.Services[serviceStr]; !ok {
			jsonReport.Services[serviceStr] = make([]jsonEndpointPingReport, 0)
		}

		for _, service := range serviceInfo {
			jsonReport.Services[serviceStr] = append(jsonReport.Services[serviceStr], jsonEndpointPingReport{
				ID:        service.ID,
				Local:     service.Local,
				Remote:    service.Remote,
				State:     pingStateToString(service.State),
				Error:     service.Error,
				Namespace: service.Namespace,
				// Bug fix: the field is "latency_us" (microseconds) but the
				// original divided by time.Nanosecond, emitting nanoseconds.
				LatencyUs: uint64(service.Latency / time.Microsecond),
			})
		}
	}

	return json.Marshal(&jsonReport)
}
// PingOptions are the options available to the Ping operation.
type PingOptions struct {
	// ServiceTypes selects which services to ping; empty-set semantics are
	// handled by the shared ping helper.
	ServiceTypes []ServiceType
	// ReportID, when set, is used as the report's ID.
	ReportID string
	Timeout  time.Duration
}

// Ping will ping a list of services and verify they are active and
// responding in an acceptable period of time.
func (b *Bucket) Ping(opts *PingOptions) (*PingResult, error) {
	if opts == nil {
		opts = &PingOptions{}
	}

	provider, err := b.connectionManager.getDiagnosticsProvider(b.bucketName)
	if err != nil {
		return nil, err
	}

	// Delegate to the shared ping implementation with bucket-level timeouts.
	return ping(provider, opts, b.timeoutsConfig)
}

View File

@ -0,0 +1,460 @@
package gocb
import (
"encoding/json"
"fmt"
"io/ioutil"
"strings"
"time"
"github.com/pkg/errors"
)
// DesignDocumentNamespace represents which namespace a design document resides in.
type DesignDocumentNamespace uint

const (
	// DesignDocumentNamespaceProduction means that a design document resides in the production namespace.
	DesignDocumentNamespaceProduction DesignDocumentNamespace = iota

	// DesignDocumentNamespaceDevelopment means that a design document resides in the development namespace.
	DesignDocumentNamespaceDevelopment
)

// jsonView is the JSON wire representation of a single view.
type jsonView struct {
	Map    string `json:"map,omitempty"`
	Reduce string `json:"reduce,omitempty"`
}

// jsonDesignDocument is the JSON wire representation of a design document.
type jsonDesignDocument struct {
	Views map[string]jsonView `json:"views,omitempty"`
}

// View represents a Couchbase view within a design document.
type View struct {
	// Map is the map function source for the view.
	Map string
	// Reduce is the optional reduce function source.
	Reduce string
}
// fromData populates the view from its JSON wire representation.
func (v *View) fromData(data jsonView) error {
	v.Map, v.Reduce = data.Map, data.Reduce
	return nil
}

// toData converts the view into its JSON wire representation.
func (v *View) toData() (jsonView, error) {
	return jsonView{
		Map:    v.Map,
		Reduce: v.Reduce,
	}, nil
}
// DesignDocument represents a Couchbase design document containing multiple views.
type DesignDocument struct {
	Name  string
	Views map[string]View
}

// fromData populates the design document from its wire representation,
// attaching the given name.
func (dd *DesignDocument) fromData(data jsonDesignDocument, name string) error {
	views := make(map[string]View, len(data.Views))
	for viewName, viewData := range data.Views {
		var v View
		if err := v.fromData(viewData); err != nil {
			return err
		}
		views[viewName] = v
	}

	dd.Name = name
	dd.Views = views
	return nil
}

// toData converts the design document into its wire representation, also
// returning its name.
func (dd *DesignDocument) toData() (jsonDesignDocument, string, error) {
	views := make(map[string]jsonView, len(dd.Views))
	for viewName, v := range dd.Views {
		viewData, err := v.toData()
		if err != nil {
			return jsonDesignDocument{}, "", err
		}
		views[viewName] = viewData
	}

	return jsonDesignDocument{Views: views}, dd.Name, nil
}
// ViewIndexManager provides methods for performing View management.
type ViewIndexManager struct {
	// mgmtProvider executes requests against the views/management services.
	mgmtProvider mgmtProvider
	bucketName   string
	tracer       requestTracer
}
// tryParseErrorMessage maps a failed response body onto a typed error where
// the message is recognized, falling back to a generic error containing the
// raw body. Returning nil lets the caller produce its own generic error.
func (vm *ViewIndexManager) tryParseErrorMessage(req mgmtRequest, resp *mgmtResponse) error {
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		logDebugf("Failed to read view index manager response body: %s", err)
		return nil
	}

	if resp.StatusCode == 404 {
		if strings.Contains(strings.ToLower(string(b)), "not_found") {
			return makeGenericMgmtError(ErrDesignDocumentNotFound, &req, resp)
		}

		return makeGenericMgmtError(errors.New(string(b)), &req, resp)
	}

	var mgrErr bucketMgrErrorResp
	err = json.Unmarshal(b, &mgrErr)
	if err != nil {
		logDebugf("Failed to unmarshal error body: %s", err)
		return makeGenericMgmtError(errors.New(string(b)), &req, resp)
	}

	var bodyErr error
	var firstErr string
	// Only the first reported error is considered. NOTE(review): if Errors is
	// a map, iteration order is random, so "first" is arbitrary — confirm.
	for _, err := range mgrErr.Errors {
		firstErr = strings.ToLower(err)
		break
	}

	// NOTE(review): the bucket-exists mapping looks copied from the bucket
	// manager; unclear why a views request would yield this message — verify.
	if strings.Contains(firstErr, "bucket with given name already exists") {
		bodyErr = ErrBucketExists
	} else {
		bodyErr = errors.New(firstErr)
	}

	return makeGenericMgmtError(bodyErr, &req, resp)
}
// doMgmtRequest forwards a management request to the configured provider,
// normalizing the response to nil whenever an error is returned.
func (vm *ViewIndexManager) doMgmtRequest(req mgmtRequest) (*mgmtResponse, error) {
	resp, err := vm.mgmtProvider.executeMgmtRequest(req)
	if err != nil {
		return nil, err
	}

	return resp, nil
}
// GetDesignDocumentOptions is the set of options available to the ViewIndexManager GetDesignDocument operation.
type GetDesignDocumentOptions struct {
	// Timeout bounds the request; zero uses the client default.
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}
// ddocName normalizes a design document name for the given namespace:
// production names have a single "dev_" prefix removed; development names
// have one added if absent.
func (vm *ViewIndexManager) ddocName(name string, namespace DesignDocumentNamespace) string {
	if namespace == DesignDocumentNamespaceProduction {
		// Bug fix: the original used strings.TrimLeft(name, "dev_"), which
		// strips *characters* from the set {d, e, v, _} rather than the
		// prefix, mangling names such as "dev_eventing" into "nting".
		// TrimPrefix removes exactly one leading "dev_" (and is a no-op when
		// the prefix is absent, so the HasPrefix guard is unnecessary).
		name = strings.TrimPrefix(name, "dev_")
	} else {
		if !strings.HasPrefix(name, "dev_") {
			name = "dev_" + name
		}
	}

	return name
}
// GetDesignDocument retrieves a single design document for the given bucket.
func (vm *ViewIndexManager) GetDesignDocument(name string, namespace DesignDocumentNamespace, opts *GetDesignDocumentOptions) (*DesignDocument, error) {
	if opts == nil {
		opts = &GetDesignDocumentOptions{}
	}

	span := vm.tracer.StartSpan("GetDesignDocument", nil).SetTag("couchbase.service", "view")
	defer span.Finish()

	return vm.getDesignDocument(span.Context(), name, namespace, time.Now(), opts)
}

// getDesignDocument performs the fetch against the views service, reusing
// the caller's trace context.
func (vm *ViewIndexManager) getDesignDocument(tracectx requestSpanContext, name string, namespace DesignDocumentNamespace,
	startTime time.Time, opts *GetDesignDocumentOptions) (*DesignDocument, error) {
	// Apply the namespace-appropriate "dev_" prefixing to the name.
	name = vm.ddocName(name, namespace)

	req := mgmtRequest{
		Service:       ServiceTypeViews,
		Path:          fmt.Sprintf("/_design/%s", name),
		Method:        "GET",
		IsIdempotent:  true,
		RetryStrategy: opts.RetryStrategy,
		Timeout:       opts.Timeout,
		parentSpan:    tracectx,
	}
	resp, err := vm.doMgmtRequest(req)
	if err != nil {
		return nil, err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		vwErr := vm.tryParseErrorMessage(req, resp)
		if vwErr != nil {
			return nil, vwErr
		}
		return nil, makeGenericMgmtError(errors.New("failed to get design document"), &req, resp)
	}

	var ddocData jsonDesignDocument
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&ddocData)
	if err != nil {
		return nil, err
	}

	// The public name never carries the development "dev_" prefix.
	ddocName := strings.TrimPrefix(name, "dev_")

	var ddoc DesignDocument
	err = ddoc.fromData(ddocData, ddocName)
	if err != nil {
		return nil, err
	}

	return &ddoc, nil
}
// GetAllDesignDocumentsOptions is the set of options available to the ViewIndexManager GetAllDesignDocuments operation.
type GetAllDesignDocumentsOptions struct {
	// Timeout bounds the request; zero uses the client default.
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// GetAllDesignDocuments will retrieve all design documents for the given bucket.
func (vm *ViewIndexManager) GetAllDesignDocuments(namespace DesignDocumentNamespace, opts *GetAllDesignDocumentsOptions) ([]DesignDocument, error) {
	if opts == nil {
		opts = &GetAllDesignDocumentsOptions{}
	}

	span := vm.tracer.StartSpan("GetAllDesignDocuments", nil).SetTag("couchbase.service", "view")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s/ddocs", vm.bucketName),
		Method:        "GET",
		IsIdempotent:  true,
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    span.Context(),
	}
	resp, err := vm.doMgmtRequest(req)
	if err != nil {
		return nil, err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		vwErr := vm.tryParseErrorMessage(req, resp)
		if vwErr != nil {
			return nil, vwErr
		}
		return nil, makeGenericMgmtError(errors.New("failed to get design documents"), &req, resp)
	}

	// Anonymous struct mirroring the ns_server ddocs listing payload.
	var ddocsResp struct {
		Rows []struct {
			Doc struct {
				Meta struct {
					ID string `json:"id"`
				}
				JSON jsonDesignDocument `json:"json"`
			} `json:"doc"`
		} `json:"rows"`
	}
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&ddocsResp)
	if err != nil {
		return nil, err
	}

	ddocs := make([]DesignDocument, len(ddocsResp.Rows))
	for ddocIdx, ddocData := range ddocsResp.Rows {
		// ID[8:] skips the "_design/" prefix (8 bytes); assumes every ID
		// carries it — NOTE(review): confirm against server payloads.
		ddocName := strings.TrimPrefix(ddocData.Doc.Meta.ID[8:], "dev_")
		err := ddocs[ddocIdx].fromData(ddocData.Doc.JSON, ddocName)
		if err != nil {
			return nil, err
		}
	}

	return ddocs, nil
}
// UpsertDesignDocumentOptions is the set of options available to the ViewIndexManager UpsertDesignDocument operation.
type UpsertDesignDocumentOptions struct {
	// Timeout bounds the request; zero uses the client default.
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// UpsertDesignDocument will insert a design document to the given bucket, or update
// an existing design document with the same name.
func (vm *ViewIndexManager) UpsertDesignDocument(ddoc DesignDocument, namespace DesignDocumentNamespace, opts *UpsertDesignDocumentOptions) error {
	if opts == nil {
		opts = &UpsertDesignDocumentOptions{}
	}

	span := vm.tracer.StartSpan("UpsertDesignDocument", nil).SetTag("couchbase.service", "view")
	defer span.Finish()

	return vm.upsertDesignDocument(span.Context(), ddoc, namespace, time.Now(), opts)
}

// upsertDesignDocument performs the PUT against the views service, reusing
// the caller's trace context.
func (vm *ViewIndexManager) upsertDesignDocument(
	tracectx requestSpanContext,
	ddoc DesignDocument,
	namespace DesignDocumentNamespace,
	startTime time.Time,
	opts *UpsertDesignDocumentOptions,
) error {
	ddocData, ddocName, err := ddoc.toData()
	if err != nil {
		return err
	}

	// JSON encoding is traced separately as an "encode" child span.
	espan := vm.tracer.StartSpan("encode", tracectx)
	data, err := json.Marshal(&ddocData)
	espan.Finish()
	if err != nil {
		return err
	}

	// Apply the namespace-appropriate "dev_" prefixing to the name.
	ddocName = vm.ddocName(ddocName, namespace)

	req := mgmtRequest{
		Service:       ServiceTypeViews,
		Path:          fmt.Sprintf("/_design/%s", ddocName),
		Method:        "PUT",
		Body:          data,
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    tracectx,
	}
	resp, err := vm.doMgmtRequest(req)
	if err != nil {
		return err
	}
	defer ensureBodyClosed(resp.Body)

	// A successful create/update returns 201 Created.
	if resp.StatusCode != 201 {
		vwErr := vm.tryParseErrorMessage(req, resp)
		if vwErr != nil {
			return vwErr
		}
		return makeGenericMgmtError(errors.New("failed to upsert design document"), &req, resp)
	}

	return nil
}
// DropDesignDocumentOptions is the set of options available to the ViewIndexManager DropDesignDocument operation.
type DropDesignDocumentOptions struct {
	// Timeout bounds the request; zero uses the client default.
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// DropDesignDocument will remove a design document from the given bucket.
func (vm *ViewIndexManager) DropDesignDocument(name string, namespace DesignDocumentNamespace, opts *DropDesignDocumentOptions) error {
	if opts == nil {
		opts = &DropDesignDocumentOptions{}
	}

	span := vm.tracer.StartSpan("DropDesignDocument", nil).SetTag("couchbase.service", "view")
	defer span.Finish()

	return vm.dropDesignDocument(span.Context(), name, namespace, time.Now(), opts)
}

// dropDesignDocument performs the DELETE against the views service, reusing
// the caller's trace context.
func (vm *ViewIndexManager) dropDesignDocument(tracectx requestSpanContext, name string, namespace DesignDocumentNamespace,
	startTime time.Time, opts *DropDesignDocumentOptions) error {
	// Apply the namespace-appropriate "dev_" prefixing to the name.
	name = vm.ddocName(name, namespace)

	req := mgmtRequest{
		Service:       ServiceTypeViews,
		Path:          fmt.Sprintf("/_design/%s", name),
		Method:        "DELETE",
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    tracectx,
	}
	resp, err := vm.doMgmtRequest(req)
	if err != nil {
		return err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		vwErr := vm.tryParseErrorMessage(req, resp)
		if vwErr != nil {
			return vwErr
		}
		return makeGenericMgmtError(errors.New("failed to drop design document"), &req, resp)
	}

	return nil
}
// PublishDesignDocumentOptions is the set of options available to the ViewIndexManager PublishDesignDocument operation.
type PublishDesignDocumentOptions struct {
	// Timeout bounds each underlying request; zero uses the client default.
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}
// PublishDesignDocument publishes a design document to the given bucket by
// fetching it from the development namespace and upserting it into
// production.
func (vm *ViewIndexManager) PublishDesignDocument(name string, opts *PublishDesignDocumentOptions) error {
	startTime := time.Now()
	if opts == nil {
		opts = &PublishDesignDocumentOptions{}
	}

	span := vm.tracer.StartSpan("PublishDesignDocument", nil).
		SetTag("couchbase.service", "view")
	defer span.Finish()

	// Both sub-operations share the caller's timeout and retry settings.
	devdoc, err := vm.getDesignDocument(
		span.Context(),
		name,
		DesignDocumentNamespaceDevelopment,
		startTime,
		&GetDesignDocumentOptions{
			RetryStrategy: opts.RetryStrategy,
			Timeout:       opts.Timeout,
		})
	if err != nil {
		return err
	}

	return vm.upsertDesignDocument(
		span.Context(),
		*devdoc,
		DesignDocumentNamespaceProduction,
		startTime,
		&UpsertDesignDocumentOptions{
			RetryStrategy: opts.RetryStrategy,
			Timeout:       opts.Timeout,
		})
}

206
vendor/github.com/couchbase/gocb/v2/bucket_viewquery.go generated vendored Normal file
View File

@ -0,0 +1,206 @@
package gocb
import (
"encoding/json"
"net/url"
"strings"
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
"github.com/pkg/errors"
)
// jsonViewResponse is the wire-format envelope returned by the view service.
type jsonViewResponse struct {
	TotalRows uint64 `json:"total_rows,omitempty"`
	DebugInfo interface{} `json:"debug_info,omitempty"`
}

// jsonViewRow is the wire format of a single view result row; key and value
// are kept raw so the caller decides how to decode them.
type jsonViewRow struct {
	ID string `json:"id"`
	Key json.RawMessage `json:"key"`
	Value json.RawMessage `json:"value"`
}

// ViewMetaData provides access to the meta-data properties of a view query result.
type ViewMetaData struct {
	TotalRows uint64
	Debug interface{}
}

// fromData populates the meta-data from its decoded wire representation.
func (meta *ViewMetaData) fromData(data jsonViewResponse) error {
	*meta = ViewMetaData{
		TotalRows: data.TotalRows,
		Debug: data.DebugInfo,
	}
	return nil
}
// ViewRow represents a single row returned from a view query.
type ViewRow struct {
	// ID is the document ID associated with the row.
	ID string
	// keyBytes holds the raw JSON of the row key, decoded lazily via Key.
	keyBytes []byte
	// valueBytes holds the raw JSON of the row value, decoded lazily via Value.
	valueBytes []byte
}

// Key returns the key associated with this view row.
// valuePtr must be a pointer suitable for json.Unmarshal.
func (vr *ViewRow) Key(valuePtr interface{}) error {
	return json.Unmarshal(vr.keyBytes, valuePtr)
}

// Value returns the value associated with this view row.
// valuePtr must be a pointer suitable for json.Unmarshal.
func (vr *ViewRow) Value(valuePtr interface{}) error {
	return json.Unmarshal(vr.valueBytes, valuePtr)
}
// viewRowReader abstracts the streaming row source backing a ViewResult
// (implemented by the core transport; also a test seam).
type viewRowReader interface {
	// NextRow returns the next raw row, or nil when the stream is exhausted.
	NextRow() []byte
	// Err reports any error encountered while streaming.
	Err() error
	// MetaData returns the raw meta-data; per ViewResult.MetaData it is read
	// after the stream has been consumed/closed.
	MetaData() ([]byte, error)
	// Close terminates the stream.
	Close() error
}

// ViewResult implements an iterator interface which can be used to iterate over the rows of the query results.
type ViewResult struct {
	reader viewRowReader
	// currentRow is the most recently decoded row, exposed via Row.
	currentRow ViewRow
}

// newViewResult wraps a row reader in the public iterator type.
func newViewResult(reader viewRowReader) *ViewResult {
	return &ViewResult{
		reader: reader,
	}
}
// Next assigns the next result from the results into the value pointer, returning whether the read was successful.
func (r *ViewResult) Next() bool {
	raw := r.reader.NextRow()
	if raw == nil {
		return false
	}
	var decoded jsonViewRow
	if json.Unmarshal(raw, &decoded) != nil {
		// A row that fails to decode is surfaced as an empty ViewRow rather
		// than terminating iteration.
		r.currentRow = ViewRow{}
		return true
	}
	r.currentRow = ViewRow{
		ID: decoded.ID,
		keyBytes: decoded.Key,
		valueBytes: decoded.Value,
	}
	return true
}
// Row returns the contents of the current row.
// It is only meaningful after a successful call to Next.
func (r *ViewResult) Row() ViewRow {
	return r.currentRow
}

// Err returns any errors that have occurred on the stream
func (r *ViewResult) Err() error {
	return r.reader.Err()
}

// Close marks the results as closed, returning any errors that occurred during reading the results.
func (r *ViewResult) Close() error {
	return r.reader.Close()
}
// MetaData returns any meta-data that was available from this query. Note that
// the meta-data will only be available once the object has been closed (either
// implicitly or explicitly).
func (r *ViewResult) MetaData() (*ViewMetaData, error) {
	raw, err := r.reader.MetaData()
	if err != nil {
		return nil, err
	}

	var decoded jsonViewResponse
	if err := json.Unmarshal(raw, &decoded); err != nil {
		return nil, err
	}

	meta := &ViewMetaData{}
	if err := meta.fromData(decoded); err != nil {
		return nil, err
	}
	return meta, nil
}
// ViewQuery performs a view query and returns a list of rows or an error.
func (b *Bucket) ViewQuery(designDoc string, viewName string, opts *ViewOptions) (*ViewResult, error) {
	if opts == nil {
		opts = &ViewOptions{}
	}
	span := b.tracer.StartSpan("ViewQuery", opts.parentSpan).
		SetTag("couchbase.service", "view")
	defer span.Finish()

	// Development-namespace documents get a "dev_" prefix added (or stripped
	// for production) before the request is built.
	ddoc := b.maybePrefixDevDocument(opts.Namespace, designDoc)

	// Fall back to the bucket-level view timeout when none was supplied.
	effectiveTimeout := opts.Timeout
	if effectiveTimeout == 0 {
		effectiveTimeout = b.timeoutsConfig.ViewTimeout
	}
	deadline := time.Now().Add(effectiveTimeout)

	strategy := b.retryStrategyWrapper
	if opts.RetryStrategy != nil {
		strategy = newRetryStrategyWrapper(opts.RetryStrategy)
	}

	urlValues, err := opts.toURLValues()
	if err != nil {
		return nil, errors.Wrap(err, "could not parse query options")
	}

	return b.execViewQuery(span.Context(), "_view", ddoc, viewName, *urlValues, deadline, strategy)
}
// execViewQuery dispatches the prepared view query to the core view provider,
// wrapping provider-acquisition and execution failures in ViewError with the
// design document and view names attached.
func (b *Bucket) execViewQuery(
	span requestSpanContext,
	viewType, ddoc, viewName string,
	options url.Values,
	deadline time.Time,
	wrapper *retryStrategyWrapper,
) (*ViewResult, error) {
	provider, err := b.connectionManager.getViewProvider()
	if err != nil {
		return nil, ViewError{
			InnerError: wrapError(err, "failed to get query provider"),
			DesignDocumentName: ddoc,
			ViewName: viewName,
		}
	}

	res, err := provider.ViewQuery(gocbcore.ViewQueryOptions{
		DesignDocumentName: ddoc,
		ViewType: viewType,
		ViewName: viewName,
		Options: options,
		RetryStrategy: wrapper,
		Deadline: deadline,
		TraceContext: span,
	})
	if err != nil {
		// Translate core errors into their enhanced public equivalents.
		return nil, maybeEnhanceViewError(err)
	}

	return newViewResult(res), nil
}
// maybePrefixDevDocument normalizes a design document name for the requested
// namespace: production names have any "dev_" prefix stripped, development
// names have it added when missing.
func (b *Bucket) maybePrefixDevDocument(namespace DesignDocumentNamespace, ddoc string) string {
	if namespace == DesignDocumentNamespaceProduction {
		return strings.TrimPrefix(ddoc, "dev_")
	}
	if strings.HasPrefix(ddoc, "dev_") {
		return ddoc
	}
	return "dev_" + ddoc
}

18
vendor/github.com/couchbase/gocb/v2/circuitbreaker.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
package gocb
import "time"
// CircuitBreakerCallback is the callback used by the circuit breaker to determine if an error should count toward
// the circuit breaker failure count.
type CircuitBreakerCallback func(error) bool

// CircuitBreakerConfig are the settings for configuring circuit breakers.
// The zero value enables breakers with the implementation defaults; semantics
// of each threshold are defined by the underlying core breaker.
type CircuitBreakerConfig struct {
	// Disabled turns circuit breaking off entirely.
	Disabled bool
	// VolumeThreshold is the minimum number of operations observed before the
	// breaker considers tripping.
	VolumeThreshold int64
	// ErrorThresholdPercentage is the failure percentage at which the breaker trips.
	ErrorThresholdPercentage float64
	// SleepWindow is how long the breaker stays open before probing again.
	SleepWindow time.Duration
	// RollingWindow is the period over which failures are counted.
	RollingWindow time.Duration
	// CompletionCallback decides whether a given error counts as a failure.
	CompletionCallback CircuitBreakerCallback
	// CanaryTimeout bounds the probe operation sent while half-open.
	CanaryTimeout time.Duration
}

231
vendor/github.com/couchbase/gocb/v2/client.go generated vendored Normal file
View File

@ -0,0 +1,231 @@
package gocb
import (
"crypto/x509"
"sync"
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
"github.com/pkg/errors"
)
// connectionManager abstracts the lifecycle of the underlying core connections
// and hands out service-specific providers; stdConnectionMgr is the default
// implementation.
type connectionManager interface {
	connect() error
	openBucket(bucketName string) error
	buildConfig(cluster *Cluster) error
	getKvProvider(bucketName string) (kvProvider, error)
	getViewProvider() (viewProvider, error)
	getQueryProvider() (queryProvider, error)
	getAnalyticsProvider() (analyticsProvider, error)
	getSearchProvider() (searchProvider, error)
	getHTTPProvider() (httpProvider, error)
	getDiagnosticsProvider(bucketName string) (diagnosticsProvider, error)
	getWaitUntilReadyProvider(bucketName string) (waitUntilReadyProvider, error)
	connection(bucketName string) (*gocbcore.Agent, error)
	close() error
}

// stdConnectionMgr is the standard connectionManager backed by a gocbcore
// AgentGroup.
type stdConnectionMgr struct {
	// lock guards config/agentgroup during buildConfig, connect and close.
	lock sync.Mutex
	agentgroup *gocbcore.AgentGroup
	config *gocbcore.AgentGroupConfig
}
// newConnectionMgr creates an empty manager; buildConfig and connect must be
// called before providers can be obtained.
func newConnectionMgr() *stdConnectionMgr {
	return &stdConnectionMgr{}
}
// buildConfig translates the public cluster options into a gocbcore
// AgentGroupConfig and stores it on the manager for a subsequent connect().
// It does not open any connections itself.
func (c *stdConnectionMgr) buildConfig(cluster *Cluster) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	breakerCfg := cluster.circuitBreakerConfig
	// Wrap the user's circuit-breaker callback so it observes the same
	// enhanced KV errors the SDK surfaces elsewhere.
	var completionCallback func(err error) bool
	if breakerCfg.CompletionCallback != nil {
		completionCallback = func(err error) bool {
			wrappedErr := maybeEnhanceKVErr(err, "", "", "", "")
			return breakerCfg.CompletionCallback(wrappedErr)
		}
	}
	// Prefer the internal TLS root CA provider when supplied; otherwise derive
	// one from the security config (a nil pool when verification is skipped).
	var tlsRootCAProvider func() *x509.CertPool
	if cluster.internalConfig.TLSRootCAProvider == nil {
		tlsRootCAProvider = func() *x509.CertPool {
			if cluster.securityConfig.TLSSkipVerify {
				return nil
			}

			return cluster.securityConfig.TLSRootCAs
		}
	} else {
		tlsRootCAProvider = cluster.internalConfig.TLSRootCAProvider
	}
	config := &gocbcore.AgentGroupConfig{
		AgentConfig: gocbcore.AgentConfig{
			UserAgent: Identifier(),
			TLSRootCAProvider: tlsRootCAProvider,
			ConnectTimeout: cluster.timeoutsConfig.ConnectTimeout,
			UseMutationTokens: cluster.useMutationTokens,
			// NOTE(review): KV connect timeout is hard-coded to 7s here rather
			// than taken from the timeouts config — confirm this is intended.
			KVConnectTimeout: 7000 * time.Millisecond,
			UseDurations: cluster.useServerDurations,
			UseCollections: true,
			UseZombieLogger: cluster.orphanLoggerEnabled,
			ZombieLoggerInterval: cluster.orphanLoggerInterval,
			ZombieLoggerSampleSize: int(cluster.orphanLoggerSampleSize),
			NoRootTraceSpans: true,
			Tracer: &requestTracerWrapper{cluster.tracer},
			CircuitBreakerConfig: gocbcore.CircuitBreakerConfig{
				Enabled: !breakerCfg.Disabled,
				VolumeThreshold: breakerCfg.VolumeThreshold,
				ErrorThresholdPercentage: breakerCfg.ErrorThresholdPercentage,
				SleepWindow: breakerCfg.SleepWindow,
				RollingWindow: breakerCfg.RollingWindow,
				CanaryTimeout: breakerCfg.CanaryTimeout,
				CompletionCallback: completionCallback,
			},
			DefaultRetryStrategy: cluster.retryStrategyWrapper,
		},
	}
	// Connection-string settings are layered on top of the programmatic config.
	err := config.FromConnStr(cluster.connSpec().String())
	if err != nil {
		return err
	}
	config.Auth = &coreAuthWrapper{
		auth: cluster.authenticator(),
	}
	c.config = config
	return nil
}
// connect creates the core agent group from the previously built config.
// buildConfig must have been called first.
func (c *stdConnectionMgr) connect() error {
	c.lock.Lock()
	defer c.lock.Unlock()
	var err error
	c.agentgroup, err = gocbcore.CreateAgentGroup(c.config)
	if err != nil {
		// Surface the enhanced form of the core error to callers.
		return maybeEnhanceKVErr(err, "", "", "", "")
	}

	return nil
}
// openBucket opens the named bucket on the agent group; connect must have
// succeeded first.
func (c *stdConnectionMgr) openBucket(bucketName string) error {
	if c.agentgroup == nil {
		return errors.New("cluster not yet connected")
	}

	return c.agentgroup.OpenBucket(bucketName)
}
// getKvProvider returns the KV provider for the named bucket, failing when the
// cluster is not connected or the bucket has not been opened.
func (c *stdConnectionMgr) getKvProvider(bucketName string) (kvProvider, error) {
	if c.agentgroup == nil {
		return nil, errors.New("cluster not yet connected")
	}
	if agent := c.agentgroup.GetAgent(bucketName); agent != nil {
		return agent, nil
	}
	return nil, errors.New("bucket not yet connected")
}
// getViewProvider returns a cluster-level view provider wrapper.
func (c *stdConnectionMgr) getViewProvider() (viewProvider, error) {
	if c.agentgroup == nil {
		return nil, errors.New("cluster not yet connected")
	}

	return &viewProviderWrapper{provider: c.agentgroup}, nil
}

// getQueryProvider returns a cluster-level N1QL query provider wrapper.
func (c *stdConnectionMgr) getQueryProvider() (queryProvider, error) {
	if c.agentgroup == nil {
		return nil, errors.New("cluster not yet connected")
	}

	return &queryProviderWrapper{provider: c.agentgroup}, nil
}

// getAnalyticsProvider returns a cluster-level analytics provider wrapper.
func (c *stdConnectionMgr) getAnalyticsProvider() (analyticsProvider, error) {
	if c.agentgroup == nil {
		return nil, errors.New("cluster not yet connected")
	}

	return &analyticsProviderWrapper{provider: c.agentgroup}, nil
}

// getSearchProvider returns a cluster-level search provider wrapper.
func (c *stdConnectionMgr) getSearchProvider() (searchProvider, error) {
	if c.agentgroup == nil {
		return nil, errors.New("cluster not yet connected")
	}

	return &searchProviderWrapper{provider: c.agentgroup}, nil
}

// getHTTPProvider returns a cluster-level HTTP provider wrapper.
func (c *stdConnectionMgr) getHTTPProvider() (httpProvider, error) {
	if c.agentgroup == nil {
		return nil, errors.New("cluster not yet connected")
	}

	return &httpProviderWrapper{provider: c.agentgroup}, nil
}
// getDiagnosticsProvider returns a diagnostics provider scoped to the whole
// cluster when bucketName is empty, or to a single opened bucket otherwise.
func (c *stdConnectionMgr) getDiagnosticsProvider(bucketName string) (diagnosticsProvider, error) {
	if c.agentgroup == nil {
		return nil, errors.New("cluster not yet connected")
	}

	if bucketName != "" {
		agent := c.agentgroup.GetAgent(bucketName)
		if agent == nil {
			return nil, errors.New("bucket not yet connected")
		}
		return &diagnosticsProviderWrapper{provider: agent}, nil
	}

	return &diagnosticsProviderWrapper{provider: c.agentgroup}, nil
}
// getWaitUntilReadyProvider returns a WaitUntilReady provider scoped to the
// whole cluster when bucketName is empty, or to a single opened bucket
// otherwise.
func (c *stdConnectionMgr) getWaitUntilReadyProvider(bucketName string) (waitUntilReadyProvider, error) {
	if c.agentgroup == nil {
		return nil, errors.New("cluster not yet connected")
	}

	if bucketName == "" {
		return &waitUntilReadyProviderWrapper{provider: c.agentgroup}, nil
	}

	agent := c.agentgroup.GetAgent(bucketName)
	if agent == nil {
		// Consistent with the other per-bucket agent lookups (getKvProvider,
		// getDiagnosticsProvider, connection), which all report this state as
		// "bucket not yet connected".
		return nil, errors.New("bucket not yet connected")
	}
	return &waitUntilReadyProviderWrapper{provider: agent}, nil
}
// connection returns the raw core agent for the named bucket, failing when the
// cluster is not connected or the bucket has not been opened.
func (c *stdConnectionMgr) connection(bucketName string) (*gocbcore.Agent, error) {
	if c.agentgroup == nil {
		return nil, errors.New("cluster not yet connected")
	}
	if agent := c.agentgroup.GetAgent(bucketName); agent != nil {
		return agent, nil
	}
	return nil, errors.New("bucket not yet connected")
}
// close shuts down the agent group, failing if the cluster never connected.
func (c *stdConnectionMgr) close() error {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.agentgroup == nil {
		return errors.New("cluster not yet connected")
	}
	return c.agentgroup.Close()
}

474
vendor/github.com/couchbase/gocb/v2/cluster.go generated vendored Normal file
View File

@ -0,0 +1,474 @@
package gocb
import (
"crypto/x509"
"fmt"
"strconv"
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
gocbconnstr "github.com/couchbase/gocbcore/v9/connstr"
"github.com/pkg/errors"
)
// Cluster represents a connection to a specific Couchbase cluster.
type Cluster struct {
	// cSpec is the parsed connection string this cluster was created from.
	cSpec gocbconnstr.ConnSpec
	auth Authenticator
	// connectionManager owns the underlying core connections and providers.
	connectionManager connectionManager

	useServerDurations bool
	useMutationTokens bool

	// timeoutsConfig holds the fully-defaulted operation timeouts.
	timeoutsConfig TimeoutsConfig

	transcoder Transcoder
	retryStrategyWrapper *retryStrategyWrapper

	orphanLoggerEnabled bool
	orphanLoggerInterval time.Duration
	orphanLoggerSampleSize uint32

	// tracer is reference-counted; released in Close.
	tracer requestTracer

	circuitBreakerConfig CircuitBreakerConfig
	securityConfig SecurityConfig

	internalConfig InternalConfig
}

// IoConfig specifies IO related configuration options.
type IoConfig struct {
	// DisableMutationTokens turns off mutation-token generation on mutations.
	DisableMutationTokens bool
	// DisableServerDurations turns off server-side operation duration reporting.
	DisableServerDurations bool
}

// TimeoutsConfig specifies options for various operation timeouts.
// Zero values are replaced with SDK defaults when the cluster is created.
type TimeoutsConfig struct {
	ConnectTimeout time.Duration
	KVTimeout time.Duration
	// Volatile: This option is subject to change at any time.
	KVDurableTimeout time.Duration
	ViewTimeout time.Duration
	QueryTimeout time.Duration
	AnalyticsTimeout time.Duration
	SearchTimeout time.Duration
	ManagementTimeout time.Duration
}

// OrphanReporterConfig specifies options for controlling the orphan
// reporter which records when the SDK receives responses for requests
// that are no longer in the system (usually due to being timed out).
type OrphanReporterConfig struct {
	// Disabled turns the orphan reporter off.
	Disabled bool
	// ReportInterval is how often collected orphans are logged.
	ReportInterval time.Duration
	// SampleSize caps how many orphans are recorded per interval.
	SampleSize uint32
}

// SecurityConfig specifies options for controlling security related
// items such as TLS root certificates and verification skipping.
type SecurityConfig struct {
	TLSRootCAs *x509.CertPool
	TLSSkipVerify bool
}

// InternalConfig specifies options for controlling various internal
// items.
// Internal: This should never be used and is not supported.
type InternalConfig struct {
	TLSRootCAProvider func() *x509.CertPool
}
// ClusterOptions is the set of options available for creating a Cluster.
type ClusterOptions struct {
	// Authenticator specifies the authenticator to use with the cluster.
	// Takes precedence over Username/Password when set.
	Authenticator Authenticator

	// Username & Password specifies the cluster username and password to
	// authenticate with.  This is equivalent to passing PasswordAuthenticator
	// as the Authenticator parameter with the same values.
	Username string
	Password string

	// Timeouts specifies various operation timeouts.
	TimeoutsConfig TimeoutsConfig

	// Transcoder is used for trancoding data used in KV operations.
	Transcoder Transcoder

	// RetryStrategy is used to automatically retry operations if they fail.
	RetryStrategy RetryStrategy

	// Tracer specifies the tracer to use for requests.
	// VOLATILE: This API is subject to change at any time.
	Tracer requestTracer

	// OrphanReporterConfig specifies options for the orphan reporter.
	OrphanReporterConfig OrphanReporterConfig

	// CircuitBreakerConfig specifies options for the circuit breakers.
	CircuitBreakerConfig CircuitBreakerConfig

	// IoConfig specifies IO related configuration options.
	IoConfig IoConfig

	// SecurityConfig specifies security related configuration options.
	SecurityConfig SecurityConfig

	// Internal: This should never be used and is not supported.
	InternalConfig InternalConfig
}

// ClusterCloseOptions is the set of options available when
// disconnecting from a Cluster.  It is currently empty and exists for
// forward compatibility.
type ClusterCloseOptions struct {
}
// clusterFromOptions builds a Cluster value from the user-supplied options,
// filling in every unset field with the SDK default.  It does not connect.
func clusterFromOptions(opts ClusterOptions) *Cluster {
	// Username/Password are sugar for a PasswordAuthenticator.
	if opts.Authenticator == nil {
		opts.Authenticator = PasswordAuthenticator{
			Username: opts.Username,
			Password: opts.Password,
		}
	}

	// Default timeouts, overridden per-field when the caller set a value.
	connectTimeout := 10000 * time.Millisecond
	kvTimeout := 2500 * time.Millisecond
	kvDurableTimeout := 10000 * time.Millisecond
	viewTimeout := 75000 * time.Millisecond
	queryTimeout := 75000 * time.Millisecond
	analyticsTimeout := 75000 * time.Millisecond
	searchTimeout := 75000 * time.Millisecond
	managementTimeout := 75000 * time.Millisecond
	if opts.TimeoutsConfig.ConnectTimeout > 0 {
		connectTimeout = opts.TimeoutsConfig.ConnectTimeout
	}
	if opts.TimeoutsConfig.KVTimeout > 0 {
		kvTimeout = opts.TimeoutsConfig.KVTimeout
	}
	if opts.TimeoutsConfig.KVDurableTimeout > 0 {
		kvDurableTimeout = opts.TimeoutsConfig.KVDurableTimeout
	}
	if opts.TimeoutsConfig.ViewTimeout > 0 {
		viewTimeout = opts.TimeoutsConfig.ViewTimeout
	}
	if opts.TimeoutsConfig.QueryTimeout > 0 {
		queryTimeout = opts.TimeoutsConfig.QueryTimeout
	}
	if opts.TimeoutsConfig.AnalyticsTimeout > 0 {
		analyticsTimeout = opts.TimeoutsConfig.AnalyticsTimeout
	}
	if opts.TimeoutsConfig.SearchTimeout > 0 {
		searchTimeout = opts.TimeoutsConfig.SearchTimeout
	}
	if opts.TimeoutsConfig.ManagementTimeout > 0 {
		managementTimeout = opts.TimeoutsConfig.ManagementTimeout
	}
	if opts.Transcoder == nil {
		opts.Transcoder = NewJSONTranscoder()
	}
	if opts.RetryStrategy == nil {
		opts.RetryStrategy = NewBestEffortRetryStrategy(nil)
	}

	// Mutation tokens and server durations are opt-out rather than opt-in.
	useMutationTokens := true
	useServerDurations := true
	if opts.IoConfig.DisableMutationTokens {
		useMutationTokens = false
	}
	if opts.IoConfig.DisableServerDurations {
		useServerDurations = false
	}

	var initialTracer requestTracer
	if opts.Tracer != nil {
		initialTracer = opts.Tracer
	} else {
		initialTracer = newThresholdLoggingTracer(nil)
	}
	// The cluster holds a reference on the tracer until Close.
	tracerAddRef(initialTracer)

	return &Cluster{
		auth: opts.Authenticator,
		timeoutsConfig: TimeoutsConfig{
			ConnectTimeout: connectTimeout,
			QueryTimeout: queryTimeout,
			AnalyticsTimeout: analyticsTimeout,
			SearchTimeout: searchTimeout,
			ViewTimeout: viewTimeout,
			KVTimeout: kvTimeout,
			KVDurableTimeout: kvDurableTimeout,
			ManagementTimeout: managementTimeout,
		},
		transcoder: opts.Transcoder,
		useMutationTokens: useMutationTokens,
		retryStrategyWrapper: newRetryStrategyWrapper(opts.RetryStrategy),
		orphanLoggerEnabled: !opts.OrphanReporterConfig.Disabled,
		orphanLoggerInterval: opts.OrphanReporterConfig.ReportInterval,
		orphanLoggerSampleSize: opts.OrphanReporterConfig.SampleSize,
		useServerDurations: useServerDurations,
		tracer: initialTracer,
		circuitBreakerConfig: opts.CircuitBreakerConfig,
		securityConfig: opts.SecurityConfig,
		internalConfig: opts.InternalConfig,
	}
}
// Connect creates and returns a Cluster instance created using the
// provided options and a connection string.
func Connect(connStr string, opts ClusterOptions) (*Cluster, error) {
	connSpec, err := gocbconnstr.Parse(connStr)
	if err != nil {
		return nil, err
	}

	if connSpec.Scheme == "http" {
		return nil, errors.New("http scheme is not supported, use couchbase or couchbases instead")
	}

	cluster := clusterFromOptions(opts)
	cluster.cSpec = connSpec
	if err := cluster.parseExtraConnStrOptions(connSpec); err != nil {
		return nil, err
	}

	// Build the core configuration and establish connections before handing
	// the manager to the cluster.
	mgr := newConnectionMgr()
	if err := mgr.buildConfig(cluster); err != nil {
		return nil, err
	}
	if err := mgr.connect(); err != nil {
		return nil, err
	}
	cluster.connectionManager = mgr

	return cluster, nil
}
// parseExtraConnStrOptions applies timeout overrides carried as query options
// on the connection string (e.g. "couchbase://host?query_timeout=500").
// Values are whole milliseconds; when an option is repeated the last value
// wins.  Returns an error when a supplied value is not a valid integer.
func (c *Cluster) parseExtraConnStrOptions(spec gocbconnstr.ConnSpec) error {
	// fetchOption returns the last value supplied for the named option.
	fetchOption := func(name string) (string, bool) {
		optValue := spec.Options[name]
		if len(optValue) == 0 {
			return "", false
		}
		return optValue[len(optValue)-1], true
	}

	// Each supported connection-string option maps onto exactly one timeout
	// field; handling them uniformly avoids four copy-pasted branches.
	timeoutOptions := []struct {
		name   string
		target *time.Duration
	}{
		{"query_timeout", &c.timeoutsConfig.QueryTimeout},
		{"analytics_timeout", &c.timeoutsConfig.AnalyticsTimeout},
		{"search_timeout", &c.timeoutsConfig.SearchTimeout},
		{"view_timeout", &c.timeoutsConfig.ViewTimeout},
	}
	for _, opt := range timeoutOptions {
		valStr, ok := fetchOption(opt.name)
		if !ok {
			continue
		}
		val, err := strconv.ParseInt(valStr, 10, 64)
		if err != nil {
			// Matches the original per-option error text exactly.
			return fmt.Errorf("%s option must be a number", opt.name)
		}
		*opt.target = time.Duration(val) * time.Millisecond
	}

	return nil
}
// Bucket connects the cluster to server(s) and returns a new Bucket instance.
// A failure to open the bucket is not returned here; it is recorded on the
// Bucket as a bootstrap error and surfaced by later operations.
func (c *Cluster) Bucket(bucketName string) *Bucket {
	b := newBucket(c, bucketName)
	err := c.connectionManager.openBucket(bucketName)
	if err != nil {
		b.setBootstrapError(err)
	}

	return b
}

// authenticator returns the authenticator the cluster was configured with.
func (c *Cluster) authenticator() Authenticator {
	return c.auth
}

// connSpec returns the parsed connection string for this cluster.
func (c *Cluster) connSpec() gocbconnstr.ConnSpec {
	return c.cSpec
}
// WaitUntilReadyOptions is the set of options available to the WaitUntilReady operations.
type WaitUntilReadyOptions struct {
	// DesiredState is the cluster state to wait for; the zero value means ClusterStateOnline.
	DesiredState ClusterState
	// ServiceTypes lists the services to check; empty selects the defaults
	// documented on WaitUntilReady.
	ServiceTypes []ServiceType
}
// WaitUntilReady will wait for the cluster object to be ready for use.
// At present this will wait until memd connections have been established with the server and are ready
// to be used before performing a ping against the specified services which also
// exist in the cluster map.
// If no services are specified then ServiceTypeManagement, ServiceTypeQuery, ServiceTypeSearch, ServiceTypeAnalytics
// will be pinged.
// Valid service types are: ServiceTypeManagement, ServiceTypeQuery, ServiceTypeSearch, ServiceTypeAnalytics.
func (c *Cluster) WaitUntilReady(timeout time.Duration, opts *WaitUntilReadyOptions) error {
	if opts == nil {
		opts = &WaitUntilReadyOptions{}
	}

	cli := c.connectionManager
	if cli == nil {
		return errors.New("cluster is not connected")
	}

	// Empty bucket name requests a cluster-scoped provider.
	provider, err := cli.getWaitUntilReadyProvider("")
	if err != nil {
		return err
	}

	desiredState := opts.DesiredState
	if desiredState == 0 {
		desiredState = ClusterStateOnline
	}

	// Translate public service types into their core equivalents.
	services := make([]gocbcore.ServiceType, 0, len(opts.ServiceTypes))
	for _, svc := range opts.ServiceTypes {
		services = append(services, gocbcore.ServiceType(svc))
	}

	return provider.WaitUntilReady(
		time.Now().Add(timeout),
		gocbcore.WaitUntilReadyOptions{
			DesiredState: gocbcore.ClusterState(desiredState),
			ServiceTypes: services,
		},
	)
}
// Close shuts down all buckets in this cluster and invalidates any references this cluster has.
// The first shutdown error (if any) is returned after all cleanup has run.
func (c *Cluster) Close(opts *ClusterCloseOptions) error {
	var firstErr error

	if c.connectionManager != nil {
		if err := c.connectionManager.close(); err != nil {
			logWarnf("Failed to close cluster connectionManager in cluster close: %s", err)
			firstErr = err
		}
	}

	// Release the cluster's reference on the tracer regardless of the
	// connection manager outcome.
	if c.tracer != nil {
		tracerDecRef(c.tracer)
		c.tracer = nil
	}

	return firstErr
}
// getDiagnosticsProvider returns the cluster-scoped diagnostics provider.
func (c *Cluster) getDiagnosticsProvider() (diagnosticsProvider, error) {
	p, err := c.connectionManager.getDiagnosticsProvider("")
	if err != nil {
		return nil, err
	}
	return p, nil
}

// getQueryProvider returns the cluster's N1QL query provider.
func (c *Cluster) getQueryProvider() (queryProvider, error) {
	p, err := c.connectionManager.getQueryProvider()
	if err != nil {
		return nil, err
	}
	return p, nil
}

// getAnalyticsProvider returns the cluster's analytics provider.
func (c *Cluster) getAnalyticsProvider() (analyticsProvider, error) {
	p, err := c.connectionManager.getAnalyticsProvider()
	if err != nil {
		return nil, err
	}
	return p, nil
}

// getSearchProvider returns the cluster's search provider.
func (c *Cluster) getSearchProvider() (searchProvider, error) {
	p, err := c.connectionManager.getSearchProvider()
	if err != nil {
		return nil, err
	}
	return p, nil
}

// getHTTPProvider returns the cluster's raw HTTP provider.
func (c *Cluster) getHTTPProvider() (httpProvider, error) {
	p, err := c.connectionManager.getHTTPProvider()
	if err != nil {
		return nil, err
	}
	return p, nil
}
// Users returns a UserManager for managing users.
func (c *Cluster) Users() *UserManager {
	return &UserManager{
		provider: c,
		tracer: c.tracer,
	}
}

// Buckets returns a BucketManager for managing buckets.
func (c *Cluster) Buckets() *BucketManager {
	return &BucketManager{
		provider: c,
		tracer: c.tracer,
	}
}

// AnalyticsIndexes returns an AnalyticsIndexManager for managing analytics indexes.
func (c *Cluster) AnalyticsIndexes() *AnalyticsIndexManager {
	return &AnalyticsIndexManager{
		aProvider: c,
		mgmtProvider: c,
		// Management operations default to the cluster management timeout.
		globalTimeout: c.timeoutsConfig.ManagementTimeout,
		tracer: c.tracer,
	}
}

// QueryIndexes returns a QueryIndexManager for managing query indexes.
func (c *Cluster) QueryIndexes() *QueryIndexManager {
	return &QueryIndexManager{
		provider: c,
		globalTimeout: c.timeoutsConfig.ManagementTimeout,
		tracer: c.tracer,
	}
}

// SearchIndexes returns a SearchIndexManager for managing search indexes.
func (c *Cluster) SearchIndexes() *SearchIndexManager {
	return &SearchIndexManager{
		mgmtProvider: c,
		tracer: c.tracer,
	}
}

View File

@ -0,0 +1,597 @@
package gocb
import (
"encoding/json"
"fmt"
"strings"
"time"
)
// AnalyticsIndexManager provides methods for performing Couchbase Analytics index management.
type AnalyticsIndexManager struct {
	// aProvider executes the analytics (N1QL-for-analytics) statements.
	aProvider analyticsIndexQueryProvider
	// mgmtProvider executes raw management HTTP requests.
	mgmtProvider mgmtProvider
	// globalTimeout is applied to operations whose options carry no timeout.
	globalTimeout time.Duration
	tracer requestTracer
}

// analyticsIndexQueryProvider is the minimal query surface this manager needs;
// satisfied by Cluster.
type analyticsIndexQueryProvider interface {
	AnalyticsQuery(statement string, opts *AnalyticsOptions) (*AnalyticsResult, error)
}
// doAnalyticsQuery runs an analytics statement and returns each result row as
// raw JSON.  Rows that fail to read are logged and skipped; a stream-level
// error fails the whole call.
func (am *AnalyticsIndexManager) doAnalyticsQuery(q string, opts *AnalyticsOptions) ([][]byte, error) {
	if opts.Timeout == 0 {
		opts.Timeout = am.globalTimeout
	}

	result, err := am.aProvider.AnalyticsQuery(q, opts)
	if err != nil {
		return nil, err
	}

	var rows [][]byte
	for result.Next() {
		var row json.RawMessage
		if rowErr := result.Row(&row); rowErr != nil {
			logWarnf("management operation failed to read row: %s", rowErr)
			continue
		}
		rows = append(rows, row)
	}
	if err := result.Err(); err != nil {
		return nil, err
	}

	return rows, nil
}
// doMgmtRequest forwards a management request to the configured provider.
func (am *AnalyticsIndexManager) doMgmtRequest(req mgmtRequest) (*mgmtResponse, error) {
	response, err := am.mgmtProvider.executeMgmtRequest(req)
	if err != nil {
		return nil, err
	}

	return response, nil
}
// jsonAnalyticsDataset is the wire format of a dataset row from the Metadata
// dataverse.
type jsonAnalyticsDataset struct {
	DatasetName string `json:"DatasetName"`
	DataverseName string `json:"DataverseName"`
	LinkName string `json:"LinkName"`
	BucketName string `json:"BucketName"`
}

// jsonAnalyticsIndex is the wire format of an index row from the Metadata
// dataverse.
type jsonAnalyticsIndex struct {
	IndexName string `json:"IndexName"`
	DatasetName string `json:"DatasetName"`
	DataverseName string `json:"DataverseName"`
	IsPrimary bool `json:"IsPrimary"`
}

// AnalyticsDataset contains information about an analytics dataset.
type AnalyticsDataset struct {
	Name string
	DataverseName string
	LinkName string
	BucketName string
}

// fromData populates the dataset from its decoded wire representation.
func (ad *AnalyticsDataset) fromData(data jsonAnalyticsDataset) error {
	*ad = AnalyticsDataset{
		Name: data.DatasetName,
		DataverseName: data.DataverseName,
		LinkName: data.LinkName,
		BucketName: data.BucketName,
	}
	return nil
}

// AnalyticsIndex contains information about an analytics index.
type AnalyticsIndex struct {
	Name string
	DatasetName string
	DataverseName string
	IsPrimary bool
}

// fromData populates the index from its decoded wire representation.
func (ai *AnalyticsIndex) fromData(data jsonAnalyticsIndex) error {
	*ai = AnalyticsIndex{
		Name: data.IndexName,
		DatasetName: data.DatasetName,
		DataverseName: data.DataverseName,
		IsPrimary: data.IsPrimary,
	}
	return nil
}
// CreateAnalyticsDataverseOptions is the set of options available to the AnalyticsManager CreateDataverse operation.
type CreateAnalyticsDataverseOptions struct {
	// IgnoreIfExists suppresses the error when the dataverse already exists.
	IgnoreIfExists bool

	Timeout time.Duration
	RetryStrategy RetryStrategy
}
// CreateDataverse creates a new analytics dataverse.
func (am *AnalyticsIndexManager) CreateDataverse(dataverseName string, opts *CreateAnalyticsDataverseOptions) error {
	if opts == nil {
		opts = &CreateAnalyticsDataverseOptions{}
	}

	if dataverseName == "" {
		// This guards the dataverse name; the previous message said "dataset".
		return invalidArgumentsError{
			message: "dataverse name cannot be empty",
		}
	}

	span := am.tracer.StartSpan("CreateDataverse", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	var ignoreStr string
	if opts.IgnoreIfExists {
		ignoreStr = "IF NOT EXISTS"
	}

	q := fmt.Sprintf("CREATE DATAVERSE `%s` %s", dataverseName, ignoreStr)
	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
		Timeout: opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan: span,
	})
	if err != nil {
		return err
	}

	return nil
}
// DropAnalyticsDataverseOptions is the set of options available to the AnalyticsManager DropDataverse operation.
type DropAnalyticsDataverseOptions struct {
	// IgnoreIfNotExists suppresses the error when the dataverse does not exist.
	IgnoreIfNotExists bool

	Timeout time.Duration
	RetryStrategy RetryStrategy
}
// DropDataverse drops an analytics dataverse.
func (am *AnalyticsIndexManager) DropDataverse(dataverseName string, opts *DropAnalyticsDataverseOptions) error {
	if opts == nil {
		opts = &DropAnalyticsDataverseOptions{}
	}

	span := am.tracer.StartSpan("DropDataverse", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	var ignoreStr string
	if opts.IgnoreIfNotExists {
		ignoreStr = "IF EXISTS"
	}

	q := fmt.Sprintf("DROP DATAVERSE %s %s", dataverseName, ignoreStr)
	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
		Timeout: opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan: span,
	})
	return err
}
// CreateAnalyticsDatasetOptions is the set of options available to the AnalyticsManager CreateDataset operation.
type CreateAnalyticsDatasetOptions struct {
	// IgnoreIfExists suppresses the error when the dataset already exists.
	IgnoreIfExists bool
	// Condition is an optional filter clause; a leading "WHERE" is added when missing.
	Condition string
	// DataverseName qualifies the dataset; empty uses the default dataverse.
	DataverseName string

	Timeout time.Duration
	RetryStrategy RetryStrategy
}
// CreateDataset creates a new analytics dataset.
func (am *AnalyticsIndexManager) CreateDataset(datasetName, bucketName string, opts *CreateAnalyticsDatasetOptions) error {
	if opts == nil {
		opts = &CreateAnalyticsDatasetOptions{}
	}

	if datasetName == "" {
		return invalidArgumentsError{
			message: "dataset name cannot be empty",
		}
	}

	span := am.tracer.StartSpan("CreateDataset", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	var ignoreStr string
	if opts.IgnoreIfExists {
		ignoreStr = "IF NOT EXISTS"
	}

	// Prepend WHERE to the filter condition unless the caller already did.
	where := opts.Condition
	if where != "" && !strings.HasPrefix(strings.ToUpper(where), "WHERE") {
		where = "WHERE " + where
	}

	// Qualify the dataset with its dataverse when one was given.
	qualified := fmt.Sprintf("`%s`", datasetName)
	if opts.DataverseName != "" {
		qualified = fmt.Sprintf("`%s`.`%s`", opts.DataverseName, datasetName)
	}

	q := fmt.Sprintf("CREATE DATASET %s %s ON `%s` %s", ignoreStr, qualified, bucketName, where)
	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
		Timeout: opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan: span,
	})
	return err
}
// DropAnalyticsDatasetOptions is the set of options available to the AnalyticsManager DropDataset operation.
type DropAnalyticsDatasetOptions struct {
	// IgnoreIfNotExists suppresses the error when the dataset does not exist.
	IgnoreIfNotExists bool
	// DataverseName qualifies the dataset; empty uses the default dataverse.
	DataverseName string

	Timeout time.Duration
	RetryStrategy RetryStrategy
}
// DropDataset drops an analytics dataset.
func (am *AnalyticsIndexManager) DropDataset(datasetName string, opts *DropAnalyticsDatasetOptions) error {
	if opts == nil {
		opts = &DropAnalyticsDatasetOptions{}
	}

	span := am.tracer.StartSpan("DropDataset", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	var ignoreStr string
	if opts.IgnoreIfNotExists {
		ignoreStr = "IF EXISTS"
	}

	// Qualify the dataset with its dataverse when one was given.
	qualified := fmt.Sprintf("`%s`", datasetName)
	if opts.DataverseName != "" {
		qualified = fmt.Sprintf("`%s`.`%s`", opts.DataverseName, datasetName)
	}

	q := fmt.Sprintf("DROP DATASET %s %s", qualified, ignoreStr)
	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
		Timeout: opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan: span,
	})
	return err
}
// GetAllAnalyticsDatasetsOptions is the set of options available to the AnalyticsManager GetAllDatasets operation.
type GetAllAnalyticsDatasetsOptions struct {
	Timeout time.Duration
	RetryStrategy RetryStrategy
}
// GetAllDatasets gets all analytics datasets.
func (am *AnalyticsIndexManager) GetAllDatasets(opts *GetAllAnalyticsDatasetsOptions) ([]AnalyticsDataset, error) {
	if opts == nil {
		opts = &GetAllAnalyticsDatasetsOptions{}
	}

	span := am.tracer.StartSpan("GetAllDatasets", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	// The Metadata dataverse's own datasets are excluded from the listing.
	q := "SELECT d.* FROM Metadata.`Dataset` d WHERE d.DataverseName <> \"Metadata\""
	rows, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
		Timeout: opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan: span,
	})
	if err != nil {
		return nil, err
	}

	datasets := make([]AnalyticsDataset, len(rows))
	for i, row := range rows {
		var decoded jsonAnalyticsDataset
		if err := json.Unmarshal(row, &decoded); err != nil {
			return nil, err
		}
		if err := datasets[i].fromData(decoded); err != nil {
			return nil, err
		}
	}

	return datasets, nil
}
// CreateAnalyticsIndexOptions is the set of options available to the AnalyticsManager CreateIndex operation.
type CreateAnalyticsIndexOptions struct {
	// IgnoreIfExists, when true, adds "IF NOT EXISTS" so an existing index is not an error.
	IgnoreIfExists bool
	// DataverseName optionally qualifies the dataset name the index is created on.
	DataverseName string
	// Timeout is the per-operation timeout; zero falls back to the manager default.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for this operation.
	RetryStrategy RetryStrategy
}
// CreateIndex creates a new analytics dataset.
func (am *AnalyticsIndexManager) CreateIndex(datasetName, indexName string, fields map[string]string, opts *CreateAnalyticsIndexOptions) error {
	if opts == nil {
		opts = &CreateAnalyticsIndexOptions{}
	}
	if indexName == "" {
		return invalidArgumentsError{
			message: "index name cannot be empty",
		}
	}
	if len(fields) <= 0 {
		return invalidArgumentsError{
			message: "you must specify at least one field to index",
		}
	}

	span := am.tracer.StartSpan("CreateIndex", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	ignoreStr := ""
	if opts.IgnoreIfExists {
		ignoreStr = "IF NOT EXISTS"
	}

	// Render the index definition as name:type pairs.
	// NOTE(review): map iteration order is random, so the rendered field order
	// varies between calls — presumably the server treats the list as a set.
	indexFields := make([]string, 0, len(fields))
	for fieldName, fieldType := range fields {
		indexFields = append(indexFields, fieldName+":"+fieldType)
	}

	qualifiedName := fmt.Sprintf("`%s`", datasetName)
	if opts.DataverseName != "" {
		qualifiedName = fmt.Sprintf("`%s`.`%s`", opts.DataverseName, datasetName)
	}

	_, err := am.doAnalyticsQuery(
		fmt.Sprintf("CREATE INDEX `%s` %s ON %s (%s)", indexName, ignoreStr, qualifiedName, strings.Join(indexFields, ",")),
		&AnalyticsOptions{
			Timeout:       opts.Timeout,
			RetryStrategy: opts.RetryStrategy,
			parentSpan:    span,
		})
	return err
}
// DropAnalyticsIndexOptions is the set of options available to the AnalyticsManager DropIndex operation.
type DropAnalyticsIndexOptions struct {
	// IgnoreIfNotExists, when true, adds "IF EXISTS" so a missing index is not an error.
	IgnoreIfNotExists bool
	// DataverseName optionally qualifies the dataset name the index lives on.
	DataverseName string
	// Timeout is the per-operation timeout; zero falls back to the manager default.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for this operation.
	RetryStrategy RetryStrategy
}
// DropIndex drops an analytics index.
func (am *AnalyticsIndexManager) DropIndex(datasetName, indexName string, opts *DropAnalyticsIndexOptions) error {
	if opts == nil {
		opts = &DropAnalyticsIndexOptions{}
	}

	span := am.tracer.StartSpan("DropIndex", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	ignoreStr := ""
	if opts.IgnoreIfNotExists {
		ignoreStr = "IF EXISTS"
	}

	// Qualify the dataset with its dataverse when one was supplied.
	qualifiedName := fmt.Sprintf("`%s`", datasetName)
	if opts.DataverseName != "" {
		qualifiedName = fmt.Sprintf("`%s`.`%s`", opts.DataverseName, datasetName)
	}

	_, err := am.doAnalyticsQuery(
		fmt.Sprintf("DROP INDEX %s.%s %s", qualifiedName, indexName, ignoreStr),
		&AnalyticsOptions{
			Timeout:       opts.Timeout,
			RetryStrategy: opts.RetryStrategy,
			parentSpan:    span,
		})
	return err
}
// GetAllAnalyticsIndexesOptions is the set of options available to the AnalyticsManager GetAllIndexes operation.
type GetAllAnalyticsIndexesOptions struct {
	// Timeout is the per-operation timeout; zero falls back to the manager default.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for this operation.
	RetryStrategy RetryStrategy
}
// GetAllIndexes gets all analytics indexes.
func (am *AnalyticsIndexManager) GetAllIndexes(opts *GetAllAnalyticsIndexesOptions) ([]AnalyticsIndex, error) {
	if opts == nil {
		opts = &GetAllAnalyticsIndexesOptions{}
	}

	span := am.tracer.StartSpan("GetAllIndexes", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	// Query the analytics metadata, excluding the internal Metadata dataverse.
	rows, err := am.doAnalyticsQuery(
		"SELECT d.* FROM Metadata.`Index` d WHERE d.DataverseName <> \"Metadata\"",
		&AnalyticsOptions{
			Timeout:       opts.Timeout,
			RetryStrategy: opts.RetryStrategy,
			parentSpan:    span,
		})
	if err != nil {
		return nil, err
	}

	indexes := make([]AnalyticsIndex, len(rows))
	for i, raw := range rows {
		var parsed jsonAnalyticsIndex
		if err := json.Unmarshal(raw, &parsed); err != nil {
			return nil, err
		}
		if err := indexes[i].fromData(parsed); err != nil {
			return nil, err
		}
	}
	return indexes, nil
}
// ConnectAnalyticsLinkOptions is the set of options available to the AnalyticsManager ConnectLink operation.
type ConnectAnalyticsLinkOptions struct {
	// LinkName is the link to connect; defaults to "Local" when empty.
	LinkName string
	// Timeout is the per-operation timeout; zero falls back to the manager default.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for this operation.
	RetryStrategy RetryStrategy
}
// ConnectLink connects an analytics link.
func (am *AnalyticsIndexManager) ConnectLink(opts *ConnectAnalyticsLinkOptions) error {
	if opts == nil {
		opts = &ConnectAnalyticsLinkOptions{}
	}

	span := am.tracer.StartSpan("ConnectLink", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	// Default to the built-in "Local" link (mutates opts, matching the
	// original behaviour).
	if opts.LinkName == "" {
		opts.LinkName = "Local"
	}

	_, err := am.doAnalyticsQuery(
		fmt.Sprintf("CONNECT LINK %s", opts.LinkName),
		&AnalyticsOptions{
			Timeout:       opts.Timeout,
			RetryStrategy: opts.RetryStrategy,
			parentSpan:    span,
		})
	return err
}
// DisconnectAnalyticsLinkOptions is the set of options available to the AnalyticsManager DisconnectLink operation.
type DisconnectAnalyticsLinkOptions struct {
	// LinkName is the link to disconnect; defaults to "Local" when empty.
	LinkName string
	// Timeout is the per-operation timeout; zero falls back to the manager default.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for this operation.
	RetryStrategy RetryStrategy
}
// DisconnectLink disconnects an analytics link.
func (am *AnalyticsIndexManager) DisconnectLink(opts *DisconnectAnalyticsLinkOptions) error {
	if opts == nil {
		opts = &DisconnectAnalyticsLinkOptions{}
	}

	span := am.tracer.StartSpan("DisconnectLink", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	// Default to the built-in "Local" link (mutates opts, matching the
	// original behaviour).
	if opts.LinkName == "" {
		opts.LinkName = "Local"
	}

	_, err := am.doAnalyticsQuery(
		fmt.Sprintf("DISCONNECT LINK %s", opts.LinkName),
		&AnalyticsOptions{
			Timeout:       opts.Timeout,
			RetryStrategy: opts.RetryStrategy,
			parentSpan:    span,
		})
	return err
}
// GetPendingMutationsAnalyticsOptions is the set of options available to the user manager GetPendingMutations operation.
type GetPendingMutationsAnalyticsOptions struct {
	// Timeout is the per-operation timeout; zero falls back to the manager's global timeout.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for this operation.
	RetryStrategy RetryStrategy
}
// GetPendingMutations returns the number of pending mutations for all indexes in the form of dataverse.dataset:mutations.
//
// It issues a GET to /analytics/node/agg/stats/remaining and decodes the JSON
// body directly into a map keyed by "dataverse.dataset".
func (am *AnalyticsIndexManager) GetPendingMutations(opts *GetPendingMutationsAnalyticsOptions) (map[string]uint64, error) {
	if opts == nil {
		opts = &GetPendingMutationsAnalyticsOptions{}
	}

	span := am.tracer.StartSpan("GetPendingMutations", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	timeout := opts.Timeout
	if timeout == 0 {
		timeout = am.globalTimeout
	}

	req := mgmtRequest{
		Service:       ServiceTypeAnalytics,
		Method:        "GET",
		Path:          "/analytics/node/agg/stats/remaining",
		IsIdempotent:  true, // safe to retry: GET has no side effects
		RetryStrategy: opts.RetryStrategy,
		Timeout:       timeout,
		parentSpan:    span.Context(),
	}
	resp, err := am.doMgmtRequest(req)
	if err != nil {
		return nil, err
	}
	// Close the body on every exit path. Previously it was only closed on the
	// success path, leaking the connection when the status was non-200 or when
	// JSON decoding failed.
	defer func() {
		if closeErr := resp.Body.Close(); closeErr != nil {
			logDebugf("Failed to close socket (%s)", closeErr)
		}
	}()

	if resp.StatusCode != 200 {
		return nil, makeMgmtBadStatusError("failed to get pending mutations", &req, resp)
	}

	pending := make(map[string]uint64)
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&pending)
	if err != nil {
		return nil, err
	}

	return pending, nil
}

View File

@ -0,0 +1,300 @@
package gocb
import (
"encoding/json"
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// jsonAnalyticsMetrics is the wire representation of the "metrics" section of
// an analytics query response; elapsed/execution times arrive as duration
// strings and are parsed in AnalyticsMetrics.fromData.
type jsonAnalyticsMetrics struct {
	ElapsedTime      string `json:"elapsedTime"`
	ExecutionTime    string `json:"executionTime"`
	ResultCount      uint64 `json:"resultCount"`
	ResultSize       uint64 `json:"resultSize"`
	MutationCount    uint64 `json:"mutationCount,omitempty"`
	SortCount        uint64 `json:"sortCount,omitempty"`
	ErrorCount       uint64 `json:"errorCount,omitempty"`
	WarningCount     uint64 `json:"warningCount,omitempty"`
	ProcessedObjects uint64 `json:"processedObjects,omitempty"`
}
// jsonAnalyticsWarning is the wire representation of a single warning entry in
// an analytics query response.
type jsonAnalyticsWarning struct {
	Code    uint32 `json:"code"`
	Message string `json:"msg"`
}
// jsonAnalyticsResponse is the wire representation of the meta-data envelope of
// an analytics query response (decoded in AnalyticsResult.MetaData).
type jsonAnalyticsResponse struct {
	RequestID       string                 `json:"requestID"`
	ClientContextID string                 `json:"clientContextID"`
	Status          string                 `json:"status"`
	Warnings        []jsonAnalyticsWarning `json:"warnings"`
	Metrics         jsonAnalyticsMetrics   `json:"metrics"`
	Signature       interface{}            `json:"signature"`
}
// AnalyticsMetrics encapsulates various metrics gathered during a queries execution.
type AnalyticsMetrics struct {
	// ElapsedTime is the total wall-clock time reported by the server.
	ElapsedTime time.Duration
	// ExecutionTime is the server-side execution time.
	ExecutionTime    time.Duration
	ResultCount      uint64
	ResultSize       uint64
	MutationCount    uint64
	SortCount        uint64
	ErrorCount       uint64
	WarningCount     uint64
	ProcessedObjects uint64
}
// fromData populates the metrics from their wire representation. Unparseable
// duration strings are logged and left as zero rather than failing the query;
// this function always returns nil.
func (metrics *AnalyticsMetrics) fromData(data jsonAnalyticsMetrics) error {
	elapsed, err := time.ParseDuration(data.ElapsedTime)
	if err != nil {
		logDebugf("Failed to parse query metrics elapsed time: %s", err)
	}

	execution, err := time.ParseDuration(data.ExecutionTime)
	if err != nil {
		logDebugf("Failed to parse query metrics execution time: %s", err)
	}

	*metrics = AnalyticsMetrics{
		ElapsedTime:      elapsed,
		ExecutionTime:    execution,
		ResultCount:      data.ResultCount,
		ResultSize:       data.ResultSize,
		MutationCount:    data.MutationCount,
		SortCount:        data.SortCount,
		ErrorCount:       data.ErrorCount,
		WarningCount:     data.WarningCount,
		ProcessedObjects: data.ProcessedObjects,
	}
	return nil
}
// AnalyticsWarning encapsulates any warnings returned by a query.
type AnalyticsWarning struct {
	// Code is the server-assigned warning code.
	Code uint32
	// Message is the human-readable warning text.
	Message string
}
// fromData populates the warning from its wire representation; always nil.
func (warning *AnalyticsWarning) fromData(data jsonAnalyticsWarning) error {
	*warning = AnalyticsWarning{
		Code:    data.Code,
		Message: data.Message,
	}
	return nil
}
// AnalyticsMetaData provides access to the meta-data properties of a query result.
type AnalyticsMetaData struct {
	// RequestID is the server-assigned identifier for the request.
	RequestID string
	// ClientContextID is the client-supplied context identifier echoed back.
	ClientContextID string
	Metrics         AnalyticsMetrics
	// Signature describes the shape of the result rows, as returned by the server.
	Signature interface{}
	Warnings  []AnalyticsWarning
}
// fromData populates the meta-data from its wire representation, converting the
// nested metrics and warnings along the way.
func (meta *AnalyticsMetaData) fromData(data jsonAnalyticsResponse) error {
	var metrics AnalyticsMetrics
	if err := metrics.fromData(data.Metrics); err != nil {
		return err
	}

	warnings := make([]AnalyticsWarning, len(data.Warnings))
	for i := range data.Warnings {
		if err := warnings[i].fromData(data.Warnings[i]); err != nil {
			return err
		}
	}

	meta.RequestID = data.RequestID
	meta.ClientContextID = data.ClientContextID
	meta.Metrics = metrics
	meta.Signature = data.Signature
	meta.Warnings = warnings

	return nil
}
// AnalyticsResult allows access to the results of a query.
type AnalyticsResult struct {
	// reader streams rows from the underlying analytics response.
	reader analyticsRowReader
	// rowBytes holds the raw bytes of the current row, set by Next.
	rowBytes []byte
}
// newAnalyticsResult wraps a row reader in an AnalyticsResult.
func newAnalyticsResult(reader analyticsRowReader) *AnalyticsResult {
	result := &AnalyticsResult{}
	result.reader = reader
	return result
}
// analyticsRowReader abstracts the streaming row source backing an
// AnalyticsResult (implemented by the gocbcore response reader).
type analyticsRowReader interface {
	// NextRow returns the next raw row, or nil when the stream is exhausted.
	NextRow() []byte
	// Err reports any error encountered while streaming.
	Err() error
	// MetaData returns the raw meta-data bytes once the rows are exhausted.
	MetaData() ([]byte, error)
	// Close releases the underlying stream.
	Close() error
}
// Next assigns the next result from the results into the value pointer, returning whether the read was successful.
func (r *AnalyticsResult) Next() bool {
	next := r.reader.NextRow()
	if next == nil {
		return false
	}

	r.rowBytes = next
	return true
}
// Row returns the value of the current row
func (r *AnalyticsResult) Row(valuePtr interface{}) error {
	if r.rowBytes == nil {
		return ErrNoResult
	}

	// Hand the raw bytes over directly when the caller asked for them.
	switch ptr := valuePtr.(type) {
	case *json.RawMessage:
		*ptr = r.rowBytes
		return nil
	default:
		return json.Unmarshal(r.rowBytes, valuePtr)
	}
}
// Err returns any errors that have occurred on the stream
func (r *AnalyticsResult) Err() error {
	return r.reader.Err()
}
// Close marks the results as closed, returning any errors that occurred during reading the results.
func (r *AnalyticsResult) Close() error {
	return r.reader.Close()
}
// One assigns the first value from the results into the value pointer.
// It drains all remaining rows before returning, so it should only be used for
// very small resultsets - ideally of, at most, length 1.
// NOTE(review): despite draining the stream, this never calls Close() and never
// checks r.reader.Err() — presumably the reader closes itself once exhausted;
// confirm against the reader implementation.
func (r *AnalyticsResult) One(valuePtr interface{}) error {
	// Read the bytes from the first row
	valueBytes := r.reader.NextRow()
	if valueBytes == nil {
		return ErrNoResult
	}

	// Skip through the remaining rows
	for r.reader.NextRow() != nil {
		// do nothing with the row
	}

	return json.Unmarshal(valueBytes, valuePtr)
}
// MetaData returns any meta-data that was available from this query. Note that
// the meta-data will only be available once the object has been closed (either
// implicitly or explicitly).
func (r *AnalyticsResult) MetaData() (*AnalyticsMetaData, error) {
	raw, err := r.reader.MetaData()
	if err != nil {
		return nil, err
	}

	var decoded jsonAnalyticsResponse
	if err := json.Unmarshal(raw, &decoded); err != nil {
		return nil, err
	}

	meta := &AnalyticsMetaData{}
	if err := meta.fromData(decoded); err != nil {
		return nil, err
	}

	return meta, nil
}
// AnalyticsQuery executes the analytics query statement on the server.
func (c *Cluster) AnalyticsQuery(statement string, opts *AnalyticsOptions) (*AnalyticsResult, error) {
	if opts == nil {
		opts = &AnalyticsOptions{}
	}

	span := c.tracer.StartSpan("Query", opts.parentSpan).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	timeout := opts.Timeout
	if opts.Timeout == 0 {
		timeout = c.timeoutsConfig.AnalyticsTimeout
	}
	deadline := time.Now().Add(timeout)

	retryWrapper := c.retryStrategyWrapper
	if opts.RetryStrategy != nil {
		retryWrapper = newRetryStrategyWrapper(opts.RetryStrategy)
	}

	queryOpts, err := opts.toMap()
	if err != nil {
		return nil, AnalyticsError{
			InnerError:      wrapError(err, "failed to generate query options"),
			Statement:       statement,
			ClientContextID: opts.ClientContextID,
		}
	}

	// Priority=true is encoded as -1; zero is the default priority.
	priority := int32(0)
	if opts.Priority {
		priority = -1
	}

	queryOpts["statement"] = statement

	return c.execAnalyticsQuery(span, queryOpts, priority, deadline, retryWrapper)
}
// maybeGetAnalyticsOption returns the named option as a string, or "" when the
// option is absent or not a string.
func maybeGetAnalyticsOption(options map[string]interface{}, name string) string {
	value, ok := options[name].(string)
	if !ok {
		return ""
	}
	return value
}
// execAnalyticsQuery marshals the prepared option map and dispatches it to the
// analytics provider, wrapping any failure in an AnalyticsError that carries
// the statement and client context id (recovered from the option map).
func (c *Cluster) execAnalyticsQuery(
	span requestSpan,
	options map[string]interface{},
	priority int32,
	deadline time.Time,
	retryStrategy *retryStrategyWrapper,
) (*AnalyticsResult, error) {
	provider, err := c.getAnalyticsProvider()
	if err != nil {
		return nil, AnalyticsError{
			InnerError:      wrapError(err, "failed to get query provider"),
			Statement:       maybeGetAnalyticsOption(options, "statement"),
			ClientContextID: maybeGetAnalyticsOption(options, "client_context_id"),
		}
	}

	// The whole option map becomes the request payload.
	reqBytes, err := json.Marshal(options)
	if err != nil {
		return nil, AnalyticsError{
			InnerError:      wrapError(err, "failed to marshall query body"),
			Statement:       maybeGetAnalyticsOption(options, "statement"),
			ClientContextID: maybeGetAnalyticsOption(options, "client_context_id"),
		}
	}

	res, err := provider.AnalyticsQuery(gocbcore.AnalyticsQueryOptions{
		Payload:       reqBytes,
		Priority:      int(priority),
		RetryStrategy: retryStrategy,
		Deadline:      deadline,
		TraceContext:  span.Context(),
	})
	if err != nil {
		// Translate gocbcore errors into their gocb equivalents where possible.
		return nil, maybeEnhanceAnalyticsError(err)
	}

	return newAnalyticsResult(res), nil
}

View File

@ -0,0 +1,600 @@
package gocb
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"strings"
"time"
"github.com/google/uuid"
"github.com/pkg/errors"
)
// BucketType specifies the kind of bucket.
type BucketType string

const (
	// CouchbaseBucketType indicates a Couchbase bucket type (wire value "membase" for historical reasons).
	CouchbaseBucketType BucketType = "membase"

	// MemcachedBucketType indicates a Memcached bucket type.
	MemcachedBucketType BucketType = "memcached"

	// EphemeralBucketType indicates an Ephemeral bucket type.
	EphemeralBucketType BucketType = "ephemeral"
)
// ConflictResolutionType specifies the kind of conflict resolution to use for a bucket.
type ConflictResolutionType string

const (
	// ConflictResolutionTypeTimestamp specifies to use timestamp conflict resolution on the bucket ("last write wins").
	ConflictResolutionTypeTimestamp ConflictResolutionType = "lww"

	// ConflictResolutionTypeSequenceNumber specifies to use sequence number conflict resolution on the bucket.
	ConflictResolutionTypeSequenceNumber ConflictResolutionType = "seqno"
)
// EvictionPolicyType specifies the kind of eviction policy to use for a bucket.
type EvictionPolicyType string

const (
	// EvictionPolicyTypeFull specifies to use full eviction for a couchbase bucket.
	EvictionPolicyTypeFull EvictionPolicyType = "fullEviction"

	// EvictionPolicyTypeValueOnly specifies to use value only eviction for a couchbase bucket.
	EvictionPolicyTypeValueOnly EvictionPolicyType = "valueOnly"

	// EvictionPolicyTypeNotRecentlyUsed specifies to use not recently used (nru) eviction for an ephemeral bucket.
	// UNCOMMITTED: This API may change in the future.
	EvictionPolicyTypeNotRecentlyUsed EvictionPolicyType = "nruEviction"

	// EvictionPolicyTypeNoEviction specifies to use no eviction for an ephemeral bucket.
	// UNCOMMITTED: This API may change in the future.
	EvictionPolicyTypeNoEviction EvictionPolicyType = "noEviction"
)
// CompressionMode specifies the kind of compression to use for a bucket.
type CompressionMode string

const (
	// CompressionModeOff specifies to use no compression for a bucket.
	CompressionModeOff CompressionMode = "off"

	// CompressionModePassive specifies to use passive compression for a bucket.
	CompressionModePassive CompressionMode = "passive"

	// CompressionModeActive specifies to use active compression for a bucket.
	CompressionModeActive CompressionMode = "active"
)
// jsonBucketSettings is the wire representation of a bucket's configuration as
// returned by /pools/default/buckets; converted via BucketSettings.fromData.
type jsonBucketSettings struct {
	Name        string `json:"name"`
	Controllers struct {
		// Flush is the flush endpoint URI; non-empty means flushing is enabled.
		Flush string `json:"flush"`
	} `json:"controllers"`
	ReplicaIndex bool `json:"replicaIndex"`
	Quota        struct {
		RAM    uint64 `json:"ram"`
		RawRAM uint64 `json:"rawRAM"`
	} `json:"quota"`
	ReplicaNumber          uint32 `json:"replicaNumber"`
	BucketType             string `json:"bucketType"`
	ConflictResolutionType string `json:"conflictResolutionType"`
	EvictionPolicy         string `json:"evictionPolicy"`
	MaxTTL                 uint32 `json:"maxTTL"`
	CompressionMode        string `json:"compressionMode"`
}
// BucketSettings holds information about the settings for a bucket.
type BucketSettings struct {
	Name                 string
	FlushEnabled         bool
	ReplicaIndexDisabled bool // inverted so that zero value matches server default.
	// RAMQuotaMB is the memory quota in megabytes; must be at least 100.
	RAMQuotaMB uint64
	NumReplicas uint32     // NOTE: If not set this will set 0 replicas.
	BucketType  BucketType // Defaults to CouchbaseBucketType.
	EvictionPolicy EvictionPolicyType
	// MaxTTL is the maximum document expiry; sent to the server in whole seconds.
	MaxTTL          time.Duration
	CompressionMode CompressionMode
}
func (bs *BucketSettings) fromData(data jsonBucketSettings) error {
bs.Name = data.Name
bs.FlushEnabled = data.Controllers.Flush != ""
bs.ReplicaIndexDisabled = !data.ReplicaIndex
bs.RAMQuotaMB = data.Quota.RawRAM / 1024 / 1024
bs.NumReplicas = data.ReplicaNumber
bs.EvictionPolicy = EvictionPolicyType(data.EvictionPolicy)
bs.MaxTTL = time.Duration(data.MaxTTL) * time.Second
bs.CompressionMode = CompressionMode(data.CompressionMode)
switch data.BucketType {
case "membase":
bs.BucketType = CouchbaseBucketType
case "memcached":
bs.BucketType = MemcachedBucketType
case "ephemeral":
bs.BucketType = EphemeralBucketType
default:
return errors.New("unrecognized bucket type string")
}
return nil
}
// bucketMgrErrorResp is the wire representation of a bucket-management error
// body: a map of field name to error message.
type bucketMgrErrorResp struct {
	Errors map[string]string `json:"errors"`
}
// tryParseErrorMessage attempts to turn a non-200 management response body into
// a typed error. Returns nil only when the body could not be read at all; all
// other paths return a wrapped error built from the body.
func (bm *BucketManager) tryParseErrorMessage(req *mgmtRequest, resp *mgmtResponse) error {
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		logDebugf("Failed to read bucket manager response body: %s", err)
		return nil
	}

	if resp.StatusCode == 404 {
		// If it was a 404 then there's no chance of the response body containing any structure
		if strings.Contains(strings.ToLower(string(b)), "resource not found") {
			return makeGenericMgmtError(ErrBucketNotFound, req, resp)
		}

		return makeGenericMgmtError(errors.New(string(b)), req, resp)
	}

	var mgrErr bucketMgrErrorResp
	err = json.Unmarshal(b, &mgrErr)
	if err != nil {
		logDebugf("Failed to unmarshal error body: %s", err)
		return makeGenericMgmtError(errors.New(string(b)), req, resp)
	}

	var bodyErr error
	var firstErr string
	// Pick an arbitrary entry from the errors map (iteration order is random).
	// NOTE(review): if the map is empty, firstErr stays "" and an empty error
	// message is returned — presumably the server always populates it; confirm.
	for _, err := range mgrErr.Errors {
		firstErr = strings.ToLower(err)
		break
	}
	if strings.Contains(firstErr, "bucket with given name already exists") {
		bodyErr = ErrBucketExists
	} else {
		bodyErr = errors.New(firstErr)
	}

	return makeGenericMgmtError(bodyErr, req, resp)
}
// tryParseFlushErrorMessage turns a non-200 flush response into an error.
// Flush doesn't use the same body format as anything else...
func (bm *BucketManager) tryParseFlushErrorMessage(req *mgmtRequest, resp *mgmtResponse) error {
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		logDebugf("Failed to read bucket manager response body: %s", err)
		return makeMgmtBadStatusError("failed to flush bucket", req, resp)
	}

	// Flush errors arrive as a flat map of field -> message.
	var bodyErrMsgs map[string]string
	err = json.Unmarshal(b, &bodyErrMsgs)
	if err != nil {
		return errors.New(string(b))
	}

	// "_" carries the operation-level error message.
	if errMsg, ok := bodyErrMsgs["_"]; ok {
		if strings.Contains(strings.ToLower(errMsg), "flush is disabled") {
			return ErrBucketNotFlushable
		}
	}

	return errors.New(string(b))
}
// BucketManager provides methods for performing bucket management operations.
// See BucketManager for methods that allow creating and removing buckets themselves.
type BucketManager struct {
	// provider executes management HTTP requests against the cluster.
	provider mgmtProvider
	// tracer records a span per operation.
	tracer requestTracer
}
// GetBucketOptions is the set of options available to the bucket manager GetBucket operation.
type GetBucketOptions struct {
	// Timeout is the per-operation timeout.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for this operation.
	RetryStrategy RetryStrategy
}
// GetBucket returns settings for a bucket on the cluster.
// It delegates to get so other operations can share the fetch logic.
func (bm *BucketManager) GetBucket(bucketName string, opts *GetBucketOptions) (*BucketSettings, error) {
	if opts == nil {
		opts = &GetBucketOptions{}
	}

	span := bm.tracer.StartSpan("GetBucket", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	return bm.get(span.Context(), bucketName, opts.RetryStrategy, opts.Timeout)
}
// get fetches a single bucket's configuration from the cluster manager
// (GET /pools/default/buckets/<name>) and converts it to BucketSettings.
func (bm *BucketManager) get(tracectx requestSpanContext, bucketName string,
	strategy RetryStrategy, timeout time.Duration) (*BucketSettings, error) {
	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s", bucketName),
		Method:        "GET",
		IsIdempotent:  true, // GET has no side effects, safe to retry
		RetryStrategy: strategy,
		UniqueID:      uuid.New().String(),
		Timeout:       timeout,
		parentSpan:    tracectx,
	}
	resp, err := bm.provider.executeMgmtRequest(req)
	if err != nil {
		return nil, makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		// Prefer the server-provided error message when one can be parsed.
		bktErr := bm.tryParseErrorMessage(&req, resp)
		if bktErr != nil {
			return nil, bktErr
		}

		return nil, makeMgmtBadStatusError("failed to get bucket", &req, resp)
	}

	var bucketData jsonBucketSettings
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&bucketData)
	if err != nil {
		return nil, err
	}

	var settings BucketSettings
	err = settings.fromData(bucketData)
	if err != nil {
		return nil, err
	}

	return &settings, nil
}
// GetAllBucketsOptions is the set of options available to the bucket manager GetAll operation.
type GetAllBucketsOptions struct {
	// Timeout is the per-operation timeout.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for this operation.
	RetryStrategy RetryStrategy
}
// GetAllBuckets returns a list of all active buckets on the cluster,
// keyed by bucket name.
func (bm *BucketManager) GetAllBuckets(opts *GetAllBucketsOptions) (map[string]BucketSettings, error) {
	if opts == nil {
		opts = &GetAllBucketsOptions{}
	}

	span := bm.tracer.StartSpan("GetAllBuckets", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          "/pools/default/buckets",
		Method:        "GET",
		IsIdempotent:  true, // GET has no side effects, safe to retry
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := bm.provider.executeMgmtRequest(req)
	if err != nil {
		return nil, makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		// Prefer the server-provided error message when one can be parsed.
		bktErr := bm.tryParseErrorMessage(&req, resp)
		if bktErr != nil {
			return nil, bktErr
		}

		return nil, makeMgmtBadStatusError("failed to get all buckets", &req, resp)
	}

	var bucketsData []*jsonBucketSettings
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&bucketsData)
	if err != nil {
		return nil, err
	}

	buckets := make(map[string]BucketSettings, len(bucketsData))
	for _, bucketData := range bucketsData {
		var bucket BucketSettings
		err := bucket.fromData(*bucketData)
		if err != nil {
			return nil, err
		}

		buckets[bucket.Name] = bucket
	}

	return buckets, nil
}
// CreateBucketSettings are the settings available when creating a bucket.
type CreateBucketSettings struct {
	BucketSettings
	// ConflictResolutionType can only be set at creation time, not on update.
	ConflictResolutionType ConflictResolutionType
}
// CreateBucketOptions is the set of options available to the bucket manager CreateBucket operation.
type CreateBucketOptions struct {
	// Timeout is the per-operation timeout.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for this operation.
	RetryStrategy RetryStrategy
}
// CreateBucket creates a bucket on the cluster by POSTing form-encoded
// settings to /pools/default/buckets. The server acknowledges with 202.
func (bm *BucketManager) CreateBucket(settings CreateBucketSettings, opts *CreateBucketOptions) error {
	if opts == nil {
		opts = &CreateBucketOptions{}
	}

	span := bm.tracer.StartSpan("CreateBucket", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	posts, err := bm.settingsToPostData(&settings.BucketSettings)
	if err != nil {
		return err
	}

	// Conflict resolution can only be specified at creation time.
	if settings.ConflictResolutionType != "" {
		posts.Add("conflictResolutionType", string(settings.ConflictResolutionType))
	}

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          "/pools/default/buckets",
		Method:        "POST",
		Body:          []byte(posts.Encode()),
		ContentType:   "application/x-www-form-urlencoded",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := bm.provider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	// Bucket creation is asynchronous; the server returns 202 Accepted.
	if resp.StatusCode != 202 {
		bktErr := bm.tryParseErrorMessage(&req, resp)
		if bktErr != nil {
			return bktErr
		}

		return makeMgmtBadStatusError("failed to create bucket", &req, resp)
	}

	return nil
}
// UpdateBucketOptions is the set of options available to the bucket manager UpdateBucket operation.
type UpdateBucketOptions struct {
	// Timeout is the per-operation timeout.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for this operation.
	RetryStrategy RetryStrategy
}
// UpdateBucket updates a bucket on the cluster by POSTing form-encoded
// settings to the bucket's own endpoint; a 200 response indicates success.
func (bm *BucketManager) UpdateBucket(settings BucketSettings, opts *UpdateBucketOptions) error {
	if opts == nil {
		opts = &UpdateBucketOptions{}
	}

	span := bm.tracer.StartSpan("UpdateBucket", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	posts, err := bm.settingsToPostData(&settings)
	if err != nil {
		return err
	}

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s", settings.Name),
		Method:        "POST",
		Body:          []byte(posts.Encode()),
		ContentType:   "application/x-www-form-urlencoded",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := bm.provider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		// Prefer the server-provided error message when one can be parsed.
		bktErr := bm.tryParseErrorMessage(&req, resp)
		if bktErr != nil {
			return bktErr
		}

		return makeMgmtBadStatusError("failed to update bucket", &req, resp)
	}

	return nil
}
// DropBucketOptions is the set of options available to the bucket manager DropBucket operation.
type DropBucketOptions struct {
	// Timeout is the per-operation timeout.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for this operation.
	RetryStrategy RetryStrategy
}
// DropBucket will delete a bucket from the cluster by name
// (DELETE /pools/default/buckets/<name>).
func (bm *BucketManager) DropBucket(name string, opts *DropBucketOptions) error {
	if opts == nil {
		opts = &DropBucketOptions{}
	}

	span := bm.tracer.StartSpan("DropBucket", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s", name),
		Method:        "DELETE",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := bm.provider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		// Prefer the server-provided error message when one can be parsed.
		bktErr := bm.tryParseErrorMessage(&req, resp)
		if bktErr != nil {
			return bktErr
		}

		return makeMgmtBadStatusError("failed to drop bucket", &req, resp)
	}

	return nil
}
// FlushBucketOptions is the set of options available to the bucket manager FlushBucket operation.
type FlushBucketOptions struct {
	// Timeout is the per-operation timeout.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for this operation.
	RetryStrategy RetryStrategy
}
// FlushBucket will delete all the of the data from a bucket.
// Keep in mind that you must have flushing enabled in the buckets configuration.
func (bm *BucketManager) FlushBucket(name string, opts *FlushBucketOptions) error {
	if opts == nil {
		opts = &FlushBucketOptions{}
	}

	span := bm.tracer.StartSpan("FlushBucket", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s/controller/doFlush", name),
		Method:        "POST",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := bm.provider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		// Flush errors use a different body format from other bucket operations.
		return bm.tryParseFlushErrorMessage(&req, resp)
	}

	return nil
}
// settingsToPostData validates a BucketSettings and converts it to the
// form-encoded values expected by the bucket create/update REST endpoints.
// Validation enforces the per-bucket-type constraints on replicas, eviction
// policy, and maxTTL.
func (bm *BucketManager) settingsToPostData(settings *BucketSettings) (url.Values, error) {
	posts := url.Values{}

	if settings.Name == "" {
		return nil, makeInvalidArgumentsError("Name invalid, must be set.")
	}

	// NOTE(review): the check accepts exactly 100MB while the message says
	// "greater than 100MB" — the minimum is 100MB inclusive.
	if settings.RAMQuotaMB < 100 {
		return nil, makeInvalidArgumentsError("Memory quota invalid, must be greater than 100MB")
	}

	if settings.MaxTTL > 0 && settings.BucketType == MemcachedBucketType {
		return nil, makeInvalidArgumentsError("maxTTL is not supported for memcached buckets")
	}

	posts.Add("name", settings.Name)
	// posts.Add("saslPassword", settings.Password)

	if settings.FlushEnabled {
		posts.Add("flushEnabled", "1")
	} else {
		posts.Add("flushEnabled", "0")
	}

	// replicaIndex can't be set at all on ephemeral buckets.
	if settings.BucketType != EphemeralBucketType {
		if settings.ReplicaIndexDisabled {
			posts.Add("replicaIndex", "0")
		} else {
			posts.Add("replicaIndex", "1")
		}
	}

	switch settings.BucketType {
	case CouchbaseBucketType:
		posts.Add("bucketType", string(settings.BucketType))
		posts.Add("replicaNumber", fmt.Sprintf("%d", settings.NumReplicas))
	case MemcachedBucketType:
		posts.Add("bucketType", string(settings.BucketType))
		if settings.NumReplicas > 0 {
			return nil, makeInvalidArgumentsError("replicas cannot be used with memcached buckets")
		}
	case EphemeralBucketType:
		posts.Add("bucketType", string(settings.BucketType))
		posts.Add("replicaNumber", fmt.Sprintf("%d", settings.NumReplicas))
	default:
		return nil, makeInvalidArgumentsError("Unrecognized bucket type")
	}

	posts.Add("ramQuotaMB", fmt.Sprintf("%d", settings.RAMQuotaMB))

	if settings.EvictionPolicy != "" {
		// Each bucket type accepts a disjoint set of eviction policies.
		switch settings.BucketType {
		case MemcachedBucketType:
			return nil, makeInvalidArgumentsError("eviction policy is not valid for memcached buckets")
		case CouchbaseBucketType:
			if settings.EvictionPolicy == EvictionPolicyTypeNoEviction || settings.EvictionPolicy == EvictionPolicyTypeNotRecentlyUsed {
				return nil, makeInvalidArgumentsError("eviction policy is not valid for couchbase buckets")
			}
		case EphemeralBucketType:
			if settings.EvictionPolicy == EvictionPolicyTypeFull || settings.EvictionPolicy == EvictionPolicyTypeValueOnly {
				return nil, makeInvalidArgumentsError("eviction policy is not valid for ephemeral buckets")
			}
		}
		posts.Add("evictionPolicy", string(settings.EvictionPolicy))
	}

	// The server expects maxTTL in whole seconds.
	if settings.MaxTTL > 0 {
		posts.Add("maxTTL", fmt.Sprintf("%d", settings.MaxTTL/time.Second))
	}

	if settings.CompressionMode != "" {
		posts.Add("compressionMode", string(settings.CompressionMode))
	}

	return posts, nil
}

128
vendor/github.com/couchbase/gocb/v2/cluster_diag.go generated vendored Normal file
View File

@ -0,0 +1,128 @@
package gocb
import (
"encoding/json"
"time"
"github.com/couchbase/gocbcore/v9"
"github.com/google/uuid"
)
// EndPointDiagnostics represents a single entry in a diagnostics report.
type EndPointDiagnostics struct {
	// Type is the service this endpoint belongs to.
	Type ServiceType
	// ID uniquely identifies the endpoint.
	ID string
	// Local is the local address of the connection.
	Local string
	// Remote is the remote address of the connection.
	Remote string
	// LastActivity is the time of the last traffic on the endpoint.
	LastActivity time.Time
	// State is the current connection state of the endpoint.
	State EndpointState
	// Namespace is the scope (e.g. bucket) the endpoint is associated with.
	Namespace string
}
// DiagnosticsResult encapsulates the results of a Diagnostics operation.
type DiagnosticsResult struct {
	// ID is the report identifier (user supplied or generated).
	ID string
	// Services maps service name to the endpoints observed for it.
	Services map[string][]EndPointDiagnostics
	// sdk identifies the SDK that produced the report (serialized as "sdk").
	sdk string
	// State is the aggregate cluster state derived from the endpoints.
	State ClusterState
}
// jsonDiagnosticEntry is the wire form of a single endpoint entry in the
// marshalled diagnostics report.
type jsonDiagnosticEntry struct {
	ID             string `json:"id,omitempty"`
	LastActivityUs uint64 `json:"last_activity_us,omitempty"` // NOTE(review): MarshalJSON fills this with Nanoseconds() despite the "_us" name -- confirm intended units
	Remote         string `json:"remote,omitempty"`
	Local          string `json:"local,omitempty"`
	State          string `json:"state,omitempty"`
	Details        string `json:"details,omitempty"`
	Namespace      string `json:"namespace,omitempty"`
}

// jsonDiagnosticReport is the wire form of a complete (version 2)
// diagnostics report produced by DiagnosticsResult.MarshalJSON.
type jsonDiagnosticReport struct {
	Version  int16                            `json:"version"`
	SDK      string                           `json:"sdk,omitempty"`
	ID       string                           `json:"id,omitempty"`
	Services map[string][]jsonDiagnosticEntry `json:"services"`
	State    string                           `json:"state"`
}
// MarshalJSON generates a JSON representation of this diagnostics report.
func (report *DiagnosticsResult) MarshalJSON() ([]byte, error) {
	out := jsonDiagnosticReport{
		Version:  2,
		SDK:      report.sdk,
		ID:       report.ID,
		Services: make(map[string][]jsonDiagnosticEntry),
		State:    clusterStateToString(report.State),
	}
	for _, endpoints := range report.Services {
		for _, ep := range endpoints {
			key := serviceTypeToString(ep.Type)
			// NOTE(review): "last_activity_us" is populated with nanoseconds
			// here; confirm against the diagnostics report format.
			out.Services[key] = append(out.Services[key], jsonDiagnosticEntry{
				ID:             ep.ID,
				LastActivityUs: uint64(time.Since(ep.LastActivity).Nanoseconds()),
				Remote:         ep.Remote,
				Local:          ep.Local,
				State:          endpointStateToString(ep.State),
				Details:        "",
				Namespace:      ep.Namespace,
			})
		}
	}
	return json.Marshal(&out)
}
// DiagnosticsOptions are the options that are available for use with the Diagnostics operation.
type DiagnosticsOptions struct {
	ReportID string // identifier stamped onto the report; a random UUID is generated when empty
}
// Diagnostics returns information about the internal state of the SDK.
func (c *Cluster) Diagnostics(opts *DiagnosticsOptions) (*DiagnosticsResult, error) {
	if opts == nil {
		opts = &DiagnosticsOptions{}
	}
	if opts.ReportID == "" {
		opts.ReportID = uuid.New().String()
	}

	provider, err := c.getDiagnosticsProvider()
	if err != nil {
		return nil, err
	}

	agentReport, err := provider.Diagnostics(gocbcore.DiagnosticsOptions{})
	if err != nil {
		return nil, err
	}

	// Translate the core connection states into the public report form.
	result := &DiagnosticsResult{
		ID:       opts.ReportID,
		Services: make(map[string][]EndPointDiagnostics),
		sdk:      Identifier(),
		State:    ClusterState(agentReport.State),
	}
	result.Services["kv"] = make([]EndPointDiagnostics, 0)
	for _, conn := range agentReport.MemdConns {
		result.Services["kv"] = append(result.Services["kv"], EndPointDiagnostics{
			Type:         ServiceTypeKeyValue,
			State:        EndpointState(conn.State),
			Local:        conn.LocalAddr,
			Remote:       conn.RemoteAddr,
			LastActivity: conn.LastActivity,
			Namespace:    conn.Scope,
			ID:           conn.ID,
		})
	}
	return result, nil
}

92
vendor/github.com/couchbase/gocb/v2/cluster_ping.go generated vendored Normal file
View File

@ -0,0 +1,92 @@
package gocb
import (
"time"
"github.com/couchbase/gocbcore/v9"
"github.com/google/uuid"
)
// Ping will ping a list of services and verify they are active and
// responding in an acceptable period of time.
func (c *Cluster) Ping(opts *PingOptions) (*PingResult, error) {
	if opts == nil {
		opts = &PingOptions{}
	}
	diagProvider, err := c.getDiagnosticsProvider()
	if err != nil {
		return nil, err
	}
	return ping(diagProvider, opts, c.timeoutsConfig)
}
// ping executes a ping through the supplied diagnostics provider and
// translates the core result into the public PingResult form.
func ping(provider diagnosticsProvider, opts *PingOptions, timeouts TimeoutsConfig) (*PingResult, error) {
	coreServices := make([]gocbcore.ServiceType, len(opts.ServiceTypes))
	for idx, serviceType := range opts.ServiceTypes {
		coreServices[idx] = gocbcore.ServiceType(serviceType)
	}
	coreopts := gocbcore.PingOptions{
		ServiceTypes: coreServices,
	}

	now := time.Now()
	if opts.Timeout == 0 {
		// No explicit timeout: each service gets its configured default.
		coreopts.KVDeadline = now.Add(timeouts.KVTimeout)
		coreopts.CapiDeadline = now.Add(timeouts.ViewTimeout)
		coreopts.N1QLDeadline = now.Add(timeouts.QueryTimeout)
		coreopts.CbasDeadline = now.Add(timeouts.AnalyticsTimeout)
		coreopts.FtsDeadline = now.Add(timeouts.SearchTimeout)
		coreopts.MgmtDeadline = now.Add(timeouts.ManagementTimeout)
	} else {
		// A single explicit timeout applies uniformly to all services.
		deadline := now.Add(opts.Timeout)
		coreopts.KVDeadline = deadline
		coreopts.CapiDeadline = deadline
		coreopts.N1QLDeadline = deadline
		coreopts.CbasDeadline = deadline
		coreopts.FtsDeadline = deadline
		coreopts.MgmtDeadline = deadline
	}

	reportID := opts.ReportID
	if reportID == "" {
		reportID = uuid.New().String()
	}

	result, err := provider.Ping(coreopts)
	if err != nil {
		return nil, err
	}

	reportSvcs := make(map[ServiceType][]EndpointPingReport)
	for coreSvc, endpoints := range result.Services {
		reports := make([]EndpointPingReport, len(endpoints))
		for i, ep := range endpoints {
			var errStr string
			if ep.Error != nil {
				errStr = ep.Error.Error()
			}
			reports[i] = EndpointPingReport{
				ID:        ep.ID,
				Remote:    ep.Endpoint,
				State:     PingState(ep.State),
				Error:     errStr,
				Namespace: ep.Scope,
				Latency:   ep.Latency,
			}
		}
		reportSvcs[ServiceType(coreSvc)] = reports
	}

	return &PingResult{
		ID:       reportID,
		sdk:      Identifier() + " " + "gocbcore/" + gocbcore.Version(),
		Services: reportSvcs,
	}, nil
}

314
vendor/github.com/couchbase/gocb/v2/cluster_query.go generated vendored Normal file
View File

@ -0,0 +1,314 @@
package gocb
import (
"encoding/json"
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// jsonQueryMetrics is the wire form of the "metrics" section of a query
// response.
type jsonQueryMetrics struct {
	ElapsedTime   string `json:"elapsedTime"`   // duration string, parsed with time.ParseDuration
	ExecutionTime string `json:"executionTime"` // duration string, parsed with time.ParseDuration
	ResultCount   uint64 `json:"resultCount"`
	ResultSize    uint64 `json:"resultSize"`
	MutationCount uint64 `json:"mutationCount,omitempty"`
	SortCount     uint64 `json:"sortCount,omitempty"`
	ErrorCount    uint64 `json:"errorCount,omitempty"`
	WarningCount  uint64 `json:"warningCount,omitempty"`
}

// jsonQueryWarning is the wire form of a single query warning.
type jsonQueryWarning struct {
	Code    uint32 `json:"code"`
	Message string `json:"msg"`
}

// jsonQueryResponse is the wire form of the query response meta-data
// consumed by QueryMetaData.fromData.
type jsonQueryResponse struct {
	RequestID       string             `json:"requestID"`
	ClientContextID string             `json:"clientContextID"`
	Status          QueryStatus        `json:"status"`
	Warnings        []jsonQueryWarning `json:"warnings"`
	Metrics         jsonQueryMetrics   `json:"metrics"`
	Profile         interface{}        `json:"profile"`
	Signature       interface{}        `json:"signature"`
	Prepared        string             `json:"prepared"`
}
// QueryMetrics encapsulates various metrics gathered during a queries execution.
type QueryMetrics struct {
	ElapsedTime   time.Duration // round-trip time as reported by the server
	ExecutionTime time.Duration // server-side execution time
	ResultCount   uint64
	ResultSize    uint64
	MutationCount uint64
	SortCount     uint64
	ErrorCount    uint64
	WarningCount  uint64
}
// fromData populates the metrics from their wire representation. Duration
// strings that fail to parse are logged and left at zero rather than
// failing the whole query.
func (metrics *QueryMetrics) fromData(data jsonQueryMetrics) error {
	elapsed, parseErr := time.ParseDuration(data.ElapsedTime)
	if parseErr != nil {
		logDebugf("Failed to parse query metrics elapsed time: %s", parseErr)
	}
	execution, parseErr := time.ParseDuration(data.ExecutionTime)
	if parseErr != nil {
		logDebugf("Failed to parse query metrics execution time: %s", parseErr)
	}
	*metrics = QueryMetrics{
		ElapsedTime:   elapsed,
		ExecutionTime: execution,
		ResultCount:   data.ResultCount,
		ResultSize:    data.ResultSize,
		MutationCount: data.MutationCount,
		SortCount:     data.SortCount,
		ErrorCount:    data.ErrorCount,
		WarningCount:  data.WarningCount,
	}
	return nil
}
// QueryWarning encapsulates any warnings returned by a query.
type QueryWarning struct {
	Code    uint32 // server-assigned warning code
	Message string // human-readable warning text
}
// fromData populates the warning from its wire representation.
func (warning *QueryWarning) fromData(data jsonQueryWarning) error {
	*warning = QueryWarning{
		Code:    data.Code,
		Message: data.Message,
	}
	return nil
}
// QueryMetaData provides access to the meta-data properties of a query result.
type QueryMetaData struct {
	RequestID       string
	ClientContextID string
	Status          QueryStatus
	Metrics         QueryMetrics
	Signature       interface{}
	Warnings        []QueryWarning
	Profile         interface{}
	preparedName    string // name of the prepared statement, when the server returned one
}
// fromData populates the meta-data from its wire representation, converting
// nested metrics and warnings as it goes.
func (meta *QueryMetaData) fromData(data jsonQueryResponse) error {
	var metrics QueryMetrics
	if err := metrics.fromData(data.Metrics); err != nil {
		return err
	}

	warnings := make([]QueryWarning, len(data.Warnings))
	for i, w := range data.Warnings {
		if err := warnings[i].fromData(w); err != nil {
			return err
		}
	}

	*meta = QueryMetaData{
		RequestID:       data.RequestID,
		ClientContextID: data.ClientContextID,
		Status:          data.Status,
		Metrics:         metrics,
		Signature:       data.Signature,
		Warnings:        warnings,
		Profile:         data.Profile,
		preparedName:    data.Prepared,
	}
	return nil
}
// QueryResult allows access to the results of a query.
type QueryResult struct {
	reader   queryRowReader // streaming row source backing this result
	rowBytes []byte         // raw bytes of the current row, set by Next
}
// newQueryResult wraps a row reader in a QueryResult.
func newQueryResult(reader queryRowReader) *QueryResult {
	result := &QueryResult{reader: reader}
	return result
}
// Next assigns the next result from the results into the value pointer, returning whether the read was successful.
func (r *QueryResult) Next() bool {
	next := r.reader.NextRow()
	if next == nil {
		return false
	}
	r.rowBytes = next
	return true
}
// Row returns the contents of the current row
func (r *QueryResult) Row(valuePtr interface{}) error {
	if r.rowBytes == nil {
		return ErrNoResult
	}
	// Hand back the raw bytes directly when the caller asked for them.
	switch ptr := valuePtr.(type) {
	case *json.RawMessage:
		*ptr = r.rowBytes
		return nil
	default:
		return json.Unmarshal(r.rowBytes, valuePtr)
	}
}
// Err returns any errors that have occurred on the stream
func (r *QueryResult) Err() error {
	return r.reader.Err()
}
// Close marks the results as closed, returning any errors that occurred during reading the results.
func (r *QueryResult) Close() error {
	return r.reader.Close()
}
// One assigns the first value from the results into the value pointer.
// It will close the results but not before iterating through all remaining
// results, as such this should only be used for very small resultsets - ideally
// of, at most, length 1.
func (r *QueryResult) One(valuePtr interface{}) error {
	// Read the bytes from the first row
	valueBytes := r.reader.NextRow()
	if valueBytes == nil {
		return ErrNoResult
	}

	// Skip through the remaining rows
	for r.reader.NextRow() != nil {
		// do nothing with the row
	}

	// NOTE(review): despite the doc comment above, Close() is never called
	// explicitly here -- presumably draining the reader releases the stream;
	// confirm against the queryRowReader implementation.
	return json.Unmarshal(valueBytes, valuePtr)
}
// MetaData returns any meta-data that was available from this query. Note that
// the meta-data will only be available once the object has been closed (either
// implicitly or explicitly).
func (r *QueryResult) MetaData() (*QueryMetaData, error) {
	raw, err := r.reader.MetaData()
	if err != nil {
		return nil, err
	}

	var decoded jsonQueryResponse
	if err := json.Unmarshal(raw, &decoded); err != nil {
		return nil, err
	}

	meta := &QueryMetaData{}
	if err := meta.fromData(decoded); err != nil {
		return nil, err
	}
	return meta, nil
}
// queryRowReader abstracts the streaming source of rows and meta-data
// backing a QueryResult.
type queryRowReader interface {
	NextRow() []byte
	Err() error
	MetaData() ([]byte, error)
	Close() error
	PreparedName() (string, error)
}
// Query executes the query statement on the server.
func (c *Cluster) Query(statement string, opts *QueryOptions) (*QueryResult, error) {
	if opts == nil {
		opts = &QueryOptions{}
	}

	span := c.tracer.StartSpan("Query", opts.parentSpan).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	// Resolve the effective timeout and retry strategy before dispatch.
	queryTimeout := opts.Timeout
	if queryTimeout == 0 {
		queryTimeout = c.timeoutsConfig.QueryTimeout
	}
	deadline := time.Now().Add(queryTimeout)

	wrapper := c.retryStrategyWrapper
	if opts.RetryStrategy != nil {
		wrapper = newRetryStrategyWrapper(opts.RetryStrategy)
	}

	queryOpts, err := opts.toMap()
	if err != nil {
		return nil, QueryError{
			InnerError:      wrapError(err, "failed to generate query options"),
			Statement:       statement,
			ClientContextID: opts.ClientContextID,
		}
	}
	queryOpts["statement"] = statement

	return c.execN1qlQuery(span, queryOpts, deadline, wrapper, opts.Adhoc)
}
// maybeGetQueryOption returns the named option when it is present and a
// string; otherwise it returns the empty string.
func maybeGetQueryOption(options map[string]interface{}, name string) string {
	value, ok := options[name].(string)
	if !ok {
		return ""
	}
	return value
}
// execN1qlQuery serialises the option map and dispatches it to the query
// provider, using the prepared-statement path unless adHoc is set. The
// supplied span covers the whole request; a child span covers encoding only.
func (c *Cluster) execN1qlQuery(
	span requestSpan,
	options map[string]interface{},
	deadline time.Time,
	retryStrategy *retryStrategyWrapper,
	adHoc bool,
) (*QueryResult, error) {
	provider, err := c.getQueryProvider()
	if err != nil {
		return nil, QueryError{
			InnerError:      wrapError(err, "failed to get query provider"),
			Statement:       maybeGetQueryOption(options, "statement"),
			ClientContextID: maybeGetQueryOption(options, "client_context_id"),
		}
	}

	// Time the request-body encoding under its own child span.
	eSpan := c.tracer.StartSpan("request_encoding", span.Context())
	reqBytes, err := json.Marshal(options)
	eSpan.Finish()
	if err != nil {
		return nil, QueryError{
			InnerError:      wrapError(err, "failed to marshall query body"),
			Statement:       maybeGetQueryOption(options, "statement"),
			ClientContextID: maybeGetQueryOption(options, "client_context_id"),
		}
	}

	var res queryRowReader
	var qErr error
	if adHoc {
		// Ad-hoc: execute the statement directly without server-side preparation.
		res, qErr = provider.N1QLQuery(gocbcore.N1QLQueryOptions{
			Payload:       reqBytes,
			RetryStrategy: retryStrategy,
			Deadline:      deadline,
			TraceContext:  span.Context(),
		})
	} else {
		res, qErr = provider.PreparedN1QLQuery(gocbcore.N1QLQueryOptions{
			Payload:       reqBytes,
			RetryStrategy: retryStrategy,
			Deadline:      deadline,
			TraceContext:  span.Context(),
		})
	}
	if qErr != nil {
		return nil, maybeEnhanceQueryError(qErr)
	}

	return newQueryResult(res), nil
}

View File

@ -0,0 +1,558 @@
package gocb
import (
"encoding/json"
"errors"
"regexp"
"strings"
"time"
)
// QueryIndexManager provides methods for performing Couchbase query index management.
type QueryIndexManager struct {
	provider      queryIndexQueryProvider // executes the N1QL statements issued by this manager
	globalTimeout time.Duration           // default timeout applied when an operation specifies none
	tracer        requestTracer
}

// queryIndexQueryProvider is the minimal query surface the index manager
// needs (e.g. a Cluster).
type queryIndexQueryProvider interface {
	Query(statement string, opts *QueryOptions) (*QueryResult, error)
}
// tryParseErrorMessage maps server error text onto the package sentinels
// ErrIndexNotFound / ErrIndexExists, preserving the original QueryError
// context; anything unrecognised is returned unchanged.
func (qm *QueryIndexManager) tryParseErrorMessage(err error) error {
	var queryErr *QueryError
	if !errors.As(err, &queryErr) || len(queryErr.Errors) == 0 {
		return err
	}

	// The server doesn't return meaningful error codes when it comes to index management so we need to go spelunking.
	msg := strings.ToLower(queryErr.Errors[0].Message)

	var innerErr error
	if match, matchErr := regexp.MatchString(".*?ndex .*? not found.*", msg); matchErr == nil && match {
		innerErr = ErrIndexNotFound
	} else if match, matchErr := regexp.MatchString(".*?ndex .*? already exists.*", msg); matchErr == nil && match {
		innerErr = ErrIndexExists
	}
	if innerErr == nil {
		return err
	}

	return QueryError{
		InnerError:      innerErr,
		Statement:       queryErr.Statement,
		ClientContextID: queryErr.ClientContextID,
		Errors:          queryErr.Errors,
		Endpoint:        queryErr.Endpoint,
		RetryReasons:    queryErr.RetryReasons,
		RetryAttempts:   queryErr.RetryAttempts,
	}
}
// doQuery runs a management statement and collects every row as raw bytes,
// applying the manager's default timeout when none was supplied. Rows that
// fail to read are logged and skipped.
func (qm *QueryIndexManager) doQuery(q string, opts *QueryOptions) ([][]byte, error) {
	if opts.Timeout == 0 {
		opts.Timeout = qm.globalTimeout
	}

	result, err := qm.provider.Query(q, opts)
	if err != nil {
		return nil, qm.tryParseErrorMessage(err)
	}

	var rows [][]byte
	for result.Next() {
		var row json.RawMessage
		if rowErr := result.Row(&row); rowErr != nil {
			logWarnf("management operation failed to read row: %s", rowErr)
			continue
		}
		rows = append(rows, row)
	}
	if streamErr := result.Err(); streamErr != nil {
		return nil, qm.tryParseErrorMessage(streamErr)
	}

	return rows, nil
}
// jsonQueryIndex is the wire form of a GSI index row returned from
// system:indexes.
type jsonQueryIndex struct {
	Name      string         `json:"name"`
	IsPrimary bool           `json:"is_primary"`
	Type      QueryIndexType `json:"using"`
	State     string         `json:"state"`
	Keyspace  string         `json:"keyspace_id"`
	Namespace string         `json:"namespace_id"`
	IndexKey  []string       `json:"index_key"`
	Condition string         `json:"condition"`
}

// QueryIndex represents a Couchbase GSI index.
type QueryIndex struct {
	Name      string
	IsPrimary bool
	Type      QueryIndexType
	State     string // e.g. "deferred", "pending", "online"
	Keyspace  string
	Namespace string
	IndexKey  []string
	Condition string
}
// fromData populates the index from its wire representation.
func (index *QueryIndex) fromData(data jsonQueryIndex) error {
	*index = QueryIndex{
		Name:      data.Name,
		IsPrimary: data.IsPrimary,
		Type:      data.Type,
		State:     data.State,
		Keyspace:  data.Keyspace,
		Namespace: data.Namespace,
		IndexKey:  data.IndexKey,
		Condition: data.Condition,
	}
	return nil
}
// createQueryIndexOptions carries the shared options for the internal
// createIndex helper.
type createQueryIndexOptions struct {
	IgnoreIfExists bool // when true, ErrIndexExists is swallowed
	Deferred       bool // create the index with {"defer_build": true}
	Timeout        time.Duration
	RetryStrategy  RetryStrategy
}
// createIndex builds and executes a CREATE [PRIMARY] INDEX statement; an
// empty fields list produces a primary index.
func (qm *QueryIndexManager) createIndex(
	tracectx requestSpanContext,
	bucketName, indexName string,
	fields []string,
	opts createQueryIndexOptions,
) error {
	qs := "CREATE PRIMARY INDEX"
	if len(fields) > 0 {
		qs = "CREATE INDEX"
	}
	if indexName != "" {
		qs += " `" + indexName + "`"
	}
	qs += " ON `" + bucketName + "`"
	if len(fields) > 0 {
		qs += " (`" + strings.Join(fields, "`, `") + "`)"
	}
	if opts.Deferred {
		qs += " WITH {\"defer_build\": true}"
	}

	_, err := qm.doQuery(qs, &QueryOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    tracectx,
	})
	switch {
	case err == nil:
		return nil
	case opts.IgnoreIfExists && errors.Is(err, ErrIndexExists):
		return nil
	default:
		return err
	}
}
// CreateQueryIndexOptions is the set of options available to the query indexes CreateIndex operation.
type CreateQueryIndexOptions struct {
	IgnoreIfExists bool // when true, an already-existing index is not an error
	Deferred       bool // create the index with {"defer_build": true}
	Timeout        time.Duration
	RetryStrategy  RetryStrategy
}
// CreateIndex creates an index over the specified fields.
func (qm *QueryIndexManager) CreateIndex(bucketName, indexName string, fields []string, opts *CreateQueryIndexOptions) error {
	if opts == nil {
		opts = &CreateQueryIndexOptions{}
	}

	// Validate inputs before starting any tracing.
	if indexName == "" {
		return invalidArgumentsError{
			message: "an invalid index name was specified",
		}
	}
	if len(fields) == 0 {
		return invalidArgumentsError{
			message: "you must specify at least one field to index",
		}
	}

	span := qm.tracer.StartSpan("CreateIndex", nil).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	return qm.createIndex(span.Context(), bucketName, indexName, fields, createQueryIndexOptions{
		IgnoreIfExists: opts.IgnoreIfExists,
		Deferred:       opts.Deferred,
		Timeout:        opts.Timeout,
		RetryStrategy:  opts.RetryStrategy,
	})
}
// CreatePrimaryQueryIndexOptions is the set of options available to the query indexes CreatePrimaryIndex operation.
type CreatePrimaryQueryIndexOptions struct {
	IgnoreIfExists bool   // when true, an already-existing index is not an error
	Deferred       bool   // create the index with {"defer_build": true}
	CustomName     string // optional name for the primary index; empty uses the server default
	Timeout        time.Duration
	RetryStrategy  RetryStrategy
}
// CreatePrimaryIndex creates a primary index. An empty customName uses the default naming.
func (qm *QueryIndexManager) CreatePrimaryIndex(bucketName string, opts *CreatePrimaryQueryIndexOptions) error {
	if opts == nil {
		opts = &CreatePrimaryQueryIndexOptions{}
	}

	span := qm.tracer.StartSpan("CreatePrimaryIndex", nil).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	// A nil fields slice makes createIndex emit CREATE PRIMARY INDEX.
	helperOpts := createQueryIndexOptions{
		IgnoreIfExists: opts.IgnoreIfExists,
		Deferred:       opts.Deferred,
		Timeout:        opts.Timeout,
		RetryStrategy:  opts.RetryStrategy,
	}
	return qm.createIndex(span.Context(), bucketName, opts.CustomName, nil, helperOpts)
}
// dropQueryIndexOptions carries the shared options for the internal
// dropIndex helper.
type dropQueryIndexOptions struct {
	IgnoreIfNotExists bool // when true, ErrIndexNotFound is swallowed
	Timeout           time.Duration
	RetryStrategy     RetryStrategy
}
// dropIndex builds and executes a DROP [PRIMARY] INDEX statement; an empty
// indexName drops the bucket's primary index.
func (qm *QueryIndexManager) dropIndex(
	tracectx requestSpanContext,
	bucketName, indexName string,
	opts dropQueryIndexOptions,
) error {
	qs := "DROP INDEX `" + bucketName + "`.`" + indexName + "`"
	if indexName == "" {
		qs = "DROP PRIMARY INDEX ON `" + bucketName + "`"
	}

	_, err := qm.doQuery(qs, &QueryOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    tracectx,
	})
	switch {
	case err == nil:
		return nil
	case opts.IgnoreIfNotExists && errors.Is(err, ErrIndexNotFound):
		return nil
	default:
		return err
	}
}
// DropQueryIndexOptions is the set of options available to the query indexes DropIndex operation.
type DropQueryIndexOptions struct {
	IgnoreIfNotExists bool // when true, a missing index is not an error
	Timeout           time.Duration
	RetryStrategy     RetryStrategy
}
// DropIndex drops a specific index by name.
func (qm *QueryIndexManager) DropIndex(bucketName, indexName string, opts *DropQueryIndexOptions) error {
	if opts == nil {
		opts = &DropQueryIndexOptions{}
	}

	if indexName == "" {
		return invalidArgumentsError{
			message: "an invalid index name was specified",
		}
	}

	span := qm.tracer.StartSpan("DropIndex", nil).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	helperOpts := dropQueryIndexOptions{
		IgnoreIfNotExists: opts.IgnoreIfNotExists,
		Timeout:           opts.Timeout,
		RetryStrategy:     opts.RetryStrategy,
	}
	return qm.dropIndex(span.Context(), bucketName, indexName, helperOpts)
}
// DropPrimaryQueryIndexOptions is the set of options available to the query indexes DropPrimaryIndex operation.
type DropPrimaryQueryIndexOptions struct {
	IgnoreIfNotExists bool   // when true, a missing index is not an error
	CustomName        string // name of the primary index; empty for an unnamed primary index
	Timeout           time.Duration
	RetryStrategy     RetryStrategy
}
// DropPrimaryIndex drops the primary index. Pass an empty customName for unnamed primary indexes.
func (qm *QueryIndexManager) DropPrimaryIndex(bucketName string, opts *DropPrimaryQueryIndexOptions) error {
	if opts == nil {
		opts = &DropPrimaryQueryIndexOptions{}
	}

	span := qm.tracer.StartSpan("DropPrimaryIndex", nil).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	helperOpts := dropQueryIndexOptions{
		IgnoreIfNotExists: opts.IgnoreIfNotExists,
		Timeout:           opts.Timeout,
		RetryStrategy:     opts.RetryStrategy,
	}
	return qm.dropIndex(span.Context(), bucketName, opts.CustomName, helperOpts)
}
// GetAllQueryIndexesOptions is the set of options available to the query indexes GetAllIndexes operation.
type GetAllQueryIndexesOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}
// GetAllIndexes returns a list of all currently registered indexes.
func (qm *QueryIndexManager) GetAllIndexes(bucketName string, opts *GetAllQueryIndexesOptions) ([]QueryIndex, error) {
	if opts == nil {
		opts = &GetAllQueryIndexesOptions{}
	}

	span := qm.tracer.StartSpan("GetAllIndexes", nil).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	return qm.getAllIndexes(span.Context(), bucketName, opts)
}
// getAllIndexes queries system:indexes for all GSI indexes of the named
// keyspace and converts each row into a QueryIndex.
func (qm *QueryIndexManager) getAllIndexes(
	tracectx requestSpanContext,
	bucketName string,
	opts *GetAllQueryIndexesOptions,
) ([]QueryIndex, error) {
	const q = "SELECT `indexes`.* FROM system:indexes WHERE keyspace_id=? AND `using`=\"gsi\""
	rows, err := qm.doQuery(q, &QueryOptions{
		PositionalParameters: []interface{}{bucketName},
		Readonly:             true,
		Timeout:              opts.Timeout,
		RetryStrategy:        opts.RetryStrategy,
		parentSpan:           tracectx,
	})
	if err != nil {
		return nil, err
	}

	var indexes []QueryIndex
	for _, raw := range rows {
		var decoded jsonQueryIndex
		if err := json.Unmarshal(raw, &decoded); err != nil {
			return nil, err
		}
		var idx QueryIndex
		if err := idx.fromData(decoded); err != nil {
			return nil, err
		}
		indexes = append(indexes, idx)
	}
	return indexes, nil
}
// BuildDeferredQueryIndexOptions is the set of options available to the query indexes BuildDeferredIndexes operation.
type BuildDeferredQueryIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}
// BuildDeferredIndexes builds all indexes which are currently in deferred state.
// It returns the names of the indexes a build was requested for; nil when no
// index was in a "deferred" or "pending" state.
func (qm *QueryIndexManager) BuildDeferredIndexes(bucketName string, opts *BuildDeferredQueryIndexOptions) ([]string, error) {
	if opts == nil {
		opts = &BuildDeferredQueryIndexOptions{}
	}

	span := qm.tracer.StartSpan("BuildDeferredIndexes", nil).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	indexList, err := qm.getAllIndexes(
		span.Context(),
		bucketName,
		&GetAllQueryIndexesOptions{
			Timeout:       opts.Timeout,
			RetryStrategy: opts.RetryStrategy,
		})
	if err != nil {
		return nil, err
	}

	var deferredList []string
	for _, index := range indexList {
		if index.State == "deferred" || index.State == "pending" {
			deferredList = append(deferredList, index.Name)
		}
	}

	if len(deferredList) == 0 {
		// Don't try to build an empty index list
		return nil, nil
	}

	qs := "BUILD INDEX ON `" + bucketName + "`(`" + strings.Join(deferredList, "`, `") + "`)"
	_, err = qm.doQuery(qs, &QueryOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		// Fix: pass the span's context rather than the span itself, consistent
		// with every other doQuery call site in this file, which all supply a
		// requestSpanContext via span.Context()/tracectx.
		parentSpan: span.Context(),
	})
	if err != nil {
		return nil, err
	}

	return deferredList, nil
}
// checkIndexesActive reports whether every index named in checkList is
// present in indexes and in the "online" state. A missing index yields
// ErrIndexNotFound.
func checkIndexesActive(indexes []QueryIndex, checkList []string) (bool, error) {
	var matched []QueryIndex
	for _, name := range checkList {
		for _, idx := range indexes {
			if idx.Name == name {
				matched = append(matched, idx)
				break
			}
		}
	}

	if len(matched) != len(checkList) {
		return false, ErrIndexNotFound
	}

	for _, idx := range matched {
		if idx.State != "online" {
			return false, nil
		}
	}
	return true, nil
}
// WatchQueryIndexOptions is the set of options available to the query indexes Watch operation.
type WatchQueryIndexOptions struct {
	WatchPrimary  bool // also watch the bucket's primary index ("#primary")
	RetryStrategy RetryStrategy
}
// WatchIndexes waits for a set of indexes to come online. It polls the
// index list with a growing interval (capped at one second) until every
// watched index reports state "online" or the timeout elapses, in which
// case ErrUnambiguousTimeout is returned.
func (qm *QueryIndexManager) WatchIndexes(bucketName string, watchList []string, timeout time.Duration, opts *WatchQueryIndexOptions) error {
	if opts == nil {
		opts = &WatchQueryIndexOptions{}
	}

	span := qm.tracer.StartSpan("WatchIndexes", nil).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	if opts.WatchPrimary {
		// Copy before appending so we never mutate the caller's backing array.
		watchList = append(append([]string(nil), watchList...), "#primary")
	}

	deadline := time.Now().Add(timeout)
	curInterval := 50 * time.Millisecond
	for {
		if deadline.Before(time.Now()) {
			return ErrUnambiguousTimeout
		}

		indexes, err := qm.getAllIndexes(
			span.Context(),
			bucketName,
			&GetAllQueryIndexesOptions{
				Timeout:       time.Until(deadline),
				RetryStrategy: opts.RetryStrategy,
			})
		if err != nil {
			return err
		}

		allOnline, err := checkIndexesActive(indexes, watchList)
		if err != nil {
			return err
		}
		if allOnline {
			break
		}

		// Back off up to a one-second poll interval. Fix: the cap must be
		// expressed in time.Duration units -- comparing against a bare 1000
		// (nanoseconds) clamped the interval to 1µs and busy-polled the server.
		curInterval += 500 * time.Millisecond
		if curInterval > 1000*time.Millisecond {
			curInterval = 1000 * time.Millisecond
		}

		// Make sure we don't sleep past our overall deadline, if we adjust the
		// deadline then it will be caught at the top of this loop as a timeout.
		sleepDeadline := time.Now().Add(curInterval)
		if sleepDeadline.After(deadline) {
			sleepDeadline = deadline
		}

		// wait till our next poll interval
		time.Sleep(time.Until(sleepDeadline))
	}

	return nil
}

View File

@ -0,0 +1,670 @@
package gocb
import (
"encoding/json"
"fmt"
"io/ioutil"
"strings"
"time"
"github.com/pkg/errors"
)
// jsonSearchIndexResp is the wire form of a single-index response from the
// search service management API.
type jsonSearchIndexResp struct {
	Status   string           `json:"status"`
	IndexDef *jsonSearchIndex `json:"indexDef"` // may be nil when no definition is returned
}

// jsonSearchIndexDefs is the wire form of the index-definitions section of
// an all-indexes response.
type jsonSearchIndexDefs struct {
	IndexDefs   map[string]jsonSearchIndex `json:"indexDefs"`
	ImplVersion string                     `json:"implVersion"`
}

// jsonSearchIndexesResp is the wire form of an all-indexes response from
// the search service management API.
type jsonSearchIndexesResp struct {
	Status    string              `json:"status"`
	IndexDefs jsonSearchIndexDefs `json:"indexDefs"`
}

// jsonSearchIndex is the wire form of a single search index definition.
type jsonSearchIndex struct {
	UUID         string                 `json:"uuid"`
	Name         string                 `json:"name"`
	SourceName   string                 `json:"sourceName"`
	Type         string                 `json:"type"`
	Params       map[string]interface{} `json:"params"`
	SourceUUID   string                 `json:"sourceUUID"`
	SourceParams map[string]interface{} `json:"sourceParams"`
	SourceType   string                 `json:"sourceType"`
	PlanParams   map[string]interface{} `json:"planParams"`
}
// SearchIndex is used to define a search index. It maps one-to-one onto the
// wire form jsonSearchIndex via toData/fromData.
type SearchIndex struct {
	// UUID is required for updates. It provides a means of ensuring consistency, the UUID must match the UUID value
	// for the index on the server.
	UUID string
	// Name represents the name of this index.
	Name string
	// SourceName is the name of the source of the data for the index e.g. bucket name.
	SourceName string
	// Type is the type of index, e.g. fulltext-index or fulltext-alias.
	Type string
	// IndexParams are index properties such as store type and mappings.
	Params map[string]interface{}
	// SourceUUID is the UUID of the data source, this can be used to more tightly tie the index to a source.
	SourceUUID string
	// SourceParams are extra parameters to be defined. These are usually things like advanced connection and tuning
	// parameters.
	SourceParams map[string]interface{}
	// SourceType is the type of the data source, e.g. couchbase or nil depending on the Type field.
	SourceType string
	// PlanParams are plan properties such as number of replicas and number of partitions.
	PlanParams map[string]interface{}
}
// fromData populates the index from its wire representation.
func (si *SearchIndex) fromData(data jsonSearchIndex) error {
	*si = SearchIndex{
		UUID:         data.UUID,
		Name:         data.Name,
		SourceName:   data.SourceName,
		Type:         data.Type,
		Params:       data.Params,
		SourceUUID:   data.SourceUUID,
		SourceParams: data.SourceParams,
		SourceType:   data.SourceType,
		PlanParams:   data.PlanParams,
	}
	return nil
}
// toData converts the index to its wire representation.
func (si *SearchIndex) toData() (jsonSearchIndex, error) {
	return jsonSearchIndex{
		UUID:         si.UUID,
		Name:         si.Name,
		SourceName:   si.SourceName,
		Type:         si.Type,
		Params:       si.Params,
		SourceUUID:   si.SourceUUID,
		SourceParams: si.SourceParams,
		SourceType:   si.SourceType,
		PlanParams:   si.PlanParams,
	}, nil
}
// SearchIndexManager provides methods for performing Couchbase search index management.
type SearchIndexManager struct {
	mgmtProvider mgmtProvider // executes the HTTP management requests issued by this manager
	tracer       requestTracer
}
// tryParseErrorMessage inspects a non-200 response body and maps known
// server messages onto ErrIndexNotFound / ErrIndexExists; unknown bodies
// become a generic error carrying the raw text. Returns nil when the body
// cannot be read (callers then fall back to a bad-status error).
func (sm *SearchIndexManager) tryParseErrorMessage(req *mgmtRequest, resp *mgmtResponse) error {
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		logDebugf("Failed to read search index response body: %s", err)
		return nil
	}

	lowered := strings.ToLower(string(b))
	var bodyErr error
	switch {
	case strings.Contains(lowered, "index not found"):
		bodyErr = ErrIndexNotFound
	case strings.Contains(lowered, "index with the same name already exists"):
		bodyErr = ErrIndexExists
	default:
		bodyErr = errors.New(string(b))
	}

	return makeGenericMgmtError(bodyErr, req, resp)
}
// doMgmtRequest forwards a management request to the underlying provider.
func (sm *SearchIndexManager) doMgmtRequest(req mgmtRequest) (*mgmtResponse, error) {
	response, reqErr := sm.mgmtProvider.executeMgmtRequest(req)
	if reqErr != nil {
		return nil, reqErr
	}
	return response, nil
}
// GetAllSearchIndexOptions is the set of options available to the search indexes GetAllIndexes operation.
type GetAllSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}
// GetAllIndexes retrieves all of the search indexes for the cluster.
func (sm *SearchIndexManager) GetAllIndexes(opts *GetAllSearchIndexOptions) ([]SearchIndex, error) {
	if opts == nil {
		opts = &GetAllSearchIndexOptions{}
	}

	span := sm.tracer.StartSpan("GetAllIndexes", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeSearch,
		Method:        "GET",
		Path:          "/api/index",
		IsIdempotent:  true,
		RetryStrategy: opts.RetryStrategy,
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := sm.doMgmtRequest(req)
	if err != nil {
		return nil, err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		if parseErr := sm.tryParseErrorMessage(&req, resp); parseErr != nil {
			return nil, parseErr
		}
		return nil, makeMgmtBadStatusError("failed to get index", &req, resp)
	}

	var decoded jsonSearchIndexesResp
	if err := json.NewDecoder(resp.Body).Decode(&decoded); err != nil {
		return nil, err
	}

	var indexes []SearchIndex
	for _, data := range decoded.IndexDefs.IndexDefs {
		var idx SearchIndex
		if err := idx.fromData(data); err != nil {
			return nil, err
		}
		indexes = append(indexes, idx)
	}
	return indexes, nil
}
// GetSearchIndexOptions is the set of options available to the search indexes GetIndex operation.
type GetSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}
// GetIndex retrieves a specific search index by name.
func (sm *SearchIndexManager) GetIndex(indexName string, opts *GetSearchIndexOptions) (*SearchIndex, error) {
	if opts == nil {
		opts = &GetSearchIndexOptions{}
	}

	span := sm.tracer.StartSpan("GetIndex", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeSearch,
		Method:        "GET",
		Path:          fmt.Sprintf("/api/index/%s", indexName),
		IsIdempotent:  true,
		RetryStrategy: opts.RetryStrategy,
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := sm.doMgmtRequest(req)
	if err != nil {
		return nil, err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		idxErr := sm.tryParseErrorMessage(&req, resp)
		if idxErr != nil {
			return nil, idxErr
		}
		return nil, makeMgmtBadStatusError("failed to get index", &req, resp)
	}

	var indexResp jsonSearchIndexResp
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&indexResp)
	if err != nil {
		return nil, err
	}

	// Fix: guard against a 200 response whose body carries no index
	// definition; dereferencing the nil pointer below would panic.
	if indexResp.IndexDef == nil {
		return nil, makeGenericMgmtError(ErrIndexNotFound, &req, resp)
	}

	var indexDef SearchIndex
	err = indexDef.fromData(*indexResp.IndexDef)
	if err != nil {
		return nil, err
	}

	return &indexDef, nil
}
// UpsertSearchIndexOptions is the set of options available to the search index manager UpsertIndex operation.
type UpsertSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}
// UpsertIndex creates or updates a search index.
func (sm *SearchIndexManager) UpsertIndex(indexDefinition SearchIndex, opts *UpsertSearchIndexOptions) error {
	if opts == nil {
		opts = &UpsertSearchIndexOptions{}
	}

	// Name and Type are mandatory for the PUT path and payload.
	switch {
	case indexDefinition.Name == "":
		return invalidArgumentsError{"index name cannot be empty"}
	case indexDefinition.Type == "":
		return invalidArgumentsError{"index type cannot be empty"}
	}

	span := sm.tracer.StartSpan("UpsertIndex", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	indexData, err := indexDefinition.toData()
	if err != nil {
		return err
	}
	payload, err := json.Marshal(indexData)
	if err != nil {
		return err
	}

	req := mgmtRequest{
		Service: ServiceTypeSearch,
		Method:  "PUT",
		Path:    fmt.Sprintf("/api/index/%s", indexDefinition.Name),
		Headers: map[string]string{
			"cache-control": "no-cache",
		},
		Body:          payload,
		RetryStrategy: opts.RetryStrategy,
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := sm.doMgmtRequest(req)
	if err != nil {
		return err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		if parseErr := sm.tryParseErrorMessage(&req, resp); parseErr != nil {
			return parseErr
		}
		return makeMgmtBadStatusError("failed to create index", &req, resp)
	}

	return nil
}
// DropSearchIndexOptions is the set of options available to the search index DropIndex operation.
type DropSearchIndexOptions struct {
	// Timeout bounds the operation; zero falls back to the client default.
	Timeout time.Duration
	// RetryStrategy overrides the default retry behaviour when non-nil.
	RetryStrategy RetryStrategy
}
// DropIndex removes the search index with the specific name.
func (sm *SearchIndexManager) DropIndex(indexName string, opts *DropSearchIndexOptions) error {
	if opts == nil {
		opts = &DropSearchIndexOptions{}
	}
	if indexName == "" {
		return invalidArgumentsError{"indexName cannot be empty"}
	}

	span := sm.tracer.StartSpan("DropIndex", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeSearch,
		Method:        "DELETE",
		Path:          fmt.Sprintf("/api/index/%s", indexName),
		RetryStrategy: opts.RetryStrategy,
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := sm.doMgmtRequest(req)
	if err != nil {
		return err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		// Consistent with the sibling index operations: surface a
		// server-provided error message when one can be parsed from the
		// response body, rather than only the generic bad-status error.
		idxErr := sm.tryParseErrorMessage(&req, resp)
		if idxErr != nil {
			return idxErr
		}
		return makeMgmtBadStatusError("failed to drop the index", &req, resp)
	}

	return nil
}
// AnalyzeDocumentOptions is the set of options available to the search index AnalyzeDocument operation.
type AnalyzeDocumentOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// AnalyzeDocument returns how a doc is analyzed against a specific index.
func (sm *SearchIndexManager) AnalyzeDocument(indexName string, doc interface{}, opts *AnalyzeDocumentOptions) ([]interface{}, error) {
	if opts == nil {
		opts = &AnalyzeDocumentOptions{}
	}
	if indexName == "" {
		return nil, invalidArgumentsError{"indexName cannot be empty"}
	}

	span := sm.tracer.StartSpan("AnalyzeDocument", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	// The document is marshalled verbatim as the request body.
	b, err := json.Marshal(doc)
	if err != nil {
		return nil, err
	}

	req := mgmtRequest{
		Service:       ServiceTypeSearch,
		Method:        "POST",
		Path:          fmt.Sprintf("/api/index/%s/analyzeDoc", indexName),
		Body:          b,
		IsIdempotent:  true,
		RetryStrategy: opts.RetryStrategy,
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := sm.doMgmtRequest(req)
	if err != nil {
		return nil, err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		idxErr := sm.tryParseErrorMessage(&req, resp)
		if idxErr != nil {
			return nil, idxErr
		}
		return nil, makeMgmtBadStatusError("failed to analyze document", &req, resp)
	}

	// Only the analysed terms are surfaced; the status field is decoded but unused.
	var analysis struct {
		Status   string        `json:"status"`
		Analyzed []interface{} `json:"analyzed"`
	}
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&analysis)
	if err != nil {
		return nil, err
	}

	return analysis.Analyzed, nil
}
// GetIndexedDocumentsCountOptions is the set of options available to the search index GetIndexedDocumentsCount operation.
type GetIndexedDocumentsCountOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// GetIndexedDocumentsCount retrieves the document count for a search index.
func (sm *SearchIndexManager) GetIndexedDocumentsCount(indexName string, opts *GetIndexedDocumentsCountOptions) (uint64, error) {
	if opts == nil {
		opts = &GetIndexedDocumentsCountOptions{}
	}
	if indexName == "" {
		return 0, invalidArgumentsError{"indexName cannot be empty"}
	}

	span := sm.tracer.StartSpan("GetIndexedDocumentsCount", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeSearch,
		Method:        "GET",
		Path:          fmt.Sprintf("/api/index/%s/count", indexName),
		IsIdempotent:  true,
		RetryStrategy: opts.RetryStrategy,
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := sm.doMgmtRequest(req)
	if err != nil {
		return 0, err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		idxErr := sm.tryParseErrorMessage(&req, resp)
		if idxErr != nil {
			return 0, idxErr
		}
		return 0, makeMgmtBadStatusError("failed to get the indexed documents count", &req, resp)
	}

	// The service wraps the count in a small JSON envelope.
	var count struct {
		Count uint64 `json:"count"`
	}
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&count)
	if err != nil {
		return 0, err
	}

	return count.Count, nil
}
// performControlRequest issues a search-service management request that has
// no response body of interest, mapping any non-200 status into an error.
func (sm *SearchIndexManager) performControlRequest(
	tracectx requestSpanContext,
	method, uri string,
	timeout time.Duration,
	retryStrategy RetryStrategy,
) error {
	req := mgmtRequest{
		Service:       ServiceTypeSearch,
		Method:        method,
		Path:          uri,
		IsIdempotent:  true,
		Timeout:       timeout,
		RetryStrategy: retryStrategy,
		parentSpan:    tracectx,
	}

	resp, err := sm.doMgmtRequest(req)
	if err != nil {
		return err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode == 200 {
		return nil
	}

	// Prefer a server-supplied message when one can be parsed from the body.
	if idxErr := sm.tryParseErrorMessage(&req, resp); idxErr != nil {
		return idxErr
	}
	return makeMgmtBadStatusError("failed to perform the control request", &req, resp)
}
// PauseIngestSearchIndexOptions is the set of options available to the search index PauseIngest operation.
type PauseIngestSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// PauseIngest pauses updates and maintenance for an index.
func (sm *SearchIndexManager) PauseIngest(indexName string, opts *PauseIngestSearchIndexOptions) error {
	if opts == nil {
		opts = &PauseIngestSearchIndexOptions{}
	}
	if indexName == "" {
		return invalidArgumentsError{"indexName cannot be empty"}
	}

	span := sm.tracer.StartSpan("PauseIngest", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	return sm.performControlRequest(
		span.Context(),
		"POST",
		fmt.Sprintf("/api/index/%s/ingestControl/pause", indexName),
		opts.Timeout,
		opts.RetryStrategy)
}

// ResumeIngestSearchIndexOptions is the set of options available to the search index ResumeIngest operation.
type ResumeIngestSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// ResumeIngest resumes updates and maintenance for an index.
func (sm *SearchIndexManager) ResumeIngest(indexName string, opts *ResumeIngestSearchIndexOptions) error {
	if opts == nil {
		opts = &ResumeIngestSearchIndexOptions{}
	}
	if indexName == "" {
		return invalidArgumentsError{"indexName cannot be empty"}
	}

	span := sm.tracer.StartSpan("ResumeIngest", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	return sm.performControlRequest(
		span.Context(),
		"POST",
		fmt.Sprintf("/api/index/%s/ingestControl/resume", indexName),
		opts.Timeout,
		opts.RetryStrategy)
}
// AllowQueryingSearchIndexOptions is the set of options available to the search index AllowQuerying operation.
type AllowQueryingSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// AllowQuerying allows querying against an index.
func (sm *SearchIndexManager) AllowQuerying(indexName string, opts *AllowQueryingSearchIndexOptions) error {
	if opts == nil {
		opts = &AllowQueryingSearchIndexOptions{}
	}
	if indexName == "" {
		return invalidArgumentsError{"indexName cannot be empty"}
	}

	span := sm.tracer.StartSpan("AllowQuerying", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	return sm.performControlRequest(
		span.Context(),
		"POST",
		fmt.Sprintf("/api/index/%s/queryControl/allow", indexName),
		opts.Timeout,
		opts.RetryStrategy)
}

// DisallowQueryingSearchIndexOptions is the set of options available to the search index DisallowQuerying operation.
// NOTE(review): this type is declared but DisallowQuerying below takes
// AllowQueryingSearchIndexOptions; the signature is kept for compatibility.
type DisallowQueryingSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// DisallowQuerying disallows querying against an index.
func (sm *SearchIndexManager) DisallowQuerying(indexName string, opts *AllowQueryingSearchIndexOptions) error {
	if opts == nil {
		opts = &AllowQueryingSearchIndexOptions{}
	}
	if indexName == "" {
		return invalidArgumentsError{"indexName cannot be empty"}
	}

	span := sm.tracer.StartSpan("DisallowQuerying", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	return sm.performControlRequest(
		span.Context(),
		"POST",
		fmt.Sprintf("/api/index/%s/queryControl/disallow", indexName),
		opts.Timeout,
		opts.RetryStrategy)
}
// FreezePlanSearchIndexOptions is the set of options available to the search index FreezePlan operation.
// NOTE(review): this type is declared but FreezePlan below takes
// AllowQueryingSearchIndexOptions; the signature is kept for compatibility.
type FreezePlanSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// FreezePlan freezes the assignment of index partitions to nodes.
func (sm *SearchIndexManager) FreezePlan(indexName string, opts *AllowQueryingSearchIndexOptions) error {
	if opts == nil {
		opts = &AllowQueryingSearchIndexOptions{}
	}
	if indexName == "" {
		return invalidArgumentsError{"indexName cannot be empty"}
	}

	span := sm.tracer.StartSpan("FreezePlan", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	return sm.performControlRequest(
		span.Context(),
		"POST",
		fmt.Sprintf("/api/index/%s/planFreezeControl/freeze", indexName),
		opts.Timeout,
		opts.RetryStrategy)
}

// UnfreezePlanSearchIndexOptions is the set of options available to the search index UnfreezePlan operation.
// NOTE(review): declared but unused by UnfreezePlan below; signature kept for compatibility.
type UnfreezePlanSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// UnfreezePlan unfreezes the assignment of index partitions to nodes.
func (sm *SearchIndexManager) UnfreezePlan(indexName string, opts *AllowQueryingSearchIndexOptions) error {
	if opts == nil {
		opts = &AllowQueryingSearchIndexOptions{}
	}
	if indexName == "" {
		return invalidArgumentsError{"indexName cannot be empty"}
	}

	span := sm.tracer.StartSpan("UnfreezePlan", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	return sm.performControlRequest(
		span.Context(),
		"POST",
		fmt.Sprintf("/api/index/%s/planFreezeControl/unfreeze", indexName),
		opts.Timeout,
		opts.RetryStrategy)
}

View File

@ -0,0 +1,342 @@
package gocb
import (
"encoding/json"
"time"
cbsearch "github.com/couchbase/gocb/v2/search"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// jsonRowLocation is the wire representation of a single term location hit.
type jsonRowLocation struct {
	Field          string   `json:"field"`
	Term           string   `json:"term"`
	Position       uint32   `json:"position"`
	Start          uint32   `json:"start"`
	End            uint32   `json:"end"`
	ArrayPositions []uint32 `json:"array_positions"`
}

// jsonSearchFacet is the wire representation of a facet result.
type jsonSearchFacet struct {
	Name    string `json:"name"`
	Field   string `json:"field"`
	Total   uint64 `json:"total"`
	Missing uint64 `json:"missing"`
	Other   uint64 `json:"other"`
}

// jsonSearchRowLocations maps field name -> term -> locations.
type jsonSearchRowLocations map[string]map[string][]jsonRowLocation

// jsonSearchRow is the wire representation of a single search hit.
type jsonSearchRow struct {
	Index       string                 `json:"index"`
	ID          string                 `json:"id"`
	Score       float64                `json:"score"`
	Explanation interface{}            `json:"explanation"`
	Locations   jsonSearchRowLocations `json:"locations"`
	Fragments   map[string][]string    `json:"fragments"`
	// Fields is kept raw so callers can decode into their own type later.
	Fields json.RawMessage `json:"fields"`
}

// jsonSearchResponse is the wire representation of the search meta-data block.
type jsonSearchResponse struct {
	Errors    map[string]string          `json:"errors"`
	TotalHits uint64                     `json:"total_hits"`
	MaxScore  float64                    `json:"max_score"`
	Took      uint64                     `json:"took"`
	Facets    map[string]jsonSearchFacet `json:"facets"`
}
// SearchMetrics encapsulates various metrics gathered during a search queries execution.
type SearchMetrics struct {
	Took                  time.Duration
	TotalRows             uint64
	MaxScore              float64
	TotalPartitionCount   uint64
	SuccessPartitionCount uint64
	ErrorPartitionCount   uint64
}

// fromData copies the wire-format metrics into the public type.
func (metrics *SearchMetrics) fromData(data jsonSearchResponse) error {
	metrics.TotalRows = data.TotalHits
	metrics.MaxScore = data.MaxScore
	// NOTE(review): the raw "took" value is interpreted as microseconds here —
	// confirm this matches the unit the search service actually reports.
	metrics.Took = time.Duration(data.Took) * time.Microsecond

	return nil
}
// SearchMetaData provides access to the meta-data properties of a search query result.
type SearchMetaData struct {
	Metrics SearchMetrics
	// Errors maps partition/pindex identifiers to error messages, as reported by the service.
	Errors map[string]string
}

// fromData populates the meta-data from the wire-format response.
func (meta *SearchMetaData) fromData(data jsonSearchResponse) error {
	metrics := SearchMetrics{}
	if err := metrics.fromData(data); err != nil {
		return err
	}

	meta.Metrics = metrics
	meta.Errors = data.Errors

	return nil
}
// SearchFacetResult provides access to the result of a faceted query.
type SearchFacetResult struct {
	Name    string
	Field   string
	Total   uint64
	Missing uint64
	Other   uint64
}

// fromData copies the wire-format facet into the public type.
func (fr *SearchFacetResult) fromData(data jsonSearchFacet) error {
	fr.Name = data.Name
	fr.Field = data.Field
	fr.Total = data.Total
	fr.Missing = data.Missing
	fr.Other = data.Other

	return nil
}
// SearchRowLocation represents the location of a row match
type SearchRowLocation struct {
	Position       uint32
	Start          uint32
	End            uint32
	ArrayPositions []uint32
}

// fromData copies the wire-format location into the public type.
func (rl *SearchRowLocation) fromData(data jsonRowLocation) error {
	rl.Position = data.Position
	rl.Start = data.Start
	rl.End = data.End
	rl.ArrayPositions = data.ArrayPositions

	return nil
}
// SearchRow represents a single hit returned from a search query.
type SearchRow struct {
	Index       string
	ID          string
	Score       float64
	Explanation interface{}
	Locations   map[string]map[string][]SearchRowLocation
	Fragments   map[string][]string
	// fieldsBytes holds the undecoded "fields" JSON; see Fields below.
	fieldsBytes []byte
}

// Fields decodes the fields included in a search hit.
func (sr *SearchRow) Fields(valuePtr interface{}) error {
	return json.Unmarshal(sr.fieldsBytes, valuePtr)
}

// searchRowReader abstracts the streaming source of search rows and meta-data.
type searchRowReader interface {
	NextRow() []byte
	Err() error
	MetaData() ([]byte, error)
	Close() error
}
// SearchResult allows access to the results of a search query.
type SearchResult struct {
	reader searchRowReader
	// currentRow is overwritten on each successful Next call.
	currentRow SearchRow
}

// newSearchResult wraps a row reader in a public SearchResult.
func newSearchResult(reader searchRowReader) *SearchResult {
	return &SearchResult{
		reader: reader,
	}
}
// Next assigns the next result from the results into the value pointer, returning whether the read was successful.
func (r *SearchResult) Next() bool {
	rowBytes := r.reader.NextRow()
	if rowBytes == nil {
		return false
	}

	r.currentRow = SearchRow{}

	// NOTE: an unmarshal failure is deliberately swallowed here — Next still
	// returns true with a zero-value row; stream-level errors surface via Err.
	var rowData jsonSearchRow
	if err := json.Unmarshal(rowBytes, &rowData); err == nil {
		r.currentRow.Index = rowData.Index
		r.currentRow.ID = rowData.ID
		r.currentRow.Score = rowData.Score
		r.currentRow.Explanation = rowData.Explanation
		r.currentRow.Fragments = rowData.Fragments
		r.currentRow.fieldsBytes = rowData.Fields

		// Convert the nested wire-format locations field -> term -> hits.
		locations := make(map[string]map[string][]SearchRowLocation)
		for fieldName, fieldData := range rowData.Locations {
			terms := make(map[string][]SearchRowLocation)
			for termName, termData := range fieldData {
				locations := make([]SearchRowLocation, len(termData))
				for locIdx, locData := range termData {
					err := locations[locIdx].fromData(locData)
					if err != nil {
						// Best-effort: a bad location entry is logged, not fatal.
						logWarnf("failed to parse search query location data: %s", err)
					}
				}
				terms[termName] = locations
			}
			locations[fieldName] = terms
		}
		r.currentRow.Locations = locations
	}

	return true
}
// Row returns the contents of the current row.
func (r *SearchResult) Row() SearchRow {
	return r.currentRow
}

// Err returns any errors that have occurred on the stream
func (r *SearchResult) Err() error {
	return r.reader.Err()
}

// Close marks the results as closed, returning any errors that occurred during reading the results.
func (r *SearchResult) Close() error {
	return r.reader.Close()
}

// getJSONResp fetches and decodes the stream's meta-data block.
func (r *SearchResult) getJSONResp() (jsonSearchResponse, error) {
	metaDataBytes, err := r.reader.MetaData()
	if err != nil {
		return jsonSearchResponse{}, err
	}

	var jsonResp jsonSearchResponse
	err = json.Unmarshal(metaDataBytes, &jsonResp)
	if err != nil {
		return jsonSearchResponse{}, err
	}

	return jsonResp, nil
}
// MetaData returns any meta-data that was available from this query. Note that
// the meta-data will only be available once the object has been closed (either
// implicitly or explicitly).
func (r *SearchResult) MetaData() (*SearchMetaData, error) {
	jsonResp, err := r.getJSONResp()
	if err != nil {
		return nil, err
	}

	var metaData SearchMetaData
	err = metaData.fromData(jsonResp)
	if err != nil {
		return nil, err
	}

	return &metaData, nil
}

// Facets returns any facets that were returned with this query. Note that the
// facets will only be available once the object has been closed (either
// implicitly or explicitly).
func (r *SearchResult) Facets() (map[string]SearchFacetResult, error) {
	jsonResp, err := r.getJSONResp()
	if err != nil {
		return nil, err
	}

	facets := make(map[string]SearchFacetResult)
	for facetName, facetData := range jsonResp.Facets {
		var facet SearchFacetResult
		err := facet.fromData(facetData)
		if err != nil {
			return nil, err
		}
		facets[facetName] = facet
	}

	return facets, nil
}
// SearchQuery executes the search query against the given index on the server.
// (The previous comment said "analytics query statement" — a copy/paste slip;
// this method belongs to the search service.)
func (c *Cluster) SearchQuery(indexName string, query cbsearch.Query, opts *SearchOptions) (*SearchResult, error) {
	if opts == nil {
		opts = &SearchOptions{}
	}

	span := c.tracer.StartSpan("SearchQuery", opts.parentSpan).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	// Fall back to the cluster-wide search timeout when none is specified.
	timeout := opts.Timeout
	if timeout == 0 {
		timeout = c.timeoutsConfig.SearchTimeout
	}
	deadline := time.Now().Add(timeout)

	retryStrategy := c.retryStrategyWrapper
	if opts.RetryStrategy != nil {
		retryStrategy = newRetryStrategyWrapper(opts.RetryStrategy)
	}

	searchOpts, err := opts.toMap()
	if err != nil {
		return nil, SearchError{
			InnerError: wrapError(err, "failed to generate query options"),
			Query:      query,
		}
	}

	searchOpts["query"] = query

	return c.execSearchQuery(span, indexName, searchOpts, deadline, retryStrategy)
}
// maybeGetSearchOptionQuery pulls the "query" entry out of a search options
// map for error reporting, yielding an empty string when none has been set.
func maybeGetSearchOptionQuery(options map[string]interface{}) interface{} {
	value, ok := options["query"]
	if !ok {
		return ""
	}
	return value
}
// execSearchQuery marshals the prepared options map and dispatches the search
// request to the core search provider, wrapping failures in SearchError.
func (c *Cluster) execSearchQuery(
	span requestSpan,
	indexName string,
	options map[string]interface{},
	deadline time.Time,
	retryStrategy *retryStrategyWrapper,
) (*SearchResult, error) {
	provider, err := c.getSearchProvider()
	if err != nil {
		return nil, SearchError{
			InnerError: wrapError(err, "failed to get query provider"),
			Query:      maybeGetSearchOptionQuery(options),
		}
	}

	reqBytes, err := json.Marshal(options)
	if err != nil {
		return nil, SearchError{
			InnerError: wrapError(err, "failed to marshall query body"),
			Query:      maybeGetSearchOptionQuery(options),
		}
	}

	res, err := provider.SearchQuery(gocbcore.SearchQueryOptions{
		IndexName:     indexName,
		Payload:       reqBytes,
		RetryStrategy: retryStrategy,
		Deadline:      deadline,
		TraceContext:  span.Context(),
	})
	if err != nil {
		return nil, maybeEnhanceSearchError(err)
	}

	return newSearchResult(res), nil
}

792
vendor/github.com/couchbase/gocb/v2/cluster_usermgr.go generated vendored Normal file
View File

@ -0,0 +1,792 @@
package gocb
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"strings"
"time"
"github.com/google/uuid"
"github.com/pkg/errors"
)
// AuthDomain specifies the user domain of a specific user
type AuthDomain string

const (
	// LocalDomain specifies users that are locally stored in Couchbase.
	LocalDomain AuthDomain = "local"

	// ExternalDomain specifies users that are externally stored
	// (in LDAP for instance).
	ExternalDomain AuthDomain = "external"
)

// jsonOrigin is the wire representation of a role origin.
type jsonOrigin struct {
	Type string `json:"type"`
	Name string `json:"name"`
}

// jsonRole is the wire representation of a role assignment.
type jsonRole struct {
	RoleName   string `json:"role"`
	BucketName string `json:"bucket_name"`
}

// jsonRoleDescription extends jsonRole with display information.
type jsonRoleDescription struct {
	jsonRole
	Name        string `json:"name"`
	Description string `json:"desc"`
}

// jsonRoleOrigins extends jsonRole with the origins of the assignment.
type jsonRoleOrigins struct {
	jsonRole
	Origins []jsonOrigin
}

// jsonUserMetadata is the wire representation of a user record.
type jsonUserMetadata struct {
	ID              string            `json:"id"`
	Name            string            `json:"name"`
	Roles           []jsonRoleOrigins `json:"roles"`
	Groups          []string          `json:"groups"`
	Domain          AuthDomain        `json:"domain"`
	ExternalGroups  []string          `json:"external_groups"`
	PasswordChanged time.Time         `json:"password_change_date"`
}

// jsonGroup is the wire representation of a user group.
type jsonGroup struct {
	Name               string     `json:"id"`
	Description        string     `json:"description"`
	Roles              []jsonRole `json:"roles"`
	LDAPGroupReference string     `json:"ldap_group_ref"`
}
// Role represents a specific permission.
type Role struct {
	Name   string `json:"role"`
	Bucket string `json:"bucket_name"`
}

// fromData copies the wire-format role into the public type.
func (ro *Role) fromData(data jsonRole) error {
	ro.Name = data.RoleName
	ro.Bucket = data.BucketName

	return nil
}
// RoleAndDescription represents a role with its display name and description.
type RoleAndDescription struct {
	Role
	DisplayName string
	Description string
}

// fromData populates the embedded Role and display information from the wire format.
func (rd *RoleAndDescription) fromData(data jsonRoleDescription) error {
	err := rd.Role.fromData(data.jsonRole)
	if err != nil {
		return err
	}

	rd.DisplayName = data.Name
	rd.Description = data.Description

	return nil
}
// Origin indicates why a user has a specific role. Is the Origin Type is "user" then the role is assigned
// directly to the user. If the type is "group" then it means that the role has been inherited from the group
// identified by the Name field.
type Origin struct {
	Type string
	Name string
}

// fromData copies the wire-format origin into the public type.
func (o *Origin) fromData(data jsonOrigin) error {
	o.Type = data.Type
	o.Name = data.Name

	return nil
}
// RoleAndOrigins associates a role with its origins.
type RoleAndOrigins struct {
	Role
	Origins []Origin
}

// fromData populates the embedded Role and its origins from the wire format.
func (ro *RoleAndOrigins) fromData(data jsonRoleOrigins) error {
	err := ro.Role.fromData(data.jsonRole)
	if err != nil {
		return err
	}

	// Allocate with zero length and full capacity: the previous code used
	// make([]Origin, len(data.Origins)) and then appended, which produced a
	// slice with a prefix of len(data.Origins) zero-value Origin entries.
	origins := make([]Origin, 0, len(data.Origins))
	for _, originData := range data.Origins {
		var origin Origin
		err := origin.fromData(originData)
		if err != nil {
			return err
		}
		origins = append(origins, origin)
	}
	ro.Origins = origins

	return nil
}
// User represents a user which was retrieved from the server.
type User struct {
	Username    string
	DisplayName string
	// Roles are the roles assigned to the user that are of type "user".
	Roles  []Role
	Groups []string
	// Password is write-only: it is sent on upsert and never returned by the server.
	Password string
}

// UserAndMetadata represents a user and user meta-data from the server.
type UserAndMetadata struct {
	User
	Domain AuthDomain
	// EffectiveRoles are all of the user's roles and the origins.
	EffectiveRoles  []RoleAndOrigins
	ExternalGroups  []string
	PasswordChanged time.Time
}
// fromData populates the user and its meta-data from the wire format.
// EffectiveRoles receives every role; User.Roles receives only those assigned
// directly to the user (no origins reported, or an origin of type "user").
func (um *UserAndMetadata) fromData(data jsonUserMetadata) error {
	um.User.Username = data.ID
	um.User.DisplayName = data.Name
	um.User.Groups = data.Groups
	um.ExternalGroups = data.ExternalGroups
	um.Domain = data.Domain
	um.PasswordChanged = data.PasswordChanged

	var roles []Role
	var effectiveRoles []RoleAndOrigins
	for _, roleData := range data.Roles {
		var effectiveRole RoleAndOrigins
		err := effectiveRole.fromData(roleData)
		if err != nil {
			return err
		}
		effectiveRoles = append(effectiveRoles, effectiveRole)

		role := effectiveRole.Role
		if roleData.Origins == nil {
			// No origin information at all implies a direct assignment.
			roles = append(roles, role)
		} else {
			for _, origin := range effectiveRole.Origins {
				if origin.Type == "user" {
					roles = append(roles, role)
					break
				}
			}
		}
	}
	um.EffectiveRoles = effectiveRoles
	um.User.Roles = roles

	return nil
}
// Group represents a user group on the server.
type Group struct {
	Name               string
	Description        string
	Roles              []Role
	LDAPGroupReference string
}

// fromData populates the group from the wire format.
func (g *Group) fromData(data jsonGroup) error {
	g.Name = data.Name
	g.Description = data.Description
	g.LDAPGroupReference = data.LDAPGroupReference

	roles := make([]Role, len(data.Roles))
	for roleIdx, roleData := range data.Roles {
		err := roles[roleIdx].fromData(roleData)
		if err != nil {
			return err
		}
	}
	g.Roles = roles

	return nil
}
// UserManager provides methods for performing Couchbase user management.
type UserManager struct {
	provider mgmtProvider
	tracer   requestTracer
}

// tryParseErrorMessage maps a failed management response body onto a known
// sentinel error (user/group not found) where possible, falling back to the
// raw body text. Returns nil if the body cannot be read.
func (um *UserManager) tryParseErrorMessage(req *mgmtRequest, resp *mgmtResponse) error {
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// The previous message said "search index response body" - a
		// copy/paste slip from the search index manager.
		logDebugf("Failed to read user management response body: %s", err)
		return nil
	}

	var bodyErr error
	if resp.StatusCode == 404 {
		// Lowercase once instead of on every comparison.
		body := strings.ToLower(string(b))
		switch {
		case strings.Contains(body, "unknown user"),
			strings.Contains(body, "user was not found"):
			bodyErr = ErrUserNotFound
		case strings.Contains(body, "group was not found"),
			strings.Contains(body, "unknown group"):
			bodyErr = ErrGroupNotFound
		default:
			bodyErr = errors.New(string(b))
		}
	} else {
		bodyErr = errors.New(string(b))
	}

	return makeGenericMgmtError(bodyErr, req, resp)
}
// GetAllUsersOptions is the set of options available to the user manager GetAll operation.
type GetAllUsersOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
	// DomainName selects the auth domain to list; defaults to "local".
	DomainName string
}

// GetAllUsers returns a list of all the users from the cluster.
func (um *UserManager) GetAllUsers(opts *GetAllUsersOptions) ([]UserAndMetadata, error) {
	if opts == nil {
		opts = &GetAllUsersOptions{}
	}

	span := um.tracer.StartSpan("GetAllUsers", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	if opts.DomainName == "" {
		opts.DomainName = string(LocalDomain)
	}

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Method:        "GET",
		Path:          fmt.Sprintf("/settings/rbac/users/%s", opts.DomainName),
		IsIdempotent:  true,
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := um.provider.executeMgmtRequest(req)
	if err != nil {
		return nil, makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		// Prefer a server-provided message when one can be parsed.
		usrErr := um.tryParseErrorMessage(&req, resp)
		if usrErr != nil {
			return nil, usrErr
		}
		return nil, makeMgmtBadStatusError("failed to get users", &req, resp)
	}

	var usersData []jsonUserMetadata
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&usersData)
	if err != nil {
		return nil, err
	}

	users := make([]UserAndMetadata, len(usersData))
	for userIdx, userData := range usersData {
		err := users[userIdx].fromData(userData)
		if err != nil {
			return nil, err
		}
	}

	return users, nil
}
// GetUserOptions is the set of options available to the user manager Get operation.
type GetUserOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
	// DomainName selects the auth domain; defaults to "local".
	DomainName string
}

// GetUser returns the data for a particular user
func (um *UserManager) GetUser(name string, opts *GetUserOptions) (*UserAndMetadata, error) {
	if opts == nil {
		opts = &GetUserOptions{}
	}

	span := um.tracer.StartSpan("GetUser", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	if opts.DomainName == "" {
		opts.DomainName = string(LocalDomain)
	}

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Method:        "GET",
		Path:          fmt.Sprintf("/settings/rbac/users/%s/%s", opts.DomainName, name),
		IsIdempotent:  true,
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := um.provider.executeMgmtRequest(req)
	if err != nil {
		return nil, makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		usrErr := um.tryParseErrorMessage(&req, resp)
		if usrErr != nil {
			return nil, usrErr
		}
		return nil, makeMgmtBadStatusError("failed to get user", &req, resp)
	}

	var userData jsonUserMetadata
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&userData)
	if err != nil {
		return nil, err
	}

	var user UserAndMetadata
	err = user.fromData(userData)
	if err != nil {
		return nil, err
	}

	return &user, nil
}
// UpsertUserOptions is the set of options available to the user manager Upsert operation.
type UpsertUserOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
	// DomainName selects the auth domain; defaults to "local".
	DomainName string
}

// UpsertUser updates a built-in RBAC user on the cluster.
func (um *UserManager) UpsertUser(user User, opts *UpsertUserOptions) error {
	if opts == nil {
		opts = &UpsertUserOptions{}
	}

	span := um.tracer.StartSpan("UpsertUser", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	if opts.DomainName == "" {
		opts.DomainName = string(LocalDomain)
	}

	// Roles are encoded as "name" or "name[bucket]" per the RBAC REST format.
	var reqRoleStrs []string
	for _, roleData := range user.Roles {
		if roleData.Bucket == "" {
			reqRoleStrs = append(reqRoleStrs, roleData.Name)
		} else {
			reqRoleStrs = append(reqRoleStrs, fmt.Sprintf("%s[%s]", roleData.Name, roleData.Bucket))
		}
	}

	reqForm := make(url.Values)
	reqForm.Add("name", user.DisplayName)
	// Password is only sent when set, so upserts can leave it unchanged.
	if user.Password != "" {
		reqForm.Add("password", user.Password)
	}
	if len(user.Groups) > 0 {
		reqForm.Add("groups", strings.Join(user.Groups, ","))
	}
	reqForm.Add("roles", strings.Join(reqRoleStrs, ","))

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Method:        "PUT",
		Path:          fmt.Sprintf("/settings/rbac/users/%s/%s", opts.DomainName, user.Username),
		Body:          []byte(reqForm.Encode()),
		ContentType:   "application/x-www-form-urlencoded",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := um.provider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		usrErr := um.tryParseErrorMessage(&req, resp)
		if usrErr != nil {
			return usrErr
		}
		return makeMgmtBadStatusError("failed to upsert user", &req, resp)
	}

	return nil
}
// DropUserOptions is the set of options available to the user manager Drop operation.
type DropUserOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
	// DomainName selects the auth domain; defaults to "local".
	DomainName string
}

// DropUser removes a built-in RBAC user on the cluster.
func (um *UserManager) DropUser(name string, opts *DropUserOptions) error {
	if opts == nil {
		opts = &DropUserOptions{}
	}

	span := um.tracer.StartSpan("DropUser", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	if opts.DomainName == "" {
		opts.DomainName = string(LocalDomain)
	}

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Method:        "DELETE",
		Path:          fmt.Sprintf("/settings/rbac/users/%s/%s", opts.DomainName, name),
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := um.provider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		usrErr := um.tryParseErrorMessage(&req, resp)
		if usrErr != nil {
			return usrErr
		}
		return makeMgmtBadStatusError("failed to drop user", &req, resp)
	}

	return nil
}
// GetRolesOptions is the set of options available to the user manager GetRoles operation.
type GetRolesOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// GetRoles lists the roles supported by the cluster.
func (um *UserManager) GetRoles(opts *GetRolesOptions) ([]RoleAndDescription, error) {
	if opts == nil {
		opts = &GetRolesOptions{}
	}

	span := um.tracer.StartSpan("GetRoles", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Method:        "GET",
		Path:          "/settings/rbac/roles",
		RetryStrategy: opts.RetryStrategy,
		IsIdempotent:  true,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := um.provider.executeMgmtRequest(req)
	if err != nil {
		return nil, makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		usrErr := um.tryParseErrorMessage(&req, resp)
		if usrErr != nil {
			return nil, usrErr
		}
		return nil, makeMgmtBadStatusError("failed to get roles", &req, resp)
	}

	var roleDatas []jsonRoleDescription
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&roleDatas)
	if err != nil {
		return nil, err
	}

	roles := make([]RoleAndDescription, len(roleDatas))
	for roleIdx, roleData := range roleDatas {
		err := roles[roleIdx].fromData(roleData)
		if err != nil {
			return nil, err
		}
	}

	return roles, nil
}
// GetGroupOptions is the set of options available to the group manager Get operation.
type GetGroupOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// GetGroup fetches a single group from the server.
func (um *UserManager) GetGroup(groupName string, opts *GetGroupOptions) (*Group, error) {
	if groupName == "" {
		return nil, makeInvalidArgumentsError("groupName cannot be empty")
	}
	if opts == nil {
		opts = &GetGroupOptions{}
	}

	span := um.tracer.StartSpan("GetGroup", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Method:        "GET",
		Path:          fmt.Sprintf("/settings/rbac/groups/%s", groupName),
		RetryStrategy: opts.RetryStrategy,
		IsIdempotent:  true,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := um.provider.executeMgmtRequest(req)
	if err != nil {
		return nil, makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		usrErr := um.tryParseErrorMessage(&req, resp)
		if usrErr != nil {
			return nil, usrErr
		}
		return nil, makeMgmtBadStatusError("failed to get group", &req, resp)
	}

	var groupData jsonGroup
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&groupData)
	if err != nil {
		return nil, err
	}

	var group Group
	err = group.fromData(groupData)
	if err != nil {
		return nil, err
	}

	return &group, nil
}
// GetAllGroupsOptions is the set of options available to the group manager GetAll operation.
type GetAllGroupsOptions struct {
	Timeout time.Duration
	RetryStrategy RetryStrategy
}

// GetAllGroups fetches all groups from the server.
func (um *UserManager) GetAllGroups(opts *GetAllGroupsOptions) ([]Group, error) {
	if opts == nil {
		opts = &GetAllGroupsOptions{}
	}
	span := um.tracer.StartSpan("GetAllGroups", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()
	// Read-only GET, so the request is safe to retry (IsIdempotent).
	req := mgmtRequest{
		Service: ServiceTypeManagement,
		Method: "GET",
		Path: "/settings/rbac/groups",
		RetryStrategy: opts.RetryStrategy,
		IsIdempotent: true,
		UniqueID: uuid.New().String(),
		Timeout: opts.Timeout,
		parentSpan: span.Context(),
	}
	resp, err := um.provider.executeMgmtRequest(req)
	if err != nil {
		return nil, makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		// Prefer the server-supplied error message when one can be parsed.
		usrErr := um.tryParseErrorMessage(&req, resp)
		if usrErr != nil {
			return nil, usrErr
		}
		return nil, makeMgmtBadStatusError("failed to get all groups", &req, resp)
	}
	var groupDatas []jsonGroup
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&groupDatas)
	if err != nil {
		return nil, err
	}
	// Convert each wire-format entry into the public Group type.
	groups := make([]Group, len(groupDatas))
	for groupIdx, groupData := range groupDatas {
		err = groups[groupIdx].fromData(groupData)
		if err != nil {
			return nil, err
		}
	}
	return groups, nil
}
// UpsertGroupOptions is the set of options available to the group manager Upsert operation.
type UpsertGroupOptions struct {
	Timeout time.Duration
	RetryStrategy RetryStrategy
}

// UpsertGroup creates, or updates, a group on the server.
func (um *UserManager) UpsertGroup(group Group, opts *UpsertGroupOptions) error {
	if group.Name == "" {
		return makeInvalidArgumentsError("group name cannot be empty")
	}
	if opts == nil {
		opts = &UpsertGroupOptions{}
	}
	span := um.tracer.StartSpan("UpsertGroup", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()
	// Serialize roles into the server's string form: "name" for cluster-wide
	// roles, "name[bucket]" for bucket-scoped ones, comma-joined below.
	var reqRoleStrs []string
	for _, roleData := range group.Roles {
		if roleData.Bucket == "" {
			reqRoleStrs = append(reqRoleStrs, roleData.Name)
		} else {
			reqRoleStrs = append(reqRoleStrs, fmt.Sprintf("%s[%s]", roleData.Name, roleData.Bucket))
		}
	}
	// The management endpoint expects a form-encoded body, not JSON.
	reqForm := make(url.Values)
	reqForm.Add("description", group.Description)
	reqForm.Add("ldap_group_ref", group.LDAPGroupReference)
	reqForm.Add("roles", strings.Join(reqRoleStrs, ","))
	req := mgmtRequest{
		Service: ServiceTypeManagement,
		Method: "PUT",
		Path: fmt.Sprintf("/settings/rbac/groups/%s", group.Name),
		Body: []byte(reqForm.Encode()),
		ContentType: "application/x-www-form-urlencoded",
		RetryStrategy: opts.RetryStrategy,
		UniqueID: uuid.New().String(),
		Timeout: opts.Timeout,
		parentSpan: span.Context(),
	}
	resp, err := um.provider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		// Prefer the server-supplied error message when one can be parsed.
		usrErr := um.tryParseErrorMessage(&req, resp)
		if usrErr != nil {
			return usrErr
		}
		return makeMgmtBadStatusError("failed to upsert group", &req, resp)
	}
	return nil
}
// DropGroupOptions is the set of options available to the group manager Drop operation.
type DropGroupOptions struct {
	Timeout time.Duration
	RetryStrategy RetryStrategy
}

// DropGroup removes a group from the server.
func (um *UserManager) DropGroup(groupName string, opts *DropGroupOptions) error {
	if groupName == "" {
		return makeInvalidArgumentsError("groupName cannot be empty")
	}
	if opts == nil {
		opts = &DropGroupOptions{}
	}
	span := um.tracer.StartSpan("DropGroup", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()
	// Mutating DELETE; deliberately not marked IsIdempotent, so it is not
	// auto-retried the way the read-only operations above are.
	req := mgmtRequest{
		Service: ServiceTypeManagement,
		Method: "DELETE",
		Path: fmt.Sprintf("/settings/rbac/groups/%s", groupName),
		RetryStrategy: opts.RetryStrategy,
		UniqueID: uuid.New().String(),
		Timeout: opts.Timeout,
		parentSpan: span.Context(),
	}
	resp, err := um.provider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		// Prefer the server-supplied error message when one can be parsed.
		usrErr := um.tryParseErrorMessage(&req, resp)
		if usrErr != nil {
			return usrErr
		}
		return makeMgmtBadStatusError("failed to drop group", &req, resp)
	}
	return nil
}

75
vendor/github.com/couchbase/gocb/v2/collection.go generated vendored Normal file
View File

@ -0,0 +1,75 @@
package gocb
import "time"
// kvTimeoutsConfig carries the KV timeouts a Collection inherits from its scope.
type kvTimeoutsConfig struct {
	KVTimeout time.Duration // default timeout for ordinary KV operations
	KVDurableTimeout time.Duration // default timeout for durable KV operations
}
// Collection represents a single collection.
type Collection struct {
	collectionName string
	scope string // scope name (not a *Scope); the parent is reachable via bucket
	bucket *Bucket
	timeoutsConfig kvTimeoutsConfig
	transcoder Transcoder
	retryStrategyWrapper *retryStrategyWrapper
	tracer requestTracer
	useMutationTokens bool
	getKvProvider func() (kvProvider, error)
}

// newCollection builds a Collection bound to the given scope, inheriting all
// of the scope's configuration (timeouts, transcoder, retries, tracing).
func newCollection(scope *Scope, collectionName string) *Collection {
	return &Collection{
		collectionName: collectionName,
		scope: scope.Name(),
		bucket: scope.bucket,
		timeoutsConfig: scope.timeoutsConfig,
		transcoder: scope.transcoder,
		retryStrategyWrapper: scope.retryStrategyWrapper,
		tracer: scope.tracer,
		useMutationTokens: scope.useMutationTokens,
		getKvProvider: scope.getKvProvider,
	}
}

// name is the unexported accessor for the collection name, used internally
// by the KV operation plumbing.
func (c *Collection) name() string {
	return c.collectionName
}

// ScopeName returns the name of the scope to which this collection belongs.
// UNCOMMITTED: This API may change in the future.
func (c *Collection) ScopeName() string {
	return c.scope
}

// Bucket returns the bucket to which this collection belongs.
// UNCOMMITTED: This API may change in the future.
func (c *Collection) Bucket() *Bucket {
	return c.bucket
}

// Name returns the name of the collection.
func (c *Collection) Name() string {
	return c.collectionName
}

// startKvOpTrace opens a tracing span for a KV operation, tagged with the
// bucket, collection and service.
func (c *Collection) startKvOpTrace(operationName string, tracectx requestSpanContext) requestSpan {
	// NOTE(review): this tags the *Bucket value itself rather than
	// c.bucket.Name(); confirm the tracer stringifies it as intended.
	return c.tracer.StartSpan(operationName, tracectx).
		SetTag("couchbase.bucket", c.bucket).
		SetTag("couchbase.collection", c.collectionName).
		SetTag("couchbase.service", "kv")
}

// bucketName returns the name of the owning bucket.
func (c *Collection) bucketName() string {
	return c.bucket.Name()
}

View File

@ -0,0 +1,312 @@
package gocb
import (
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// BinaryCollection is a set of binary operations.
// It is a thin facade: each method delegates to the corresponding
// binary* helper on the wrapped Collection.
type BinaryCollection struct {
	collection *Collection
}
// AppendOptions are the options available to the Append operation.
type AppendOptions struct {
	Timeout time.Duration
	DurabilityLevel DurabilityLevel
	PersistTo uint
	ReplicateTo uint
	Cas Cas // optional CAS, forwarded to gocbcore for a conditional append
	RetryStrategy RetryStrategy
}

// binaryAppend appends raw bytes to an existing document. The named results
// are written from inside the async callback below.
func (c *Collection) binaryAppend(id string, val []byte, opts *AppendOptions) (mutOut *MutationResult, errOut error) {
	if opts == nil {
		opts = &AppendOptions{}
	}
	// The op manager carries the per-op settings (document ID, durability,
	// retry strategy, timeout) and owns the tracing span; Finish is deferred
	// so the span always closes.
	opm := c.newKvOpManager("Append", nil)
	defer opm.Finish()
	opm.SetDocumentID(id)
	opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel)
	opm.SetRetryStrategy(opts.RetryStrategy)
	opm.SetTimeout(opts.Timeout)
	if err := opm.CheckReadyForOp(); err != nil {
		return nil, err
	}
	agent, err := c.getKvProvider()
	if err != nil {
		return nil, err
	}
	// opm.Wait blocks until the callback resolves or rejects the operation.
	err = opm.Wait(agent.Append(gocbcore.AdjoinOptions{
		Key: opm.DocumentID(),
		Value: val,
		CollectionName: opm.CollectionName(),
		ScopeName: opm.ScopeName(),
		DurabilityLevel: opm.DurabilityLevel(),
		DurabilityLevelTimeout: opm.DurabilityTimeout(),
		Cas: gocbcore.Cas(opts.Cas),
		RetryStrategy: opm.RetryStrategy(),
		TraceContext: opm.TraceSpan(),
		Deadline: opm.Deadline(),
	}, func(res *gocbcore.AdjoinResult, err error) {
		if err != nil {
			errOut = opm.EnhanceErr(err)
			opm.Reject()
			return
		}
		mutOut = &MutationResult{}
		mutOut.cas = Cas(res.Cas)
		mutOut.mt = opm.EnhanceMt(res.MutationToken)
		opm.Resolve(mutOut.mt)
	}))
	if err != nil {
		errOut = err
	}
	return
}

// Append appends a byte value to a document.
func (c *BinaryCollection) Append(id string, val []byte, opts *AppendOptions) (mutOut *MutationResult, errOut error) {
	return c.collection.binaryAppend(id, val, opts)
}
// PrependOptions are the options available to the Prepend operation.
type PrependOptions struct {
	Timeout time.Duration
	DurabilityLevel DurabilityLevel
	PersistTo uint
	ReplicateTo uint
	Cas Cas // optional CAS, forwarded to gocbcore for a conditional prepend
	RetryStrategy RetryStrategy
}

// binaryPrepend prepends raw bytes to an existing document. Mirrors
// binaryAppend; the named results are written from the async callback.
func (c *Collection) binaryPrepend(id string, val []byte, opts *PrependOptions) (mutOut *MutationResult, errOut error) {
	if opts == nil {
		opts = &PrependOptions{}
	}
	opm := c.newKvOpManager("Prepend", nil)
	defer opm.Finish()
	opm.SetDocumentID(id)
	opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel)
	opm.SetRetryStrategy(opts.RetryStrategy)
	opm.SetTimeout(opts.Timeout)
	if err := opm.CheckReadyForOp(); err != nil {
		return nil, err
	}
	agent, err := c.getKvProvider()
	if err != nil {
		return nil, err
	}
	// opm.Wait blocks until the callback resolves or rejects the operation.
	err = opm.Wait(agent.Prepend(gocbcore.AdjoinOptions{
		Key: opm.DocumentID(),
		Value: val,
		CollectionName: opm.CollectionName(),
		ScopeName: opm.ScopeName(),
		DurabilityLevel: opm.DurabilityLevel(),
		DurabilityLevelTimeout: opm.DurabilityTimeout(),
		Cas: gocbcore.Cas(opts.Cas),
		RetryStrategy: opm.RetryStrategy(),
		TraceContext: opm.TraceSpan(),
		Deadline: opm.Deadline(),
	}, func(res *gocbcore.AdjoinResult, err error) {
		if err != nil {
			errOut = opm.EnhanceErr(err)
			opm.Reject()
			return
		}
		mutOut = &MutationResult{}
		mutOut.cas = Cas(res.Cas)
		mutOut.mt = opm.EnhanceMt(res.MutationToken)
		opm.Resolve(mutOut.mt)
	}))
	if err != nil {
		errOut = err
	}
	return
}

// Prepend prepends a byte value to a document.
func (c *BinaryCollection) Prepend(id string, val []byte, opts *PrependOptions) (mutOut *MutationResult, errOut error) {
	return c.collection.binaryPrepend(id, val, opts)
}
// IncrementOptions are the options available to the Increment operation.
type IncrementOptions struct {
	Timeout time.Duration
	// Expiry is the length of time that the document will be stored in Couchbase.
	// A value of 0 will set the document to never expire.
	Expiry time.Duration
	// Initial, if non-negative, is the `initial` value to use for the document if it does not exist.
	// If present, this is the value that will be returned by a successful operation.
	Initial int64
	// Delta is the value to use for incrementing/decrementing if Initial is not present.
	Delta uint64
	DurabilityLevel DurabilityLevel
	PersistTo uint
	ReplicateTo uint
	Cas Cas
	RetryStrategy RetryStrategy
}

// binaryIncrement performs an atomic counter increment. The named results
// are written from inside the async callback below.
func (c *Collection) binaryIncrement(id string, opts *IncrementOptions) (countOut *CounterResult, errOut error) {
	if opts == nil {
		opts = &IncrementOptions{}
	}
	opm := c.newKvOpManager("Increment", nil)
	defer opm.Finish()
	opm.SetDocumentID(id)
	opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel)
	opm.SetRetryStrategy(opts.RetryStrategy)
	opm.SetTimeout(opts.Timeout)
	// All-ones is the sentinel passed to gocbcore for "no initial value";
	// any non-negative Initial (including 0) overrides it, matching the
	// field documentation above.
	realInitial := uint64(0xFFFFFFFFFFFFFFFF)
	if opts.Initial >= 0 {
		realInitial = uint64(opts.Initial)
	}
	if err := opm.CheckReadyForOp(); err != nil {
		return nil, err
	}
	agent, err := c.getKvProvider()
	if err != nil {
		return nil, err
	}
	// opm.Wait blocks until the callback resolves or rejects the operation.
	err = opm.Wait(agent.Increment(gocbcore.CounterOptions{
		Key: opm.DocumentID(),
		Delta: opts.Delta,
		Initial: realInitial,
		Expiry: durationToExpiry(opts.Expiry),
		CollectionName: opm.CollectionName(),
		ScopeName: opm.ScopeName(),
		DurabilityLevel: opm.DurabilityLevel(),
		DurabilityLevelTimeout: opm.DurabilityTimeout(),
		Cas: gocbcore.Cas(opts.Cas),
		RetryStrategy: opm.RetryStrategy(),
		TraceContext: opm.TraceSpan(),
		Deadline: opm.Deadline(),
	}, func(res *gocbcore.CounterResult, err error) {
		if err != nil {
			errOut = opm.EnhanceErr(err)
			opm.Reject()
			return
		}
		countOut = &CounterResult{}
		countOut.cas = Cas(res.Cas)
		countOut.mt = opm.EnhanceMt(res.MutationToken)
		countOut.content = res.Value
		opm.Resolve(countOut.mt)
	}))
	if err != nil {
		errOut = err
	}
	return
}

// Increment performs an atomic addition for an integer document. Passing a
// non-negative `initial` value will cause the document to be created if it did not
// already exist.
func (c *BinaryCollection) Increment(id string, opts *IncrementOptions) (countOut *CounterResult, errOut error) {
	return c.collection.binaryIncrement(id, opts)
}
// DecrementOptions are the options available to the Decrement operation.
type DecrementOptions struct {
	Timeout time.Duration
	// Expiry is the length of time that the document will be stored in Couchbase.
	// A value of 0 will set the document to never expire.
	Expiry time.Duration
	// Initial, if non-negative, is the `initial` value to use for the document if it does not exist.
	// If present, this is the value that will be returned by a successful operation.
	Initial int64
	// Delta is the value to use for incrementing/decrementing if Initial is not present.
	Delta uint64
	DurabilityLevel DurabilityLevel
	PersistTo uint
	ReplicateTo uint
	Cas Cas
	RetryStrategy RetryStrategy
}

// binaryDecrement performs an atomic counter decrement. Mirrors
// binaryIncrement; the named results are written from the async callback.
func (c *Collection) binaryDecrement(id string, opts *DecrementOptions) (countOut *CounterResult, errOut error) {
	if opts == nil {
		opts = &DecrementOptions{}
	}
	opm := c.newKvOpManager("Decrement", nil)
	defer opm.Finish()
	opm.SetDocumentID(id)
	opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel)
	opm.SetRetryStrategy(opts.RetryStrategy)
	opm.SetTimeout(opts.Timeout)
	// All-ones is the sentinel passed to gocbcore for "no initial value";
	// any non-negative Initial (including 0) overrides it.
	realInitial := uint64(0xFFFFFFFFFFFFFFFF)
	if opts.Initial >= 0 {
		realInitial = uint64(opts.Initial)
	}
	if err := opm.CheckReadyForOp(); err != nil {
		return nil, err
	}
	agent, err := c.getKvProvider()
	if err != nil {
		return nil, err
	}
	// opm.Wait blocks until the callback resolves or rejects the operation.
	err = opm.Wait(agent.Decrement(gocbcore.CounterOptions{
		Key: opm.DocumentID(),
		Delta: opts.Delta,
		Initial: realInitial,
		Expiry: durationToExpiry(opts.Expiry),
		CollectionName: opm.CollectionName(),
		ScopeName: opm.ScopeName(),
		DurabilityLevel: opm.DurabilityLevel(),
		DurabilityLevelTimeout: opm.DurabilityTimeout(),
		Cas: gocbcore.Cas(opts.Cas),
		RetryStrategy: opm.RetryStrategy(),
		TraceContext: opm.TraceSpan(),
		Deadline: opm.Deadline(),
	}, func(res *gocbcore.CounterResult, err error) {
		if err != nil {
			errOut = opm.EnhanceErr(err)
			opm.Reject()
			return
		}
		countOut = &CounterResult{}
		countOut.cas = Cas(res.Cas)
		countOut.mt = opm.EnhanceMt(res.MutationToken)
		countOut.content = res.Value
		opm.Resolve(countOut.mt)
	}))
	if err != nil {
		errOut = err
	}
	return
}

// Decrement performs an atomic subtraction for an integer document. Passing a
// non-negative `initial` value will cause the document to be created if it did not
// already exist.
func (c *BinaryCollection) Decrement(id string, opts *DecrementOptions) (countOut *CounterResult, errOut error) {
	return c.collection.binaryDecrement(id, opts)
}

745
vendor/github.com/couchbase/gocb/v2/collection_bulk.go generated vendored Normal file
View File

@ -0,0 +1,745 @@
package gocb
import (
"time"
"github.com/couchbase/gocbcore/v9"
)
// bulkOp is the shared embedded state of every BulkOp implementation:
// the in-flight gocbcore operation and its tracing span.
type bulkOp struct {
	pendop gocbcore.PendingOp
	span requestSpan
}

// cancel aborts the in-flight operation, if the SDK still considers it pending.
func (op *bulkOp) cancel() {
	op.pendop.Cancel()
}

// finish closes the op's tracing span; called once the op has completed.
func (op *bulkOp) finish() {
	op.span.Finish()
}
// BulkOp represents a single operation that can be submitted (within a list of more operations) to .Do()
// You can create a bulk operation by instantiating one of the implementations of BulkOp,
// such as GetOp, UpsertOp, ReplaceOp, and more.
//
// Implementations must send themselves on `signal` exactly once when done
// (success or failure), which is how Do knows the op has completed.
//
// UNCOMMITTED: This API may change in the future.
type BulkOp interface {
	execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
		retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan)
	markError(err error)
	cancel()
	finish()
}
// BulkOpOptions are the set of options available when performing BulkOps using Do.
type BulkOpOptions struct {
	Timeout time.Duration
	Transcoder Transcoder
	RetryStrategy RetryStrategy
}

// Do execute one or more `BulkOp` items in parallel.
//
// Note: Do returns a non-nil error only for setup failures (e.g. obtaining
// the KV provider). Per-operation failures are recorded on each BulkOp's
// own Err field, which callers must inspect individually.
//
// UNCOMMITTED: This API may change in the future.
func (c *Collection) Do(ops []BulkOp, opts *BulkOpOptions) error {
	if opts == nil {
		opts = &BulkOpOptions{}
	}
	span := c.startKvOpTrace("Do", nil)
	// When no explicit timeout is given, scale the default KV timeout by the
	// number of ops so large batches are not cut off prematurely.
	timeout := opts.Timeout
	if opts.Timeout == 0 {
		timeout = c.timeoutsConfig.KVTimeout * time.Duration(len(ops))
	}
	retryWrapper := c.retryStrategyWrapper
	if opts.RetryStrategy != nil {
		retryWrapper = newRetryStrategyWrapper(opts.RetryStrategy)
	}
	if opts.Transcoder == nil {
		opts.Transcoder = c.transcoder
	}
	agent, err := c.getKvProvider()
	if err != nil {
		return err
	}
	// Make the channel big enough to hold all our ops in case
	// we get delayed inside execute (don't want to block the
	// individual op handlers when they dispatch their signal).
	signal := make(chan BulkOp, len(ops))
	for _, item := range ops {
		item.execute(span.Context(), c, agent, opts.Transcoder, signal, retryWrapper, time.Now().Add(timeout), c.startKvOpTrace)
	}
	// Drain one completion per dispatched op; order of completion is arbitrary.
	for range ops {
		item := <-signal
		// We're really just clearing the pendop from this thread,
		// since it already completed, no cancel actually occurs
		item.finish()
	}
	return nil
}
// GetOp represents a type of `BulkOp` used for Get operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type GetOp struct {
	bulkOp
	ID string
	Result *GetResult
	Err error
}

func (item *GetOp) markError(err error) {
	item.Err = err
}

// execute dispatches the get; the callback records the result or error on the
// item and signals completion. On dispatch failure the item signals
// immediately; otherwise the pending op is retained so cancel() can abort it.
func (item *GetOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("GetOp", tracectx)
	item.bulkOp.span = span
	op, err := provider.Get(gocbcore.GetOptions{
		Key: []byte(item.ID),
		CollectionName: c.name(),
		ScopeName: c.ScopeName(),
		RetryStrategy: retryWrapper,
		TraceContext: span.Context(),
		Deadline: deadline,
	}, func(res *gocbcore.GetResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &GetResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
				transcoder: transcoder,
				contents: res.Value,
				flags: res.Flags,
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// GetAndTouchOp represents a type of `BulkOp` used for GetAndTouch operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type GetAndTouchOp struct {
	bulkOp
	ID string
	Expiry time.Duration
	Result *GetResult
	Err error
}

func (item *GetAndTouchOp) markError(err error) {
	item.Err = err
}

// execute dispatches the get-and-touch; the callback records the result or
// error on the item and signals completion. On dispatch failure the item
// signals immediately; otherwise the pending op is retained for cancel().
func (item *GetAndTouchOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("GetAndTouchOp", tracectx)
	item.bulkOp.span = span
	op, err := provider.GetAndTouch(gocbcore.GetAndTouchOptions{
		Key: []byte(item.ID),
		Expiry: durationToExpiry(item.Expiry),
		CollectionName: c.name(),
		ScopeName: c.ScopeName(),
		RetryStrategy: retryWrapper,
		TraceContext: span.Context(),
		Deadline: deadline,
	}, func(res *gocbcore.GetAndTouchResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &GetResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
				transcoder: transcoder,
				contents: res.Value,
				flags: res.Flags,
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// TouchOp represents a type of `BulkOp` used for Touch operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type TouchOp struct {
	bulkOp
	ID string
	Expiry time.Duration
	Result *MutationResult
	Err error
}

func (item *TouchOp) markError(err error) {
	item.Err = err
}

// execute dispatches the touch; the callback records the result or error on
// the item and signals completion. On dispatch failure the item signals
// immediately; otherwise the pending op is retained for cancel().
func (item *TouchOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("TouchOp", tracectx)
	item.bulkOp.span = span
	op, err := provider.Touch(gocbcore.TouchOptions{
		Key: []byte(item.ID),
		Expiry: durationToExpiry(item.Expiry),
		CollectionName: c.name(),
		ScopeName: c.ScopeName(),
		RetryStrategy: retryWrapper,
		TraceContext: span.Context(),
		Deadline: deadline,
	}, func(res *gocbcore.TouchResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &MutationResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
			}
			// Only attach a mutation token when one was returned (non-zero VbUUID).
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token: res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// RemoveOp represents a type of `BulkOp` used for Remove operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type RemoveOp struct {
	bulkOp
	ID string
	Cas Cas // optional CAS for a conditional remove
	Result *MutationResult
	Err error
}

func (item *RemoveOp) markError(err error) {
	item.Err = err
}

// execute dispatches the delete; the callback records the result or error on
// the item and signals completion. On dispatch failure the item signals
// immediately; otherwise the pending op is retained for cancel().
func (item *RemoveOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("RemoveOp", tracectx)
	item.bulkOp.span = span
	op, err := provider.Delete(gocbcore.DeleteOptions{
		Key: []byte(item.ID),
		Cas: gocbcore.Cas(item.Cas),
		CollectionName: c.name(),
		ScopeName: c.ScopeName(),
		RetryStrategy: retryWrapper,
		TraceContext: span.Context(),
		Deadline: deadline,
	}, func(res *gocbcore.DeleteResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &MutationResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
			}
			// Only attach a mutation token when one was returned (non-zero VbUUID).
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token: res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// UpsertOp represents a type of `BulkOp` used for Upsert operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type UpsertOp struct {
	bulkOp
	ID string
	Value interface{}
	Expiry time.Duration
	// NOTE(review): Cas is declared but never forwarded to gocbcore.SetOptions
	// in execute below, so it currently has no effect — confirm against
	// upstream gocb before relying on it.
	Cas Cas
	Result *MutationResult
	Err error
}

func (item *UpsertOp) markError(err error) {
	item.Err = err
}

// execute encodes the value (in its own child span), dispatches the set, and
// lets the callback record the result or error before signalling completion.
func (item *UpsertOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder,
	signal chan BulkOp, retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("UpsertOp", tracectx)
	item.bulkOp.span = span
	etrace := c.startKvOpTrace("encode", span.Context())
	bytes, flags, err := transcoder.Encode(item.Value)
	etrace.Finish()
	if err != nil {
		item.Err = err
		signal <- item
		return
	}
	op, err := provider.Set(gocbcore.SetOptions{
		Key: []byte(item.ID),
		Value: bytes,
		Flags: flags,
		Expiry: durationToExpiry(item.Expiry),
		CollectionName: c.name(),
		ScopeName: c.ScopeName(),
		RetryStrategy: retryWrapper,
		TraceContext: span.Context(),
		Deadline: deadline,
	}, func(res *gocbcore.StoreResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &MutationResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
			}
			// Only attach a mutation token when one was returned (non-zero VbUUID).
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token: res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// InsertOp represents a type of `BulkOp` used for Insert operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type InsertOp struct {
	bulkOp
	ID string
	Value interface{}
	Expiry time.Duration
	Result *MutationResult
	Err error
}

func (item *InsertOp) markError(err error) {
	item.Err = err
}

// execute encodes the value (in its own child span), dispatches the add
// (insert fails if the document exists), and lets the callback record the
// result or error before signalling completion.
func (item *InsertOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("InsertOp", tracectx)
	item.bulkOp.span = span
	etrace := c.startKvOpTrace("encode", span.Context())
	bytes, flags, err := transcoder.Encode(item.Value)
	if err != nil {
		etrace.Finish()
		item.Err = err
		signal <- item
		return
	}
	etrace.Finish()
	op, err := provider.Add(gocbcore.AddOptions{
		Key: []byte(item.ID),
		Value: bytes,
		Flags: flags,
		Expiry: durationToExpiry(item.Expiry),
		CollectionName: c.name(),
		ScopeName: c.ScopeName(),
		RetryStrategy: retryWrapper,
		TraceContext: span.Context(),
		Deadline: deadline,
	}, func(res *gocbcore.StoreResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &MutationResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
			}
			// Only attach a mutation token when one was returned (non-zero VbUUID).
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token: res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// ReplaceOp represents a type of `BulkOp` used for Replace operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type ReplaceOp struct {
	bulkOp
	ID string
	Value interface{}
	Expiry time.Duration
	Cas Cas // optional CAS for a conditional replace
	Result *MutationResult
	Err error
}

func (item *ReplaceOp) markError(err error) {
	item.Err = err
}

// execute encodes the value (in its own child span), dispatches the replace,
// and lets the callback record the result or error before signalling
// completion.
func (item *ReplaceOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("ReplaceOp", tracectx)
	item.bulkOp.span = span
	etrace := c.startKvOpTrace("encode", span.Context())
	bytes, flags, err := transcoder.Encode(item.Value)
	if err != nil {
		etrace.Finish()
		item.Err = err
		signal <- item
		return
	}
	etrace.Finish()
	op, err := provider.Replace(gocbcore.ReplaceOptions{
		Key: []byte(item.ID),
		Value: bytes,
		Flags: flags,
		Cas: gocbcore.Cas(item.Cas),
		Expiry: durationToExpiry(item.Expiry),
		CollectionName: c.name(),
		ScopeName: c.ScopeName(),
		RetryStrategy: retryWrapper,
		TraceContext: span.Context(),
		Deadline: deadline,
	}, func(res *gocbcore.StoreResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &MutationResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
			}
			// Only attach a mutation token when one was returned (non-zero VbUUID).
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token: res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// AppendOp represents a type of `BulkOp` used for Append operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type AppendOp struct {
	bulkOp
	ID string
	Value string // raw bytes to append; no transcoder is involved
	Result *MutationResult
	Err error
}

func (item *AppendOp) markError(err error) {
	item.Err = err
}

// execute dispatches the append; the callback records the result or error on
// the item and signals completion. On dispatch failure the item signals
// immediately; otherwise the pending op is retained for cancel().
func (item *AppendOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("AppendOp", tracectx)
	item.bulkOp.span = span
	op, err := provider.Append(gocbcore.AdjoinOptions{
		Key: []byte(item.ID),
		Value: []byte(item.Value),
		CollectionName: c.name(),
		ScopeName: c.ScopeName(),
		RetryStrategy: retryWrapper,
		TraceContext: span.Context(),
		Deadline: deadline,
	}, func(res *gocbcore.AdjoinResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &MutationResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
			}
			// Only attach a mutation token when one was returned (non-zero VbUUID).
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token: res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// PrependOp represents a type of `BulkOp` used for Prepend operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type PrependOp struct {
	bulkOp
	ID string
	Value string // raw bytes to prepend; no transcoder is involved
	Result *MutationResult
	Err error
}

func (item *PrependOp) markError(err error) {
	item.Err = err
}

// execute dispatches the prepend; the callback records the result or error on
// the item and signals completion. On dispatch failure the item signals
// immediately; otherwise the pending op is retained for cancel().
func (item *PrependOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("PrependOp", tracectx)
	item.bulkOp.span = span
	op, err := provider.Prepend(gocbcore.AdjoinOptions{
		Key: []byte(item.ID),
		Value: []byte(item.Value),
		CollectionName: c.name(),
		ScopeName: c.ScopeName(),
		RetryStrategy: retryWrapper,
		TraceContext: span.Context(),
		Deadline: deadline,
	}, func(res *gocbcore.AdjoinResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &MutationResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
			}
			// Only attach a mutation token when one was returned (non-zero VbUUID).
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token: res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// IncrementOp represents a type of `BulkOp` used for Increment operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type IncrementOp struct {
	bulkOp
	ID string
	Delta int64
	Initial int64
	Expiry time.Duration
	Result *CounterResult
	Err error
}

func (item *IncrementOp) markError(err error) {
	item.Err = err
}

// execute dispatches the counter increment; the callback records the result
// or error on the item and signals completion.
func (item *IncrementOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("IncrementOp", tracectx)
	item.bulkOp.span = span
	// All-ones is the "no initial value" sentinel passed to gocbcore.
	// NOTE(review): the collection-level Increment enables the initial value
	// for any Initial >= 0, but here only Initial > 0 does, so Initial == 0
	// is silently ignored — confirm this asymmetry is intended.
	realInitial := uint64(0xFFFFFFFFFFFFFFFF)
	if item.Initial > 0 {
		realInitial = uint64(item.Initial)
	}
	op, err := provider.Increment(gocbcore.CounterOptions{
		Key: []byte(item.ID),
		Delta: uint64(item.Delta),
		Initial: realInitial,
		Expiry: durationToExpiry(item.Expiry),
		CollectionName: c.name(),
		ScopeName: c.ScopeName(),
		RetryStrategy: retryWrapper,
		TraceContext: span.Context(),
		Deadline: deadline,
	}, func(res *gocbcore.CounterResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &CounterResult{
				MutationResult: MutationResult{
					Result: Result{
						cas: Cas(res.Cas),
					},
				},
				content: res.Value,
			}
			// Only attach a mutation token when one was returned (non-zero VbUUID).
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token: res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// DecrementOp represents a type of `BulkOp` used for Decrement operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type DecrementOp struct {
	bulkOp
	ID string
	Delta int64
	Initial int64
	Expiry time.Duration
	Result *CounterResult
	Err error
}

func (item *DecrementOp) markError(err error) {
	item.Err = err
}

// execute dispatches the counter decrement; the callback records the result
// or error on the item and signals completion.
func (item *DecrementOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("DecrementOp", tracectx)
	item.bulkOp.span = span
	// All-ones is the "no initial value" sentinel passed to gocbcore.
	// NOTE(review): like IncrementOp, this uses Initial > 0 where the
	// collection-level op uses Initial >= 0 — confirm Initial == 0 being
	// ignored is intended.
	realInitial := uint64(0xFFFFFFFFFFFFFFFF)
	if item.Initial > 0 {
		realInitial = uint64(item.Initial)
	}
	op, err := provider.Decrement(gocbcore.CounterOptions{
		Key: []byte(item.ID),
		Delta: uint64(item.Delta),
		Initial: realInitial,
		Expiry: durationToExpiry(item.Expiry),
		CollectionName: c.name(),
		ScopeName: c.ScopeName(),
		RetryStrategy: retryWrapper,
		TraceContext: span.Context(),
		Deadline: deadline,
	}, func(res *gocbcore.CounterResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &CounterResult{
				MutationResult: MutationResult{
					Result: Result{
						cas: Cas(res.Cas),
					},
				},
				content: res.Value,
			}
			// Only attach a mutation token when one was returned (non-zero VbUUID).
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token: res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}

1040
vendor/github.com/couchbase/gocb/v2/collection_crud.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

476
vendor/github.com/couchbase/gocb/v2/collection_ds.go generated vendored Normal file
View File

@ -0,0 +1,476 @@
package gocb
import (
"errors"
"fmt"
)
// CouchbaseList represents a list document: a JSON array manipulated in
// place through subdocument operations.
type CouchbaseList struct {
	collection *Collection
	id         string
}

// List returns a new CouchbaseList for the document specified by id.
func (c *Collection) List(id string) *CouchbaseList {
	return &CouchbaseList{collection: c, id: id}
}

// Iterator returns an iterable for all items in the list.
func (cl *CouchbaseList) Iterator() ([]interface{}, error) {
	doc, err := cl.collection.Get(cl.id, nil)
	if err != nil {
		return nil, err
	}

	var items []interface{}
	if err := doc.Content(&items); err != nil {
		return nil, err
	}
	return items, nil
}

// At retrieves the value specified at the given index from the list.
func (cl *CouchbaseList) At(index int, valuePtr interface{}) error {
	spec := []LookupInSpec{GetSpec(fmt.Sprintf("[%d]", index), nil)}
	res, err := cl.collection.LookupIn(cl.id, spec, nil)
	if err != nil {
		return err
	}
	return res.ContentAt(0, valuePtr)
}

// RemoveAt removes the value specified at the given index from the list.
func (cl *CouchbaseList) RemoveAt(index int) error {
	spec := []MutateInSpec{RemoveSpec(fmt.Sprintf("[%d]", index), nil)}
	_, err := cl.collection.MutateIn(cl.id, spec, nil)
	return err
}

// Append appends an item to the list, creating the document if needed.
func (cl *CouchbaseList) Append(val interface{}) error {
	spec := []MutateInSpec{ArrayAppendSpec("", val, nil)}
	_, err := cl.collection.MutateIn(cl.id, spec, &MutateInOptions{StoreSemantic: StoreSemanticsUpsert})
	return err
}

// Prepend prepends an item to the list, creating the document if needed.
func (cl *CouchbaseList) Prepend(val interface{}) error {
	spec := []MutateInSpec{ArrayPrependSpec("", val, nil)}
	_, err := cl.collection.MutateIn(cl.id, spec, &MutateInOptions{StoreSemantic: StoreSemanticsUpsert})
	return err
}

// IndexOf gets the index of the item in the list, or -1 when absent.
func (cl *CouchbaseList) IndexOf(val interface{}) (int, error) {
	doc, err := cl.collection.Get(cl.id, nil)
	if err != nil {
		return 0, err
	}

	var items []interface{}
	if err := doc.Content(&items); err != nil {
		return 0, err
	}

	for i, item := range items {
		if item == val {
			return i, nil
		}
	}
	return -1, nil
}

// Size returns the size of the list.
func (cl *CouchbaseList) Size() (int, error) {
	spec := []LookupInSpec{CountSpec("", nil)}
	res, err := cl.collection.LookupIn(cl.id, spec, nil)
	if err != nil {
		return 0, err
	}

	var count int
	if err := res.ContentAt(0, &count); err != nil {
		return 0, err
	}
	return count, nil
}

// Clear clears a list, also removing it.
func (cl *CouchbaseList) Clear() error {
	_, err := cl.collection.Remove(cl.id, nil)
	return err
}
// CouchbaseMap represents a map document: a JSON object manipulated in
// place through subdocument operations.
type CouchbaseMap struct {
	collection *Collection
	id         string
}

// Map returns a new CouchbaseMap.
func (c *Collection) Map(id string) *CouchbaseMap {
	return &CouchbaseMap{collection: c, id: id}
}

// Iterator returns an iterable for all items in the map.
func (cl *CouchbaseMap) Iterator() (map[string]interface{}, error) {
	doc, err := cl.collection.Get(cl.id, nil)
	if err != nil {
		return nil, err
	}

	var entries map[string]interface{}
	if err := doc.Content(&entries); err != nil {
		return nil, err
	}
	return entries, nil
}

// At retrieves the item for the given id from the map.
// NOTE(review): the "[%s]" path format mirrors the array-index syntax used
// by the list helpers — confirm server-side path handling for object keys.
func (cl *CouchbaseMap) At(id string, valuePtr interface{}) error {
	spec := []LookupInSpec{GetSpec(fmt.Sprintf("[%s]", id), nil)}
	res, err := cl.collection.LookupIn(cl.id, spec, nil)
	if err != nil {
		return err
	}
	return res.ContentAt(0, valuePtr)
}

// Add adds an item to the map, creating the document if needed.
func (cl *CouchbaseMap) Add(id string, val interface{}) error {
	spec := []MutateInSpec{UpsertSpec(id, val, nil)}
	_, err := cl.collection.MutateIn(cl.id, spec, &MutateInOptions{StoreSemantic: StoreSemanticsUpsert})
	return err
}

// Remove removes an item from the map.
func (cl *CouchbaseMap) Remove(id string) error {
	spec := []MutateInSpec{RemoveSpec(id, nil)}
	_, err := cl.collection.MutateIn(cl.id, spec, nil)
	return err
}

// Exists verifies whether or not an id exists in the map.
func (cl *CouchbaseMap) Exists(id string) (bool, error) {
	spec := []LookupInSpec{ExistsSpec(fmt.Sprintf("[%s]", id), nil)}
	res, err := cl.collection.LookupIn(cl.id, spec, nil)
	if err != nil {
		return false, err
	}
	return res.Exists(0), nil
}

// Size returns the size of the map.
func (cl *CouchbaseMap) Size() (int, error) {
	spec := []LookupInSpec{CountSpec("", nil)}
	res, err := cl.collection.LookupIn(cl.id, spec, nil)
	if err != nil {
		return 0, err
	}

	var count int
	if err := res.ContentAt(0, &count); err != nil {
		return 0, err
	}
	return count, nil
}

// Keys returns all of the keys within the map.
func (cl *CouchbaseMap) Keys() ([]string, error) {
	entries, err := cl.Iterator()
	if err != nil {
		return nil, err
	}

	var keys []string
	for key := range entries {
		keys = append(keys, key)
	}
	return keys, nil
}

// Values returns all of the values within the map.
func (cl *CouchbaseMap) Values() ([]interface{}, error) {
	entries, err := cl.Iterator()
	if err != nil {
		return nil, err
	}

	var values []interface{}
	for _, val := range entries {
		values = append(values, val)
	}
	return values, nil
}

// Clear clears a map, also removing it.
func (cl *CouchbaseMap) Clear() error {
	_, err := cl.collection.Remove(cl.id, nil)
	return err
}
// CouchbaseSet represents a set document.
type CouchbaseSet struct {
	// id is the key of the backing document.
	id string
	// underlying reuses the list implementation; the set is stored as a
	// JSON array whose uniqueness is enforced via ArrayAddUnique.
	underlying *CouchbaseList
}

// Set returns a new CouchbaseSet.
func (c *Collection) Set(id string) *CouchbaseSet {
	return &CouchbaseSet{
		id:         id,
		underlying: c.List(id),
	}
}

// Iterator returns an iterable for all items in the set.
func (cs *CouchbaseSet) Iterator() ([]interface{}, error) {
	return cs.underlying.Iterator()
}

// Add adds a value to the set, creating the document if needed.
func (cs *CouchbaseSet) Add(val interface{}) error {
	ops := make([]MutateInSpec, 1)
	// ArrayAddUnique makes the server reject duplicate values, which is
	// what gives this array-backed structure its set semantics.
	ops[0] = ArrayAddUniqueSpec("", val, nil)
	_, err := cs.underlying.collection.MutateIn(cs.id, ops, &MutateInOptions{StoreSemantic: StoreSemanticsUpsert})
	if err != nil {
		return err
	}
	return nil
}

// Remove removes a value from the set.
//
// The removal is an optimistic CAS loop: fetch the document, locate the
// value, then remove it at that index guarded by the fetched CAS. A
// concurrent modification (CAS mismatch) restarts the cycle, up to 16 times.
func (cs *CouchbaseSet) Remove(val string) error {
	for i := 0; i < 16; i++ {
		content, err := cs.underlying.collection.Get(cs.id, nil)
		if err != nil {
			return err
		}

		cas := content.Cas()

		var setContents []interface{}
		err = content.Content(&setContents)
		if err != nil {
			return err
		}

		indexToRemove := -1
		// NOTE: this loop variable shadows the retry counter above; that is
		// harmless here because the retry counter is unused in the body.
		for i, item := range setContents {
			if item == val {
				indexToRemove = i
			}
		}

		if indexToRemove > -1 {
			ops := make([]MutateInSpec, 1)
			ops[0] = RemoveSpec(fmt.Sprintf("[%d]", indexToRemove), nil)
			_, err = cs.underlying.collection.MutateIn(cs.id, ops, &MutateInOptions{Cas: cas})
			if errors.Is(err, ErrCasMismatch) {
				// Someone else changed the document; refetch and retry.
				continue
			}
			if err != nil {
				return err
			}
		}
		// Value removed, or never present — both count as success.
		return nil
	}

	return errors.New("failed to perform operation after 16 retries")
}

// Values returns all of the values within the set.
func (cs *CouchbaseSet) Values() ([]interface{}, error) {
	content, err := cs.underlying.collection.Get(cs.id, nil)
	if err != nil {
		return nil, err
	}

	var setContents []interface{}
	err = content.Content(&setContents)
	if err != nil {
		return nil, err
	}

	return setContents, nil
}

// Contains verifies whether or not a value exists within the set.
func (cs *CouchbaseSet) Contains(val string) (bool, error) {
	content, err := cs.underlying.collection.Get(cs.id, nil)
	if err != nil {
		return false, err
	}

	var setContents []interface{}
	err = content.Content(&setContents)
	if err != nil {
		return false, err
	}

	for _, item := range setContents {
		if item == val {
			return true, nil
		}
	}

	return false, nil
}

// Size returns the size of the set.
func (cs *CouchbaseSet) Size() (int, error) {
	return cs.underlying.Size()
}

// Clear clears a set, also removing it.
func (cs *CouchbaseSet) Clear() error {
	err := cs.underlying.Clear()
	if err != nil {
		return err
	}

	return nil
}
// CouchbaseQueue represents a queue document.
type CouchbaseQueue struct {
	// id is the key of the backing document.
	id string
	// underlying reuses the list implementation; Push prepends and Pop
	// removes from the tail ("[-1]"), giving FIFO ordering.
	underlying *CouchbaseList
}

// Queue returns a new CouchbaseQueue.
func (c *Collection) Queue(id string) *CouchbaseQueue {
	return &CouchbaseQueue{
		id:         id,
		underlying: c.List(id),
	}
}

// Iterator returns an iterable for all items in the queue.
func (cs *CouchbaseQueue) Iterator() ([]interface{}, error) {
	return cs.underlying.Iterator()
}

// Push pushes a value onto the queue.
func (cs *CouchbaseQueue) Push(val interface{}) error {
	// New items go to the front; Pop consumes from the back.
	return cs.underlying.Prepend(val)
}

// Pop pops an item off of the queue into valuePtr.
//
// The pop is an optimistic CAS loop: read the tail element, then remove it
// guarded by the document CAS. A concurrent modification (CAS mismatch)
// restarts the cycle, up to 16 times.
func (cs *CouchbaseQueue) Pop(valuePtr interface{}) error {
	for i := 0; i < 16; i++ {
		ops := make([]LookupInSpec, 1)
		ops[0] = GetSpec("[-1]", nil)
		content, err := cs.underlying.collection.LookupIn(cs.id, ops, nil)
		if err != nil {
			return err
		}

		cas := content.Cas()

		err = content.ContentAt(0, valuePtr)
		if err != nil {
			return err
		}

		mutateOps := make([]MutateInSpec, 1)
		mutateOps[0] = RemoveSpec("[-1]", nil)
		_, err = cs.underlying.collection.MutateIn(cs.id, mutateOps, &MutateInOptions{Cas: cas})
		if errors.Is(err, ErrCasMismatch) {
			// Someone else changed the document; refetch and retry.
			continue
		}
		if err != nil {
			return err
		}
		return nil
	}

	return errors.New("failed to perform operation after 16 retries")
}

// Size returns the size of the queue.
func (cs *CouchbaseQueue) Size() (int, error) {
	return cs.underlying.Size()
}

// Clear clears a queue, also removing it.
func (cs *CouchbaseQueue) Clear() error {
	err := cs.underlying.Clear()
	if err != nil {
		return err
	}

	return nil
}

174
vendor/github.com/couchbase/gocb/v2/collection_dura.go generated vendored Normal file
View File

@ -0,0 +1,174 @@
package gocb
import (
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// observeOnceSeqNo performs a single ObserveVb call against one node
// (replicaIdx 0 is the active node) and reports whether the mutation
// identified by mt has been replicated to and/or persisted on that node,
// judged by comparing the node's sequence numbers to the token's SeqNo.
func (c *Collection) observeOnceSeqNo(
	tracectx requestSpanContext,
	docID string,
	mt gocbcore.MutationToken,
	replicaIdx int,
	cancelCh chan struct{},
	timeout time.Duration,
) (didReplicate, didPersist bool, errOut error) {
	opm := c.newKvOpManager("observeOnceSeqNo", tracectx)
	defer opm.Finish()

	opm.SetDocumentID(docID)
	opm.SetCancelCh(cancelCh)
	opm.SetTimeout(timeout)

	agent, err := c.getKvProvider()
	if err != nil {
		return false, false, err
	}

	err = opm.Wait(agent.ObserveVb(gocbcore.ObserveVbOptions{
		VbID:         mt.VbID,
		VbUUID:       mt.VbUUID,
		ReplicaIdx:   replicaIdx,
		TraceContext: opm.TraceSpan(),
		Deadline:     opm.Deadline(),
	}, func(res *gocbcore.ObserveVbResult, err error) {
		if err != nil || res == nil {
			errOut = opm.EnhanceErr(err)
			opm.Reject()
			return
		}

		// The mutation has reached this node once the node's sequence
		// numbers have caught up to the mutation token's SeqNo.
		didReplicate = res.CurrentSeqNo >= mt.SeqNo
		didPersist = res.PersistSeqNo >= mt.SeqNo

		opm.Resolve(nil)
	}))
	if err != nil {
		errOut = err
	}
	return
}
// observeOne polls a single node (via observeOnceSeqNo) until the mutation
// has been both replicated and persisted there, signalling each milestone
// exactly once on replicaCh / persistCh. Polls back off exponentially and
// the loop exits when cancelCh is closed or on an unexpected error.
func (c *Collection) observeOne(
	tracectx requestSpanContext,
	docID string,
	mt gocbcore.MutationToken,
	replicaIdx int,
	replicaCh, persistCh chan struct{},
	cancelCh chan struct{},
	timeout time.Duration,
) {
	sentReplicated := false
	sentPersisted := false

	calc := gocbcore.ExponentialBackoff(10*time.Microsecond, 100*time.Millisecond, 0)
	retries := uint32(0)

ObserveLoop:
	for {
		// Non-blocking cancellation check before each poll.
		select {
		case <-cancelCh:
			break ObserveLoop
		default:
			// not cancelled yet
		}

		didReplicate, didPersist, err := c.observeOnceSeqNo(tracectx, docID, mt, replicaIdx, cancelCh, timeout)
		if err != nil {
			logDebugf("ObserveOnce failed unexpected: %s", err)
			return
		}

		// Signal each milestone at most once — the caller tallies these
		// channel sends, so duplicates would skew the counts.
		if didReplicate && !sentReplicated {
			replicaCh <- struct{}{}
			sentReplicated = true
		}

		if didPersist && !sentPersisted {
			persistCh <- struct{}{}
			sentPersisted = true
		}

		// If we've got persisted and replicated, we can just stop
		if sentPersisted && sentReplicated {
			break ObserveLoop
		}

		// Back off before the next poll, but wake early on cancellation.
		waitTmr := gocbcore.AcquireTimer(calc(retries))
		retries++
		select {
		case <-waitTmr.C:
			gocbcore.ReleaseTimer(waitTmr, true)
		case <-cancelCh:
			gocbcore.ReleaseTimer(waitTmr, false)
		}
	}
}
// waitForDurability blocks until the mutation identified by mt has been
// observed on at least replicateTo replicas and persisted on at least
// persistTo nodes, or until the deadline passes / cancelCh fires. It spawns
// one observer goroutine per node (active + replicas) and tallies their
// milestone signals.
func (c *Collection) waitForDurability(
	tracectx requestSpanContext,
	docID string,
	mt gocbcore.MutationToken,
	replicateTo uint,
	persistTo uint,
	deadline time.Time,
	cancelCh chan struct{},
) error {
	opm := c.newKvOpManager("waitForDurability", tracectx)
	defer opm.Finish()

	opm.SetDocumentID(docID)

	agent, err := c.getKvProvider()
	if err != nil {
		return err
	}

	snapshot, err := agent.ConfigSnapshot()
	if err != nil {
		return err
	}

	numReplicas, err := snapshot.NumReplicas()
	if err != nil {
		return err
	}

	// Active node plus its replicas.
	numServers := numReplicas + 1
	// Fail fast when the cluster cannot possibly satisfy the requirement.
	if replicateTo > uint(numServers-1) || persistTo > uint(numServers) {
		return opm.EnhanceErr(ErrDurabilityImpossible)
	}

	subOpCancelCh := make(chan struct{}, 1)
	// Buffered to numServers so observers never block when signalling.
	replicaCh := make(chan struct{}, numServers)
	persistCh := make(chan struct{}, numServers)

	for replicaIdx := 0; replicaIdx < numServers; replicaIdx++ {
		go c.observeOne(opm.TraceSpan(), docID, mt, replicaIdx, replicaCh, persistCh, subOpCancelCh, time.Until(deadline))
	}

	numReplicated := uint(0)
	numPersisted := uint(0)

	for {
		select {
		case <-replicaCh:
			numReplicated++
		case <-persistCh:
			numPersisted++
		case <-time.After(time.Until(deadline)):
			// deadline exceeded
			close(subOpCancelCh)
			return opm.EnhanceErr(ErrAmbiguousTimeout)
		case <-cancelCh:
			// parent asked for cancellation
			close(subOpCancelCh)
			return opm.EnhanceErr(ErrRequestCanceled)
		}

		if numReplicated >= replicateTo && numPersisted >= persistTo {
			close(subOpCancelCh)
			return nil
		}
	}
}

View File

@ -0,0 +1,316 @@
package gocb
import (
"encoding/json"
"errors"
"time"
"github.com/couchbase/gocbcore/v9/memd"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// LookupInOptions are the set of options available to LookupIn.
type LookupInOptions struct {
	// Timeout bounds how long the operation may take; zero uses the default.
	Timeout       time.Duration
	RetryStrategy RetryStrategy

	// Internal: This should never be used and is not supported.
	Internal struct {
		// AccessDeleted allows lookups against soft-deleted (tombstoned) documents.
		AccessDeleted bool
	}
}
// LookupIn performs a set of subdocument lookup operations on the document identified by id.
func (c *Collection) LookupIn(id string, ops []LookupInSpec, opts *LookupInOptions) (docOut *LookupInResult, errOut error) {
	// Treat a nil options pointer as all-defaults.
	if opts == nil {
		opts = &LookupInOptions{}
	}

	opm := c.newKvOpManager("LookupIn", nil)
	defer opm.Finish()

	opm.SetDocumentID(id)
	opm.SetRetryStrategy(opts.RetryStrategy)
	opm.SetTimeout(opts.Timeout)

	if err := opm.CheckReadyForOp(); err != nil {
		return nil, err
	}

	return c.internalLookupIn(opm, ops, opts.Internal.AccessDeleted)
}
// internalLookupIn translates the public LookupInSpec list into gocbcore
// subdoc operations, dispatches the request, and maps the per-path results
// back into a LookupInResult.
func (c *Collection) internalLookupIn(
	opm *kvOpManager,
	ops []LookupInSpec,
	accessDeleted bool,
) (docOut *LookupInResult, errOut error) {
	var subdocs []gocbcore.SubDocOp
	for _, op := range ops {
		// A Get with an empty path means "fetch the whole document".
		if op.op == memd.SubDocOpGet && op.path == "" {
			if op.isXattr {
				return nil, errors.New("invalid xattr fetch with no path")
			}

			subdocs = append(subdocs, gocbcore.SubDocOp{
				Op:    memd.SubDocOpGetDoc,
				Flags: memd.SubdocFlag(SubdocFlagNone),
			})
			continue
		} else if op.op == memd.SubDocOpDictSet && op.path == "" {
			// A set with an empty path means "replace the whole document".
			if op.isXattr {
				return nil, errors.New("invalid xattr set with no path")
			}

			subdocs = append(subdocs, gocbcore.SubDocOp{
				Op:    memd.SubDocOpSetDoc,
				Flags: memd.SubdocFlag(SubdocFlagNone),
			})
			continue
		}

		flags := memd.SubdocFlagNone
		if op.isXattr {
			flags |= memd.SubdocFlagXattrPath
		}

		subdocs = append(subdocs, gocbcore.SubDocOp{
			Op:    op.op,
			Path:  op.path,
			Flags: flags,
		})
	}

	var flags memd.SubdocDocFlag
	if accessDeleted {
		flags = memd.SubdocDocFlagAccessDeleted
	}

	agent, err := c.getKvProvider()
	if err != nil {
		return nil, err
	}
	err = opm.Wait(agent.LookupIn(gocbcore.LookupInOptions{
		Key:            opm.DocumentID(),
		Ops:            subdocs,
		CollectionName: opm.CollectionName(),
		ScopeName:      opm.ScopeName(),
		RetryStrategy:  opm.RetryStrategy(),
		TraceContext:   opm.TraceSpan(),
		Deadline:       opm.Deadline(),
		Flags:          flags,
	}, func(res *gocbcore.LookupInResult, err error) {
		// A response may carry partial per-path results alongside an error,
		// so only treat the error as fatal when no result came back at all.
		if err != nil && res == nil {
			errOut = opm.EnhanceErr(err)
		}
		if res != nil {
			docOut = &LookupInResult{}
			docOut.cas = Cas(res.Cas)
			docOut.contents = make([]lookupInPartial, len(subdocs))
			for i, opRes := range res.Ops {
				docOut.contents[i].err = opm.EnhanceErr(opRes.Err)
				docOut.contents[i].data = json.RawMessage(opRes.Value)
			}
		}

		if err == nil {
			opm.Resolve(nil)
		} else {
			opm.Reject()
		}
	}))
	if err != nil {
		errOut = err
	}
	return
}
// StoreSemantics is used to define the document level action to take during a MutateIn operation.
type StoreSemantics uint8

const (
	// StoreSemanticsReplace signifies to Replace the document, and fail if it does not exist.
	// This is the default action.
	StoreSemanticsReplace StoreSemantics = iota

	// StoreSemanticsUpsert signifies to replace the document or create it if it doesn't exist.
	StoreSemanticsUpsert

	// StoreSemanticsInsert signifies to create the document, and fail if it exists.
	StoreSemanticsInsert
)
// MutateInOptions are the set of options available to MutateIn.
type MutateInOptions struct {
	// Expiry sets a new expiry on the document as part of the mutation.
	Expiry time.Duration
	// Cas, when non-zero, makes the mutation fail unless the document is unchanged.
	Cas Cas
	// PersistTo and ReplicateTo request legacy observe-based durability.
	PersistTo   uint
	ReplicateTo uint
	// DurabilityLevel requests server-side synchronous durability.
	DurabilityLevel DurabilityLevel
	// StoreSemantic selects replace/upsert/insert document-level semantics.
	StoreSemantic StoreSemantics
	Timeout       time.Duration
	RetryStrategy RetryStrategy

	// Internal: This should never be used and is not supported.
	Internal struct {
		// AccessDeleted allows mutations against soft-deleted (tombstoned) documents.
		AccessDeleted bool
	}
}
// MutateIn performs a set of subdocument mutations on the document specified by id.
func (c *Collection) MutateIn(id string, ops []MutateInSpec, opts *MutateInOptions) (mutOut *MutateInResult, errOut error) {
	// Treat a nil options pointer as all-defaults.
	if opts == nil {
		opts = &MutateInOptions{}
	}

	opm := c.newKvOpManager("MutateIn", nil)
	defer opm.Finish()

	opm.SetDocumentID(id)
	opm.SetRetryStrategy(opts.RetryStrategy)
	opm.SetTimeout(opts.Timeout)

	if err := opm.CheckReadyForOp(); err != nil {
		return nil, err
	}

	return c.internalMutateIn(opm, opts.StoreSemantic, opts.Expiry, opts.Cas, ops, opts.Internal.AccessDeleted)
}
// jsonMarshalMultiArray marshals in to JSON and strips the surrounding
// array brackets so the elements can be spliced into a multi-value subdoc op.
func jsonMarshalMultiArray(in interface{}) ([]byte, error) {
	encoded, err := json.Marshal(in)
	if err != nil {
		return nil, err
	}

	// The encoded value must be a JSON array ("[...]") for its elements
	// to be extractable.
	if len(encoded) < 2 || encoded[0] != '[' {
		return nil, makeInvalidArgumentsError("not a JSON array")
	}

	return encoded[1 : len(encoded)-1], nil
}
// jsonMarshalMutateSpec encodes the value carried by a MutateInSpec,
// returning the payload bytes along with any subdoc flags implied by the
// kind of value.
func jsonMarshalMutateSpec(op MutateInSpec) ([]byte, memd.SubdocFlag, error) {
	switch value := op.value.(type) {
	case nil:
		// No value at all (e.g. remove ops) sends an empty payload.
		return nil, memd.SubdocFlagNone, nil
	case MutationMacro:
		// Macros are sent verbatim and expanded server-side in xattr space.
		return []byte(value), memd.SubdocFlagExpandMacros | memd.SubdocFlagXattrPath, nil
	}

	if op.multiValue {
		encoded, err := jsonMarshalMultiArray(op.value)
		return encoded, memd.SubdocFlagNone, err
	}

	encoded, err := json.Marshal(op.value)
	return encoded, memd.SubdocFlagNone, err
}
// internalMutateIn translates the public MutateInSpec list into gocbcore
// subdoc mutations (including document-level create/replace flags) and
// dispatches the request, mapping results into a MutateInResult.
func (c *Collection) internalMutateIn(
	opm *kvOpManager,
	action StoreSemantics,
	expiry time.Duration,
	cas Cas,
	ops []MutateInSpec,
	accessDeleted bool,
) (mutOut *MutateInResult, errOut error) {
	var docFlags memd.SubdocDocFlag
	if action == StoreSemanticsReplace {
		// this is the default behaviour
	} else if action == StoreSemanticsUpsert {
		docFlags |= memd.SubdocDocFlagMkDoc
	} else if action == StoreSemanticsInsert {
		docFlags |= memd.SubdocDocFlagAddDoc
	} else {
		return nil, makeInvalidArgumentsError("invalid StoreSemantics value provided")
	}

	if accessDeleted {
		docFlags |= memd.SubdocDocFlagAccessDeleted
	}

	var subdocs []gocbcore.SubDocOp
	for _, op := range ops {
		// A blank path is only meaningful for whole-document replace; the
		// other spec kinds with a blank path are user errors.
		if op.path == "" {
			switch op.op {
			case memd.SubDocOpDictAdd:
				return nil, makeInvalidArgumentsError("cannot specify a blank path with InsertSpec")
			case memd.SubDocOpDictSet:
				return nil, makeInvalidArgumentsError("cannot specify a blank path with UpsertSpec")
			case memd.SubDocOpDelete:
				return nil, makeInvalidArgumentsError("cannot specify a blank path with DeleteSpec")
			case memd.SubDocOpReplace:
				// ReplaceSpec with a blank path becomes a full-document set.
				// op is a copy of the range element, so this change is local.
				op.op = memd.SubDocOpSetDoc
			default:
			}
		}

		// Time the value encoding separately for tracing purposes.
		etrace := c.startKvOpTrace("encode", opm.TraceSpan())
		bytes, flags, err := jsonMarshalMutateSpec(op)
		etrace.Finish()
		if err != nil {
			return nil, err
		}

		if op.createPath {
			flags |= memd.SubdocFlagMkDirP
		}

		if op.isXattr {
			flags |= memd.SubdocFlagXattrPath
		}

		subdocs = append(subdocs, gocbcore.SubDocOp{
			Op:    op.op,
			Flags: flags,
			Path:  op.path,
			Value: bytes,
		})
	}

	agent, err := c.getKvProvider()
	if err != nil {
		return nil, err
	}
	err = opm.Wait(agent.MutateIn(gocbcore.MutateInOptions{
		Key:                    opm.DocumentID(),
		Flags:                  docFlags,
		Cas:                    gocbcore.Cas(cas),
		Ops:                    subdocs,
		Expiry:                 durationToExpiry(expiry),
		CollectionName:         opm.CollectionName(),
		ScopeName:              opm.ScopeName(),
		DurabilityLevel:        opm.DurabilityLevel(),
		DurabilityLevelTimeout: opm.DurabilityTimeout(),
		RetryStrategy:          opm.RetryStrategy(),
		TraceContext:           opm.TraceSpan(),
		Deadline:               opm.Deadline(),
	}, func(res *gocbcore.MutateInResult, err error) {
		if err != nil {
			errOut = opm.EnhanceErr(err)
			opm.Reject()
			return
		}

		mutOut = &MutateInResult{}
		mutOut.cas = Cas(res.Cas)
		mutOut.mt = opm.EnhanceMt(res.MutationToken)
		mutOut.contents = make([]mutateInPartial, len(res.Ops))
		for i, op := range res.Ops {
			mutOut.contents[i] = mutateInPartial{data: op.Value}
		}

		opm.Resolve(mutOut.mt)
	}))
	if err != nil {
		errOut = err
	}
	return
}

203
vendor/github.com/couchbase/gocb/v2/constants.go generated vendored Normal file
View File

@ -0,0 +1,203 @@
package gocb
import (
gocbcore "github.com/couchbase/gocbcore/v9"
"github.com/couchbase/gocbcore/v9/memd"
)
const (
goCbVersionStr = "v2.1.4"
)
// QueryIndexType provides information on the type of indexer used for an index.
type QueryIndexType string
const (
// QueryIndexTypeGsi indicates that GSI was used to build the index.
QueryIndexTypeGsi QueryIndexType = "gsi"
// QueryIndexTypeView indicates that views were used to build the index.
QueryIndexTypeView QueryIndexType = "views"
)
// QueryStatus provides information about the current status of a query.
type QueryStatus string
const (
// QueryStatusRunning indicates the query is still running
QueryStatusRunning QueryStatus = "running"
// QueryStatusSuccess indicates the query was successful.
QueryStatusSuccess QueryStatus = "success"
// QueryStatusErrors indicates a query completed with errors.
QueryStatusErrors QueryStatus = "errors"
// QueryStatusCompleted indicates a query has completed.
QueryStatusCompleted QueryStatus = "completed"
// QueryStatusStopped indicates a query has been stopped.
QueryStatusStopped QueryStatus = "stopped"
// QueryStatusTimeout indicates a query timed out.
QueryStatusTimeout QueryStatus = "timeout"
// QueryStatusClosed indicates that a query was closed.
QueryStatusClosed QueryStatus = "closed"
// QueryStatusFatal indicates that a query ended with a fatal error.
QueryStatusFatal QueryStatus = "fatal"
// QueryStatusAborted indicates that a query was aborted.
QueryStatusAborted QueryStatus = "aborted"
// QueryStatusUnknown indicates that the query status is unknown.
QueryStatusUnknown QueryStatus = "unknown"
)
// ServiceType specifies a particular Couchbase service type.
type ServiceType gocbcore.ServiceType
const (
// ServiceTypeManagement represents a management service.
ServiceTypeManagement ServiceType = ServiceType(gocbcore.MgmtService)
// ServiceTypeKeyValue represents a memcached service.
ServiceTypeKeyValue ServiceType = ServiceType(gocbcore.MemdService)
// ServiceTypeViews represents a views service.
ServiceTypeViews ServiceType = ServiceType(gocbcore.CapiService)
// ServiceTypeQuery represents a query service.
ServiceTypeQuery ServiceType = ServiceType(gocbcore.N1qlService)
// ServiceTypeSearch represents a full-text-search service.
ServiceTypeSearch ServiceType = ServiceType(gocbcore.FtsService)
// ServiceTypeAnalytics represents an analytics service.
ServiceTypeAnalytics ServiceType = ServiceType(gocbcore.CbasService)
)
// QueryProfileMode specifies the profiling mode to use during a query.
type QueryProfileMode string
const (
// QueryProfileModeNone disables query profiling
QueryProfileModeNone QueryProfileMode = "off"
// QueryProfileModePhases includes phase profiling information in the query response
QueryProfileModePhases QueryProfileMode = "phases"
// QueryProfileModeTimings includes timing profiling information in the query response
QueryProfileModeTimings QueryProfileMode = "timings"
)
// SubdocFlag provides special handling flags for sub-document operations
type SubdocFlag memd.SubdocFlag
const (
// SubdocFlagNone indicates no special behaviours
SubdocFlagNone SubdocFlag = SubdocFlag(memd.SubdocFlagNone)
// SubdocFlagCreatePath indicates you wish to recursively create the tree of paths
// if it does not already exist within the document.
SubdocFlagCreatePath SubdocFlag = SubdocFlag(memd.SubdocFlagMkDirP)
// SubdocFlagXattr indicates your path refers to an extended attribute rather than the document.
SubdocFlagXattr SubdocFlag = SubdocFlag(memd.SubdocFlagXattrPath)
// SubdocFlagUseMacros indicates that you wish macro substitution to occur on the value
SubdocFlagUseMacros SubdocFlag = SubdocFlag(memd.SubdocFlagExpandMacros)
)
// SubdocDocFlag specifies document-level flags for a sub-document operation.
type SubdocDocFlag memd.SubdocDocFlag
const (
// SubdocDocFlagNone indicates no special behaviours
SubdocDocFlagNone SubdocDocFlag = SubdocDocFlag(memd.SubdocDocFlagNone)
// SubdocDocFlagMkDoc indicates that the document should be created if it does not already exist.
SubdocDocFlagMkDoc SubdocDocFlag = SubdocDocFlag(memd.SubdocDocFlagMkDoc)
	// SubdocDocFlagAddDoc indicates that the document should be created only if it does not already exist.
SubdocDocFlagAddDoc SubdocDocFlag = SubdocDocFlag(memd.SubdocDocFlagAddDoc)
// SubdocDocFlagAccessDeleted indicates that you wish to receive soft-deleted documents.
SubdocDocFlagAccessDeleted SubdocDocFlag = SubdocDocFlag(memd.SubdocDocFlagAccessDeleted)
)
// DurabilityLevel specifies the level of synchronous replication to use.
type DurabilityLevel uint8
const (
// DurabilityLevelMajority specifies that a mutation must be replicated (held in memory) to a majority of nodes.
DurabilityLevelMajority DurabilityLevel = iota + 1
// DurabilityLevelMajorityAndPersistOnMaster specifies that a mutation must be replicated (held in memory) to a
// majority of nodes and also persisted (written to disk) on the active node.
DurabilityLevelMajorityAndPersistOnMaster
// DurabilityLevelPersistToMajority specifies that a mutation must be persisted (written to disk) to a majority
// of nodes.
DurabilityLevelPersistToMajority
)
// MutationMacro can be supplied to MutateIn operations to perform ExpandMacros operations.
type MutationMacro string
const (
// MutationMacroCAS can be used to tell the server to use the CAS macro.
MutationMacroCAS MutationMacro = "\"${Mutation.CAS}\""
// MutationMacroSeqNo can be used to tell the server to use the seqno macro.
MutationMacroSeqNo MutationMacro = "\"${Mutation.seqno}\""
// MutationMacroValueCRC32c can be used to tell the server to use the value_crc32c macro.
MutationMacroValueCRC32c MutationMacro = "\"${Mutation.value_crc32c}\""
)
// ClusterState specifies the current state of the cluster
type ClusterState uint
const (
// ClusterStateOnline indicates that all nodes are online and reachable.
ClusterStateOnline ClusterState = iota + 1
// ClusterStateDegraded indicates that all services will function, but possibly not optimally.
ClusterStateDegraded
// ClusterStateOffline indicates that no nodes were reachable.
ClusterStateOffline
)
// EndpointState specifies the current state of an endpoint.
type EndpointState uint
const (
// EndpointStateDisconnected indicates the endpoint socket is unreachable.
EndpointStateDisconnected EndpointState = iota + 1
// EndpointStateConnecting indicates the endpoint socket is connecting.
EndpointStateConnecting
// EndpointStateConnected indicates the endpoint socket is connected and ready.
EndpointStateConnected
// EndpointStateDisconnecting indicates the endpoint socket is disconnecting.
EndpointStateDisconnecting
)
// PingState specifies the result of the ping operation
type PingState uint
const (
// PingStateOk indicates that the ping operation was successful.
PingStateOk PingState = iota + 1
// PingStateTimeout indicates that the ping operation timed out.
PingStateTimeout
// PingStateError indicates that the ping operation failed.
PingStateError
)

57
vendor/github.com/couchbase/gocb/v2/constants_str.go generated vendored Normal file
View File

@ -0,0 +1,57 @@
package gocb
// serviceTypeToString maps a ServiceType to its canonical wire name,
// returning "" for unrecognised values.
func serviceTypeToString(service ServiceType) string {
	names := map[ServiceType]string{
		ServiceTypeManagement: "mgmt",
		ServiceTypeKeyValue:   "kv",
		ServiceTypeViews:      "views",
		ServiceTypeQuery:      "query",
		ServiceTypeSearch:     "search",
		ServiceTypeAnalytics:  "analytics",
	}
	return names[service]
}
// clusterStateToString maps a ClusterState to its canonical string name,
// returning "" for unrecognised values.
func clusterStateToString(state ClusterState) string {
	names := map[ClusterState]string{
		ClusterStateOnline:   "online",
		ClusterStateDegraded: "degraded",
		ClusterStateOffline:  "offline",
	}
	return names[state]
}
// endpointStateToString maps an EndpointState to its canonical string name,
// returning "" for unrecognised values.
func endpointStateToString(state EndpointState) string {
	names := map[EndpointState]string{
		EndpointStateDisconnected:  "disconnected",
		EndpointStateConnecting:    "connecting",
		EndpointStateConnected:     "connected",
		EndpointStateDisconnecting: "disconnecting",
	}
	return names[state]
}
// pingStateToString maps a PingState to its canonical string name,
// returning "" for unrecognised values.
func pingStateToString(state PingState) string {
	names := map[PingState]string{
		PingStateOk:      "ok",
		PingStateTimeout: "timeout",
		PingStateError:   "error",
	}
	return names[state]
}

299
vendor/github.com/couchbase/gocb/v2/error.go generated vendored Normal file
View File

@ -0,0 +1,299 @@
package gocb
import (
"errors"
"fmt"
gocbcore "github.com/couchbase/gocbcore/v9"
)
type wrappedError struct {
Message string
InnerError error
}
func (e wrappedError) Error() string {
return fmt.Sprintf("%s: %s", e.Message, e.InnerError.Error())
}
func (e wrappedError) Unwrap() error {
return e.InnerError
}
func wrapError(err error, message string) error {
return wrappedError{
Message: message,
InnerError: err,
}
}
type invalidArgumentsError struct {
message string
}
func (e invalidArgumentsError) Error() string {
return fmt.Sprintf("invalid arguments: %s", e.message)
}
func (e invalidArgumentsError) Unwrap() error {
return ErrInvalidArgument
}
func makeInvalidArgumentsError(message string) error {
return invalidArgumentsError{
message: message,
}
}
// Shared Error Definitions RFC#58@15
var (
	// ErrTimeout occurs when an operation does not receive a response in a timely manner.
	ErrTimeout = gocbcore.ErrTimeout
	// ErrRequestCanceled occurs when an operation has been canceled.
	ErrRequestCanceled = gocbcore.ErrRequestCanceled
	// ErrInvalidArgument occurs when an invalid argument is provided for an operation.
	ErrInvalidArgument = gocbcore.ErrInvalidArgument
	// ErrServiceNotAvailable occurs when the requested service is not available.
	ErrServiceNotAvailable = gocbcore.ErrServiceNotAvailable
	// ErrInternalServerFailure occurs when the server encounters an internal server error.
	ErrInternalServerFailure = gocbcore.ErrInternalServerFailure
	// ErrAuthenticationFailure occurs when authentication has failed.
	ErrAuthenticationFailure = gocbcore.ErrAuthenticationFailure
	// ErrTemporaryFailure occurs when an operation has failed for a reason that is temporary.
	ErrTemporaryFailure = gocbcore.ErrTemporaryFailure
	// ErrParsingFailure occurs when a query has failed to be parsed by the server.
	ErrParsingFailure = gocbcore.ErrParsingFailure
	// ErrCasMismatch occurs when an operation has been performed with a cas value that does not match the value on the server.
	ErrCasMismatch = gocbcore.ErrCasMismatch
	// ErrBucketNotFound occurs when the requested bucket could not be found.
	ErrBucketNotFound = gocbcore.ErrBucketNotFound
	// ErrCollectionNotFound occurs when the requested collection could not be found.
	ErrCollectionNotFound = gocbcore.ErrCollectionNotFound
	// ErrEncodingFailure occurs when encoding of a value failed.
	ErrEncodingFailure = gocbcore.ErrEncodingFailure
	// ErrDecodingFailure occurs when decoding of a value failed.
	ErrDecodingFailure = gocbcore.ErrDecodingFailure
	// ErrUnsupportedOperation occurs when an operation that is unsupported or unknown is performed against the server.
	ErrUnsupportedOperation = gocbcore.ErrUnsupportedOperation
	// ErrAmbiguousTimeout occurs when an operation does not receive a response in a timely manner for a reason that
	// makes it impossible to safely establish whether or not the operation succeeded.
	ErrAmbiguousTimeout = gocbcore.ErrAmbiguousTimeout
	// ErrUnambiguousTimeout occurs when an operation does not receive a response in a timely manner for a reason that
	// it can be safely established that the operation did not succeed.
	ErrUnambiguousTimeout = gocbcore.ErrUnambiguousTimeout
	// ErrFeatureNotAvailable occurs when an operation is performed on a bucket which does not support it.
	ErrFeatureNotAvailable = gocbcore.ErrFeatureNotAvailable
	// ErrScopeNotFound occurs when the requested scope could not be found.
	ErrScopeNotFound = gocbcore.ErrScopeNotFound
	// ErrIndexNotFound occurs when the requested index could not be found.
	ErrIndexNotFound = gocbcore.ErrIndexNotFound
	// ErrIndexExists occurs when creating an index that already exists.
	ErrIndexExists = gocbcore.ErrIndexExists
)
// Key Value Error Definitions RFC#58@15
var (
	// ErrDocumentNotFound occurs when the requested document could not be found.
	ErrDocumentNotFound = gocbcore.ErrDocumentNotFound
	// ErrDocumentUnretrievable occurs when GetAnyReplica cannot find the document on any replica.
	ErrDocumentUnretrievable = gocbcore.ErrDocumentUnretrievable
	// ErrDocumentLocked occurs when a mutation operation is attempted against a document that is locked.
	ErrDocumentLocked = gocbcore.ErrDocumentLocked
	// ErrValueTooLarge occurs when a document has gone over the maximum size allowed by the server.
	ErrValueTooLarge = gocbcore.ErrValueTooLarge
	// ErrDocumentExists occurs when an attempt is made to insert a document but a document with that key already exists.
	ErrDocumentExists = gocbcore.ErrDocumentExists
	// ErrValueNotJSON occurs when a sub-document operation is performed on a
	// document which is not JSON.
	ErrValueNotJSON = gocbcore.ErrValueNotJSON
	// ErrDurabilityLevelNotAvailable occurs when an invalid durability level was requested.
	ErrDurabilityLevelNotAvailable = gocbcore.ErrDurabilityLevelNotAvailable
	// ErrDurabilityImpossible occurs when a request is performed with impossible
	// durability level requirements.
	ErrDurabilityImpossible = gocbcore.ErrDurabilityImpossible
	// ErrDurabilityAmbiguous occurs when a SyncWrite does not complete in the specified
	// time and the result is ambiguous.
	ErrDurabilityAmbiguous = gocbcore.ErrDurabilityAmbiguous
	// ErrDurableWriteInProgress occurs when an attempt is made to write to a key that has
	// a SyncWrite pending.
	ErrDurableWriteInProgress = gocbcore.ErrDurableWriteInProgress
	// ErrDurableWriteReCommitInProgress occurs when a SyncWrite is being recommitted.
	ErrDurableWriteReCommitInProgress = gocbcore.ErrDurableWriteReCommitInProgress
	// ErrMutationLost occurs when a mutation was lost.
	ErrMutationLost = gocbcore.ErrMutationLost
	// ErrPathNotFound occurs when a sub-document operation targets a path
	// which does not exist in the specified document.
	ErrPathNotFound = gocbcore.ErrPathNotFound
	// ErrPathMismatch occurs when a sub-document operation specifies a path
	// which does not match the document structure (field access on an array).
	ErrPathMismatch = gocbcore.ErrPathMismatch
	// ErrPathInvalid occurs when a sub-document path could not be parsed.
	ErrPathInvalid = gocbcore.ErrPathInvalid
	// ErrPathTooBig occurs when a sub-document path is too big.
	ErrPathTooBig = gocbcore.ErrPathTooBig
	// ErrPathTooDeep occurs when an operation would cause a document to be
	// nested beyond the depth limits allowed by the sub-document specification.
	ErrPathTooDeep = gocbcore.ErrPathTooDeep
	// ErrValueTooDeep occurs when a sub-document operation specifies a value
	// which is deeper than the depth limits of the sub-document specification.
	ErrValueTooDeep = gocbcore.ErrValueTooDeep
	// ErrValueInvalid occurs when a sub-document operation could not insert.
	ErrValueInvalid = gocbcore.ErrValueInvalid
	// ErrDocumentNotJSON occurs when a sub-document operation is performed on a
	// document which is not JSON.
	ErrDocumentNotJSON = gocbcore.ErrDocumentNotJSON
	// ErrNumberTooBig occurs when a sub-document operation is performed with
	// a bad range.
	ErrNumberTooBig = gocbcore.ErrNumberTooBig
	// ErrDeltaInvalid occurs when a sub-document counter operation is performed
	// and the specified delta is not valid.
	ErrDeltaInvalid = gocbcore.ErrDeltaInvalid
	// ErrPathExists occurs when a sub-document operation expects a path not
	// to exist, but the path was found in the document.
	ErrPathExists = gocbcore.ErrPathExists
	// ErrXattrUnknownMacro occurs when an invalid macro value is specified.
	ErrXattrUnknownMacro = gocbcore.ErrXattrUnknownMacro
	// ErrXattrInvalidFlagCombo occurs when an invalid set of
	// extended-attribute flags is passed to a sub-document operation.
	ErrXattrInvalidFlagCombo = gocbcore.ErrXattrInvalidFlagCombo
	// ErrXattrInvalidKeyCombo occurs when an invalid set of key operations
	// are specified for an extended-attribute sub-document operation.
	ErrXattrInvalidKeyCombo = gocbcore.ErrXattrInvalidKeyCombo
	// ErrXattrUnknownVirtualAttribute occurs when an invalid virtual attribute is specified.
	ErrXattrUnknownVirtualAttribute = gocbcore.ErrXattrUnknownVirtualAttribute
	// ErrXattrCannotModifyVirtualAttribute occurs when a mutation is attempted upon
	// a virtual attribute (which are immutable by definition).
	ErrXattrCannotModifyVirtualAttribute = gocbcore.ErrXattrCannotModifyVirtualAttribute
	// ErrXattrInvalidOrder occurs when a set of key operations are specified for an extended-attribute sub-document
	// operation in the incorrect order.
	ErrXattrInvalidOrder = gocbcore.ErrXattrInvalidOrder
)
// Query Error Definitions RFC#58@15
var (
	// ErrPlanningFailure occurs when the query service was unable to create a query plan.
	ErrPlanningFailure = gocbcore.ErrPlanningFailure
	// ErrIndexFailure occurs when there was an issue with the index specified.
	ErrIndexFailure = gocbcore.ErrIndexFailure
	// ErrPreparedStatementFailure occurs when there was an issue with the prepared statement.
	ErrPreparedStatementFailure = gocbcore.ErrPreparedStatementFailure
)
// Analytics Error Definitions RFC#58@15
var (
	// ErrCompilationFailure occurs when there was an issue executing the analytics query because it could not
	// be compiled.
	ErrCompilationFailure = gocbcore.ErrCompilationFailure
	// ErrJobQueueFull occurs when the analytics service job queue is full.
	ErrJobQueueFull = gocbcore.ErrJobQueueFull
	// ErrDatasetNotFound occurs when the analytics dataset requested could not be found.
	ErrDatasetNotFound = gocbcore.ErrDatasetNotFound
	// ErrDataverseNotFound occurs when the analytics dataverse requested could not be found.
	ErrDataverseNotFound = gocbcore.ErrDataverseNotFound
	// ErrDatasetExists occurs when creating an analytics dataset failed because it already exists.
	ErrDatasetExists = gocbcore.ErrDatasetExists
	// ErrDataverseExists occurs when creating an analytics dataverse failed because it already exists.
	ErrDataverseExists = gocbcore.ErrDataverseExists
	// ErrLinkNotFound occurs when the analytics link requested could not be found.
	ErrLinkNotFound = gocbcore.ErrLinkNotFound
)
// Search Error Definitions RFC#58@15
// (no search-specific errors are currently defined)
var ()
// View Error Definitions RFC#58@15
var (
	// ErrViewNotFound occurs when the view requested could not be found.
	ErrViewNotFound = gocbcore.ErrViewNotFound
	// ErrDesignDocumentNotFound occurs when the design document requested could not be found.
	ErrDesignDocumentNotFound = gocbcore.ErrDesignDocumentNotFound
)
// Management Error Definitions RFC#58@15
var (
	// ErrCollectionExists occurs when creating a collection failed because it already exists.
	ErrCollectionExists = gocbcore.ErrCollectionExists
	// ErrScopeExists occurs when creating a scope failed because it already exists.
	ErrScopeExists = gocbcore.ErrScopeExists
	// ErrUserNotFound occurs when the user requested could not be found.
	ErrUserNotFound = gocbcore.ErrUserNotFound
	// ErrGroupNotFound occurs when the group requested could not be found.
	ErrGroupNotFound = gocbcore.ErrGroupNotFound
	// ErrBucketExists occurs when creating a bucket failed because it already exists.
	ErrBucketExists = gocbcore.ErrBucketExists
	// ErrUserExists occurs when creating a user failed because it already exists.
	ErrUserExists = gocbcore.ErrUserExists
	// ErrBucketNotFlushable occurs when a bucket could not be flushed because flushing is not enabled.
	ErrBucketNotFlushable = gocbcore.ErrBucketNotFlushable
)
// SDK specific error definitions
var (
	// ErrOverload occurs when too many operations are dispatched and all queues are full.
	ErrOverload = gocbcore.ErrOverload
	// ErrNoResult occurs when no results are available to a query.
	ErrNoResult = errors.New("no result was available")
)

42
vendor/github.com/couchbase/gocb/v2/error_analytics.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
package gocb
import gocbcore "github.com/couchbase/gocbcore/v9"
// AnalyticsErrorDesc represents a specific error returned from the analytics service.
type AnalyticsErrorDesc struct {
	Code    uint32
	Message string
}

// translateCoreAnalyticsErrorDesc converts core analytics error descriptors
// into their public SDK equivalents.
func translateCoreAnalyticsErrorDesc(descs []gocbcore.AnalyticsErrorDesc) []AnalyticsErrorDesc {
	out := make([]AnalyticsErrorDesc, len(descs))
	for i := range descs {
		out[i] = AnalyticsErrorDesc{
			Code:    descs[i].Code,
			Message: descs[i].Message,
		}
	}
	return out
}
// AnalyticsError is the error type of all analytics query errors.
// UNCOMMITTED: This API may change in the future.
type AnalyticsError struct {
	InnerError      error                `json:"-"`
	Statement       string               `json:"statement,omitempty"`
	ClientContextID string               `json:"client_context_id,omitempty"`
	Errors          []AnalyticsErrorDesc `json:"errors,omitempty"`
	Endpoint        string               `json:"endpoint,omitempty"`
	RetryReasons    []RetryReason        `json:"retry_reasons,omitempty"`
	RetryAttempts   uint32               `json:"retry_attempts,omitempty"`
}

// Error returns the string representation of this error.
func (e AnalyticsError) Error() string {
	details := serializeWrappedError(e)
	return e.InnerError.Error() + " | " + details
}

// Unwrap returns the underlying cause for this error.
func (e AnalyticsError) Unwrap() error {
	return e.InnerError
}

72
vendor/github.com/couchbase/gocb/v2/error_http.go generated vendored Normal file
View File

@ -0,0 +1,72 @@
package gocb
import (
gocbcore "github.com/couchbase/gocbcore/v9"
"github.com/pkg/errors"
)
// HTTPError is the error type of management HTTP errors.
// UNCOMMITTED: This API may change in the future.
type HTTPError struct {
	InnerError    error         `json:"-"`
	UniqueID      string        `json:"unique_id,omitempty"`
	Endpoint      string        `json:"endpoint,omitempty"`
	RetryReasons  []RetryReason `json:"retry_reasons,omitempty"`
	RetryAttempts uint32        `json:"retry_attempts,omitempty"`
}

// Error returns the string representation of this error.
func (e HTTPError) Error() string {
	details := serializeWrappedError(e)
	return e.InnerError.Error() + " | " + details
}

// Unwrap returns the underlying cause for this error.
func (e HTTPError) Unwrap() error {
	return e.InnerError
}
// makeGenericHTTPError wraps baseErr into an HTTPError, copying across the
// request's unique ID and the responding endpoint when available. A nil
// baseErr is logged and substituted with a generic unknown error.
func makeGenericHTTPError(baseErr error, req *gocbcore.HTTPRequest, resp *gocbcore.HTTPResponse) error {
	if baseErr == nil {
		logErrorf("makeGenericHTTPError got an empty error")
		baseErr = errors.New("unknown error")
	}
	httpErr := HTTPError{InnerError: baseErr}
	if req != nil {
		httpErr.UniqueID = req.UniqueID
	}
	if resp != nil {
		httpErr.Endpoint = resp.Endpoint
	}
	return httpErr
}
// makeGenericMgmtError wraps baseErr into an HTTPError using the management
// request/response context when available. A nil baseErr is logged and
// substituted with a generic unknown error.
func makeGenericMgmtError(baseErr error, req *mgmtRequest, resp *mgmtResponse) error {
	if baseErr == nil {
		logErrorf("makeGenericMgmtError got an empty error")
		baseErr = errors.New("unknown error")
	}
	mgmtErr := HTTPError{InnerError: baseErr}
	if req != nil {
		mgmtErr.UniqueID = req.UniqueID
	}
	if resp != nil {
		mgmtErr.Endpoint = resp.Endpoint
	}
	return mgmtErr
}

// makeMgmtBadStatusError builds a management HTTPError from a plain message,
// used when a request completed with an unexpected HTTP status.
func makeMgmtBadStatusError(message string, req *mgmtRequest, resp *mgmtResponse) error {
	return makeGenericMgmtError(errors.New(message), req, resp)
}

34
vendor/github.com/couchbase/gocb/v2/error_keyvalue.go generated vendored Normal file
View File

@ -0,0 +1,34 @@
package gocb
import "github.com/couchbase/gocbcore/v9/memd"
// KeyValueError wraps key-value errors that occur within the SDK.
// UNCOMMITTED: This API may change in the future.
type KeyValueError struct {
	InnerError         error           `json:"-"`
	StatusCode         memd.StatusCode `json:"status_code,omitempty"`
	BucketName         string          `json:"bucket,omitempty"`
	ScopeName          string          `json:"scope,omitempty"`
	CollectionName     string          `json:"collection,omitempty"`
	CollectionID       uint32          `json:"collection_id,omitempty"`
	ErrorName          string          `json:"error_name,omitempty"`
	ErrorDescription   string          `json:"error_description,omitempty"`
	Opaque             uint32          `json:"opaque,omitempty"`
	Context            string          `json:"context,omitempty"`
	Ref                string          `json:"ref,omitempty"`
	RetryReasons       []RetryReason   `json:"retry_reasons,omitempty"`
	RetryAttempts      uint32          `json:"retry_attempts,omitempty"`
	LastDispatchedTo   string          `json:"last_dispatched_to,omitempty"`
	LastDispatchedFrom string          `json:"last_dispatched_from,omitempty"`
	LastConnectionID   string          `json:"last_connection_id,omitempty"`
}

// Error returns the string representation of a kv error.
func (e KeyValueError) Error() string {
	details := serializeWrappedError(e)
	return e.InnerError.Error() + " | " + details
}

// Unwrap returns the underlying reason for the error
func (e KeyValueError) Unwrap() error {
	return e.InnerError
}

42
vendor/github.com/couchbase/gocb/v2/error_query.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
package gocb
import gocbcore "github.com/couchbase/gocbcore/v9"
// QueryErrorDesc represents a specific error returned from the query service.
type QueryErrorDesc struct {
	Code    uint32
	Message string
}

// translateCoreQueryErrorDesc converts core N1QL error descriptors into
// their public SDK equivalents.
func translateCoreQueryErrorDesc(descs []gocbcore.N1QLErrorDesc) []QueryErrorDesc {
	out := make([]QueryErrorDesc, len(descs))
	for i := range descs {
		out[i] = QueryErrorDesc{
			Code:    descs[i].Code,
			Message: descs[i].Message,
		}
	}
	return out
}
// QueryError is the error type of all query errors.
// UNCOMMITTED: This API may change in the future.
type QueryError struct {
	InnerError      error            `json:"-"`
	Statement       string           `json:"statement,omitempty"`
	ClientContextID string           `json:"client_context_id,omitempty"`
	Errors          []QueryErrorDesc `json:"errors,omitempty"`
	Endpoint        string           `json:"endpoint,omitempty"`
	RetryReasons    []RetryReason    `json:"retry_reasons,omitempty"`
	RetryAttempts   uint32           `json:"retry_attempts,omitempty"`
}

// Error returns the string representation of this error.
func (e QueryError) Error() string {
	details := serializeWrappedError(e)
	return e.InnerError.Error() + " | " + details
}

// Unwrap returns the underlying cause for this error.
func (e QueryError) Unwrap() error {
	return e.InnerError
}

23
vendor/github.com/couchbase/gocb/v2/error_search.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package gocb
// SearchError is the error type of all search query errors.
// UNCOMMITTED: This API may change in the future.
type SearchError struct {
	InnerError    error         `json:"-"`
	Query         interface{}   `json:"query,omitempty"`
	Endpoint      string        `json:"endpoint,omitempty"`
	RetryReasons  []RetryReason `json:"retry_reasons,omitempty"`
	RetryAttempts uint32        `json:"retry_attempts,omitempty"`
	ErrorText     string        `json:"error_text"`
	IndexName     string        `json:"index_name,omitempty"`
}

// Error returns the string representation of this error.
func (e SearchError) Error() string {
	details := serializeWrappedError(e)
	return e.InnerError.Error() + " | " + details
}

// Unwrap returns the underlying cause for this error.
func (e SearchError) Unwrap() error {
	return e.InnerError
}

87
vendor/github.com/couchbase/gocb/v2/error_timeout.go generated vendored Normal file
View File

@ -0,0 +1,87 @@
package gocb
import (
"encoding/json"
"time"
)
// TimeoutError wraps timeout errors that occur within the SDK.
// UNCOMMITTED: This API may change in the future.
type TimeoutError struct {
	InnerError         error         // underlying timeout cause; may be nil
	OperationID        string        // identifier of the operation that timed out
	Opaque             string        // opaque token of the request
	TimeObserved       time.Duration // how long the operation ran before timing out
	RetryReasons       []RetryReason // reasons the operation was retried
	RetryAttempts      uint32        // number of retries performed
	LastDispatchedTo   string        // remote address the request was last sent to
	LastDispatchedFrom string        // local address the request was last sent from
	LastConnectionID   string        // identifier of the connection last used
}
// timeoutError is the compact JSON wire form of TimeoutError, using
// abbreviated keys; see MarshalJSON/UnmarshalJSON for the mapping.
type timeoutError struct {
	InnerError         error    `json:"-"`
	OperationID        string   `json:"s,omitempty"`
	Opaque             string   `json:"i,omitempty"`
	TimeObserved       uint64   `json:"t,omitempty"` // microseconds
	RetryReasons       []string `json:"rr,omitempty"`
	RetryAttempts      uint32   `json:"ra,omitempty"`
	LastDispatchedTo   string   `json:"r,omitempty"`
	LastDispatchedFrom string   `json:"l,omitempty"`
	LastConnectionID   string   `json:"c,omitempty"`
}
// MarshalJSON implements the Marshaler interface, emitting the compact
// timeoutError wire form with TimeObserved expressed in microseconds and
// retry reasons flattened to their descriptions.
func (err *TimeoutError) MarshalJSON() ([]byte, error) {
	reasons := make([]string, 0, len(err.RetryReasons))
	for _, reason := range err.RetryReasons {
		reasons = append(reasons, reason.Description())
	}
	return json.Marshal(timeoutError{
		InnerError:         err.InnerError,
		OperationID:        err.OperationID,
		Opaque:             err.Opaque,
		TimeObserved:       uint64(err.TimeObserved / time.Microsecond),
		RetryReasons:       reasons,
		RetryAttempts:      err.RetryAttempts,
		LastDispatchedTo:   err.LastDispatchedTo,
		LastDispatchedFrom: err.LastDispatchedFrom,
		LastConnectionID:   err.LastConnectionID,
	})
}
// UnmarshalJSON implements the Unmarshaler interface, decoding the compact
// timeoutError wire form back into the receiver. TimeObserved is restored
// from microseconds; retry reasons cannot be reconstructed and are skipped.
func (err *TimeoutError) UnmarshalJSON(data []byte) error {
	var decoded *timeoutError
	if jsonErr := json.Unmarshal(data, &decoded); jsonErr != nil {
		return jsonErr
	}
	// Note that we cannot reasonably unmarshal the retry reasons
	err.OperationID = decoded.OperationID
	err.Opaque = decoded.Opaque
	err.TimeObserved = time.Duration(decoded.TimeObserved) * time.Microsecond
	err.RetryAttempts = decoded.RetryAttempts
	err.LastDispatchedTo = decoded.LastDispatchedTo
	err.LastDispatchedFrom = decoded.LastDispatchedFrom
	err.LastConnectionID = decoded.LastConnectionID
	return nil
}
// Error renders the serialized timeout details, prefixed with the inner
// error's text when one is present.
func (err TimeoutError) Error() string {
	details := serializeWrappedError(err)
	if err.InnerError == nil {
		return details
	}
	return err.InnerError.Error() + " | " + details
}

// Unwrap returns the underlying reason for the error
func (err TimeoutError) Unwrap() error {
	return err.InnerError
}

42
vendor/github.com/couchbase/gocb/v2/error_view.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
package gocb
import gocbcore "github.com/couchbase/gocbcore/v9"
// ViewErrorDesc represents a specific error returned from the views service.
type ViewErrorDesc struct {
	SourceNode string
	Message    string
}

// translateCoreViewErrorDesc converts core view error descriptors into
// their public SDK equivalents.
func translateCoreViewErrorDesc(descs []gocbcore.ViewQueryErrorDesc) []ViewErrorDesc {
	out := make([]ViewErrorDesc, len(descs))
	for i := range descs {
		out[i] = ViewErrorDesc{
			SourceNode: descs[i].SourceNode,
			Message:    descs[i].Message,
		}
	}
	return out
}
// ViewError is the error type of all view query errors.
// UNCOMMITTED: This API may change in the future.
type ViewError struct {
	InnerError         error           `json:"-"`
	DesignDocumentName string          `json:"design_document_name,omitempty"`
	ViewName           string          `json:"view_name,omitempty"`
	Errors             []ViewErrorDesc `json:"errors,omitempty"`
	Endpoint           string          `json:"endpoint,omitempty"`
	RetryReasons       []RetryReason   `json:"retry_reasons,omitempty"`
	RetryAttempts      uint32          `json:"retry_attempts,omitempty"`
}

// Error returns the string representation of this error.
func (e ViewError) Error() string {
	details := serializeWrappedError(e)
	return e.InnerError.Error() + " | " + details
}

// Unwrap returns the underlying cause for this error.
func (e ViewError) Unwrap() error {
	return e.InnerError
}

130
vendor/github.com/couchbase/gocb/v2/error_wrapping.go generated vendored Normal file
View File

@ -0,0 +1,130 @@
package gocb
import (
"encoding/json"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// serializeWrappedError renders err as its JSON form for inclusion in error
// strings. Marshal failures are logged and yield an empty string.
func serializeWrappedError(err error) string {
	data, marshalErr := json.Marshal(err)
	if marshalErr != nil {
		logErrorf("failed to serialize error to json: %s", marshalErr.Error())
	}
	return string(data)
}
// maybeEnhanceCoreErr maps errors surfaced by gocbcore onto their public
// SDK counterparts, copying across all contextual fields. Errors of any
// other type are returned untouched.
func maybeEnhanceCoreErr(err error) error {
	switch cerr := err.(type) {
	case *gocbcore.KeyValueError:
		return &KeyValueError{
			InnerError:         cerr.InnerError,
			StatusCode:         cerr.StatusCode,
			BucketName:         cerr.BucketName,
			ScopeName:          cerr.ScopeName,
			CollectionName:     cerr.CollectionName,
			CollectionID:       cerr.CollectionID,
			ErrorName:          cerr.ErrorName,
			ErrorDescription:   cerr.ErrorDescription,
			Opaque:             cerr.Opaque,
			Context:            cerr.Context,
			Ref:                cerr.Ref,
			RetryReasons:       translateCoreRetryReasons(cerr.RetryReasons),
			RetryAttempts:      cerr.RetryAttempts,
			LastDispatchedTo:   cerr.LastDispatchedTo,
			LastDispatchedFrom: cerr.LastDispatchedFrom,
			LastConnectionID:   cerr.LastConnectionID,
		}
	case *gocbcore.ViewError:
		return &ViewError{
			InnerError:         cerr.InnerError,
			DesignDocumentName: cerr.DesignDocumentName,
			ViewName:           cerr.ViewName,
			Errors:             translateCoreViewErrorDesc(cerr.Errors),
			Endpoint:           cerr.Endpoint,
			RetryReasons:       translateCoreRetryReasons(cerr.RetryReasons),
			RetryAttempts:      cerr.RetryAttempts,
		}
	case *gocbcore.N1QLError:
		return &QueryError{
			InnerError:      cerr.InnerError,
			Statement:       cerr.Statement,
			ClientContextID: cerr.ClientContextID,
			Errors:          translateCoreQueryErrorDesc(cerr.Errors),
			Endpoint:        cerr.Endpoint,
			RetryReasons:    translateCoreRetryReasons(cerr.RetryReasons),
			RetryAttempts:   cerr.RetryAttempts,
		}
	case *gocbcore.AnalyticsError:
		return &AnalyticsError{
			InnerError:      cerr.InnerError,
			Statement:       cerr.Statement,
			ClientContextID: cerr.ClientContextID,
			Errors:          translateCoreAnalyticsErrorDesc(cerr.Errors),
			Endpoint:        cerr.Endpoint,
			RetryReasons:    translateCoreRetryReasons(cerr.RetryReasons),
			RetryAttempts:   cerr.RetryAttempts,
		}
	case *gocbcore.SearchError:
		return &SearchError{
			InnerError:    cerr.InnerError,
			Query:         cerr.Query,
			Endpoint:      cerr.Endpoint,
			RetryReasons:  translateCoreRetryReasons(cerr.RetryReasons),
			RetryAttempts: cerr.RetryAttempts,
			ErrorText:     cerr.ErrorText,
			IndexName:     cerr.IndexName,
		}
	case *gocbcore.HTTPError:
		return &HTTPError{
			InnerError:    cerr.InnerError,
			UniqueID:      cerr.UniqueID,
			Endpoint:      cerr.Endpoint,
			RetryReasons:  translateCoreRetryReasons(cerr.RetryReasons),
			RetryAttempts: cerr.RetryAttempts,
		}
	case *gocbcore.TimeoutError:
		return &TimeoutError{
			InnerError:         cerr.InnerError,
			OperationID:        cerr.OperationID,
			Opaque:             cerr.Opaque,
			TimeObserved:       cerr.TimeObserved,
			RetryReasons:       translateCoreRetryReasons(cerr.RetryReasons),
			RetryAttempts:      cerr.RetryAttempts,
			LastDispatchedTo:   cerr.LastDispatchedTo,
			LastDispatchedFrom: cerr.LastDispatchedFrom,
			LastConnectionID:   cerr.LastConnectionID,
		}
	}
	return err
}
// maybeEnhanceKVErr upgrades core key-value errors to their SDK equivalents.
// The bucket/scope/collection/key parameters are currently unused but are
// kept so call sites carry the document's location for future use.
func maybeEnhanceKVErr(err error, bucketName, scopeName, collName, docKey string) error {
	return maybeEnhanceCoreErr(err)
}

// maybeEnhanceCollKVErr enhances a key-value error using the location of the
// affected collection.
func maybeEnhanceCollKVErr(err error, bucket kvProvider, coll *Collection, docKey string) error {
	// Fix: the scope and collection arguments were previously swapped —
	// coll.Name() was passed as scopeName and coll.ScopeName() as collName.
	// Harmless today because maybeEnhanceKVErr ignores them, but latent.
	return maybeEnhanceKVErr(err, coll.bucketName(), coll.ScopeName(), coll.Name(), docKey)
}
// maybeEnhanceViewError upgrades core view errors to their SDK equivalents.
func maybeEnhanceViewError(err error) error {
	return maybeEnhanceCoreErr(err)
}
// maybeEnhanceQueryError upgrades core query errors to their SDK equivalents.
func maybeEnhanceQueryError(err error) error {
	return maybeEnhanceCoreErr(err)
}
// maybeEnhanceAnalyticsError upgrades core analytics errors to their SDK equivalents.
func maybeEnhanceAnalyticsError(err error) error {
	return maybeEnhanceCoreErr(err)
}
// maybeEnhanceSearchError upgrades core search errors to their SDK equivalents.
func maybeEnhanceSearchError(err error) error {
	return maybeEnhanceCoreErr(err)
}

12
vendor/github.com/couchbase/gocb/v2/go.mod generated vendored Normal file
View File

@ -0,0 +1,12 @@
module github.com/couchbase/gocb/v2
require (
github.com/couchbase/gocbcore/v9 v9.0.4
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/google/uuid v1.1.1
github.com/pkg/errors v0.9.1
github.com/stretchr/objx v0.1.1 // indirect
github.com/stretchr/testify v1.5.1
)
go 1.13

24
vendor/github.com/couchbase/gocb/v2/go.sum generated vendored Normal file
View File

@ -0,0 +1,24 @@
github.com/couchbase/gocbcore/v9 v9.0.4 h1:VM7IiKoK25mq9CdFLLchJMzmHa5Grkn+94pQNaG3oc8=
github.com/couchbase/gocbcore/v9 v9.0.4/go.mod h1:jOSQeBSECyNvD7aS4lfuaw+pD5t6ciTOf8hrDP/4Nus=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

273
vendor/github.com/couchbase/gocb/v2/kvopmanager.go generated vendored Normal file
View File

@ -0,0 +1,273 @@
package gocb
import (
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
"github.com/couchbase/gocbcore/v9/memd"
"github.com/pkg/errors"
)
// kvOpManager accumulates the configuration for a single key-value
// operation (document key, encoded value, durability, retry strategy) and
// coordinates completion signaling between the dispatch path (Wait) and the
// operation callback (Resolve/Reject).
type kvOpManager struct {
	parent *Collection // collection the operation targets
	signal chan struct{} // receives one send when the op is resolved or rejected
	err error // first error recorded while building the op
	wasResolved bool // set once Resolve has been called
	mutationToken *MutationToken // token captured by Resolve, if any
	span requestSpan // trace span covering the operation
	documentID string // key of the target document
	transcoder Transcoder // encoder selected via SetTranscoder
	timeout time.Duration // explicit timeout; 0 means use collection defaults
	deadline time.Time // lazily computed absolute deadline (see Deadline)
	bytes []byte // encoded document value
	flags uint32 // transcoder flags accompanying bytes
	persistTo uint // observe-based persistence requirement
	replicateTo uint // observe-based replication requirement
	durabilityLevel DurabilityLevel // synchronous durability requirement
	retryStrategy *retryStrategyWrapper // wrapped retry strategy for dispatch
	cancelCh chan struct{} // external cancellation channel observed by Wait
}
// getTimeout returns the explicit per-op timeout when set, otherwise the
// collection's KV timeout — or its durable-KV timeout when the op carries
// an enhanced durability level or an observe-based persistence requirement.
func (m *kvOpManager) getTimeout() time.Duration {
	if m.timeout > 0 {
		return m.timeout
	}
	if m.durabilityLevel > DurabilityLevelMajority || m.persistTo > 0 {
		return m.parent.timeoutsConfig.KVDurableTimeout
	}
	return m.parent.timeoutsConfig.KVTimeout
}
// SetDocumentID records the key of the document this operation targets.
func (m *kvOpManager) SetDocumentID(id string) {
	m.documentID = id
}

// SetCancelCh attaches an external cancellation channel observed by Wait.
func (m *kvOpManager) SetCancelCh(cancelCh chan struct{}) {
	m.cancelCh = cancelCh
}

// SetTimeout overrides the operation timeout; zero keeps the collection default.
func (m *kvOpManager) SetTimeout(timeout time.Duration) {
	m.timeout = timeout
}

// SetTranscoder selects the transcoder used to encode the value, falling
// back to the collection's default when nil is supplied.
func (m *kvOpManager) SetTranscoder(transcoder Transcoder) {
	if transcoder != nil {
		m.transcoder = transcoder
		return
	}
	m.transcoder = m.parent.transcoder
}
// SetValue encodes val with the configured transcoder, storing the encoded
// bytes and flags. A prior error short-circuits; a missing transcoder or an
// encode failure is recorded in m.err. Encoding is traced as its own span.
func (m *kvOpManager) SetValue(val interface{}) {
	if m.err != nil {
		return
	}
	if m.transcoder == nil {
		m.err = errors.New("Expected a transcoder to be specified first")
		return
	}
	encSpan := m.parent.startKvOpTrace("encode", m.span)
	defer encSpan.Finish()
	encoded, encFlags, encErr := m.transcoder.Encode(val)
	if encErr != nil {
		m.err = encErr
		return
	}
	m.bytes = encoded
	m.flags = encFlags
}
// SetDuraOptions records the durability requirements for the operation.
// Observe-based durability (persistTo/replicateTo) requires mutation tokens
// and cannot be combined with a synchronous durability level; violations
// are recorded in m.err.
func (m *kvOpManager) SetDuraOptions(persistTo, replicateTo uint, level DurabilityLevel) {
	observeBased := persistTo != 0 || replicateTo != 0
	if observeBased {
		if !m.parent.useMutationTokens {
			m.err = makeInvalidArgumentsError("cannot use observe based durability without mutation tokens")
			return
		}
		if level > 0 {
			m.err = makeInvalidArgumentsError("cannot mix observe based durability and synchronous durability")
			return
		}
	}
	m.persistTo = persistTo
	m.replicateTo = replicateTo
	m.durabilityLevel = level
}
// SetRetryStrategy wraps and stores the supplied retry strategy, falling
// back to the collection's wrapped default when nil is supplied.
func (m *kvOpManager) SetRetryStrategy(retryStrategy RetryStrategy) {
	if retryStrategy == nil {
		m.retryStrategy = m.parent.retryStrategyWrapper
		return
	}
	m.retryStrategy = newRetryStrategyWrapper(retryStrategy)
}
// Finish closes the operation's trace span.
func (m *kvOpManager) Finish() {
	m.span.Finish()
}
// TraceSpan exposes the operation's trace span for nesting child spans.
func (m *kvOpManager) TraceSpan() requestSpan {
	return m.span
}
// DocumentID returns the target document key as bytes.
func (m *kvOpManager) DocumentID() []byte {
	return []byte(m.documentID)
}
// CollectionName returns the name of the target collection.
func (m *kvOpManager) CollectionName() string {
	return m.parent.name()
}
// ScopeName returns the name of the target scope.
func (m *kvOpManager) ScopeName() string {
	return m.parent.ScopeName()
}
// BucketName returns the name of the target bucket.
func (m *kvOpManager) BucketName() string {
	return m.parent.bucketName()
}
// ValueBytes returns the encoded document value produced by SetValue.
func (m *kvOpManager) ValueBytes() []byte {
	return m.bytes
}
// ValueFlags returns the transcoder flags produced when encoding the value.
func (m *kvOpManager) ValueFlags() uint32 {
	return m.flags
}
// Transcoder returns the transcoder selected for this operation.
func (m *kvOpManager) Transcoder() Transcoder {
	return m.transcoder
}
// DurabilityLevel converts the manager's durability level to its memd wire
// representation.
func (m *kvOpManager) DurabilityLevel() memd.DurabilityLevel {
	return memd.DurabilityLevel(m.durabilityLevel)
}
// DurabilityTimeout derives the durability timeout from the operation
// timeout (10/9 of it, i.e. the op timeout is 90% of the durability window).
func (m *kvOpManager) DurabilityTimeout() time.Duration {
	return m.getTimeout() * 10 / 9
}

// Deadline lazily computes and caches the absolute deadline from the
// operation timeout; subsequent calls return the cached value.
func (m *kvOpManager) Deadline() time.Time {
	if !m.deadline.IsZero() {
		return m.deadline
	}
	m.deadline = time.Now().Add(m.getTimeout())
	return m.deadline
}

// RetryStrategy returns the wrapped retry strategy for this operation.
func (m *kvOpManager) RetryStrategy() *retryStrategyWrapper {
	return m.retryStrategy
}
// CheckReadyForOp reports any error accumulated while building the op and
// verifies that a non-zero timeout has been resolved.
func (m *kvOpManager) CheckReadyForOp() error {
	if m.err != nil {
		return m.err
	}
	if timeout := m.getTimeout(); timeout == 0 {
		return errors.New("op manager had no timeout specified")
	}
	return nil
}

// NeedsObserve reports whether observe-based durability polling is required.
func (m *kvOpManager) NeedsObserve() bool {
	return m.persistTo > 0 || m.replicateTo > 0
}
// EnhanceErr upgrades err to its SDK equivalent using the op's collection
// and document key for context.
func (m *kvOpManager) EnhanceErr(err error) error {
	return maybeEnhanceCollKVErr(err, nil, m.parent, m.documentID)
}

// EnhanceMt converts a core mutation token into the SDK form, or returns
// nil when the token is empty (zero VbUUID).
func (m *kvOpManager) EnhanceMt(token gocbcore.MutationToken) *MutationToken {
	if token.VbUUID == 0 {
		return nil
	}
	return &MutationToken{
		token:      token,
		bucketName: m.BucketName(),
	}
}

// Reject signals completion without marking the operation resolved.
func (m *kvOpManager) Reject() {
	m.signal <- struct{}{}
}

// Resolve records the mutation token, marks the operation resolved, and
// signals completion.
func (m *kvOpManager) Resolve(token *MutationToken) {
	m.wasResolved = true
	m.mutationToken = token
	m.signal <- struct{}{}
}
// Wait blocks until the dispatched operation completes (Resolve/Reject
// sends on m.signal) or the external cancel channel fires, then performs
// observe-based durability polling when requested.
// A non-nil err (the dispatch error) is returned immediately.
func (m *kvOpManager) Wait(op gocbcore.PendingOp, err error) error {
	if err != nil {
		return err
	}
	if m.err != nil {
		// A builder step already failed; cancel the in-flight op.
		// NOTE(review): this relies on Cancel() causing the operation
		// callback to fire so that m.signal is still sent below — confirm.
		op.Cancel()
	}
	select {
	case <-m.signal:
		// Good to go
	case <-m.cancelCh:
		// External cancellation: cancel the op, then still wait for the
		// callback's signal so the op is fully settled before returning.
		op.Cancel()
		<-m.signal
	}
	if m.wasResolved && (m.persistTo > 0 || m.replicateTo > 0) {
		if m.mutationToken == nil {
			return errors.New("expected a mutation token")
		}
		// Poll via observe until the mutation meets the requested
		// persistence/replication levels or the deadline expires.
		return m.parent.waitForDurability(
			m.span,
			m.documentID,
			m.mutationToken.token,
			m.replicateTo,
			m.persistTo,
			m.Deadline(),
			m.cancelCh,
		)
	}
	return nil
}
// newKvOpManager creates a kvOpManager for a single operation against this
// collection, opening a trace span named after the operation. The signal
// channel is buffered so Resolve/Reject never block the op callback.
func (c *Collection) newKvOpManager(opName string, tracectx requestSpanContext) *kvOpManager {
	return &kvOpManager{
		parent: c,
		signal: make(chan struct{}, 1),
		span:   c.startKvOpTrace(opName, tracectx),
	}
}
// durationToExpiry converts a duration into a server expiry value in
// seconds. Zero means never-expire; sub-second durations are clamped to 1
// so they are not accidentally treated as never-expire.
func durationToExpiry(dura time.Duration) uint32 {
	switch {
	case dura == 0:
		// Zero indicates a document that never expires.
		return 0
	case dura < time.Second:
		// Round sub-second values up to one second.
		return 1
	default:
		return uint32(dura / time.Second)
	}
}

148
vendor/github.com/couchbase/gocb/v2/logging.go generated vendored Normal file
View File

@ -0,0 +1,148 @@
package gocb
import (
"fmt"
"log"
"strings"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// LogLevel specifies the severity of a log message.
type LogLevel gocbcore.LogLevel

// Various logging levels (or subsystems) which can categorize the message.
// Currently these are ordered in decreasing severity.
const (
	LogError        LogLevel = LogLevel(gocbcore.LogError)
	LogWarn         LogLevel = LogLevel(gocbcore.LogWarn)
	LogInfo         LogLevel = LogLevel(gocbcore.LogInfo)
	LogDebug        LogLevel = LogLevel(gocbcore.LogDebug)
	LogTrace        LogLevel = LogLevel(gocbcore.LogTrace)
	LogSched        LogLevel = LogLevel(gocbcore.LogSched)
	LogMaxVerbosity LogLevel = LogLevel(gocbcore.LogMaxVerbosity)
)

// LogRedactLevel specifies the degree with which to redact the logs.
type LogRedactLevel uint

const (
	// RedactNone indicates to perform no redactions.
	RedactNone LogRedactLevel = iota

	// RedactPartial indicates to redact all possible user-identifying information from logs.
	RedactPartial

	// RedactFull indicates to fully redact all possible identifying information from logs.
	RedactFull
)

// SetLogRedactionLevel specifies the level with which logs should be redacted.
// The level is recorded locally and also pushed down into gocbcore so that
// core-generated log output is redacted consistently.
func SetLogRedactionLevel(level LogRedactLevel) {
	globalLogRedactionLevel = level
	gocbcore.SetLogRedactionLevel(gocbcore.LogRedactLevel(level))
}

// Logger defines a logging interface. You can either use one of the default loggers
// (DefaultStdioLogger(), VerboseStdioLogger()) or implement your own.
type Logger interface {
	// Outputs logging information:
	// level is the verbosity level
	// offset is the position within the calling stack from which the message
	// originated. This is useful for contextual loggers which retrieve file/line
	// information.
	Log(level LogLevel, offset int, format string, v ...interface{}) error
}

var (
	// globalLogger receives all library log messages; nil disables logging.
	globalLogger Logger

	// globalLogRedactionLevel mirrors the redaction level pushed to gocbcore.
	globalLogRedactionLevel LogRedactLevel
)
// coreLogWrapper adapts a gocbcore.Logger to the public gocb Logger
// interface.
type coreLogWrapper struct {
	wrapped gocbcore.Logger
}

// Log forwards the message to the wrapped core logger, bumping the stack
// offset by 2 to account for the extra wrapper frames.
func (wrapper coreLogWrapper) Log(level LogLevel, offset int, format string, v ...interface{}) error {
	return wrapper.wrapped.Log(gocbcore.LogLevel(level), offset+2, format, v...)
}

// DefaultStdioLogger gets the default standard I/O logger.
//	gocb.SetLogger(gocb.DefaultStdioLogger())
func DefaultStdioLogger() Logger {
	return &coreLogWrapper{
		wrapped: gocbcore.DefaultStdioLogger(),
	}
}
// VerboseStdioLogger is a more verbose level of DefaultStdioLogger(). Messages
// pertaining to the scheduling of ordinary commands (and their responses) will
// also be emitted.
//	gocb.SetLogger(gocb.VerboseStdioLogger())
// It returns a *coreLogWrapper — matching DefaultStdioLogger — so that
// getCoreLogger's *coreLogWrapper type assertion can unwrap it back to the
// underlying gocbcore.Logger instead of double-wrapping it (which would also
// skew the caller-depth offset used for file/line reporting).
func VerboseStdioLogger() Logger {
	return &coreLogWrapper{
		wrapped: gocbcore.VerboseStdioLogger(),
	}
}
// coreLogger adapts a gocb Logger to the gocbcore.Logger interface (the
// inverse of coreLogWrapper).
type coreLogger struct {
	wrapped Logger
}

// Log forwards the message to the wrapped gocb logger, bumping the stack
// offset by 2 to account for the extra wrapper frames.
func (wrapper coreLogger) Log(level gocbcore.LogLevel, offset int, format string, v ...interface{}) error {
	return wrapper.wrapped.Log(LogLevel(level), offset+2, format, v...)
}

// getCoreLogger unwraps a *coreLogWrapper back to the underlying core logger
// where possible (avoiding a double wrap); any other Logger is wrapped for
// use by gocbcore.
// NOTE(review): the type assertion only matches the pointer form; a Logger
// stored as a coreLogWrapper value would be re-wrapped instead — confirm all
// constructors return the pointer form.
func getCoreLogger(logger Logger) gocbcore.Logger {
	typedLogger, isCoreLogger := logger.(*coreLogWrapper)
	if isCoreLogger {
		return typedLogger.wrapped
	}

	return &coreLogger{
		wrapped: logger,
	}
}

// SetLogger sets a logger to be used by the library. A logger can be obtained via
// the DefaultStdioLogger() or VerboseStdioLogger() functions. You can also implement
// your own logger using the Logger interface.
func SetLogger(logger Logger) {
	globalLogger = logger
	gocbcore.SetLogger(getCoreLogger(logger))
	// gocbcore.SetLogRedactionLevel(gocbcore.LogRedactLevel(globalLogRedactionLevel))
}
// logExf routes a formatted message to the globally registered logger, if
// any. offset is incremented so the logger can attribute the message to the
// caller of the logXxxf helper rather than to this function.
func logExf(level LogLevel, offset int, format string, v ...interface{}) {
	if globalLogger != nil {
		err := globalLogger.Log(level, offset+1, format, v...)
		if err != nil {
			// The logger itself failed; report via the stdlib logger so the
			// failure is not silently dropped.
			log.Printf("Logger error occurred (%s)\n", err)
		}
	}
}

// logInfof logs a formatted message at Info level.
func logInfof(format string, v ...interface{}) {
	logExf(LogInfo, 1, format, v...)
}

// logDebugf logs a formatted message at Debug level.
func logDebugf(format string, v ...interface{}) {
	logExf(LogDebug, 1, format, v...)
}

// logSchedf logs a formatted message at Sched (scheduling) level.
func logSchedf(format string, v ...interface{}) {
	logExf(LogSched, 1, format, v...)
}

// logWarnf logs a formatted message at Warn level.
func logWarnf(format string, v ...interface{}) {
	logExf(LogWarn, 1, format, v...)
}

// logErrorf logs a formatted message at Error level.
func logErrorf(format string, v ...interface{}) {
	logExf(LogError, 1, format, v...)
}
// reindentLog prefixes every line of message with indent, preserving the
// message's internal line breaks.
func reindentLog(indent, message string) string {
	indented := strings.ReplaceAll(message, "\n", "\n"+indent)
	return fmt.Sprintf("%s%s", indent, indented)
}

126
vendor/github.com/couchbase/gocb/v2/mgmt_http.go generated vendored Normal file
View File

@ -0,0 +1,126 @@
package gocb
import (
"io"
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// mgmtRequest describes a single HTTP request against one of the cluster
// management services.
type mgmtRequest struct {
	Service ServiceType
	Method  string
	Path    string
	Body    []byte
	Headers map[string]string
	// ContentType of Body.
	ContentType string
	// IsIdempotent marks the request as safe to retry.
	IsIdempotent bool
	UniqueID     string
	// Timeout for the request; zero falls back to the configured management
	// timeout.
	Timeout       time.Duration
	RetryStrategy RetryStrategy

	parentSpan requestSpanContext
}

// mgmtResponse is the trimmed-down response surfaced to management helpers.
// Body is returned unread; callers are responsible for closing it.
type mgmtResponse struct {
	Endpoint   string
	StatusCode uint32
	Body       io.ReadCloser
}

// mgmtProvider abstracts the component (cluster or bucket) able to execute
// management HTTP requests.
type mgmtProvider interface {
	executeMgmtRequest(req mgmtRequest) (*mgmtResponse, error)
}
// executeMgmtRequest dispatches a management HTTP request through the
// cluster's HTTP provider, applying the cluster-level management timeout and
// retry strategy defaults when the request does not specify its own.
func (c *Cluster) executeMgmtRequest(req mgmtRequest) (mgmtRespOut *mgmtResponse, errOut error) {
	timeout := req.Timeout
	if timeout == 0 {
		timeout = c.timeoutsConfig.ManagementTimeout
	}

	provider, err := c.getHTTPProvider()
	if err != nil {
		return nil, err
	}

	retryStrategy := c.retryStrategyWrapper
	if req.RetryStrategy != nil {
		retryStrategy = newRetryStrategyWrapper(req.RetryStrategy)
	}

	corereq := &gocbcore.HTTPRequest{
		Service:       gocbcore.ServiceType(req.Service),
		Method:        req.Method,
		Path:          req.Path,
		Body:          req.Body,
		Headers:       req.Headers,
		ContentType:   req.ContentType,
		IsIdempotent:  req.IsIdempotent,
		UniqueID:      req.UniqueID,
		Deadline:      time.Now().Add(timeout),
		RetryStrategy: retryStrategy,
		TraceContext:  req.parentSpan,
	}

	coreresp, err := provider.DoHTTPRequest(corereq)
	if err != nil {
		return nil, makeGenericHTTPError(err, corereq, coreresp)
	}

	// Body is passed through unread; the caller must close it (see
	// ensureBodyClosed).
	resp := &mgmtResponse{
		Endpoint:   coreresp.Endpoint,
		StatusCode: uint32(coreresp.StatusCode),
		Body:       coreresp.Body,
	}
	return resp, nil
}
// executeMgmtRequest dispatches a management HTTP request through the
// bucket's HTTP provider, applying the bucket-level management timeout and
// retry strategy defaults when the request does not specify its own.
// NOTE(review): unlike the Cluster variant, this does not propagate
// req.parentSpan as TraceContext on the core request — confirm whether that
// omission is intentional.
func (b *Bucket) executeMgmtRequest(req mgmtRequest) (mgmtRespOut *mgmtResponse, errOut error) {
	timeout := req.Timeout
	if timeout == 0 {
		timeout = b.timeoutsConfig.ManagementTimeout
	}

	provider, err := b.connectionManager.getHTTPProvider()
	if err != nil {
		return nil, err
	}

	retryStrategy := b.retryStrategyWrapper
	if req.RetryStrategy != nil {
		retryStrategy = newRetryStrategyWrapper(req.RetryStrategy)
	}

	corereq := &gocbcore.HTTPRequest{
		Service:       gocbcore.ServiceType(req.Service),
		Method:        req.Method,
		Path:          req.Path,
		Body:          req.Body,
		Headers:       req.Headers,
		ContentType:   req.ContentType,
		IsIdempotent:  req.IsIdempotent,
		UniqueID:      req.UniqueID,
		Deadline:      time.Now().Add(timeout),
		RetryStrategy: retryStrategy,
	}

	coreresp, err := provider.DoHTTPRequest(corereq)
	if err != nil {
		return nil, makeGenericHTTPError(err, corereq, coreresp)
	}

	// Body is passed through unread; the caller must close it (see
	// ensureBodyClosed).
	resp := &mgmtResponse{
		Endpoint:   coreresp.Endpoint,
		StatusCode: uint32(coreresp.StatusCode),
		Body:       coreresp.Body,
	}
	return resp, nil
}
// ensureBodyClosed closes an HTTP response body, logging any close failure
// at debug level instead of returning it.
func ensureBodyClosed(body io.ReadCloser) {
	err := body.Close()
	if err != nil {
		logDebugf("Failed to close socket: %v", err)
	}
}

230
vendor/github.com/couchbase/gocb/v2/providers.go generated vendored Normal file
View File

@ -0,0 +1,230 @@
package gocb
import (
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// httpProvider dispatches raw HTTP requests to cluster services.
type httpProvider interface {
	DoHTTPRequest(req *gocbcore.HTTPRequest) (*gocbcore.HTTPResponse, error)
}

// viewProvider executes map/reduce view queries.
type viewProvider interface {
	ViewQuery(opts gocbcore.ViewQueryOptions) (viewRowReader, error)
}

// queryProvider executes N1QL queries, ad-hoc or prepared.
type queryProvider interface {
	N1QLQuery(opts gocbcore.N1QLQueryOptions) (queryRowReader, error)
	PreparedN1QLQuery(opts gocbcore.N1QLQueryOptions) (queryRowReader, error)
}

// analyticsProvider executes analytics queries.
type analyticsProvider interface {
	AnalyticsQuery(opts gocbcore.AnalyticsQueryOptions) (analyticsRowReader, error)
}

// searchProvider executes full-text search queries.
type searchProvider interface {
	SearchQuery(opts gocbcore.SearchQueryOptions) (searchRowReader, error)
}

// waitUntilReadyProvider blocks until the target is ready or the deadline
// passes.
type waitUntilReadyProvider interface {
	WaitUntilReady(deadline time.Time, opts gocbcore.WaitUntilReadyOptions) error
}

// gocbcoreWaitUntilReadyProvider is the callback-based core API adapted by
// waitUntilReadyProviderWrapper.
type gocbcoreWaitUntilReadyProvider interface {
	WaitUntilReady(deadline time.Time, opts gocbcore.WaitUntilReadyOptions,
		cb gocbcore.WaitUntilReadyCallback) (gocbcore.PendingOp, error)
}

// diagnosticsProvider exposes synchronous diagnostics and ping operations.
type diagnosticsProvider interface {
	Diagnostics(opts gocbcore.DiagnosticsOptions) (*gocbcore.DiagnosticInfo, error)
	Ping(opts gocbcore.PingOptions) (*gocbcore.PingResult, error)
}

// gocbcoreDiagnosticsProvider is the callback-based core API adapted by
// diagnosticsProviderWrapper.
type gocbcoreDiagnosticsProvider interface {
	Diagnostics(opts gocbcore.DiagnosticsOptions) (*gocbcore.DiagnosticInfo, error)
	Ping(opts gocbcore.PingOptions, cb gocbcore.PingCallback) (gocbcore.PendingOp, error)
}
// waitUntilReadyProviderWrapper adapts the callback-based core
// wait-until-ready API to the synchronous waitUntilReadyProvider interface
// using an async op manager to block until the callback fires.
type waitUntilReadyProviderWrapper struct {
	provider gocbcoreWaitUntilReadyProvider
}

// WaitUntilReady blocks until the underlying provider reports readiness or
// the operation fails.
func (wpw *waitUntilReadyProviderWrapper) WaitUntilReady(deadline time.Time, opts gocbcore.WaitUntilReadyOptions) (errOut error) {
	opm := newAsyncOpManager()
	err := opm.Wait(wpw.provider.WaitUntilReady(deadline, opts, func(res *gocbcore.WaitUntilReadyResult, err error) {
		if err != nil {
			errOut = err
			opm.Reject()
			return
		}

		opm.Resolve()
	}))
	if err != nil {
		// Dispatch/wait failure overrides any callback-recorded error.
		errOut = err
	}

	return
}

// diagnosticsProviderWrapper adapts the callback-based core diagnostics API
// to the synchronous diagnosticsProvider interface.
type diagnosticsProviderWrapper struct {
	provider gocbcoreDiagnosticsProvider
}

// Diagnostics is already synchronous in the core API and is passed through.
func (dpw *diagnosticsProviderWrapper) Diagnostics(opts gocbcore.DiagnosticsOptions) (*gocbcore.DiagnosticInfo, error) {
	return dpw.provider.Diagnostics(opts)
}

// Ping blocks until the core ping completes and returns its result.
func (dpw *diagnosticsProviderWrapper) Ping(opts gocbcore.PingOptions) (pOut *gocbcore.PingResult, errOut error) {
	opm := newAsyncOpManager()
	err := opm.Wait(dpw.provider.Ping(opts, func(res *gocbcore.PingResult, err error) {
		if err != nil {
			errOut = err
			opm.Reject()
			return
		}

		pOut = res
		opm.Resolve()
	}))
	if err != nil {
		errOut = err
	}

	return
}

// httpProviderWrapper adapts the callback-based AgentGroup HTTP API to the
// synchronous httpProvider interface.
type httpProviderWrapper struct {
	provider *gocbcore.AgentGroup
}

// DoHTTPRequest blocks until the HTTP request completes and returns the
// response.
func (hpw *httpProviderWrapper) DoHTTPRequest(req *gocbcore.HTTPRequest) (respOut *gocbcore.HTTPResponse, errOut error) {
	opm := newAsyncOpManager()
	err := opm.Wait(hpw.provider.DoHTTPRequest(req, func(res *gocbcore.HTTPResponse, err error) {
		if err != nil {
			errOut = err
			opm.Reject()
			return
		}

		respOut = res
		opm.Resolve()
	}))
	if err != nil {
		errOut = err
	}

	return
}

// analyticsProviderWrapper adapts the callback-based AgentGroup analytics
// API to the synchronous analyticsProvider interface.
type analyticsProviderWrapper struct {
	provider *gocbcore.AgentGroup
}

// AnalyticsQuery blocks until the query's row reader is available.
func (apw *analyticsProviderWrapper) AnalyticsQuery(opts gocbcore.AnalyticsQueryOptions) (aOut analyticsRowReader, errOut error) {
	opm := newAsyncOpManager()
	err := opm.Wait(apw.provider.AnalyticsQuery(opts, func(reader *gocbcore.AnalyticsRowReader, err error) {
		if err != nil {
			errOut = err
			opm.Reject()
			return
		}

		aOut = reader
		opm.Resolve()
	}))
	if err != nil {
		errOut = err
	}

	return
}
// queryProviderWrapper adapts the callback-based N1QL query API exposed by a
// gocbcore.AgentGroup to the synchronous queryProvider interface.
type queryProviderWrapper struct {
	provider *gocbcore.AgentGroup
}

// N1QLQuery runs an ad-hoc N1QL query, blocking until the row reader is
// available or the operation fails.
func (qpw *queryProviderWrapper) N1QLQuery(opts gocbcore.N1QLQueryOptions) (queryRowReader, error) {
	var (
		rows  queryRowReader
		opErr error
	)

	opm := newAsyncOpManager()
	cb := func(reader *gocbcore.N1QLRowReader, err error) {
		if err != nil {
			opErr = err
			opm.Reject()
			return
		}
		rows = reader
		opm.Resolve()
	}

	if err := opm.Wait(qpw.provider.N1QLQuery(opts, cb)); err != nil {
		// Dispatch/wait failure overrides any callback-recorded error.
		opErr = err
	}

	return rows, opErr
}

// PreparedN1QLQuery runs a prepared (optimized) N1QL query, blocking until
// the row reader is available or the operation fails.
func (qpw *queryProviderWrapper) PreparedN1QLQuery(opts gocbcore.N1QLQueryOptions) (queryRowReader, error) {
	var (
		rows  queryRowReader
		opErr error
	)

	opm := newAsyncOpManager()
	cb := func(reader *gocbcore.N1QLRowReader, err error) {
		if err != nil {
			opErr = err
			opm.Reject()
			return
		}
		rows = reader
		opm.Resolve()
	}

	if err := opm.Wait(qpw.provider.PreparedN1QLQuery(opts, cb)); err != nil {
		opErr = err
	}

	return rows, opErr
}
// searchProviderWrapper adapts the callback-based AgentGroup search API to
// the synchronous searchProvider interface.
type searchProviderWrapper struct {
	provider *gocbcore.AgentGroup
}

// SearchQuery blocks until the search query's row reader is available.
func (apw *searchProviderWrapper) SearchQuery(opts gocbcore.SearchQueryOptions) (sOut searchRowReader, errOut error) {
	opm := newAsyncOpManager()
	err := opm.Wait(apw.provider.SearchQuery(opts, func(reader *gocbcore.SearchRowReader, err error) {
		if err != nil {
			errOut = err
			opm.Reject()
			return
		}

		sOut = reader
		opm.Resolve()
	}))
	if err != nil {
		// Dispatch/wait failure overrides any callback-recorded error.
		errOut = err
	}

	return
}

// viewProviderWrapper adapts the callback-based AgentGroup view API to the
// synchronous viewProvider interface.
type viewProviderWrapper struct {
	provider *gocbcore.AgentGroup
}

// ViewQuery blocks until the view query's row reader is available.
func (apw *viewProviderWrapper) ViewQuery(opts gocbcore.ViewQueryOptions) (vOut viewRowReader, errOut error) {
	opm := newAsyncOpManager()
	err := opm.Wait(apw.provider.ViewQuery(opts, func(reader *gocbcore.ViewQueryRowReader, err error) {
		if err != nil {
			errOut = err
			opm.Reject()
			return
		}

		vOut = reader
		opm.Resolve()
	}))
	if err != nil {
		errOut = err
	}

	return
}

144
vendor/github.com/couchbase/gocb/v2/query_options.go generated vendored Normal file
View File

@ -0,0 +1,144 @@
package gocb
import (
"strconv"
"strings"
"time"
"github.com/google/uuid"
)
// QueryScanConsistency indicates the level of data consistency desired for a query.
type QueryScanConsistency uint

const (
	// QueryScanConsistencyNotBounded indicates no data consistency is required.
	QueryScanConsistencyNotBounded QueryScanConsistency = iota + 1

	// QueryScanConsistencyRequestPlus indicates that request-level data consistency is required.
	QueryScanConsistencyRequestPlus
)

// QueryOptions represents the options available when executing a query.
type QueryOptions struct {
	// ScanConsistency and ConsistentWith are mutually exclusive.
	ScanConsistency QueryScanConsistency
	ConsistentWith  *MutationState
	Profile         QueryProfileMode

	// ScanCap is the maximum buffered channel size between the indexer connectionManager and the query service for index scans.
	ScanCap uint32

	// PipelineBatch controls the number of items execution operators can batch for Fetch from the KV.
	PipelineBatch uint32

	// PipelineCap controls the maximum number of items each execution operator can buffer between various operators.
	PipelineCap uint32

	// ScanWait is how long the indexer is allowed to wait until it can satisfy ScanConsistency/ConsistentWith criteria.
	ScanWait time.Duration

	Readonly bool

	// MaxParallelism is the maximum number of index partitions, for computing aggregation in parallel.
	MaxParallelism uint32

	// ClientContextID provides a unique ID for this query which can be used matching up requests between connectionManager and
	// server. If not provided will be assigned a uuid value.
	ClientContextID string

	// PositionalParameters and NamedParameters are mutually exclusive.
	PositionalParameters []interface{}
	NamedParameters      map[string]interface{}

	Metrics bool

	// Raw provides a way to provide extra parameters in the request body for the query.
	Raw map[string]interface{}

	Adhoc         bool
	Timeout       time.Duration
	RetryStrategy RetryStrategy

	parentSpan requestSpanContext
}
// toMap converts the QueryOptions into the key/value form submitted to the
// query service in the request body.
//
// It validates the mutually-exclusive option pairs (ScanConsistency vs
// ConsistentWith, positional vs named parameters) and returns an invalid
// argument error when both members of a pair are set.
func (opts *QueryOptions) toMap() (map[string]interface{}, error) {
	execOpts := make(map[string]interface{})

	if opts.ScanConsistency != 0 && opts.ConsistentWith != nil {
		return nil, makeInvalidArgumentsError("ScanConsistency and ConsistentWith must be used exclusively")
	}

	if opts.ScanConsistency != 0 {
		if opts.ScanConsistency == QueryScanConsistencyNotBounded {
			execOpts["scan_consistency"] = "not_bounded"
		} else if opts.ScanConsistency == QueryScanConsistencyRequestPlus {
			execOpts["scan_consistency"] = "request_plus"
		} else {
			return nil, makeInvalidArgumentsError("Unexpected consistency option")
		}
	}

	if opts.ConsistentWith != nil {
		execOpts["scan_consistency"] = "at_plus"
		execOpts["scan_vectors"] = opts.ConsistentWith
	}

	if opts.Profile != "" {
		execOpts["profile"] = opts.Profile
	}

	if opts.Readonly {
		execOpts["readonly"] = opts.Readonly
	}

	if opts.PositionalParameters != nil && opts.NamedParameters != nil {
		return nil, makeInvalidArgumentsError("Positional and named parameters must be used exclusively")
	}

	if opts.PositionalParameters != nil {
		execOpts["args"] = opts.PositionalParameters
	}

	if opts.NamedParameters != nil {
		for key, value := range opts.NamedParameters {
			// The query service expects named parameters to carry a "$" prefix.
			if !strings.HasPrefix(key, "$") {
				key = "$" + key
			}
			execOpts[key] = value
		}
	}

	if opts.ScanCap != 0 {
		execOpts["scan_cap"] = strconv.FormatUint(uint64(opts.ScanCap), 10)
	}

	if opts.PipelineBatch != 0 {
		execOpts["pipeline_batch"] = strconv.FormatUint(uint64(opts.PipelineBatch), 10)
	}

	if opts.PipelineCap != 0 {
		execOpts["pipeline_cap"] = strconv.FormatUint(uint64(opts.PipelineCap), 10)
	}

	if opts.ScanWait > 0 {
		execOpts["scan_wait"] = opts.ScanWait.String()
	}

	if opts.Raw != nil {
		// Raw values win over any of the options applied above.
		for k, v := range opts.Raw {
			execOpts[k] = v
		}
	}

	if opts.MaxParallelism > 0 {
		execOpts["max_parallelism"] = strconv.FormatUint(uint64(opts.MaxParallelism), 10)
	}

	if !opts.Metrics {
		execOpts["metrics"] = false
	}

	if opts.ClientContextID == "" {
		// Store the generated ID as a string (not a uuid.UUID) so it has the
		// same value type as a caller-supplied ClientContextID.
		execOpts["client_context_id"] = uuid.New().String()
	} else {
		execOpts["client_context_id"] = opts.ClientContextID
	}

	return execOpts, nil
}

350
vendor/github.com/couchbase/gocb/v2/results.go generated vendored Normal file
View File

@ -0,0 +1,350 @@
package gocb
import (
"encoding/json"
"time"
"github.com/pkg/errors"
)
// Result is the base type for the return types of operations
type Result struct {
	cas Cas
}

// Cas returns the cas of the result.
func (d *Result) Cas() Cas {
	return d.cas
}

// GetResult is the return type of Get operations.
type GetResult struct {
	Result
	transcoder Transcoder
	flags      uint32
	contents   []byte
	// expiry is only populated when the expiry was fetched with the document.
	expiry *time.Duration
}

// Content assigns the value of the result into the valuePtr using default decoding.
func (d *GetResult) Content(valuePtr interface{}) error {
	return d.transcoder.Decode(d.contents, d.flags, valuePtr)
}

// Expiry returns the expiry value for the result if it is available. Note that
// a nil pointer indicates that the expiry was not fetched, while a valid
// pointer to a zero Duration indicates that the document will never expire.
func (d *GetResult) Expiry() *time.Duration {
	return d.expiry
}
// fromFullProjection populates the result from a single full-document lookup,
// re-projecting only the requested top-level fields. An empty fields list
// means the whole document is kept as-is.
func (d *GetResult) fromFullProjection(ops []LookupInSpec, result *LookupInResult, fields []string) error {
	if len(fields) == 0 {
		// This is a special case where user specified a full doc fetch with expiration.
		d.contents = result.contents[0].data
		return nil
	}

	if len(result.contents) != 1 {
		return makeInvalidArgumentsError("fromFullProjection should only be called with 1 subdoc result")
	}

	resultContent := result.contents[0]
	if resultContent.err != nil {
		return resultContent.err
	}

	var content map[string]interface{}
	err := json.Unmarshal(resultContent.data, &content)
	if err != nil {
		return err
	}

	// Rebuild a document containing only the requested fields.
	newContent := make(map[string]interface{})
	for _, field := range fields {
		parts := d.pathParts(field)
		d.set(parts, newContent, content[field])
	}

	bytes, err := json.Marshal(newContent)
	if err != nil {
		return errors.Wrap(err, "could not marshal result contents")
	}
	d.contents = bytes

	return nil
}

// fromSubDoc populates the result from individual subdoc lookups, stitching
// each op's value back into a document at its original path.
func (d *GetResult) fromSubDoc(ops []LookupInSpec, result *LookupInResult) error {
	content := make(map[string]interface{})

	for i, op := range ops {
		err := result.contents[i].err
		if err != nil {
			// We return the first error that has occurred, this will be
			// a SubDocument error and will indicate the real reason.
			return err
		}

		parts := d.pathParts(op.path)
		d.set(parts, content, result.contents[i].data)
	}

	bytes, err := json.Marshal(content)
	if err != nil {
		return errors.Wrap(err, "could not marshal result contents")
	}
	d.contents = bytes

	return nil
}
// subdocPath is one element of a parsed subdocument path: the element name
// and whether it was followed by an array index ("[...]").
type subdocPath struct {
	path    string
	isArray bool
}

// pathParts splits a subdocument path string (e.g. "a.b[0].c") into its
// elements. Array index contents are discarded; the element is merely
// flagged as an array so set can rebuild the correct container shape.
func (d *GetResult) pathParts(pathStr string) []subdocPath {
	pathLen := len(pathStr)
	var elemIdx int // start index of the current element
	var i int
	var paths []subdocPath

	for i < pathLen {
		ch := pathStr[i]
		i++

		if ch == '[' {
			// opening of an array
			isArr := false
			arrayStart := i

			// Scan to the closing ']' (a '.' aborts array parsing).
			for i < pathLen {
				arrCh := pathStr[i]
				if arrCh == ']' {
					isArr = true
					i++
					break
				} else if arrCh == '.' {
					i++
					break
				}
				i++
			}

			if isArr {
				// The element name is everything before the '['.
				paths = append(paths, subdocPath{path: pathStr[elemIdx : arrayStart-1], isArray: true})
			} else {
				paths = append(paths, subdocPath{path: pathStr[elemIdx:i], isArray: false})
			}
			elemIdx = i

			// Skip a '.' separator directly following the ']'.
			if i < pathLen && pathStr[i] == '.' {
				i++
				elemIdx = i
			}
		} else if ch == '.' {
			paths = append(paths, subdocPath{path: pathStr[elemIdx : i-1]})
			elemIdx = i
		}
	}

	if elemIdx != i {
		// this should only ever be an object as an array would have ended in [...]
		paths = append(paths, subdocPath{path: pathStr[elemIdx:i]})
	}

	return paths
}
// set recursively writes value into content at the location described by
// paths, creating intermediate maps/arrays as needed, and returns the
// (possibly re-allocated) container. Used to rebuild a projected document
// from subdoc results.
func (d *GetResult) set(paths []subdocPath, content interface{}, value interface{}) interface{} {
	path := paths[0]
	if len(paths) == 1 {
		// Leaf element: attach the value directly.
		if path.isArray {
			arr := make([]interface{}, 0)
			arr = append(arr, value)

			if _, ok := content.(map[string]interface{}); ok {
				content.(map[string]interface{})[path.path] = arr
			} else if _, ok := content.([]interface{}); ok {
				content = append(content.([]interface{}), arr)
			} else {
				logErrorf("Projections encountered a non-array or object content assigning an array")
			}
		} else {
			if _, ok := content.([]interface{}); ok {
				elem := make(map[string]interface{})
				elem[path.path] = value
				content = append(content.([]interface{}), elem)
			} else {
				content.(map[string]interface{})[path.path] = value
			}
		}
		return content
	}

	// Interior element: create the child container, then descend into it.
	if path.isArray {
		if _, ok := content.([]interface{}); ok {
			var m []interface{}
			content = append(content.([]interface{}), d.set(paths[1:], m, value))
			return content
		} else if cMap, ok := content.(map[string]interface{}); ok {
			cMap[path.path] = make([]interface{}, 0)
			cMap[path.path] = d.set(paths[1:], cMap[path.path], value)
			return content
		} else {
			logErrorf("Projections encountered a non-array or object content assigning an array")
		}
	} else {
		if arr, ok := content.([]interface{}); ok {
			m := make(map[string]interface{})
			m[path.path] = make(map[string]interface{})
			content = append(arr, m)
			d.set(paths[1:], m[path.path], value)
			return content
		}
		cMap, ok := content.(map[string]interface{})
		if !ok {
			// this isn't possible but the linter won't play nice without it
			logErrorf("Failed to assert projection content to a map")
		}
		cMap[path.path] = make(map[string]interface{})
		return d.set(paths[1:], cMap[path.path], value)
	}

	return content
}
// LookupInResult is the return type for LookupIn.
type LookupInResult struct {
	Result
	contents []lookupInPartial
}

// lookupInPartial holds the raw JSON (or the error) returned for a single
// lookup-in spec.
type lookupInPartial struct {
	data json.RawMessage
	err  error
}

// as decodes the raw data into valuePtr. A nil valuePtr only reports the
// stored error; a *[]byte target receives the raw bytes without JSON
// decoding.
func (pr *lookupInPartial) as(valuePtr interface{}) error {
	if pr.err != nil {
		return pr.err
	}

	if valuePtr == nil {
		return nil
	}

	if valuePtr, ok := valuePtr.(*[]byte); ok {
		*valuePtr = pr.data
		return nil
	}

	return json.Unmarshal(pr.data, valuePtr)
}

// exists reports whether this spec produced a value (i.e. no error was
// recorded for it).
func (pr *lookupInPartial) exists() bool {
	err := pr.as(nil)
	return err == nil
}

// ContentAt retrieves the value of the operation by its index. The index is the position of
// the operation as it was added to the builder.
func (lir *LookupInResult) ContentAt(idx uint, valuePtr interface{}) error {
	// Bounds check so an out-of-range index yields an error, not a panic.
	if idx >= uint(len(lir.contents)) {
		return makeInvalidArgumentsError("invalid index")
	}
	return lir.contents[idx].as(valuePtr)
}

// Exists verifies that the item at idx exists.
func (lir *LookupInResult) Exists(idx uint) bool {
	if idx >= uint(len(lir.contents)) {
		return false
	}

	return lir.contents[idx].exists()
}

// ExistsResult is the return type of Exist operations.
type ExistsResult struct {
	Result
	docExists bool
}

// Exists returns whether or not the document exists.
func (d *ExistsResult) Exists() bool {
	return d.docExists
}
// MutationResult is the return type of any store related operations. It contains Cas and mutation tokens.
type MutationResult struct {
	Result
	// mt may be nil when mutation tokens are disabled or unavailable.
	mt *MutationToken
}

// MutationToken returns the mutation token belonging to an operation.
func (mr MutationResult) MutationToken() *MutationToken {
	return mr.mt
}
// MutateInResult is the return type of any mutate in related operations.
// It contains Cas, mutation tokens and any returned content.
type MutateInResult struct {
	MutationResult
	contents []mutateInPartial
}

// mutateInPartial holds the raw JSON returned for a single mutate-in spec.
type mutateInPartial struct {
	data json.RawMessage
}

// as decodes the raw data into valuePtr. A nil valuePtr is a no-op; a
// *[]byte target receives the raw bytes without JSON decoding.
func (pr *mutateInPartial) as(valuePtr interface{}) error {
	if valuePtr == nil {
		return nil
	}

	if valuePtr, ok := valuePtr.(*[]byte); ok {
		*valuePtr = pr.data
		return nil
	}

	return json.Unmarshal(pr.data, valuePtr)
}

// ContentAt retrieves the value of the operation by its index. The index is the position of
// the operation as it was added to the builder.
func (mir MutateInResult) ContentAt(idx uint, valuePtr interface{}) error {
	// Bounds-check the index so an out-of-range request returns an error
	// instead of panicking, matching LookupInResult.ContentAt.
	if idx >= uint(len(mir.contents)) {
		return makeInvalidArgumentsError("invalid index")
	}
	return mir.contents[idx].as(valuePtr)
}
// CounterResult is the return type of counter operations.
type CounterResult struct {
	MutationResult
	// content is the counter's value after the operation.
	content uint64
}

// MutationToken returns the mutation token belonging to an operation.
func (mr CounterResult) MutationToken() *MutationToken {
	return mr.mt
}

// Cas returns the Cas value for a document following an operation.
func (mr CounterResult) Cas() Cas {
	return mr.cas
}

// Content returns the new value for the counter document.
func (mr CounterResult) Content() uint64 {
	return mr.content
}

// GetReplicaResult is the return type of GetReplica operations.
type GetReplicaResult struct {
	GetResult
	isReplica bool
}

// IsReplica returns whether or not this result came from a replica server.
func (r *GetReplicaResult) IsReplica() bool {
	return r.isReplica
}

194
vendor/github.com/couchbase/gocb/v2/retry.go generated vendored Normal file
View File

@ -0,0 +1,194 @@
package gocb
import (
"time"
"github.com/couchbase/gocbcore/v9"
)
// translateCoreRetryReasons converts gocbcore retry reasons into their public
// gocb equivalents. Reasons that cannot be asserted to the public interface
// are logged and skipped rather than surfaced.
func translateCoreRetryReasons(reasons []gocbcore.RetryReason) []RetryReason {
	var reasonsOut []RetryReason
	for _, retryReason := range reasons {
		gocbReason, ok := retryReason.(RetryReason)
		if !ok {
			logErrorf("Failed to assert gocbcore retry reason to gocb retry reason: %v", retryReason)
			continue
		}
		reasonsOut = append(reasonsOut, gocbReason)
	}

	return reasonsOut
}

// RetryRequest is a request that can possibly be retried.
type RetryRequest interface {
	RetryAttempts() uint32
	Identifier() string
	Idempotent() bool
	RetryReasons() []RetryReason
}

// wrappedRetryRequest adapts a gocbcore.RetryRequest to the public
// RetryRequest interface.
type wrappedRetryRequest struct {
	req gocbcore.RetryRequest
}

// RetryAttempts returns the number of times the request has been retried.
func (req *wrappedRetryRequest) RetryAttempts() uint32 {
	return req.req.RetryAttempts()
}

// Identifier returns the unique identifier of the underlying request.
func (req *wrappedRetryRequest) Identifier() string {
	return req.req.Identifier()
}

// Idempotent reports whether the underlying request is idempotent.
func (req *wrappedRetryRequest) Idempotent() bool {
	return req.req.Idempotent()
}

// RetryReasons returns the translated reasons for which the request has been
// retried.
func (req *wrappedRetryRequest) RetryReasons() []RetryReason {
	return translateCoreRetryReasons(req.req.RetryReasons())
}
// RetryReason represents the reason for an operation possibly being retried.
type RetryReason interface {
	AllowsNonIdempotentRetry() bool
	AlwaysRetry() bool
	Description() string
}

// The following sentinel values re-export gocbcore's retry reasons under the
// public gocb API.
var (
	// UnknownRetryReason indicates that the operation failed for an unknown reason.
	UnknownRetryReason = RetryReason(gocbcore.UnknownRetryReason)

	// SocketNotAvailableRetryReason indicates that the operation failed because the underlying socket was not available.
	SocketNotAvailableRetryReason = RetryReason(gocbcore.SocketNotAvailableRetryReason)

	// ServiceNotAvailableRetryReason indicates that the operation failed because the requested service was not available.
	ServiceNotAvailableRetryReason = RetryReason(gocbcore.ServiceNotAvailableRetryReason)

	// NodeNotAvailableRetryReason indicates that the operation failed because the requested node was not available.
	NodeNotAvailableRetryReason = RetryReason(gocbcore.NodeNotAvailableRetryReason)

	// KVNotMyVBucketRetryReason indicates that the operation failed because it was sent to the wrong node for the vbucket.
	KVNotMyVBucketRetryReason = RetryReason(gocbcore.KVNotMyVBucketRetryReason)

	// KVCollectionOutdatedRetryReason indicates that the operation failed because the collection ID on the request is outdated.
	KVCollectionOutdatedRetryReason = RetryReason(gocbcore.KVCollectionOutdatedRetryReason)

	// KVErrMapRetryReason indicates that the operation failed for an unsupported reason but the KV error map indicated
	// that the operation can be retried.
	KVErrMapRetryReason = RetryReason(gocbcore.KVErrMapRetryReason)

	// KVLockedRetryReason indicates that the operation failed because the document was locked.
	KVLockedRetryReason = RetryReason(gocbcore.KVLockedRetryReason)

	// KVTemporaryFailureRetryReason indicates that the operation failed because of a temporary failure.
	KVTemporaryFailureRetryReason = RetryReason(gocbcore.KVTemporaryFailureRetryReason)

	// KVSyncWriteInProgressRetryReason indicates that the operation failed because a sync write is in progress.
	KVSyncWriteInProgressRetryReason = RetryReason(gocbcore.KVSyncWriteInProgressRetryReason)

	// KVSyncWriteRecommitInProgressRetryReason indicates that the operation failed because a sync write recommit is in progress.
	KVSyncWriteRecommitInProgressRetryReason = RetryReason(gocbcore.KVSyncWriteRecommitInProgressRetryReason)

	// ServiceResponseCodeIndicatedRetryReason indicates that the operation failed and the service responded stating that
	// the request should be retried.
	ServiceResponseCodeIndicatedRetryReason = RetryReason(gocbcore.ServiceResponseCodeIndicatedRetryReason)

	// SocketCloseInFlightRetryReason indicates that the operation failed because the socket was closed whilst the operation
	// was in flight.
	SocketCloseInFlightRetryReason = RetryReason(gocbcore.SocketCloseInFlightRetryReason)

	// CircuitBreakerOpenRetryReason indicates that the operation failed because the circuit breaker on the connection
	// was open.
	CircuitBreakerOpenRetryReason = RetryReason(gocbcore.CircuitBreakerOpenRetryReason)

	// QueryIndexNotFoundRetryReason indicates that the operation failed due to a missing query index.
	QueryIndexNotFoundRetryReason = RetryReason(gocbcore.QueryIndexNotFoundRetryReason)

	// QueryPreparedStatementFailureRetryReason indicates that the operation failed due to a prepared statement failure.
	QueryPreparedStatementFailureRetryReason = RetryReason(gocbcore.QueryPreparedStatementFailureRetryReason)

	// AnalyticsTemporaryFailureRetryReason indicates that an analytics operation failed due to a temporary failure.
	AnalyticsTemporaryFailureRetryReason = RetryReason(gocbcore.AnalyticsTemporaryFailureRetryReason)

	// SearchTooManyRequestsRetryReason indicates that a search operation failed due to too many requests.
	SearchTooManyRequestsRetryReason = RetryReason(gocbcore.SearchTooManyRequestsRetryReason)
)
// RetryAction is used by a RetryStrategy to calculate the duration to wait before retrying an operation.
// Returning a value of 0 indicates to not retry.
type RetryAction interface {
Duration() time.Duration
}
// NoRetryRetryAction represents an action that indicates to not retry.
type NoRetryRetryAction struct {
}
// Duration is the length of time to wait before retrying an operation.
func (ra *NoRetryRetryAction) Duration() time.Duration {
return 0
}
// WithDurationRetryAction represents an action that indicates to retry with a given duration.
type WithDurationRetryAction struct {
WithDuration time.Duration
}
// Duration is the length of time to wait before retrying an operation.
func (ra *WithDurationRetryAction) Duration() time.Duration {
return ra.WithDuration
}
// RetryStrategy is to determine if an operation should be retried, and if so how long to wait before retrying.
type RetryStrategy interface {
	RetryAfter(req RetryRequest, reason RetryReason) RetryAction
}

// newRetryStrategyWrapper wraps a public RetryStrategy so it can be handed
// to gocbcore, which expects its own RetryStrategy interface.
func newRetryStrategyWrapper(strategy RetryStrategy) *retryStrategyWrapper {
	return &retryStrategyWrapper{
		wrapped: strategy,
	}
}

// retryStrategyWrapper adapts a gocb RetryStrategy to gocbcore's interface.
type retryStrategyWrapper struct {
	wrapped RetryStrategy
}

// RetryAfter calculates and returns a RetryAction describing how long to wait before retrying an operation.
func (rs *retryStrategyWrapper) RetryAfter(req gocbcore.RetryRequest, reason gocbcore.RetryReason) gocbcore.RetryAction {
	wreq := &wrappedRetryRequest{
		req: req,
	}
	wrappedAction := rs.wrapped.RetryAfter(wreq, RetryReason(reason))
	return gocbcore.RetryAction(wrappedAction)
}
// BackoffCalculator defines how backoff durations will be calculated by the retry API.
type BackoffCalculator func(retryAttempts uint32) time.Duration

// BestEffortRetryStrategy represents a strategy that will keep retrying until it succeeds (or the caller times out
// the request).
type BestEffortRetryStrategy struct {
	BackoffCalculator BackoffCalculator
}

// NewBestEffortRetryStrategy returns a new BestEffortRetryStrategy which will use the supplied calculator function
// to calculate retry durations. If calculator is nil then a controlled backoff will be used.
func NewBestEffortRetryStrategy(calculator BackoffCalculator) *BestEffortRetryStrategy {
	if calculator == nil {
		// Default: exponential backoff starting at 1ms, capped at 500ms,
		// with a growth factor of 2.
		calculator = BackoffCalculator(gocbcore.ExponentialBackoff(1*time.Millisecond, 500*time.Millisecond, 2))
	}

	return &BestEffortRetryStrategy{BackoffCalculator: calculator}
}

// RetryAfter calculates and returns a RetryAction describing how long to wait before retrying an operation.
// Non-idempotent requests are only retried when the reason permits it.
func (rs *BestEffortRetryStrategy) RetryAfter(req RetryRequest, reason RetryReason) RetryAction {
	if req.Idempotent() || reason.AllowsNonIdempotentRetry() {
		return &WithDurationRetryAction{WithDuration: rs.BackoffCalculator(req.RetryAttempts())}
	}

	return &NoRetryRetryAction{}
}

55
vendor/github.com/couchbase/gocb/v2/scope.go generated vendored Normal file
View File

@ -0,0 +1,55 @@
package gocb
// Scope represents a single scope within a bucket.
// VOLATILE: This API is subject to change at any time.
type Scope struct {
	scopeName string
	bucket    *Bucket

	timeoutsConfig kvTimeoutsConfig

	transcoder           Transcoder
	retryStrategyWrapper *retryStrategyWrapper
	tracer               requestTracer

	useMutationTokens bool

	getKvProvider func() (kvProvider, error)
}

// newScope creates a scope handle that inherits its configuration (timeouts,
// transcoder, retry strategy, tracer, mutation-token behaviour and KV
// provider) from the owning bucket.
func newScope(bucket *Bucket, scopeName string) *Scope {
	scope := &Scope{
		scopeName: scopeName,
		bucket:    bucket,
	}
	scope.timeoutsConfig = kvTimeoutsConfig{
		KVTimeout:        bucket.timeoutsConfig.KVTimeout,
		KVDurableTimeout: bucket.timeoutsConfig.KVDurableTimeout,
	}
	scope.transcoder = bucket.transcoder
	scope.retryStrategyWrapper = bucket.retryStrategyWrapper
	scope.tracer = bucket.tracer
	scope.useMutationTokens = bucket.useMutationTokens
	scope.getKvProvider = bucket.getKvProvider
	return scope
}

// Name returns the name of the scope.
func (s *Scope) Name() string {
	return s.scopeName
}

// BucketName returns the name of the bucket to which this scope belongs.
// UNCOMMITTED: This API may change in the future.
func (s *Scope) BucketName() string {
	return s.bucket.Name()
}

// Collection returns an instance of a collection within this scope.
// VOLATILE: This API is subject to change at any time.
func (s *Scope) Collection(collectionName string) *Collection {
	return newCollection(s, collectionName)
}

110
vendor/github.com/couchbase/gocb/v2/search/facets.go generated vendored Normal file
View File

@ -0,0 +1,110 @@
package search
import (
"encoding/json"
)
// Facet represents a facet for a search query.
// Implementations in this package (TermFacet, NumericFacet, DateFacet)
// marshal themselves into the JSON payload expected by the search REST API.
type Facet interface {
}
// termFacetData is the JSON payload backing a TermFacet.
type termFacetData struct {
	Field string `json:"field,omitempty"`
	Size  uint64 `json:"size,omitempty"`
}

// TermFacet is a search term facet.
type TermFacet struct {
	data termFacetData
}

// MarshalJSON marshals this facet to JSON for the search REST API.
func (f TermFacet) MarshalJSON() ([]byte, error) {
	return json.Marshal(f.data)
}

// NewTermFacet creates a new TermFacet over field, returning at most size terms.
func NewTermFacet(field string, size uint64) *TermFacet {
	facet := TermFacet{
		data: termFacetData{
			Field: field,
			Size:  size,
		},
	}
	return &facet
}
// numericFacetRange is one named numeric bucket within a NumericFacet.
type numericFacetRange struct {
	Name  string  `json:"name,omitempty"`
	Start float64 `json:"start,omitempty"`
	End   float64 `json:"end,omitempty"`
}

// numericFacetData is the JSON payload backing a NumericFacet.
type numericFacetData struct {
	Field         string              `json:"field,omitempty"`
	Size          uint64              `json:"size,omitempty"`
	NumericRanges []numericFacetRange `json:"numeric_ranges,omitempty"`
}

// NumericFacet is a search numeric range facet.
type NumericFacet struct {
	data numericFacetData
}

// MarshalJSON marshals this facet to JSON for the search REST API.
func (f NumericFacet) MarshalJSON() ([]byte, error) {
	return json.Marshal(f.data)
}

// AddRange adds a new named range to this numeric range facet and returns the
// facet to allow chaining.
func (f *NumericFacet) AddRange(name string, start, end float64) *NumericFacet {
	r := numericFacetRange{
		Name:  name,
		Start: start,
		End:   end,
	}
	f.data.NumericRanges = append(f.data.NumericRanges, r)
	return f
}

// NewNumericFacet creates a new numeric range facet over field, returning at most size entries.
func NewNumericFacet(field string, size uint64) *NumericFacet {
	facet := NumericFacet{
		data: numericFacetData{
			Field: field,
			Size:  size,
		},
	}
	return &facet
}
// dateFacetRange is one named date bucket within a DateFacet.
type dateFacetRange struct {
	Name  string `json:"name,omitempty"`
	Start string `json:"start,omitempty"`
	End   string `json:"end,omitempty"`
}

// dateFacetData is the JSON payload backing a DateFacet.
type dateFacetData struct {
	Field      string           `json:"field,omitempty"`
	Size       uint64           `json:"size,omitempty"`
	DateRanges []dateFacetRange `json:"date_ranges,omitempty"`
}

// DateFacet is a search date range facet.
type DateFacet struct {
	data dateFacetData
}

// MarshalJSON marshals this facet to JSON for the search REST API.
func (f DateFacet) MarshalJSON() ([]byte, error) {
	return json.Marshal(f.data)
}

// AddRange adds a new named range to this date range facet and returns the
// facet to allow chaining.
func (f *DateFacet) AddRange(name string, start, end string) *DateFacet {
	r := dateFacetRange{
		Name:  name,
		Start: start,
		End:   end,
	}
	f.data.DateRanges = append(f.data.DateRanges, r)
	return f
}

// NewDateFacet creates a new date range facet over field, returning at most size entries.
func NewDateFacet(field string, size uint64) *DateFacet {
	facet := DateFacet{
		data: dateFacetData{
			Field: field,
			Size:  size,
		},
	}
	return &facet
}

620
vendor/github.com/couchbase/gocb/v2/search/queries.go generated vendored Normal file
View File

@ -0,0 +1,620 @@
package search
import "encoding/json"
// Query represents a search query.
// Implementations in this package marshal themselves into the JSON body
// expected by the search REST API.
type Query interface {
}
// searchQueryBase is the shared option store embedded by every query type in
// this package; options are collected in a generic map and marshaled directly.
type searchQueryBase struct {
	options map[string]interface{}
}

func newSearchQueryBase() searchQueryBase {
	base := searchQueryBase{}
	base.options = map[string]interface{}{}
	return base
}

// MarshalJSON marshals this query to JSON for the search REST API.
func (q searchQueryBase) MarshalJSON() ([]byte, error) {
	return json.Marshal(q.options)
}
// MatchQuery represents a search match query.
type MatchQuery struct {
	searchQueryBase
}

// NewMatchQuery creates a new MatchQuery for the given match term.
func NewMatchQuery(match string) *MatchQuery {
	query := MatchQuery{newSearchQueryBase()}
	query.options["match"] = match
	return &query
}

// set stores one option and returns the receiver to allow call chaining.
func (q *MatchQuery) set(key string, value interface{}) *MatchQuery {
	q.options[key] = value
	return q
}

// Field specifies the field for this query.
func (q *MatchQuery) Field(field string) *MatchQuery {
	return q.set("field", field)
}

// Analyzer specifies the analyzer to use for this query.
func (q *MatchQuery) Analyzer(analyzer string) *MatchQuery {
	return q.set("analyzer", analyzer)
}

// PrefixLength specifies the prefix length for this query.
func (q *MatchQuery) PrefixLength(length uint64) *MatchQuery {
	return q.set("prefix_length", length)
}

// Fuzziness specifies the fuzziness for this query.
func (q *MatchQuery) Fuzziness(fuzziness uint64) *MatchQuery {
	return q.set("fuzziness", fuzziness)
}

// Boost specifies the boost for this query.
func (q *MatchQuery) Boost(boost float32) *MatchQuery {
	return q.set("boost", boost)
}
// MatchPhraseQuery represents a search match phrase query.
type MatchPhraseQuery struct {
	searchQueryBase
}

// NewMatchPhraseQuery creates a new MatchPhraseQuery for the given phrase.
func NewMatchPhraseQuery(phrase string) *MatchPhraseQuery {
	query := MatchPhraseQuery{newSearchQueryBase()}
	query.options["match_phrase"] = phrase
	return &query
}

// set stores one option and returns the receiver to allow call chaining.
func (q *MatchPhraseQuery) set(key string, value interface{}) *MatchPhraseQuery {
	q.options[key] = value
	return q
}

// Field specifies the field for this query.
func (q *MatchPhraseQuery) Field(field string) *MatchPhraseQuery {
	return q.set("field", field)
}

// Analyzer specifies the analyzer to use for this query.
func (q *MatchPhraseQuery) Analyzer(analyzer string) *MatchPhraseQuery {
	return q.set("analyzer", analyzer)
}

// Boost specifies the boost for this query.
func (q *MatchPhraseQuery) Boost(boost float32) *MatchPhraseQuery {
	return q.set("boost", boost)
}
// RegexpQuery represents a search regular expression query.
type RegexpQuery struct {
	searchQueryBase
}

// NewRegexpQuery creates a new RegexpQuery for the given regular expression.
func NewRegexpQuery(regexp string) *RegexpQuery {
	query := RegexpQuery{newSearchQueryBase()}
	query.options["regexp"] = regexp
	return &query
}

// Field specifies the field for this query.
func (q *RegexpQuery) Field(field string) *RegexpQuery {
	q.options["field"] = field
	return q
}

// Boost specifies the boost for this query.
func (q *RegexpQuery) Boost(boost float32) *RegexpQuery {
	q.options["boost"] = boost
	return q
}
// QueryStringQuery represents a search string query.
type QueryStringQuery struct {
	searchQueryBase
}

// NewQueryStringQuery creates a new QueryStringQuery for the given query string.
func NewQueryStringQuery(query string) *QueryStringQuery {
	qsq := QueryStringQuery{newSearchQueryBase()}
	qsq.options["query"] = query
	return &qsq
}

// Boost specifies the boost for this query.
func (q *QueryStringQuery) Boost(boost float32) *QueryStringQuery {
	q.options["boost"] = boost
	return q
}
// NumericRangeQuery represents a search numeric range query.
type NumericRangeQuery struct {
	searchQueryBase
}

// NewNumericRangeQuery creates a new NumericRangeQuery with no bounds set.
func NewNumericRangeQuery() *NumericRangeQuery {
	query := NumericRangeQuery{newSearchQueryBase()}
	return &query
}

// set stores one option and returns the receiver to allow call chaining.
func (q *NumericRangeQuery) set(key string, value interface{}) *NumericRangeQuery {
	q.options[key] = value
	return q
}

// Min specifies the minimum value and inclusiveness for this range query.
func (q *NumericRangeQuery) Min(min float32, inclusive bool) *NumericRangeQuery {
	return q.set("min", min).set("inclusive_min", inclusive)
}

// Max specifies the maximum value and inclusiveness for this range query.
func (q *NumericRangeQuery) Max(max float32, inclusive bool) *NumericRangeQuery {
	return q.set("max", max).set("inclusive_max", inclusive)
}

// Field specifies the field for this query.
func (q *NumericRangeQuery) Field(field string) *NumericRangeQuery {
	return q.set("field", field)
}

// Boost specifies the boost for this query.
func (q *NumericRangeQuery) Boost(boost float32) *NumericRangeQuery {
	return q.set("boost", boost)
}
// DateRangeQuery represents a search date range query.
type DateRangeQuery struct {
	searchQueryBase
}

// NewDateRangeQuery creates a new DateRangeQuery with no bounds set.
func NewDateRangeQuery() *DateRangeQuery {
	query := DateRangeQuery{newSearchQueryBase()}
	return &query
}

// set stores one option and returns the receiver to allow call chaining.
func (q *DateRangeQuery) set(key string, value interface{}) *DateRangeQuery {
	q.options[key] = value
	return q
}

// Start specifies the start value and inclusiveness for this range query.
func (q *DateRangeQuery) Start(start string, inclusive bool) *DateRangeQuery {
	return q.set("start", start).set("inclusive_start", inclusive)
}

// End specifies the end value and inclusiveness for this range query.
func (q *DateRangeQuery) End(end string, inclusive bool) *DateRangeQuery {
	return q.set("end", end).set("inclusive_end", inclusive)
}

// DateTimeParser specifies which date time string parser to use.
func (q *DateRangeQuery) DateTimeParser(parser string) *DateRangeQuery {
	return q.set("datetime_parser", parser)
}

// Field specifies the field for this query.
func (q *DateRangeQuery) Field(field string) *DateRangeQuery {
	return q.set("field", field)
}

// Boost specifies the boost for this query.
func (q *DateRangeQuery) Boost(boost float32) *DateRangeQuery {
	return q.set("boost", boost)
}
// ConjunctionQuery represents a search conjunction (AND) query.
type ConjunctionQuery struct {
	searchQueryBase
}

// NewConjunctionQuery creates a new ConjunctionQuery over the given queries.
func NewConjunctionQuery(queries ...Query) *ConjunctionQuery {
	query := ConjunctionQuery{newSearchQueryBase()}
	query.options["conjuncts"] = []Query{}
	return query.And(queries...)
}

// And adds new predicate queries to this conjunction query.
func (q *ConjunctionQuery) And(queries ...Query) *ConjunctionQuery {
	existing := q.options["conjuncts"].([]Query)
	q.options["conjuncts"] = append(existing, queries...)
	return q
}

// Boost specifies the boost for this query.
func (q *ConjunctionQuery) Boost(boost float32) *ConjunctionQuery {
	q.options["boost"] = boost
	return q
}
// DisjunctionQuery represents a search disjunction (OR) query.
type DisjunctionQuery struct {
	searchQueryBase
}

// NewDisjunctionQuery creates a new DisjunctionQuery over the given queries.
func NewDisjunctionQuery(queries ...Query) *DisjunctionQuery {
	query := DisjunctionQuery{newSearchQueryBase()}
	query.options["disjuncts"] = []Query{}
	return query.Or(queries...)
}

// Or adds new predicate queries to this disjunction query.
func (q *DisjunctionQuery) Or(queries ...Query) *DisjunctionQuery {
	existing := q.options["disjuncts"].([]Query)
	q.options["disjuncts"] = append(existing, queries...)
	return q
}

// Boost specifies the boost for this query.
func (q *DisjunctionQuery) Boost(boost float32) *DisjunctionQuery {
	q.options["boost"] = boost
	return q
}
// booleanQueryData is the JSON payload backing a BooleanQuery.
type booleanQueryData struct {
	Must    *ConjunctionQuery `json:"must,omitempty"`
	Should  *DisjunctionQuery `json:"should,omitempty"`
	MustNot *DisjunctionQuery `json:"must_not,omitempty"`
	Boost   float32           `json:"boost,omitempty"`
}

// BooleanQuery represents a search boolean query.
type BooleanQuery struct {
	data booleanQueryData
	// shouldMin is only written into the "should" disjunction's options
	// while marshaling; see MarshalJSON.
	shouldMin uint32
}

// NewBooleanQuery creates a new BooleanQuery.
func NewBooleanQuery() *BooleanQuery {
	q := &BooleanQuery{}
	return q
}

// Must specifies a query which must match.
// A ConjunctionQuery is used directly (a value is copied and its address
// stored); any other Query is wrapped in a new single-element conjunction.
func (q *BooleanQuery) Must(query Query) *BooleanQuery {
	switch val := query.(type) {
	case ConjunctionQuery:
		q.data.Must = &val
	case *ConjunctionQuery:
		q.data.Must = val
	default:
		q.data.Must = NewConjunctionQuery(val)
	}
	return q
}

// Should specifies a query which should match.
// A DisjunctionQuery is used directly; any other Query is wrapped in a new
// single-element disjunction.
func (q *BooleanQuery) Should(query Query) *BooleanQuery {
	switch val := query.(type) {
	case DisjunctionQuery:
		q.data.Should = &val
	case *DisjunctionQuery:
		q.data.Should = val
	default:
		q.data.Should = NewDisjunctionQuery(val)
	}
	return q
}

// MustNot specifies a query which must not match.
// A DisjunctionQuery is used directly; any other Query is wrapped in a new
// single-element disjunction.
func (q *BooleanQuery) MustNot(query Query) *BooleanQuery {
	switch val := query.(type) {
	case DisjunctionQuery:
		q.data.MustNot = &val
	case *DisjunctionQuery:
		q.data.MustNot = val
	default:
		q.data.MustNot = NewDisjunctionQuery(val)
	}
	return q
}

// ShouldMin specifies the minimum value before the should query will boost.
func (q *BooleanQuery) ShouldMin(min uint32) *BooleanQuery {
	q.shouldMin = min
	return q
}

// Boost specifies the boost for this query.
func (q *BooleanQuery) Boost(boost float32) *BooleanQuery {
	q.data.Boost = boost
	return q
}

// MarshalJSON marshals this query to JSON for the search REST API.
// The shouldMin value is temporarily injected into the "should" disjunction's
// option map and removed again after marshaling; marshaling the same
// BooleanQuery from multiple goroutines concurrently is therefore not safe.
func (q *BooleanQuery) MarshalJSON() ([]byte, error) {
	if q.data.Should != nil {
		q.data.Should.options["min"] = q.shouldMin
	}
	bytes, err := json.Marshal(q.data)
	if q.data.Should != nil {
		delete(q.data.Should.options, "min")
	}
	return bytes, err
}
// WildcardQuery represents a search wildcard query.
type WildcardQuery struct {
	searchQueryBase
}

// NewWildcardQuery creates a new WildcardQuery for the given wildcard pattern.
func NewWildcardQuery(wildcard string) *WildcardQuery {
	query := WildcardQuery{newSearchQueryBase()}
	query.options["wildcard"] = wildcard
	return &query
}

// Field specifies the field for this query.
func (q *WildcardQuery) Field(field string) *WildcardQuery {
	q.options["field"] = field
	return q
}

// Boost specifies the boost for this query.
func (q *WildcardQuery) Boost(boost float32) *WildcardQuery {
	q.options["boost"] = boost
	return q
}
// DocIDQuery represents a search document id query.
type DocIDQuery struct {
	searchQueryBase
}

// NewDocIDQuery creates a new DocIDQuery over the given document ids.
func NewDocIDQuery(ids ...string) *DocIDQuery {
	query := DocIDQuery{newSearchQueryBase()}
	query.options["ids"] = []string{}
	return query.AddDocIds(ids...)
}

// AddDocIds adds additional document ids to this query.
func (q *DocIDQuery) AddDocIds(ids ...string) *DocIDQuery {
	existing := q.options["ids"].([]string)
	q.options["ids"] = append(existing, ids...)
	return q
}

// Field specifies the field for this query.
func (q *DocIDQuery) Field(field string) *DocIDQuery {
	q.options["field"] = field
	return q
}

// Boost specifies the boost for this query.
func (q *DocIDQuery) Boost(boost float32) *DocIDQuery {
	q.options["boost"] = boost
	return q
}
// BooleanFieldQuery represents a search boolean field query.
type BooleanFieldQuery struct {
	searchQueryBase
}

// NewBooleanFieldQuery creates a new BooleanFieldQuery matching the given value.
func NewBooleanFieldQuery(val bool) *BooleanFieldQuery {
	query := BooleanFieldQuery{newSearchQueryBase()}
	query.options["bool"] = val
	return &query
}

// Field specifies the field for this query.
func (q *BooleanFieldQuery) Field(field string) *BooleanFieldQuery {
	q.options["field"] = field
	return q
}

// Boost specifies the boost for this query.
func (q *BooleanFieldQuery) Boost(boost float32) *BooleanFieldQuery {
	q.options["boost"] = boost
	return q
}
// TermQuery represents a search term query.
type TermQuery struct {
	searchQueryBase
}

// NewTermQuery creates a new TermQuery for the given term.
func NewTermQuery(term string) *TermQuery {
	query := TermQuery{newSearchQueryBase()}
	query.options["term"] = term
	return &query
}

// set stores one option and returns the receiver to allow call chaining.
func (q *TermQuery) set(key string, value interface{}) *TermQuery {
	q.options[key] = value
	return q
}

// Field specifies the field for this query.
func (q *TermQuery) Field(field string) *TermQuery {
	return q.set("field", field)
}

// PrefixLength specifies the prefix length for this query.
func (q *TermQuery) PrefixLength(length uint64) *TermQuery {
	return q.set("prefix_length", length)
}

// Fuzziness specifies the fuzziness for this query.
func (q *TermQuery) Fuzziness(fuzziness uint64) *TermQuery {
	return q.set("fuzziness", fuzziness)
}

// Boost specifies the boost for this query.
func (q *TermQuery) Boost(boost float32) *TermQuery {
	return q.set("boost", boost)
}
// PhraseQuery represents a search phrase query.
type PhraseQuery struct {
	searchQueryBase
}

// NewPhraseQuery creates a new PhraseQuery from the given sequence of terms.
func NewPhraseQuery(terms ...string) *PhraseQuery {
	query := PhraseQuery{newSearchQueryBase()}
	query.options["terms"] = terms
	return &query
}

// Field specifies the field for this query.
func (q *PhraseQuery) Field(field string) *PhraseQuery {
	q.options["field"] = field
	return q
}

// Boost specifies the boost for this query.
func (q *PhraseQuery) Boost(boost float32) *PhraseQuery {
	q.options["boost"] = boost
	return q
}
// PrefixQuery represents a search prefix query.
type PrefixQuery struct {
	searchQueryBase
}

// NewPrefixQuery creates a new PrefixQuery for the given prefix.
func NewPrefixQuery(prefix string) *PrefixQuery {
	query := PrefixQuery{newSearchQueryBase()}
	query.options["prefix"] = prefix
	return &query
}

// Field specifies the field for this query.
func (q *PrefixQuery) Field(field string) *PrefixQuery {
	q.options["field"] = field
	return q
}

// Boost specifies the boost for this query.
func (q *PrefixQuery) Boost(boost float32) *PrefixQuery {
	q.options["boost"] = boost
	return q
}
// MatchAllQuery represents a search match all query.
type MatchAllQuery struct {
	searchQueryBase
}

// NewMatchAllQuery creates a new MatchAllQuery, which matches every document.
func NewMatchAllQuery() *MatchAllQuery {
	query := MatchAllQuery{newSearchQueryBase()}
	query.options["match_all"] = nil
	return &query
}
// MatchNoneQuery represents a search match none query.
type MatchNoneQuery struct {
	searchQueryBase
}

// NewMatchNoneQuery creates a new MatchNoneQuery, which matches no documents.
func NewMatchNoneQuery() *MatchNoneQuery {
	query := MatchNoneQuery{newSearchQueryBase()}
	query.options["match_none"] = nil
	return &query
}
// TermRangeQuery represents a search term range query.
type TermRangeQuery struct {
	searchQueryBase
}

// NewTermRangeQuery creates a new TermRangeQuery for the given term.
func NewTermRangeQuery(term string) *TermRangeQuery {
	query := TermRangeQuery{newSearchQueryBase()}
	query.options["term"] = term
	return &query
}

// set stores one option and returns the receiver to allow call chaining.
func (q *TermRangeQuery) set(key string, value interface{}) *TermRangeQuery {
	q.options[key] = value
	return q
}

// Field specifies the field for this query.
func (q *TermRangeQuery) Field(field string) *TermRangeQuery {
	return q.set("field", field)
}

// Min specifies the minimum value and inclusiveness for this range query.
func (q *TermRangeQuery) Min(min string, inclusive bool) *TermRangeQuery {
	return q.set("min", min).set("inclusive_min", inclusive)
}

// Max specifies the maximum value and inclusiveness for this range query.
func (q *TermRangeQuery) Max(max string, inclusive bool) *TermRangeQuery {
	return q.set("max", max).set("inclusive_max", inclusive)
}

// Boost specifies the boost for this query.
func (q *TermRangeQuery) Boost(boost float32) *TermRangeQuery {
	return q.set("boost", boost)
}
// GeoDistanceQuery represents a search geographical distance query.
type GeoDistanceQuery struct {
	searchQueryBase
}

// NewGeoDistanceQuery creates a new GeoDistanceQuery centred on the given
// longitude/latitude pair with the given distance string.
func NewGeoDistanceQuery(lon, lat float64, distance string) *GeoDistanceQuery {
	query := GeoDistanceQuery{newSearchQueryBase()}
	query.options["location"] = []float64{lon, lat}
	query.options["distance"] = distance
	return &query
}

// Field specifies the field for this query.
func (q *GeoDistanceQuery) Field(field string) *GeoDistanceQuery {
	q.options["field"] = field
	return q
}

// Boost specifies the boost for this query.
func (q *GeoDistanceQuery) Boost(boost float32) *GeoDistanceQuery {
	q.options["boost"] = boost
	return q
}
// GeoBoundingBoxQuery represents a search geographical bounding box query.
type GeoBoundingBoxQuery struct {
	searchQueryBase
}

// NewGeoBoundingBoxQuery creates a new GeoBoundingBoxQuery from the top-left
// and bottom-right longitude/latitude pairs.
func NewGeoBoundingBoxQuery(tlLon, tlLat, brLon, brLat float64) *GeoBoundingBoxQuery {
	query := GeoBoundingBoxQuery{newSearchQueryBase()}
	query.options["top_left"] = []float64{tlLon, tlLat}
	query.options["bottom_right"] = []float64{brLon, brLat}
	return &query
}

// Field specifies the field for this query.
func (q *GeoBoundingBoxQuery) Field(field string) *GeoBoundingBoxQuery {
	q.options["field"] = field
	return q
}

// Boost specifies the boost for this query.
func (q *GeoBoundingBoxQuery) Boost(boost float32) *GeoBoundingBoxQuery {
	q.options["boost"] = boost
	return q
}

123
vendor/github.com/couchbase/gocb/v2/search/sorting.go generated vendored Normal file
View File

@ -0,0 +1,123 @@
package search
import (
"encoding/json"
)
// Sort represents a sorting directive for a search query.
// Implementations in this package marshal themselves into the JSON payload
// expected by the search REST API.
type Sort interface {
}
// searchSortBase is the shared option store embedded by every sort type in
// this package; options are collected in a generic map and marshaled directly.
type searchSortBase struct {
	options map[string]interface{}
}

func newSearchSortBase() searchSortBase {
	base := searchSortBase{}
	base.options = map[string]interface{}{}
	return base
}

// MarshalJSON marshals this sort to JSON for the search REST API.
func (q searchSortBase) MarshalJSON() ([]byte, error) {
	return json.Marshal(q.options)
}
// SearchSortScore represents a search score sort.
type SearchSortScore struct {
	searchSortBase
}

// NewSearchSortScore creates a new SearchSortScore.
func NewSearchSortScore() *SearchSortScore {
	sort := SearchSortScore{newSearchSortBase()}
	sort.options["by"] = "score"
	return &sort
}

// Descending specifies the ordering of the results.
func (q *SearchSortScore) Descending(descending bool) *SearchSortScore {
	q.options["desc"] = descending
	return q
}
// SearchSortID represents a search document ID sort.
type SearchSortID struct {
	searchSortBase
}

// NewSearchSortID creates a new SearchSortID.
func NewSearchSortID() *SearchSortID {
	sort := SearchSortID{newSearchSortBase()}
	sort.options["by"] = "id"
	return &sort
}

// Descending specifies the ordering of the results.
func (q *SearchSortID) Descending(descending bool) *SearchSortID {
	q.options["desc"] = descending
	return q
}
// SearchSortField represents a search field sort.
type SearchSortField struct {
	searchSortBase
}

// NewSearchSortField creates a new SearchSortField over the given field.
func NewSearchSortField(field string) *SearchSortField {
	sort := SearchSortField{newSearchSortBase()}
	sort.options["by"] = "field"
	sort.options["field"] = field
	return &sort
}

// set stores one option and returns the receiver to allow call chaining.
func (q *SearchSortField) set(key string, value interface{}) *SearchSortField {
	q.options[key] = value
	return q
}

// Type allows you to specify the search field sort type.
func (q *SearchSortField) Type(value string) *SearchSortField {
	return q.set("type", value)
}

// Mode allows you to specify the search field sort mode.
func (q *SearchSortField) Mode(mode string) *SearchSortField {
	return q.set("mode", mode)
}

// Missing allows you to specify the search field sort missing behaviour.
func (q *SearchSortField) Missing(missing string) *SearchSortField {
	return q.set("missing", missing)
}

// Descending specifies the ordering of the results.
func (q *SearchSortField) Descending(descending bool) *SearchSortField {
	return q.set("desc", descending)
}
// SearchSortGeoDistance represents a search geo sort.
type SearchSortGeoDistance struct {
	searchSortBase
}

// NewSearchSortGeoDistance creates a new SearchSortGeoDistance which sorts by
// distance of the given field from the longitude/latitude pair.
func NewSearchSortGeoDistance(field string, lon, lat float64) *SearchSortGeoDistance {
	sort := SearchSortGeoDistance{newSearchSortBase()}
	sort.options["by"] = "geo_distance"
	sort.options["field"] = field
	sort.options["location"] = []float64{lon, lat}
	return &sort
}

// Unit specifies the unit used for sorting.
func (q *SearchSortGeoDistance) Unit(unit string) *SearchSortGeoDistance {
	q.options["unit"] = unit
	return q
}

// Descending specifies the ordering of the results.
func (q *SearchSortGeoDistance) Descending(descending bool) *SearchSortGeoDistance {
	q.options["desc"] = descending
	return q
}

View File

@ -0,0 +1,138 @@
package gocb
import (
"time"
cbsearch "github.com/couchbase/gocb/v2/search"
)
// SearchHighlightStyle indicates the type of highlighting to use for a search query.
type SearchHighlightStyle string

const (
	// DefaultHighlightStyle specifies to use the default to highlight search result hits.
	DefaultHighlightStyle SearchHighlightStyle = ""

	// HTMLHighlightStyle specifies to use HTML tags to highlight search result hits.
	HTMLHighlightStyle SearchHighlightStyle = "html"

	// AnsiHightlightStyle specifies to use ANSI tags to highlight search result hits.
	// NOTE: the misspelling ("Hightlight") is part of the exported API and must
	// be kept for backwards compatibility.
	AnsiHightlightStyle SearchHighlightStyle = "ansi"
)
// SearchScanConsistency indicates the level of data consistency desired for a search query.
type SearchScanConsistency uint

const (
	// searchScanConsistencyNotSet is the zero value, meaning the caller did
	// not request any particular consistency level.
	searchScanConsistencyNotSet SearchScanConsistency = iota

	// SearchScanConsistencyNotBounded indicates no data consistency is required.
	SearchScanConsistencyNotBounded
)
// SearchHighlightOptions are the options available for search highlighting.
type SearchHighlightOptions struct {
	// Style selects the highlight markup (default, HTML or ANSI).
	Style SearchHighlightStyle
	// Fields lists the fields to apply highlighting to.
	Fields []string
}
// SearchOptions represents a pending search query.
type SearchOptions struct {
	// ScanConsistency is serialized as ctl.consistency; mutually exclusive
	// with ConsistentWith (see toMap).
	ScanConsistency SearchScanConsistency
	// Limit is serialized as the "size" field of the request body.
	Limit uint32
	// Skip is serialized as the "from" field of the request body.
	Skip      uint32
	Explain   bool
	Highlight *SearchHighlightOptions
	Fields    []string
	Sort      []cbsearch.Sort
	Facets    map[string]cbsearch.Facet
	// ConsistentWith is serialized as "at_plus" consistency; mutually
	// exclusive with ScanConsistency.
	ConsistentWith *MutationState

	// Raw provides a way to provide extra parameters in the request body for the query.
	Raw map[string]interface{}

	Timeout       time.Duration
	RetryStrategy RetryStrategy

	parentSpan requestSpanContext
}
// toMap converts the options into the JSON body fields expected by the search
// service. It returns an error when both ScanConsistency and ConsistentWith
// are set, or when an unknown consistency level is requested.
func (opts *SearchOptions) toMap() (map[string]interface{}, error) {
	data := make(map[string]interface{})

	if opts.Limit > 0 {
		data["size"] = opts.Limit
	}
	if opts.Skip > 0 {
		data["from"] = opts.Skip
	}
	if opts.Explain {
		data["explain"] = opts.Explain
	}
	if len(opts.Fields) > 0 {
		data["fields"] = opts.Fields
	}
	if len(opts.Sort) > 0 {
		data["sort"] = opts.Sort
	}
	if opts.Highlight != nil {
		highlight := make(map[string]interface{})
		highlight["style"] = string(opts.Highlight.Style)
		highlight["fields"] = opts.Highlight.Fields
		data["highlight"] = highlight
	}
	if opts.Facets != nil {
		facets := make(map[string]interface{})
		for k, v := range opts.Facets {
			facets[k] = v
		}
		data["facets"] = facets
	}

	// ScanConsistency and ConsistentWith are mutually exclusive ways of
	// requesting consistency; reject requests that set both.
	// (Uses the named sentinel for consistency with the check below.)
	if opts.ScanConsistency != searchScanConsistencyNotSet && opts.ConsistentWith != nil {
		return nil, makeInvalidArgumentsError("ScanConsistency and ConsistentWith must be used exclusively")
	}

	var ctl map[string]interface{}
	if opts.ScanConsistency != searchScanConsistencyNotSet {
		consistency := make(map[string]interface{})

		if opts.ScanConsistency == SearchScanConsistencyNotBounded {
			consistency["level"] = "not_bounded"
		} else {
			return nil, makeInvalidArgumentsError("unexpected consistency option")
		}

		ctl = map[string]interface{}{"consistency": consistency}
	}
	if opts.ConsistentWith != nil {
		consistency := make(map[string]interface{})

		consistency["level"] = "at_plus"
		consistency["vectors"] = opts.ConsistentWith.toSearchMutationState()

		if ctl == nil {
			ctl = make(map[string]interface{})
		}
		ctl["consistency"] = consistency
	}
	if ctl != nil {
		data["ctl"] = ctl
	}

	if opts.Raw != nil {
		for k, v := range opts.Raw {
			data[k] = v
		}
	}

	return data, nil
}

327
vendor/github.com/couchbase/gocb/v2/subdocspecs.go generated vendored Normal file
View File

@ -0,0 +1,327 @@
package gocb
import "github.com/couchbase/gocbcore/v9/memd"
// LookupInSpec is the representation of an operation available when calling LookupIn
type LookupInSpec struct {
	op      memd.SubDocOpType // sub-document opcode to send to the server
	path    string            // path within the document, e.g. "path.to.array[3]"
	isXattr bool              // populated from the spec options' IsXattr field
}
// MutateInSpec is the representation of an operation available when calling MutateIn
type MutateInSpec struct {
	op         memd.SubDocOpType // sub-document opcode to send to the server
	createPath bool              // populated from the spec options' CreatePath field
	isXattr    bool              // populated from the spec options' IsXattr field
	path       string            // path within the document
	value      interface{}       // value payload for the mutation (nil for Remove)
	multiValue bool              // value holds multiple array elements (see HasMultiple)
}
// GetSpecOptions are the options available to LookupIn subdoc Get operations.
type GetSpecOptions struct {
	// IsXattr targets an extended attribute rather than the document body.
	IsXattr bool
}

// GetSpec indicates a path to be retrieved from the document. The value of the path
// can later be retrieved from the LookupResult.
// The path syntax follows query's path syntax (e.g. `foo.bar.baz`).
func GetSpec(path string, opts *GetSpecOptions) LookupInSpec {
	var options GetSpecOptions
	if opts != nil {
		options = *opts
	}
	return LookupInSpec{
		op:      memd.SubDocOpGet,
		path:    path,
		isXattr: options.IsXattr,
	}
}
// ExistsSpecOptions are the options available to LookupIn subdoc Exists operations.
type ExistsSpecOptions struct {
	// IsXattr targets an extended attribute rather than the document body.
	IsXattr bool
}

// ExistsSpec is similar to Path(), but does not actually retrieve the value from the server.
// This may save bandwidth if you only need to check for the existence of a
// path (without caring for its content). You can check the status of this
// operation by using .ContentAt (and ignoring the value) or .Exists() on the LookupResult.
func ExistsSpec(path string, opts *ExistsSpecOptions) LookupInSpec {
	var options ExistsSpecOptions
	if opts != nil {
		options = *opts
	}
	return LookupInSpec{
		op:      memd.SubDocOpExists,
		path:    path,
		isXattr: options.IsXattr,
	}
}
// CountSpecOptions are the options available to LookupIn subdoc Count operations.
type CountSpecOptions struct {
	// IsXattr targets an extended attribute rather than the document body.
	IsXattr bool
}

// CountSpec allows you to retrieve the number of items in an array or keys within a
// dictionary within an element of a document.
func CountSpec(path string, opts *CountSpecOptions) LookupInSpec {
	var options CountSpecOptions
	if opts != nil {
		options = *opts
	}
	return LookupInSpec{
		op:      memd.SubDocOpGetCount,
		path:    path,
		isXattr: options.IsXattr,
	}
}
// InsertSpecOptions are the options available to subdocument Insert operations.
type InsertSpecOptions struct {
	// CreatePath maps to the createPath flag on the resulting spec.
	CreatePath bool
	// IsXattr targets an extended attribute rather than the document body.
	IsXattr bool
}

// InsertSpec inserts a value at the specified path within the document.
func InsertSpec(path string, val interface{}, opts *InsertSpecOptions) MutateInSpec {
	var options InsertSpecOptions
	if opts != nil {
		options = *opts
	}
	return MutateInSpec{
		op:         memd.SubDocOpDictAdd,
		createPath: options.CreatePath,
		isXattr:    options.IsXattr,
		path:       path,
		value:      val,
		multiValue: false,
	}
}
// UpsertSpecOptions are the options available to subdocument Upsert operations.
type UpsertSpecOptions struct {
	// CreatePath maps to the createPath flag on the resulting spec.
	CreatePath bool
	// IsXattr targets an extended attribute rather than the document body.
	IsXattr bool
}

// UpsertSpec creates a new value at the specified path within the document if it does not exist, if it does exist then it
// updates it.
func UpsertSpec(path string, val interface{}, opts *UpsertSpecOptions) MutateInSpec {
	var options UpsertSpecOptions
	if opts != nil {
		options = *opts
	}
	return MutateInSpec{
		op:         memd.SubDocOpDictSet,
		createPath: options.CreatePath,
		isXattr:    options.IsXattr,
		path:       path,
		value:      val,
		multiValue: false,
	}
}
// ReplaceSpecOptions are the options available to subdocument Replace operations.
type ReplaceSpecOptions struct {
	// IsXattr targets an extended attribute rather than the document body.
	IsXattr bool
}

// ReplaceSpec replaces the value of the field at path.
func ReplaceSpec(path string, val interface{}, opts *ReplaceSpecOptions) MutateInSpec {
	var options ReplaceSpecOptions
	if opts != nil {
		options = *opts
	}
	return MutateInSpec{
		op:         memd.SubDocOpReplace,
		createPath: false,
		isXattr:    options.IsXattr,
		path:       path,
		value:      val,
		multiValue: false,
	}
}
// RemoveSpecOptions are the options available to subdocument Remove operations.
type RemoveSpecOptions struct {
	// IsXattr targets an extended attribute rather than the document body.
	IsXattr bool
}

// RemoveSpec removes the field at path.
func RemoveSpec(path string, opts *RemoveSpecOptions) MutateInSpec {
	var options RemoveSpecOptions
	if opts != nil {
		options = *opts
	}
	return MutateInSpec{
		op:         memd.SubDocOpDelete,
		createPath: false,
		isXattr:    options.IsXattr,
		path:       path,
		value:      nil,
		multiValue: false,
	}
}
// ArrayAppendSpecOptions are the options available to subdocument ArrayAppend operations.
type ArrayAppendSpecOptions struct {
	// CreatePath maps to the createPath flag on the resulting spec.
	CreatePath bool
	// IsXattr targets an extended attribute rather than the document body.
	IsXattr bool
	// HasMultiple adds multiple values as elements to an array.
	// When used `value` in the spec must be an array type
	// ArrayAppend("path", []int{1,2,3,4}, ArrayAppendSpecOptions{HasMultiple:true}) =>
	//   "path" [..., 1,2,3,4]
	//
	// This is a more efficient version (at both the network and server levels)
	// of doing
	// spec.ArrayAppend("path", 1, nil)
	// spec.ArrayAppend("path", 2, nil)
	// spec.ArrayAppend("path", 3, nil)
	HasMultiple bool
}

// ArrayAppendSpec adds an element(s) to the end (i.e. right) of an array.
func ArrayAppendSpec(path string, val interface{}, opts *ArrayAppendSpecOptions) MutateInSpec {
	var options ArrayAppendSpecOptions
	if opts != nil {
		options = *opts
	}
	return MutateInSpec{
		op:         memd.SubDocOpArrayPushLast,
		createPath: options.CreatePath,
		isXattr:    options.IsXattr,
		path:       path,
		value:      val,
		multiValue: options.HasMultiple,
	}
}
// ArrayPrependSpecOptions are the options available to subdocument ArrayPrepend operations.
type ArrayPrependSpecOptions struct {
	// CreatePath maps to the createPath flag on the resulting spec.
	CreatePath bool
	// IsXattr targets an extended attribute rather than the document body.
	IsXattr bool
	// HasMultiple adds multiple values as elements to an array.
	// When used `value` in the spec must be an array type
	// ArrayPrepend("path", []int{1,2,3,4}, ArrayPrependSpecOptions{HasMultiple:true}) =>
	//   "path" [1,2,3,4, ....]
	//
	// This is a more efficient version (at both the network and server levels)
	// of doing
	// spec.ArrayPrepend("path", 1, nil)
	// spec.ArrayPrepend("path", 2, nil)
	// spec.ArrayPrepend("path", 3, nil)
	HasMultiple bool
}

// ArrayPrependSpec adds an element to the beginning (i.e. left) of an array.
func ArrayPrependSpec(path string, val interface{}, opts *ArrayPrependSpecOptions) MutateInSpec {
	var options ArrayPrependSpecOptions
	if opts != nil {
		options = *opts
	}
	return MutateInSpec{
		op:         memd.SubDocOpArrayPushFirst,
		createPath: options.CreatePath,
		isXattr:    options.IsXattr,
		path:       path,
		value:      val,
		multiValue: options.HasMultiple,
	}
}
// ArrayInsertSpecOptions are the options available to subdocument ArrayInsert operations.
type ArrayInsertSpecOptions struct {
	CreatePath bool
	IsXattr    bool
	// HasMultiple signals that value holds several elements to insert.
	// When set, value must be an array type, e.g.
	//   ArrayInsert("path[1]", []int{1,2,3,4}, ArrayInsertSpecOptions{HasMultiple:true})
	// inserts 1,2,3,4 as individual elements starting at the given index.
	// This is cheaper, at both the network and server levels, than issuing
	// one ArrayInsert per element.
	HasMultiple bool
}

// ArrayInsertSpec inserts an element at a given position within an array. The position should be
// specified as part of the path, e.g. path.to.array[3]
func ArrayInsertSpec(path string, val interface{}, opts *ArrayInsertSpecOptions) MutateInSpec {
	var createPath, xattr, multi bool
	if opts != nil {
		createPath, xattr, multi = opts.CreatePath, opts.IsXattr, opts.HasMultiple
	}
	return MutateInSpec{
		op:         memd.SubDocOpArrayInsert,
		path:       path,
		value:      val,
		createPath: createPath,
		isXattr:    xattr,
		multiValue: multi,
	}
}
// ArrayAddUniqueSpecOptions are the options available to subdocument ArrayAddUnique operations.
type ArrayAddUniqueSpecOptions struct {
	CreatePath bool
	IsXattr    bool
}

// ArrayAddUniqueSpec adds an dictionary add unique operation to this mutation operation set.
func ArrayAddUniqueSpec(path string, val interface{}, opts *ArrayAddUniqueSpecOptions) MutateInSpec {
	var createPath, xattr bool
	if opts != nil {
		createPath, xattr = opts.CreatePath, opts.IsXattr
	}
	return MutateInSpec{
		op:         memd.SubDocOpArrayAddUnique,
		path:       path,
		value:      val,
		createPath: createPath,
		isXattr:    xattr,
		multiValue: false,
	}
}
// CounterSpecOptions are the options available to subdocument Increment and Decrement operations.
type CounterSpecOptions struct {
	CreatePath bool
	IsXattr    bool
}

// IncrementSpec adds an increment operation to this mutation operation set.
func IncrementSpec(path string, delta int64, opts *CounterSpecOptions) MutateInSpec {
	var createPath, xattr bool
	if opts != nil {
		createPath, xattr = opts.CreatePath, opts.IsXattr
	}
	return MutateInSpec{
		op:         memd.SubDocOpCounter,
		path:       path,
		value:      delta,
		createPath: createPath,
		isXattr:    xattr,
		multiValue: false,
	}
}

// DecrementSpec adds a decrement operation to this mutation operation set.
// It behaves as IncrementSpec with the delta negated.
func DecrementSpec(path string, delta int64, opts *CounterSpecOptions) MutateInSpec {
	spec := IncrementSpec(path, delta, opts)
	spec.value = -delta
	return spec
}

View File

@ -0,0 +1,414 @@
package gocb
import (
"encoding/json"
"sort"
"sync"
"sync/atomic"
"time"
)
// thresholdLogGroup holds a bounded, duration-sorted sample of the slowest
// operations seen for a single service between log flushes.
type thresholdLogGroup struct {
	name string
	// floor is the minimum duration an op must reach to be sampled at all.
	floor time.Duration
	// ops is kept in ascending duration order; capacity is the sample size.
	ops []*thresholdLogSpan
	lock sync.RWMutex
}
// init configures the group with its service name, minimum duration of
// interest, and sample capacity.
func (g *thresholdLogGroup) init(name string, floor time.Duration, size uint32) {
	g.name = name
	g.floor = floor
	g.ops = make([]*thresholdLogSpan, 0, size)
}
// recordOp inserts span into the group's sorted sample of slow operations.
// Spans faster than the group's floor are dropped. Once the sample is at
// capacity, the fastest retained op (g.ops[0]) is evicted to make room for a
// slower one. Safe for concurrent use.
func (g *thresholdLogGroup) recordOp(span *thresholdLogSpan) {
	if span.duration < g.floor {
		return
	}
	// Preemptively check that we actually need to be inserted using a read lock first
	// this is a performance improvement measure to avoid locking the mutex all the time.
	g.lock.RLock()
	if len(g.ops) == cap(g.ops) && span.duration < g.ops[0].duration {
		// we are at capacity and we are faster than the fastest slow op
		g.lock.RUnlock()
		return
	}
	g.lock.RUnlock()
	g.lock.Lock()
	// Re-check under the write lock: the sample may have changed between the
	// RUnlock above and acquiring the write lock.
	if len(g.ops) == cap(g.ops) && span.duration < g.ops[0].duration {
		// we are at capacity and we are faster than the fastest slow op
		g.lock.Unlock()
		return
	}
	l := len(g.ops)
	i := sort.Search(l, func(i int) bool { return span.duration < g.ops[i].duration })
	// i represents the slot where it should be inserted
	if len(g.ops) < cap(g.ops) {
		// Still below capacity: insert at i, shifting the tail right.
		if i == l {
			g.ops = append(g.ops, span)
		} else {
			g.ops = append(g.ops, nil)
			copy(g.ops[i+1:], g.ops[i:])
			g.ops[i] = span
		}
	} else {
		// At capacity: drop the fastest element (index 0) and place span at i-1.
		if i == 0 {
			g.ops[i] = span
		} else {
			copy(g.ops[0:i-1], g.ops[1:i])
			g.ops[i-1] = span
		}
	}
	g.lock.Unlock()
}
// thresholdLogItem is the JSON representation of a single sampled slow
// operation, with all durations expressed in microseconds.
type thresholdLogItem struct {
	OperationName string `json:"operation_name,omitempty"`
	TotalTimeUs uint64 `json:"total_us,omitempty"`
	EncodeDurationUs uint64 `json:"encode_us,omitempty"`
	DispatchDurationUs uint64 `json:"dispatch_us,omitempty"`
	ServerDurationUs uint64 `json:"server_us,omitempty"`
	LastRemoteAddress string `json:"last_remote_address,omitempty"`
	LastLocalAddress string `json:"last_local_address,omitempty"`
	LastDispatchDurationUs uint64 `json:"last_dispatch_us,omitempty"`
	LastOperationID string `json:"last_operation_id,omitempty"`
	LastLocalID string `json:"last_local_id,omitempty"`
	DocumentKey string `json:"document_key,omitempty"`
}
// thresholdLogService is the JSON representation of one service's flush:
// the service name, the number of sampled ops, and the sampled ops themselves.
type thresholdLogService struct {
	Service string `json:"service"`
	Count uint64 `json:"count"`
	Top []thresholdLogItem `json:"top"`
}
// logRecordedRecords drains the group's sampled ops and logs them as a
// single JSON document (slowest op first), resetting the sample for the
// next interval. sampleSize bounds the scratch buffer used for the copy.
func (g *thresholdLogGroup) logRecordedRecords(sampleSize uint32) {
	// Preallocate space to copy the ops into...
	oldOps := make([]*thresholdLogSpan, sampleSize)
	g.lock.Lock()
	// Escape early if we have no ops to log...
	if len(g.ops) == 0 {
		g.lock.Unlock()
		return
	}
	// Copy out our ops so we can cheaply print them out without blocking
	// our ops from actually being recorded in other goroutines (which would
	// effectively slow down the op pipeline for logging).
	oldOps = oldOps[0:len(g.ops)]
	copy(oldOps, g.ops)
	g.ops = g.ops[:0]
	g.lock.Unlock()
	jsonData := thresholdLogService{
		Service: g.name,
	}
	// g.ops is sorted ascending by duration, so iterate backwards to emit
	// the slowest operation first.
	for i := len(oldOps) - 1; i >= 0; i-- {
		op := oldOps[i]
		jsonData.Top = append(jsonData.Top, thresholdLogItem{
			OperationName:          op.opName,
			TotalTimeUs:            uint64(op.duration / time.Microsecond),
			DispatchDurationUs:     uint64(op.totalDispatchDuration / time.Microsecond),
			ServerDurationUs:       uint64(op.totalServerDuration / time.Microsecond),
			EncodeDurationUs:       uint64(op.totalEncodeDuration / time.Microsecond),
			LastRemoteAddress:      op.lastDispatchPeer,
			LastDispatchDurationUs: uint64(op.lastDispatchDuration / time.Microsecond),
			LastOperationID:        op.lastOperationID,
			LastLocalID:            op.lastLocalID,
			DocumentKey:            op.documentKey,
		})
	}
	jsonData.Count = uint64(len(jsonData.Top))
	jsonBytes, err := json.Marshal(jsonData)
	if err != nil {
		logDebugf("Failed to generate threshold logging service JSON: %s", err)
		// Fix: previously we fell through and logged a nil byte slice;
		// there is nothing useful to emit when marshalling fails.
		return
	}
	logInfof("Threshold Log: %s", jsonBytes)
}
// ThresholdLoggingOptions is the set of options available for configuring threshold logging.
// Zero values are replaced with defaults by newThresholdLoggingTracer.
type ThresholdLoggingOptions struct {
	ServerDurationDisabled bool
	// Interval is how often the sampled slow ops are flushed to the log.
	Interval time.Duration
	// SampleSize is the maximum number of slow ops retained per service.
	SampleSize uint32
	// Per-service duration floors; ops faster than these are not sampled.
	KVThreshold time.Duration
	ViewsThreshold time.Duration
	QueryThreshold time.Duration
	SearchThreshold time.Duration
	AnalyticsThreshold time.Duration
	ManagementThreshold time.Duration
}
// thresholdLoggingTracer is a specialized Tracer implementation which will automatically
// log operations which fall outside of a set of thresholds. Note that this tracer is
// only safe for use within the Couchbase SDK, uses by external event sources are
// likely to fail.
type thresholdLoggingTracer struct {
	Interval time.Duration
	SampleSize uint32
	KVThreshold time.Duration
	ViewsThreshold time.Duration
	QueryThreshold time.Duration
	SearchThreshold time.Duration
	AnalyticsThreshold time.Duration
	ManagementThreshold time.Duration
	// killCh stops the background logger goroutine (see DecRef/loggerRoutine).
	killCh chan struct{}
	// refCount tracks how many Cluster instances use this tracer (see AddRef).
	refCount int32
	nextTick time.Time
	// One sample group per service.
	kvGroup thresholdLogGroup
	viewsGroup thresholdLogGroup
	queryGroup thresholdLogGroup
	searchGroup thresholdLogGroup
	analyticsGroup thresholdLogGroup
	managementGroup thresholdLogGroup
}
// newThresholdLoggingTracer builds a tracer from opts, substituting defaults
// for any zero-valued options. NOTE: defaults are written back into *opts,
// so a caller-supplied options struct is mutated in place.
func newThresholdLoggingTracer(opts *ThresholdLoggingOptions) *thresholdLoggingTracer {
	if opts == nil {
		opts = &ThresholdLoggingOptions{}
	}
	if opts.Interval == 0 {
		opts.Interval = 10 * time.Second
	}
	if opts.SampleSize == 0 {
		opts.SampleSize = 10
	}
	if opts.KVThreshold == 0 {
		opts.KVThreshold = 500 * time.Millisecond
	}
	if opts.ViewsThreshold == 0 {
		opts.ViewsThreshold = 1 * time.Second
	}
	if opts.QueryThreshold == 0 {
		opts.QueryThreshold = 1 * time.Second
	}
	if opts.SearchThreshold == 0 {
		opts.SearchThreshold = 1 * time.Second
	}
	if opts.AnalyticsThreshold == 0 {
		opts.AnalyticsThreshold = 1 * time.Second
	}
	if opts.ManagementThreshold == 0 {
		opts.ManagementThreshold = 1 * time.Second
	}
	t := &thresholdLoggingTracer{
		Interval:            opts.Interval,
		SampleSize:          opts.SampleSize,
		KVThreshold:         opts.KVThreshold,
		ViewsThreshold:      opts.ViewsThreshold,
		QueryThreshold:      opts.QueryThreshold,
		SearchThreshold:     opts.SearchThreshold,
		AnalyticsThreshold:  opts.AnalyticsThreshold,
		ManagementThreshold: opts.ManagementThreshold,
	}
	// Each service gets its own bounded sample group.
	t.kvGroup.init("kv", t.KVThreshold, t.SampleSize)
	t.viewsGroup.init("views", t.ViewsThreshold, t.SampleSize)
	t.queryGroup.init("query", t.QueryThreshold, t.SampleSize)
	t.searchGroup.init("search", t.SearchThreshold, t.SampleSize)
	t.analyticsGroup.init("analytics", t.AnalyticsThreshold, t.SampleSize)
	t.managementGroup.init("management", t.ManagementThreshold, t.SampleSize)
	if t.killCh == nil {
		t.killCh = make(chan struct{})
	}
	if t.nextTick.IsZero() {
		t.nextTick = time.Now().Add(t.Interval)
	}
	return t
}
// AddRef is used internally to keep track of the number of Cluster instances referring to it.
// This is used to correctly shut down the aggregation routines once there are no longer any
// instances tracing to it.
func (t *thresholdLoggingTracer) AddRef() int32 {
	newRefCount := atomic.AddInt32(&t.refCount, 1)
	if newRefCount == 1 {
		// First reference: start the background flush goroutine.
		t.startLoggerRoutine()
	}
	return newRefCount
}
// DecRef is the counterpart to AddRef (see AddRef for more information).
func (t *thresholdLoggingTracer) DecRef() int32 {
	newRefCount := atomic.AddInt32(&t.refCount, -1)
	if newRefCount == 0 {
		// Last reference released: signal the logger goroutine to flush and exit.
		t.killCh <- struct{}{}
	}
	return newRefCount
}
// logRecordedRecords flushes every service group's sampled slow ops to the log.
func (t *thresholdLoggingTracer) logRecordedRecords() {
	t.kvGroup.logRecordedRecords(t.SampleSize)
	t.viewsGroup.logRecordedRecords(t.SampleSize)
	t.queryGroup.logRecordedRecords(t.SampleSize)
	t.searchGroup.logRecordedRecords(t.SampleSize)
	t.analyticsGroup.logRecordedRecords(t.SampleSize)
	t.managementGroup.logRecordedRecords(t.SampleSize)
}
// startLoggerRoutine launches the periodic flush goroutine (stopped via killCh).
func (t *thresholdLoggingTracer) startLoggerRoutine() {
	go t.loggerRoutine()
}
// loggerRoutine flushes the sampled ops every Interval until killCh fires,
// performing one final flush before exiting.
func (t *thresholdLoggingTracer) loggerRoutine() {
	for {
		select {
		case <-time.After(time.Until(t.nextTick)):
			t.nextTick = t.nextTick.Add(t.Interval)
			t.logRecordedRecords()
		case <-t.killCh:
			t.logRecordedRecords()
			return
		}
	}
}
// recordOp routes a finished span to the sample group for its service.
// Spans whose serviceName matches no known service are silently dropped.
func (t *thresholdLoggingTracer) recordOp(span *thresholdLogSpan) {
	switch span.serviceName {
	case "mgmt":
		t.managementGroup.recordOp(span)
	case "kv":
		t.kvGroup.recordOp(span)
	case "views":
		t.viewsGroup.recordOp(span)
	case "query":
		t.queryGroup.recordOp(span)
	case "search":
		t.searchGroup.recordOp(span)
	case "analytics":
		t.analyticsGroup.recordOp(span)
	}
}
// StartSpan belongs to the Tracer interface.
func (t *thresholdLoggingTracer) StartSpan(operationName string, parentContext requestSpanContext) requestSpan {
	span := &thresholdLogSpan{
		tracer:    t,
		opName:    operationName,
		startTime: time.Now(),
	}
	// Only parent contexts created by this tracer are linked; any other
	// context type results in a root span.
	if context, ok := parentContext.(*thresholdLogSpanContext); ok {
		span.parent = context.span
	}
	return span
}
// thresholdLogSpan is the span implementation recorded by
// thresholdLoggingTracer. Timing fields are populated via SetTag and Finish;
// totals are rolled up into the parent span on Finish.
type thresholdLogSpan struct {
	tracer *thresholdLoggingTracer
	parent *thresholdLogSpan
	opName string
	startTime time.Time
	serviceName string
	peerAddress string
	serverDuration time.Duration
	duration time.Duration
	totalServerDuration time.Duration
	totalDispatchDuration time.Duration
	totalEncodeDuration time.Duration
	lastDispatchPeer string
	lastDispatchDuration time.Duration
	lastOperationID string
	lastLocalID string
	documentKey string
	// lock guards parent-field updates performed by children in Finish.
	lock sync.Mutex
}
// Context returns a span context that can be used to parent further spans.
func (n *thresholdLogSpan) Context() requestSpanContext {
	return &thresholdLogSpanContext{n}
}
// SetTag records one of the recognized tracing tags on the span; unknown
// keys are ignored, and values of the wrong type are logged and dropped
// rather than causing an error.
func (n *thresholdLogSpan) SetTag(key string, value interface{}) requestSpan {
	var ok bool
	switch key {
	case "server_duration":
		if n.serverDuration, ok = value.(time.Duration); !ok {
			logDebugf("Failed to cast span server_duration tag")
		}
	case "couchbase.service":
		if n.serviceName, ok = value.(string); !ok {
			logDebugf("Failed to cast span couchbase.service tag")
		}
	case "peer.address":
		if n.peerAddress, ok = value.(string); !ok {
			logDebugf("Failed to cast span peer.address tag")
		}
	case "couchbase.operation_id":
		if n.lastOperationID, ok = value.(string); !ok {
			logDebugf("Failed to cast span couchbase.operation_id tag")
		}
	case "couchbase.document_key":
		if n.documentKey, ok = value.(string); !ok {
			logDebugf("Failed to cast span couchbase.document_key tag")
		}
	case "couchbase.local_id":
		if n.lastLocalID, ok = value.(string); !ok {
			logDebugf("Failed to cast span couchbase.local_id tag")
		}
	}
	return n
}
// Finish closes the span: it fixes the span's duration, rolls accumulated
// timing data up into the parent span (if any), and hands the span to the
// tracer for slow-op sampling when it carries a service name.
func (n *thresholdLogSpan) Finish() {
	n.duration = time.Since(n.startTime)
	n.totalServerDuration += n.serverDuration
	// "dispatch" and "encode" child spans contribute to the dispatch/encode
	// totals, which are then propagated upward below.
	if n.opName == "dispatch" {
		n.totalDispatchDuration += n.duration
		n.lastDispatchPeer = n.peerAddress
		n.lastDispatchDuration = n.duration
	}
	if n.opName == "encode" {
		n.totalEncodeDuration += n.duration
	}
	if n.parent != nil {
		// Multiple children may finish concurrently; serialize parent updates.
		n.parent.lock.Lock()
		n.parent.totalServerDuration += n.totalServerDuration
		n.parent.totalDispatchDuration += n.totalDispatchDuration
		n.parent.totalEncodeDuration += n.totalEncodeDuration
		if n.lastDispatchPeer != "" || n.lastDispatchDuration > 0 {
			n.parent.lastDispatchPeer = n.lastDispatchPeer
			n.parent.lastDispatchDuration = n.lastDispatchDuration
		}
		if n.lastOperationID != "" {
			n.parent.lastOperationID = n.lastOperationID
		}
		if n.lastLocalID != "" {
			n.parent.lastLocalID = n.lastLocalID
		}
		if n.documentKey != "" {
			n.parent.documentKey = n.documentKey
		}
		n.parent.lock.Unlock()
	}
	// Only spans tagged with a service are candidates for threshold logging.
	if n.serviceName != "" {
		n.tracer.recordOp(n)
	}
}
// thresholdLogSpanContext links a child span back to its parent span.
type thresholdLogSpanContext struct {
	span *thresholdLogSpan
}

183
vendor/github.com/couchbase/gocb/v2/token.go generated vendored Normal file
View File

@ -0,0 +1,183 @@
package gocb
import (
"encoding/json"
"fmt"
"strconv"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// MutationToken holds the mutation state information from an operation.
type MutationToken struct {
	token gocbcore.MutationToken
	bucketName string
}
// bucketToken is the JSON wire form of one vbucket's state: a two-element
// array of [seqno, vbuuid-as-string] (see MarshalJSON/UnmarshalJSON below).
type bucketToken struct {
	SeqNo uint64 `json:"seqno"`
	VbUUID string `json:"vbuuid"`
}
// BucketName returns the name of the bucket that this token belongs to.
func (mt MutationToken) BucketName() string {
	return mt.bucketName
}
// PartitionUUID returns the UUID of the vbucket that this token belongs to.
func (mt MutationToken) PartitionUUID() uint64 {
	return uint64(mt.token.VbUUID)
}
// PartitionID returns the ID of the vbucket that this token belongs to.
func (mt MutationToken) PartitionID() uint64 {
	return uint64(mt.token.VbID)
}
// SequenceNumber returns the sequence number of the vbucket that this token belongs to.
func (mt MutationToken) SequenceNumber() uint64 {
	return uint64(mt.token.SeqNo)
}
// MarshalJSON encodes the token as a two-element JSON array [seqno, vbuuid].
func (mt bucketToken) MarshalJSON() ([]byte, error) {
	info := []interface{}{mt.SeqNo, mt.VbUUID}
	return json.Marshal(info)
}
// UnmarshalJSON decodes the [seqno, vbuuid] JSON array form of a token.
func (mt *bucketToken) UnmarshalJSON(data []byte) error {
	info := []interface{}{&mt.SeqNo, &mt.VbUUID}
	return json.Unmarshal(data, &info)
}
// bucketTokens maps vbucket ID (decimal string) -> token for one bucket.
type bucketTokens map[string]*bucketToken
// mutationStateData maps bucket name -> that bucket's vbucket tokens.
type mutationStateData map[string]*bucketTokens
// searchMutationState is the search-service form: bucket -> "vbID/vbUUID" -> seqno.
type searchMutationState map[string]map[string]uint64
// MutationState holds and aggregates MutationToken's across multiple operations.
type MutationState struct {
	tokens []MutationToken
}
// NewMutationState creates a new MutationState for tracking mutation state.
func NewMutationState(tokens ...MutationToken) *MutationState {
	mt := &MutationState{}
	mt.Add(tokens...)
	return mt
}
// Add includes an operation's mutation information in this mutation state.
// Tokens without a bucket name are skipped.
func (mt *MutationState) Add(tokens ...MutationToken) {
	for _, token := range tokens {
		if token.bucketName != "" {
			mt.tokens = append(mt.tokens, token)
		}
	}
}
// MutationStateInternal specifies internal operations.
// Internal: This should never be used and is not supported.
type MutationStateInternal struct {
	mt *MutationState
}
// Internal return a new MutationStateInternal.
// Internal: This should never be used and is not supported.
func (mt *MutationState) Internal() *MutationStateInternal {
	return &MutationStateInternal{
		mt: mt,
	}
}
// Add includes an operation's mutation information in this mutation state.
func (mti *MutationStateInternal) Add(bucket string, tokens ...gocbcore.MutationToken) {
	for _, token := range tokens {
		mti.mt.Add(MutationToken{
			bucketName: bucket,
			token:      token,
		})
	}
}
// Tokens returns the tokens belonging to the mutation state.
func (mti *MutationStateInternal) Tokens() []MutationToken {
	return mti.mt.tokens
}
// MarshalJSON marshal's this mutation state to JSON.
// The shape is bucket name -> vbucket ID (decimal string) -> [seqno, vbuuid];
// an empty state marshals as JSON null (data stays nil).
func (mt *MutationState) MarshalJSON() ([]byte, error) {
	var data mutationStateData
	for _, token := range mt.tokens {
		if data == nil {
			data = make(mutationStateData)
		}
		bucketName := token.bucketName
		if (data)[bucketName] == nil {
			tokens := make(bucketTokens)
			(data)[bucketName] = &tokens
		}
		vbID := fmt.Sprintf("%d", token.token.VbID)
		stateToken := (*(data)[bucketName])[vbID]
		if stateToken == nil {
			stateToken = &bucketToken{}
			(*(data)[bucketName])[vbID] = stateToken
		}
		// A later token for the same vbucket overwrites the earlier one.
		stateToken.SeqNo = uint64(token.token.SeqNo)
		stateToken.VbUUID = fmt.Sprintf("%d", token.token.VbUUID)
	}
	return json.Marshal(data)
}
// UnmarshalJSON unmarshal's a mutation state from JSON.
//
// Parsing uses strconv.ParseUint rather than Atoi: vbucket UUIDs are
// unsigned 64-bit values (MarshalJSON prints token.token.VbUUID with %d),
// so Atoi would overflow for UUIDs above the platform's int range, and
// out-of-range vbucket IDs were previously truncated silently by the
// uint16 conversion instead of being rejected.
func (mt *MutationState) UnmarshalJSON(data []byte) error {
	var stateData mutationStateData
	err := json.Unmarshal(data, &stateData)
	if err != nil {
		return err
	}
	for bucketName, bTokens := range stateData {
		for vbIDStr, stateToken := range *bTokens {
			// vbucket IDs fit in 16 bits; reject anything larger.
			vbID, err := strconv.ParseUint(vbIDStr, 10, 16)
			if err != nil {
				return err
			}
			// vbucket UUIDs are full 64-bit unsigned values.
			vbUUID, err := strconv.ParseUint(stateToken.VbUUID, 10, 64)
			if err != nil {
				return err
			}
			token := MutationToken{
				bucketName: bucketName,
				token: gocbcore.MutationToken{
					VbID:   uint16(vbID),
					VbUUID: gocbcore.VbUUID(vbUUID),
					SeqNo:  gocbcore.SeqNo(stateToken.SeqNo),
				},
			}
			mt.tokens = append(mt.tokens, token)
		}
	}
	return nil
}
// toSearchMutationState is specific to search, search doesn't accept tokens
// in the same format as other services: it wants bucket -> "vbID/vbUUID" -> seqno.
func (mt *MutationState) toSearchMutationState() searchMutationState {
	out := make(searchMutationState)
	for _, tok := range mt.tokens {
		bucket, ok := out[tok.bucketName]
		if !ok {
			bucket = make(map[string]uint64)
			out[tok.bucketName] = bucket
		}
		key := fmt.Sprintf("%d/%d", tok.token.VbID, tok.token.VbUUID)
		bucket[key] = uint64(tok.token.SeqNo)
	}
	return out
}

97
vendor/github.com/couchbase/gocb/v2/tracing.go generated vendored Normal file
View File

@ -0,0 +1,97 @@
package gocb
import (
"github.com/couchbase/gocbcore/v9"
)
// tracerAddRef increments the reference count on tracer when it exposes an
// AddRef method; nil tracers and tracers without ref-counting are left alone.
func tracerAddRef(tracer requestTracer) {
	if tracer == nil {
		return
	}
	rc, ok := tracer.(interface{ AddRef() int32 })
	if ok {
		rc.AddRef()
	}
}

// tracerDecRef decrements the reference count on tracer when it exposes a
// DecRef method; nil tracers and tracers without ref-counting are left alone.
func tracerDecRef(tracer requestTracer) {
	if tracer == nil {
		return
	}
	rc, ok := tracer.(interface{ DecRef() int32 })
	if ok {
		rc.DecRef()
	}
}
// requestTracer describes the tracing abstraction in the SDK.
type requestTracer interface {
	StartSpan(operationName string, parentContext requestSpanContext) requestSpan
}
// requestSpan is the interface for spans that are created by a requestTracer.
type requestSpan interface {
	Finish()
	Context() requestSpanContext
	SetTag(key string, value interface{}) requestSpan
}
// requestSpanContext is the interface for for external span contexts that can be passed in into the SDK option blocks.
type requestSpanContext interface {
}
// requestTracerWrapper adapts an SDK requestTracer to the gocbcore tracing
// interfaces so the core library can drive it.
type requestTracerWrapper struct {
	tracer requestTracer
}
// StartSpan forwards span creation to the wrapped SDK tracer, wrapping the
// resulting span for gocbcore.
func (tracer *requestTracerWrapper) StartSpan(operationName string, parentContext gocbcore.RequestSpanContext) gocbcore.RequestSpan {
	return requestSpanWrapper{
		span: tracer.tracer.StartSpan(operationName, parentContext),
	}
}
// requestSpanWrapper adapts an SDK requestSpan to gocbcore.RequestSpan.
type requestSpanWrapper struct {
	span requestSpan
}
// Finish forwards to the wrapped span.
func (span requestSpanWrapper) Finish() {
	span.span.Finish()
}
// Context forwards to the wrapped span.
func (span requestSpanWrapper) Context() gocbcore.RequestSpanContext {
	return span.span.Context()
}
// SetTag forwards to the wrapped span, retaining the (possibly new) span it returns.
func (span requestSpanWrapper) SetTag(key string, value interface{}) gocbcore.RequestSpan {
	span.span = span.span.SetTag(key, value)
	return span
}
// noopSpan and noopSpanContext are do-nothing implementations used when
// tracing is disabled.
type noopSpan struct{}
type noopSpanContext struct{}
var (
	defaultNoopSpanContext = noopSpanContext{}
	defaultNoopSpan        = noopSpan{}
)
// noopTracer will have a future use so we tell the linter not to flag it.
type noopTracer struct { // nolint: unused
}
// StartSpan returns the shared no-op span regardless of inputs.
func (tracer *noopTracer) StartSpan(operationName string, parentContext requestSpanContext) requestSpan {
	return defaultNoopSpan
}
// Finish does nothing.
func (span noopSpan) Finish() {
}
// Context returns the shared no-op span context.
func (span noopSpan) Context() requestSpanContext {
	return defaultNoopSpanContext
}
// SetTag ignores the tag and returns the shared no-op span.
func (span noopSpan) SetTag(key string, value interface{}) requestSpan {
	return defaultNoopSpan
}

398
vendor/github.com/couchbase/gocb/v2/transcoding.go generated vendored Normal file
View File

@ -0,0 +1,398 @@
package gocb
import (
"encoding/json"
gocbcore "github.com/couchbase/gocbcore/v9"
"github.com/pkg/errors"
)
// Transcoder provides an interface for transforming Go values to and
// from raw bytes for storage and retreival from Couchbase data storage.
type Transcoder interface {
	// Decode decodes retrieved bytes (with their common flags) into a Go type.
	Decode([]byte, uint32, interface{}) error
	// Encode encodes a Go type into bytes plus the common flags describing them.
	Encode(interface{}) ([]byte, uint32, error)
}
// JSONTranscoder implements the default transcoding behavior and applies JSON transcoding to all values.
//
// This will apply the following behavior to the value:
//   binary ([]byte) -> error.
//   default -> JSON value, JSON Flags.
type JSONTranscoder struct {
}
// NewJSONTranscoder returns a new JSONTranscoder.
func NewJSONTranscoder() *JSONTranscoder {
	return &JSONTranscoder{}
}
// Decode applies JSON transcoding behaviour to decode into a Go type.
// Only values flagged as uncompressed JSON are accepted; binary and string
// flagged values produce an error.
func (t *JSONTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {
	valueType, compression := gocbcore.DecodeCommonFlags(flags)
	// Make sure compression is disabled
	if compression != gocbcore.NoCompression {
		return errors.New("unexpected value compression")
	}
	// Normal types of decoding
	if valueType == gocbcore.BinaryType {
		return errors.New("binary datatype is not supported by JSONTranscoder")
	} else if valueType == gocbcore.StringType {
		return errors.New("string datatype is not supported by JSONTranscoder")
	} else if valueType == gocbcore.JSONType {
		err := json.Unmarshal(bytes, &out)
		if err != nil {
			return err
		}
		return nil
	}
	return errors.New("unexpected expectedFlags value")
}
// Encode applies JSON transcoding behaviour to encode a Go type.
// []byte input is rejected; json.RawMessage is passed through unparsed;
// everything else is marshalled with encoding/json.
func (t *JSONTranscoder) Encode(value interface{}) ([]byte, uint32, error) {
	var bytes []byte
	var flags uint32
	var err error
	switch typeValue := value.(type) {
	case []byte:
		return nil, 0, errors.New("binary data is not supported by JSONTranscoder")
	case *[]byte:
		return nil, 0, errors.New("binary data is not supported by JSONTranscoder")
	case json.RawMessage:
		bytes = typeValue
		flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)
	case *json.RawMessage:
		bytes = *typeValue
		flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)
	case *interface{}:
		// Unwrap the pointer and re-dispatch on the concrete value.
		return t.Encode(*typeValue)
	default:
		bytes, err = json.Marshal(value)
		if err != nil {
			return nil, 0, err
		}
		flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)
	}
	// No compression supported currently
	return bytes, flags, nil
}
// RawJSONTranscoder implements passthrough behavior of JSON data. This transcoder does not apply any serialization.
// It will forward data across the network without incurring unnecessary parsing costs.
//
// This will apply the following behavior to the value:
//   binary ([]byte) -> JSON bytes, JSON expectedFlags.
//   string -> JSON bytes, JSON expectedFlags.
//   default -> error.
type RawJSONTranscoder struct {
}
// NewRawJSONTranscoder returns a new RawJSONTranscoder.
func NewRawJSONTranscoder() *RawJSONTranscoder {
	return &RawJSONTranscoder{}
}
// Decode applies raw JSON transcoding behaviour to decode into a Go type.
// The JSON bytes are handed over verbatim; out must be *[]byte or *string.
func (t *RawJSONTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {
	valueType, compression := gocbcore.DecodeCommonFlags(flags)
	// Make sure compression is disabled
	if compression != gocbcore.NoCompression {
		return errors.New("unexpected value compression")
	}
	// Normal types of decoding
	if valueType == gocbcore.BinaryType {
		return errors.New("binary datatype is not supported by RawJSONTranscoder")
	} else if valueType == gocbcore.StringType {
		return errors.New("string datatype is not supported by RawJSONTranscoder")
	} else if valueType == gocbcore.JSONType {
		switch typedOut := out.(type) {
		case *[]byte:
			*typedOut = bytes
			return nil
		case *string:
			*typedOut = string(bytes)
			return nil
		default:
			return errors.New("you must encode raw JSON data in a byte array or string")
		}
	}
	return errors.New("unexpected expectedFlags value")
}
// Encode applies raw JSON transcoding behaviour to encode a Go type.
// Accepts byte slices, strings and json.RawMessage (and pointers to them)
// verbatim; anything else is an error.
func (t *RawJSONTranscoder) Encode(value interface{}) ([]byte, uint32, error) {
	var bytes []byte
	var flags uint32
	switch typeValue := value.(type) {
	case []byte:
		bytes = typeValue
		flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)
	case *[]byte:
		bytes = *typeValue
		flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)
	case string:
		bytes = []byte(typeValue)
		flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)
	case *string:
		bytes = []byte(*typeValue)
		flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)
	case json.RawMessage:
		bytes = typeValue
		flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)
	case *json.RawMessage:
		bytes = *typeValue
		flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)
	case *interface{}:
		// Unwrap the pointer and re-dispatch on the concrete value.
		return t.Encode(*typeValue)
	default:
		return nil, 0, makeInvalidArgumentsError("only binary and string data is supported by RawJSONTranscoder")
	}
	// No compression supported currently
	return bytes, flags, nil
}
// RawStringTranscoder implements passthrough behavior of raw string data. This transcoder does not apply any serialization.
//
// This will apply the following behavior to the value:
//   string -> string bytes, string expectedFlags.
//   default -> error.
type RawStringTranscoder struct {
}
// NewRawStringTranscoder returns a new RawStringTranscoder.
func NewRawStringTranscoder() *RawStringTranscoder {
	return &RawStringTranscoder{}
}
// Decode applies raw string transcoding behaviour to decode into a Go type.
// Only string-flagged, uncompressed values are accepted; out must be
// *string or *interface{}.
func (t *RawStringTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {
	valueType, compression := gocbcore.DecodeCommonFlags(flags)
	// Make sure compression is disabled
	if compression != gocbcore.NoCompression {
		return errors.New("unexpected value compression")
	}
	// Normal types of decoding
	if valueType == gocbcore.BinaryType {
		return errors.New("only string datatype is supported by RawStringTranscoder")
	} else if valueType == gocbcore.StringType {
		switch typedOut := out.(type) {
		case *string:
			*typedOut = string(bytes)
			return nil
		case *interface{}:
			*typedOut = string(bytes)
			return nil
		default:
			return errors.New("you must encode a string in a string or interface")
		}
	} else if valueType == gocbcore.JSONType {
		return errors.New("only string datatype is supported by RawStringTranscoder")
	}
	return errors.New("unexpected expectedFlags value")
}
// Encode applies raw string transcoding behaviour to encode a Go type.
// Only string and *string inputs (or *interface{} wrapping one) are accepted.
func (t *RawStringTranscoder) Encode(value interface{}) ([]byte, uint32, error) {
	var bytes []byte
	var flags uint32
	switch typeValue := value.(type) {
	case string:
		bytes = []byte(typeValue)
		flags = gocbcore.EncodeCommonFlags(gocbcore.StringType, gocbcore.NoCompression)
	case *string:
		bytes = []byte(*typeValue)
		flags = gocbcore.EncodeCommonFlags(gocbcore.StringType, gocbcore.NoCompression)
	case *interface{}:
		// Unwrap the pointer and re-dispatch on the concrete value.
		return t.Encode(*typeValue)
	default:
		return nil, 0, makeInvalidArgumentsError("only raw string data is supported by RawStringTranscoder")
	}
	// No compression supported currently
	return bytes, flags, nil
}
// RawBinaryTranscoder implements passthrough behavior of raw binary data. This transcoder does not apply any serialization.
//
// This will apply the following behavior to the value:
//   binary ([]byte) -> binary bytes, binary expectedFlags.
//   default -> error.
type RawBinaryTranscoder struct {
}
// NewRawBinaryTranscoder returns a new RawBinaryTranscoder.
func NewRawBinaryTranscoder() *RawBinaryTranscoder {
	return &RawBinaryTranscoder{}
}
// Decode applies raw binary transcoding behaviour to decode into a Go type.
// Only binary-flagged, uncompressed values are accepted; out must be
// *[]byte or *interface{}.
func (t *RawBinaryTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {
	valueType, compression := gocbcore.DecodeCommonFlags(flags)
	// Make sure compression is disabled
	if compression != gocbcore.NoCompression {
		return errors.New("unexpected value compression")
	}
	// Normal types of decoding
	if valueType == gocbcore.BinaryType {
		switch typedOut := out.(type) {
		case *[]byte:
			*typedOut = bytes
			return nil
		case *interface{}:
			*typedOut = bytes
			return nil
		default:
			return errors.New("you must encode binary in a byte array or interface")
		}
	} else if valueType == gocbcore.StringType {
		return errors.New("only binary datatype is supported by RawBinaryTranscoder")
	} else if valueType == gocbcore.JSONType {
		return errors.New("only binary datatype is supported by RawBinaryTranscoder")
	}
	return errors.New("unexpected expectedFlags value")
}
// Encode applies raw binary transcoding behaviour to encode a Go type.
// Only []byte and *[]byte inputs (or *interface{} wrapping one) are accepted.
func (t *RawBinaryTranscoder) Encode(value interface{}) ([]byte, uint32, error) {
	var bytes []byte
	var flags uint32
	switch typeValue := value.(type) {
	case []byte:
		bytes = typeValue
		flags = gocbcore.EncodeCommonFlags(gocbcore.BinaryType, gocbcore.NoCompression)
	case *[]byte:
		bytes = *typeValue
		flags = gocbcore.EncodeCommonFlags(gocbcore.BinaryType, gocbcore.NoCompression)
	case *interface{}:
		// Unwrap the pointer and re-dispatch on the concrete value.
		return t.Encode(*typeValue)
	default:
		return nil, 0, makeInvalidArgumentsError("only raw binary data is supported by RawBinaryTranscoder")
	}
	// No compression supported currently
	return bytes, flags, nil
}
// LegacyTranscoder implements the behaviour for a backward-compatible transcoder. This transcoder implements
// behaviour matching that of gocb v1.
//
// This will apply the following behavior to the value:
//   binary ([]byte) -> binary bytes, Binary expectedFlags.
//   string -> string bytes, String expectedFlags.
//   default -> JSON value, JSON expectedFlags.
type LegacyTranscoder struct {
}
// NewLegacyTranscoder returns a new LegacyTranscoder.
func NewLegacyTranscoder() *LegacyTranscoder {
	return &LegacyTranscoder{}
}
// Decode applies legacy transcoding behaviour to decode into a Go type.
// Binary values decode into *[]byte/*interface{}, string values into
// *string/*interface{}, and JSON values are unmarshalled into out.
func (t *LegacyTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {
	valueType, compression := gocbcore.DecodeCommonFlags(flags)
	// Make sure compression is disabled
	if compression != gocbcore.NoCompression {
		return errors.New("unexpected value compression")
	}
	// Normal types of decoding
	if valueType == gocbcore.BinaryType {
		switch typedOut := out.(type) {
		case *[]byte:
			*typedOut = bytes
			return nil
		case *interface{}:
			*typedOut = bytes
			return nil
		default:
			return errors.New("you must encode binary in a byte array or interface")
		}
	} else if valueType == gocbcore.StringType {
		switch typedOut := out.(type) {
		case *string:
			*typedOut = string(bytes)
			return nil
		case *interface{}:
			*typedOut = string(bytes)
			return nil
		default:
			return errors.New("you must encode a string in a string or interface")
		}
	} else if valueType == gocbcore.JSONType {
		err := json.Unmarshal(bytes, &out)
		if err != nil {
			return err
		}
		return nil
	}
	return errors.New("unexpected expectedFlags value")
}
// Encode applies legacy transcoding behavior to encode a Go type.
// Byte slices become binary-flagged, strings become string-flagged,
// json.RawMessage passes through as JSON, and everything else is marshalled
// with encoding/json.
func (t *LegacyTranscoder) Encode(value interface{}) ([]byte, uint32, error) {
	var bytes []byte
	var flags uint32
	var err error
	switch typeValue := value.(type) {
	case []byte:
		bytes = typeValue
		flags = gocbcore.EncodeCommonFlags(gocbcore.BinaryType, gocbcore.NoCompression)
	case *[]byte:
		bytes = *typeValue
		flags = gocbcore.EncodeCommonFlags(gocbcore.BinaryType, gocbcore.NoCompression)
	case string:
		bytes = []byte(typeValue)
		flags = gocbcore.EncodeCommonFlags(gocbcore.StringType, gocbcore.NoCompression)
	case *string:
		bytes = []byte(*typeValue)
		flags = gocbcore.EncodeCommonFlags(gocbcore.StringType, gocbcore.NoCompression)
	case json.RawMessage:
		bytes = typeValue
		flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)
	case *json.RawMessage:
		bytes = *typeValue
		flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)
	case *interface{}:
		// Unwrap the pointer and re-dispatch on the concrete value.
		return t.Encode(*typeValue)
	default:
		bytes, err = json.Marshal(value)
		if err != nil {
			return nil, 0, err
		}
		flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)
	}
	// No compression supported currently
	return bytes, flags, nil
}

11
vendor/github.com/couchbase/gocb/v2/version.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
package gocb
// Version returns a string representation of the current SDK version.
func Version() string {
	return goCbVersionStr // version constant maintained elsewhere in the package
}
// Identifier returns a string representation of the current SDK identifier,
// in the form "gocb/<version>".
func Identifier() string {
	return "gocb/" + goCbVersionStr
}

View File

@ -0,0 +1,211 @@
package gocb
import (
"bytes"
"encoding/json"
"net/url"
"strconv"
"time"
)
// ViewScanConsistency specifies the consistency required for a view query.
// It maps to the "stale" URL parameter of the views REST API.
type ViewScanConsistency uint

const (
	// ViewScanConsistencyNotBounded indicates that no special behaviour should be used.
	ViewScanConsistencyNotBounded ViewScanConsistency = iota + 1

	// ViewScanConsistencyRequestPlus indicates to update the index before querying it.
	ViewScanConsistencyRequestPlus

	// ViewScanConsistencyUpdateAfter indicates to update the index asynchronously after querying.
	ViewScanConsistencyUpdateAfter
)
// ViewOrdering specifies the ordering for the view queries results.
// It maps to the "descending" URL parameter of the views REST API.
type ViewOrdering uint

const (
	// ViewOrderingAscending indicates the query results should be sorted from lowest to highest.
	ViewOrderingAscending ViewOrdering = iota + 1

	// ViewOrderingDescending indicates the query results should be sorted from highest to lowest.
	ViewOrderingDescending
)
// ViewErrorMode specifies the behaviour of the query engine should an error occur during the gathering of
// view index results which would result in only partial results being available.
// It maps to the "on_error" URL parameter of the views REST API.
type ViewErrorMode uint

const (
	// ViewErrorModeContinue indicates to continue gathering results on error.
	ViewErrorModeContinue ViewErrorMode = iota + 1

	// ViewErrorModeStop indicates to stop gathering results on error.
	ViewErrorModeStop
)
// ViewOptions represents the options available when executing view query.
type ViewOptions struct {
	// ScanConsistency controls index staleness (the "stale" parameter).
	ScanConsistency ViewScanConsistency
	// Skip and Limit page through the result set.
	Skip  uint32
	Limit uint32
	// Order controls ascending/descending result ordering.
	Order ViewOrdering
	// Reduce toggles the reduce function; Group and GroupLevel are only
	// sent when Reduce is enabled.
	Reduce     bool
	Group      bool
	GroupLevel uint32
	// Key/Keys restrict results to exact keys; StartKey/EndKey (together
	// with InclusiveEnd and the *DocID fields) restrict to a key range.
	Key           interface{}
	Keys          []interface{}
	StartKey      interface{}
	EndKey        interface{}
	InclusiveEnd  bool
	StartKeyDocID string
	EndKeyDocID   string
	// OnError selects whether the server continues or stops on indexing errors.
	OnError ViewErrorMode
	// Debug requests debug information in the response.
	Debug bool

	// Raw provides a way to provide extra parameters in the request body for the query.
	Raw map[string]string

	// Namespace selects the design-document namespace (production/development).
	Namespace DesignDocumentNamespace

	Timeout       time.Duration
	RetryStrategy RetryStrategy

	// parentSpan carries the tracing context this query belongs to.
	parentSpan requestSpanContext
}
// toURLValues converts the options into the url.Values form expected by the
// views REST API, validating enum-style fields as it goes. It returns an
// invalid-arguments error for unrecognised enum values.
func (opts *ViewOptions) toURLValues() (*url.Values, error) {
	options := &url.Values{}

	// Zero means "unset" for all enum fields below; nothing is sent then.
	if opts.ScanConsistency != 0 {
		if opts.ScanConsistency == ViewScanConsistencyRequestPlus {
			options.Set("stale", "false")
		} else if opts.ScanConsistency == ViewScanConsistencyNotBounded {
			options.Set("stale", "ok")
		} else if opts.ScanConsistency == ViewScanConsistencyUpdateAfter {
			options.Set("stale", "update_after")
		} else {
			return nil, makeInvalidArgumentsError("unexpected stale option")
		}
	}

	if opts.Skip != 0 {
		options.Set("skip", strconv.FormatUint(uint64(opts.Skip), 10))
	}

	if opts.Limit != 0 {
		options.Set("limit", strconv.FormatUint(uint64(opts.Limit), 10))
	}

	if opts.Order != 0 {
		if opts.Order == ViewOrderingAscending {
			options.Set("descending", "false")
		} else if opts.Order == ViewOrderingDescending {
			options.Set("descending", "true")
		} else {
			return nil, makeInvalidArgumentsError("unexpected order option")
		}
	}

	// Reduce is disabled explicitly unless requested below.
	// NOTE(review): sending the explicit default may be redundant — confirm
	// against server behaviour before removing.
	options.Set("reduce", "false")
	if opts.Reduce {
		options.Set("reduce", "true")

		// Only set group if a reduce view
		options.Set("group", "false") // NOTE(review): possibly redundant default, as above.
		if opts.Group {
			options.Set("group", "true")
		}

		if opts.GroupLevel != 0 {
			options.Set("group_level", strconv.FormatUint(uint64(opts.GroupLevel), 10))
		}
	}

	// Key-based filters are JSON-encoded before being placed in the URL.
	if opts.Key != nil {
		jsonKey, err := opts.marshalJSON(opts.Key)
		if err != nil {
			return nil, err
		}

		options.Set("key", string(jsonKey))
	}

	if len(opts.Keys) > 0 {
		jsonKeys, err := opts.marshalJSON(opts.Keys)
		if err != nil {
			return nil, err
		}

		options.Set("keys", string(jsonKeys))
	}

	if opts.StartKey != nil {
		jsonStartKey, err := opts.marshalJSON(opts.StartKey)
		if err != nil {
			return nil, err
		}

		options.Set("startkey", string(jsonStartKey))
	} else {
		// NOTE(review): options is freshly constructed, so this Del is a
		// no-op; kept for symmetry/safety.
		options.Del("startkey")
	}

	if opts.EndKey != nil {
		jsonEndKey, err := opts.marshalJSON(opts.EndKey)
		if err != nil {
			return nil, err
		}

		options.Set("endkey", string(jsonEndKey))
	} else {
		options.Del("endkey") // no-op on a fresh Values; see note above.
	}

	// inclusive_end is only meaningful when a key range is in play.
	if opts.StartKey != nil || opts.EndKey != nil {
		if opts.InclusiveEnd {
			options.Set("inclusive_end", "true")
		} else {
			options.Set("inclusive_end", "false")
		}
	}

	if opts.StartKeyDocID == "" {
		options.Del("startkey_docid")
	} else {
		options.Set("startkey_docid", opts.StartKeyDocID)
	}

	if opts.EndKeyDocID == "" {
		options.Del("endkey_docid")
	} else {
		options.Set("endkey_docid", opts.EndKeyDocID)
	}

	if opts.OnError > 0 {
		if opts.OnError == ViewErrorModeContinue {
			options.Set("on_error", "continue")
		} else if opts.OnError == ViewErrorModeStop {
			options.Set("on_error", "stop")
		} else {
			return nil, makeInvalidArgumentsError("unexpected onerror option")
		}
	}

	if opts.Debug {
		options.Set("debug", "true")
	}

	// Raw entries are applied last and may therefore override anything above.
	if opts.Raw != nil {
		for k, v := range opts.Raw {
			options.Set(k, v)
		}
	}

	return options, nil
}
// marshalJSON encodes value to JSON for inclusion in a view query URL.
// A json.Encoder with HTML escaping disabled is used so characters such as
// '&' and '<' inside keys are sent verbatim rather than as \u0026 etc.
//
// Fix: json.Encoder.Encode terminates its output with a newline character;
// that newline is stripped here so it does not leak into the URL parameter
// value built by toURLValues.
func (opts *ViewOptions) marshalJSON(value interface{}) ([]byte, error) {
	buf := new(bytes.Buffer)
	enc := json.NewEncoder(buf)
	enc.SetEscapeHTML(false)
	if err := enc.Encode(value); err != nil {
		return nil, err
	}
	return bytes.TrimRight(buf.Bytes(), "\n"), nil
}

18
vendor/github.com/couchbase/gocbcore/v9/.golangci.yml generated vendored Normal file
View File

@ -0,0 +1,18 @@
# Configuration for golangci-lint.
run:
  modules-download-mode: readonly  # never modify go.mod/go.sum during lint runs
  tests: false                     # production code only
  skip-files:
    - logging.go # Logging has some utility functions that are useful to have around which get flagged up
linters:
  enable:
    - bodyclose
    - golint
    - gosec
    - unconvert
linters-settings:
  golint:
    set-exit-status: true
    min-confidence: 0.81
  errcheck:
    check-type-assertions: true
    check-blank: true

202
vendor/github.com/couchbase/gocbcore/v9/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

24
vendor/github.com/couchbase/gocbcore/v9/Makefile generated vendored Normal file
View File

@ -0,0 +1,24 @@
# devsetup installs the developer tooling used by the lint and mock targets.
devsetup:
	go get github.com/golangci/golangci-lint/cmd/golangci-lint
	go get github.com/vektra/mockery/.../

# test runs the full test suite.
test:
	go test ./...

# fasttest skips long-running tests.
fasttest:
	go test -short ./...

# cover produces a coverage profile in cover.out.
cover:
	go test -coverprofile=cover.out ./...

# lint runs golangci-lint with verbose output.
lint:
	golangci-lint run -v

# check is the pre-commit gate: lint plus race-enabled tests with coverage.
check: lint
	go test -cover -race ./...

# updatemocks regenerates in-package test mocks via mockery.
updatemocks:
	mockery -name dispatcher -output . -testonly -inpkg
	mockery -name tracerManager -output . -testonly -inpkg
	mockery -name configManager -output . -testonly -inpkg

.PHONY: all test devsetup fasttest lint cover checkerrs checkfmt checkvet checkiea checkspell check updatemocks

22
vendor/github.com/couchbase/gocbcore/v9/README.md generated vendored Normal file
View File

@ -0,0 +1,22 @@
# Couchbase Go Core
This package provides the underlying Couchbase IO for the gocb project.
If you are looking for the Couchbase Go SDK, you are probably looking for
[gocb](https://github.com/couchbase/gocb).
## Branching Strategy
The gocbcore library maintains a branch for each previous major revision
of its API. These branches are introduced just prior to any API breaking
changes. Active work is performed on the master branch, with releases
being performed as tags. Work on master that is not yet part of a
tagged release should be considered liable to change.
## License
Copyright 2017 Couchbase Inc.
Licensed under the Apache License, Version 2.0.
See
[LICENSE](https://github.com/couchbase/gocbcore/blob/master/LICENSE)
for further details.

572
vendor/github.com/couchbase/gocbcore/v9/agent.go generated vendored Normal file
View File

@ -0,0 +1,572 @@
// Package gocbcore implements methods for low-level communication
// with a Couchbase Server cluster.
package gocbcore
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"net/http"
"sync/atomic"
"time"
)
// Agent represents the base client handling connections to a Couchbase Server.
// This is used internally by the higher level classes for communicating with the cluster,
// it can also be used to perform more advanced operations with a cluster.
type Agent struct {
	clientID   string
	bucketName string
	tlsConfig  *dynTLSConfig
	initFn     memdInitFunc

	defaultRetryStrategy RetryStrategy

	// Configuration polling and connection routing.
	pollerController *pollerController
	kvMux            *kvMux
	httpMux          *httpMux

	cfgManager  *configManagementComponent
	errMap      *errMapComponent
	collections *collectionsComponent
	tracer      *tracerComponent
	http        *httpComponent
	diagnostics *diagnosticsComponent

	// Operation components; each wraps one service exposed by the cluster.
	crud      *crudComponent
	observe   *observeComponent
	stats     *statsComponent
	n1ql      *n1qlQueryComponent
	analytics *analyticsQueryComponent
	search    *searchQueryComponent
	views     *viewQueryComponent

	// zombieLogger is non-nil only when zombie-response logging is enabled.
	zombieLogger *zombieLoggerComponent
}
// NOTE(review): The accessors below appear to date from an earlier design in
// which the Agent carried kvConnectTimeout directly; the current Agent struct
// has no such field (the timeout now lives in the dialer configuration).
// Retained commented out for reference; remove once confirmed unused.
//
// ServerConnectTimeout gets the timeout for each server connection, including all authentication steps.
// func (agent *Agent) ServerConnectTimeout() time.Duration {
// 	return agent.kvConnectTimeout
// }
//
// // SetServerConnectTimeout sets the timeout for each server connection.
// func (agent *Agent) SetServerConnectTimeout(timeout time.Duration) {
// 	agent.kvConnectTimeout = timeout
// }
// HTTPClient returns a pre-configured HTTP Client for communicating with
// Couchbase Server. You must still specify authentication information
// for any dispatched requests.
func (agent *Agent) HTTPClient() *http.Client {
	// Shared client built once at agent creation (see createHTTPClient).
	return agent.http.cli
}
// AuthFunc is invoked by the agent to authenticate a client. This function returns two channels to allow for multi-stage
// authentication processes (such as SCRAM). The continue callback should be called when further asynchronous bootstrapping
// requests (such as select bucket) can be sent. The completed callback should be called when authentication is completed,
// or failed. It should contain any error that occurred. If completed is called before continue then continue will be called
// first internally, the success value will be determined by whether or not an error is present.
type AuthFunc func(client AuthClient, deadline time.Time, continueCb func(), completedCb func(error)) error

// authFunc wraps AuthFunc in a channel-based form that is more convenient
// for the internal bootstrap code to consume.
type authFunc func() (completedCh chan BytesAndError, continueCh chan bool, err error)

// authFuncHandler resolves an authFunc for a given client/deadline/mechanism
// combination; it may return nil when no authentication should be performed.
type authFuncHandler func(client AuthClient, deadline time.Time, mechanism AuthMechanism) authFunc
// CreateAgent creates an agent for performing normal operations.
func CreateAgent(config *AgentConfig) (*Agent, error) {
	// A plain agent needs no extra per-connection initialisation.
	noopInit := func(client *memdClient, deadline time.Time) error {
		return nil
	}
	return createAgent(config, noopInit)
}
// createAgent performs the shared construction work for all agent variants:
// it resolves tunables from config (falling back to package defaults), builds
// the TLS/HTTP/tracing infrastructure, wires up the mux and operation
// components, seeds an initial route config and starts the config poller.
// initFn is invoked for each new memcached connection during bootstrap.
func createAgent(config *AgentConfig, initFn memdInitFunc) (*Agent, error) {
	logInfof("SDK Version: gocbcore/%s", goCbCoreVersionStr)
	logInfof("Creating new agent: %+v", config)

	// TLS is only configured when explicitly requested.
	var tlsConfig *dynTLSConfig
	if config.UseTLS {
		tlsConfig = createTLSConfig(config.Auth, config.TLSRootCAProvider)
	}

	httpIdleConnTimeout := 4500 * time.Millisecond
	if config.HTTPIdleConnectionTimeout > 0 {
		httpIdleConnTimeout = config.HTTPIdleConnectionTimeout
	}

	httpCli := createHTTPClient(config.HTTPMaxIdleConns, config.HTTPMaxIdleConnsPerHost,
		httpIdleConnTimeout, tlsConfig)

	tracer := config.Tracer
	if tracer == nil {
		tracer = noopTracer{}
	}
	tracerCmpt := newTracerComponent(tracer, config.BucketName, config.NoRootTraceSpans)

	c := &Agent{
		clientID:   formatCbUID(randomCbUID()),
		bucketName: config.BucketName,
		tlsConfig:  tlsConfig,
		initFn:     initFn,
		tracer:     tracerCmpt,

		defaultRetryStrategy: config.DefaultRetryStrategy,

		errMap: newErrMapManager(config.BucketName),
	}

	// Snapshot config values and apply package defaults for anything unset.
	circuitBreakerConfig := config.CircuitBreakerConfig
	auth := config.Auth
	userAgent := config.UserAgent
	useMutationTokens := config.UseMutationTokens
	disableDecompression := config.DisableDecompression
	useCompression := config.UseCompression
	useCollections := config.UseCollections
	compressionMinSize := 32
	compressionMinRatio := 0.83
	useDurations := config.UseDurations
	useOutOfOrder := config.UseOutOfOrderResponses

	kvConnectTimeout := 7000 * time.Millisecond
	if config.KVConnectTimeout > 0 {
		kvConnectTimeout = config.KVConnectTimeout
	}

	serverWaitTimeout := 5 * time.Second

	kvPoolSize := 1
	if config.KvPoolSize > 0 {
		kvPoolSize = config.KvPoolSize
	}

	maxQueueSize := 2048
	if config.MaxQueueSize > 0 {
		maxQueueSize = config.MaxQueueSize
	}

	confHTTPRetryDelay := 10 * time.Second
	if config.HTTPRetryDelay > 0 {
		confHTTPRetryDelay = config.HTTPRetryDelay
	}

	confHTTPRedialPeriod := 10 * time.Second
	if config.HTTPRedialPeriod > 0 {
		confHTTPRedialPeriod = config.HTTPRedialPeriod
	}

	confCccpMaxWait := 3 * time.Second
	if config.CccpMaxWait > 0 {
		confCccpMaxWait = config.CccpMaxWait
	}

	confCccpPollPeriod := 2500 * time.Millisecond
	if config.CccpPollPeriod > 0 {
		confCccpPollPeriod = config.CccpPollPeriod
	}

	if config.CompressionMinSize > 0 {
		compressionMinSize = config.CompressionMinSize
	}
	if config.CompressionMinRatio > 0 {
		compressionMinRatio = config.CompressionMinRatio
		// Ratios above 1.0 are meaningless; clamp.
		if compressionMinRatio >= 1.0 {
			compressionMinRatio = 1.0
		}
	}
	if c.defaultRetryStrategy == nil {
		c.defaultRetryStrategy = newFailFastRetryStrategy()
	}

	// SCRAM mechanisms are offered strongest-first.
	authMechanisms := []AuthMechanism{
		ScramSha512AuthMechanism,
		ScramSha256AuthMechanism,
		ScramSha1AuthMechanism}

	// PLAIN authentication is only supported over TLS
	if config.UseTLS {
		authMechanisms = append(authMechanisms, PlainAuthMechanism)
	}

	authHandler := buildAuthHandler(auth)

	// Expand bare host:port entries into full http(s) endpoints.
	var httpEpList []string
	for _, hostPort := range config.HTTPAddrs {
		if !c.IsSecure() {
			httpEpList = append(httpEpList, fmt.Sprintf("http://%s", hostPort))
		} else {
			httpEpList = append(httpEpList, fmt.Sprintf("https://%s", hostPort))
		}
	}

	if config.UseZombieLogger {
		zombieLoggerInterval := 10 * time.Second
		zombieLoggerSampleSize := 10
		if config.ZombieLoggerInterval > 0 {
			zombieLoggerInterval = config.ZombieLoggerInterval
		}
		if config.ZombieLoggerSampleSize > 0 {
			zombieLoggerSampleSize = config.ZombieLoggerSampleSize
		}

		c.zombieLogger = newZombieLoggerComponent(zombieLoggerInterval, zombieLoggerSampleSize)
		go c.zombieLogger.Start()
	}

	c.cfgManager = newConfigManager(
		configManagerProperties{
			NetworkType:  config.NetworkType,
			UseSSL:       config.UseTLS,
			SrcMemdAddrs: config.MemdAddrs,
			SrcHTTPAddrs: httpEpList,
		},
	)

	dialer := newMemdClientDialerComponent(
		memdClientDialerProps{
			ServerWaitTimeout:    serverWaitTimeout,
			KVConnectTimeout:     kvConnectTimeout,
			ClientID:             c.clientID,
			TLSConfig:            c.tlsConfig,
			CompressionMinSize:   compressionMinSize,
			CompressionMinRatio:  compressionMinRatio,
			DisableDecompression: disableDecompression,
		},
		bootstrapProps{
			HelloProps: helloProps{
				CollectionsEnabled:    useCollections,
				MutationTokensEnabled: useMutationTokens,
				CompressionEnabled:    useCompression,
				DurationsEnabled:      useDurations,
				OutOfOrderEnabled:     useOutOfOrder,
			},
			Bucket:         c.bucketName,
			UserAgent:      userAgent,
			AuthMechanisms: authMechanisms,
			AuthHandler:    authHandler,
			ErrMapManager:  c.errMap,
		},
		circuitBreakerConfig,
		c.zombieLogger,
		c.tracer,
		initFn,
	)

	c.kvMux = newKVMux(
		kvMuxProps{
			QueueSize:          maxQueueSize,
			PoolSize:           kvPoolSize,
			CollectionsEnabled: useCollections,
		},
		c.cfgManager,
		c.errMap,
		c.tracer,
		dialer,
	)
	c.collections = newCollectionIDManager(
		collectionIDProps{
			MaxQueueSize:         config.MaxQueueSize,
			DefaultRetryStrategy: c.defaultRetryStrategy,
		},
		c.kvMux,
		c.tracer,
		c.cfgManager,
	)
	c.httpMux = newHTTPMux(circuitBreakerConfig, c.cfgManager)
	c.http = newHTTPComponent(
		httpComponentProps{
			UserAgent:            userAgent,
			DefaultRetryStrategy: c.defaultRetryStrategy,
		},
		httpCli,
		c.httpMux,
		auth,
		c.tracer,
	)

	if len(config.MemdAddrs) == 0 && config.BucketName == "" {
		// The http poller can't run without a bucket. We don't trigger an error for this case
		// because AgentGroup users who use memcached buckets on non-default ports will end up here.
		logDebugf("No bucket name specified and only http addresses specified, not running config poller")
	} else {
		c.pollerController = newPollerController(
			newCCCPConfigController(
				cccpPollerProperties{
					confCccpMaxWait:    confCccpMaxWait,
					confCccpPollPeriod: confCccpPollPeriod,
				},
				c.kvMux,
				c.cfgManager,
			),
			newHTTPConfigController(
				c.bucketName,
				httpPollerProperties{
					httpComponent:        c.http,
					confHTTPRetryDelay:   confHTTPRetryDelay,
					confHTTPRedialPeriod: confHTTPRedialPeriod,
				},
				c.httpMux,
				c.cfgManager,
			),
			c.cfgManager,
		)
	}

	// Operation components layered on top of the muxes.
	c.observe = newObserveComponent(c.collections, c.defaultRetryStrategy, c.tracer, c.kvMux)
	c.crud = newCRUDComponent(c.collections, c.defaultRetryStrategy, c.tracer, c.errMap, c.kvMux)
	c.stats = newStatsComponent(c.kvMux, c.defaultRetryStrategy, c.tracer)
	c.n1ql = newN1QLQueryComponent(c.http, c.cfgManager, c.tracer)
	c.analytics = newAnalyticsQueryComponent(c.http, c.tracer)
	c.search = newSearchQueryComponent(c.http, c.tracer)
	c.views = newViewQueryComponent(c.http, c.tracer)
	c.diagnostics = newDiagnosticsComponent(c.kvMux, c.httpMux, c.http, c.bucketName, c.defaultRetryStrategy, c.pollerController)

	// Kick everything off.
	cfg := &routeConfig{
		kvServerList: config.MemdAddrs,
		mgmtEpList:   httpEpList,
		revID:        -1, // -1 marks this as a pre-cluster seed config
	}

	c.httpMux.OnNewRouteConfig(cfg)
	c.kvMux.OnNewRouteConfig(cfg)

	if c.pollerController != nil {
		go c.pollerController.Start()
	}

	return c, nil
}
// createTLSConfig builds the dynamic TLS configuration used for encrypted
// connections, sourcing client certificates from the AuthProvider and CA
// roots from caProvider.
func createTLSConfig(auth AuthProvider, caProvider func() *x509.CertPool) *dynTLSConfig {
	clientCertFn := func(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
		cert, err := auth.Certificate(AuthCertRequest{})
		if err != nil {
			return nil, err
		}
		if cert == nil {
			// No client certificate configured: present an empty certificate
			// rather than returning nil.
			return &tls.Certificate{}, nil
		}
		return cert, nil
	}

	return &dynTLSConfig{
		BaseConfig: &tls.Config{
			GetClientCertificate: clientCertFn,
		},
		Provider: caProvider,
	}
}
// createHTTPClient builds the shared http.Client used for all HTTP traffic to
// the cluster, honouring connection-pool sizing and (optionally) dynamic TLS.
func createHTTPClient(maxIdleConns, maxIdleConnsPerHost int, idleTimeout time.Duration, tlsConfig *dynTLSConfig) *http.Client {
	httpDialer := &net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
	}

	// We set up the transport to point at the BaseConfig from the dynamic TLS system.
	// We also set ForceAttemptHTTP2, which will update the base-config to support HTTP2
	// automatically, so that all configs from it will look for that.
	var httpTLSConfig *dynTLSConfig
	var httpBaseTLSConfig *tls.Config
	if tlsConfig != nil {
		httpTLSConfig = tlsConfig.Clone()
		httpBaseTLSConfig = httpTLSConfig.BaseConfig
	}

	httpTransport := &http.Transport{
		TLSClientConfig:   httpBaseTLSConfig,
		ForceAttemptHTTP2: true,

		// NOTE(review): Transport.Dial/DialTLS are deprecated in favour of
		// DialContext/DialTLSContext; consider migrating.
		Dial: func(network, addr string) (net.Conn, error) {
			return httpDialer.Dial(network, addr)
		},
		DialTLS: func(network, addr string) (net.Conn, error) {
			tcpConn, err := httpDialer.Dial(network, addr)
			if err != nil {
				return nil, err
			}

			// A TLS dial on an agent without TLS configured is an error.
			if httpTLSConfig == nil {
				return nil, errors.New("TLS was not configured on this Agent")
			}

			// Derive an address-specific TLS config from the dynamic base.
			srvTLSConfig, err := httpTLSConfig.MakeForAddr(addr)
			if err != nil {
				return nil, err
			}

			tlsConn := tls.Client(tcpConn, srvTLSConfig)
			return tlsConn, nil
		},
		MaxIdleConns:        maxIdleConns,
		MaxIdleConnsPerHost: maxIdleConnsPerHost,
		IdleConnTimeout:     idleTimeout,
	}

	httpCli := &http.Client{
		Transport: httpTransport,
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			// All that we're doing here is setting auth on any redirects.
			// For that reason we can just pull it off the oldest (first) request.
			if len(via) >= 10 {
				// Just duplicate the default behaviour for maximum redirects.
				return errors.New("stopped after 10 redirects")
			}

			oldest := via[0]
			auth := oldest.Header.Get("Authorization")
			if auth != "" {
				req.Header.Set("Authorization", auth)
			}

			return nil
		},
	}

	return httpCli
}
// buildAuthHandler constructs the SASL authentication handler used during
// memcached connection bootstrap for the given credential provider.
//
// The returned handler resolves credentials for the target address and, when
// username/password credentials are present, returns an authFunc which starts
// the SASL exchange: the continue channel is signalled once further bootstrap
// requests may be sent, and the completed channel once authentication
// finishes (carrying any error). It returns nil when no credentials apply.
func buildAuthHandler(auth AuthProvider) authFuncHandler {
	return func(client AuthClient, deadline time.Time, mechanism AuthMechanism) authFunc {
		creds, err := getKvAuthCreds(auth, client.Address())
		if err != nil {
			return nil
		}

		if creds.Username != "" || creds.Password != "" {
			return func() (chan BytesAndError, chan bool, error) {
				continueCh := make(chan bool, 1)
				completedCh := make(chan BytesAndError, 1)
				hasContinued := int32(0)
				callErr := saslMethod(mechanism, creds.Username, creds.Password, client, deadline, func() {
					// hasContinued should never be 1 here but let's guard against it.
					if atomic.CompareAndSwapInt32(&hasContinued, 0, 1) {
						continueCh <- true
					}
				}, func(err error) {
					if atomic.CompareAndSwapInt32(&hasContinued, 0, 1) {
						sendContinue := true
						if err != nil {
							sendContinue = false
						}
						continueCh <- sendContinue
					}

					completedCh <- BytesAndError{Err: err}
				})
				if callErr != nil {
					// Fix: previously returned `err` here, which is always nil
					// at this point (it came from the earlier, successful
					// getKvAuthCreds call), silently swallowing SASL setup
					// failures. Return the actual error.
					return nil, nil, callErr
				}
				return completedCh, continueCh, nil
			}
		}

		return nil
	}
}
// Close shuts down the agent, disconnecting from all servers and failing
// any outstanding operations with ErrShutdown.
func (agent *Agent) Close() error {
	// Close the KV mux first so outstanding operations fail fast; its error
	// is returned after the remaining teardown completes.
	routeCloseErr := agent.kvMux.Close()

	poller := agent.pollerController
	if poller != nil {
		poller.Stop()
	}

	if agent.zombieLogger != nil {
		agent.zombieLogger.Stop()
	}

	if poller != nil {
		// Wait for our external looper goroutines to finish, note that if the
		// specific looper wasn't used, it will be a nil value otherwise it
		// will be an open channel till its closed to signal completion.
		pollerCh := poller.Done()
		if pollerCh != nil {
			<-pollerCh
		}
	}

	// Close the transports so that they don't hold open goroutines.
	agent.http.Close()

	return routeCloseErr
}
// ClientID returns the unique id for this agent.
func (agent *Agent) ClientID() string {
	return agent.clientID
}

// CapiEps returns all the available endpoints for performing
// map-reduce queries.
func (agent *Agent) CapiEps() []string {
	return agent.httpMux.CapiEps()
}

// MgmtEps returns all the available endpoints for performing
// management queries.
func (agent *Agent) MgmtEps() []string {
	return agent.httpMux.MgmtEps()
}

// N1qlEps returns all the available endpoints for performing
// N1QL queries.
func (agent *Agent) N1qlEps() []string {
	return agent.httpMux.N1qlEps()
}

// FtsEps returns all the available endpoints for performing
// FTS queries.
func (agent *Agent) FtsEps() []string {
	return agent.httpMux.FtsEps()
}

// CbasEps returns all the available endpoints for performing
// CBAS queries.
func (agent *Agent) CbasEps() []string {
	return agent.httpMux.CbasEps()
}

// HasCollectionsSupport verifies whether or not collections are available on the agent.
func (agent *Agent) HasCollectionsSupport() bool {
	return agent.kvMux.SupportsCollections()
}

// IsSecure returns whether this client is connected via SSL.
func (agent *Agent) IsSecure() bool {
	return agent.tlsConfig != nil
}

// UsingGCCCP returns whether or not the Agent is currently using GCCCP polling.
func (agent *Agent) UsingGCCCP() bool {
	return agent.kvMux.SupportsGCCCP()
}

// HasSeenConfig returns whether or not the Agent has seen a valid cluster config. This does not mean that the agent
// currently has active connections.
// Volatile: This API is subject to change at any time.
func (agent *Agent) HasSeenConfig() (bool, error) {
	seen, err := agent.kvMux.ConfigRev()
	if err != nil {
		return false, err
	}

	// A config revision of -1 means no config has been observed yet.
	return seen > -1, nil
}

// WaitUntilReady returns whether or not the Agent has seen a valid cluster config.
func (agent *Agent) WaitUntilReady(deadline time.Time, opts WaitUntilReadyOptions, cb WaitUntilReadyCallback) (PendingOp, error) {
	return agent.diagnostics.WaitUntilReady(deadline, opts, cb)
}

// ConfigSnapshot returns a snapshot of the underlying configuration currently in use.
func (agent *Agent) ConfigSnapshot() (*ConfigSnapshot, error) {
	return agent.kvMux.ConfigSnapshot()
}

// BucketName returns the name of the bucket that the agent is using, if any.
// Uncommitted: This API may change in the future.
func (agent *Agent) BucketName() string {
	return agent.bucketName
}

365
vendor/github.com/couchbase/gocbcore/v9/agent_config.go generated vendored Normal file
View File

@ -0,0 +1,365 @@
package gocbcore
import (
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"strconv"
"time"
"github.com/couchbase/gocbcore/v9/connstr"
)
func parseDurationOrInt(valStr string) (time.Duration, error) {
dur, err := time.ParseDuration(valStr)
if err != nil {
val, err := strconv.ParseInt(valStr, 10, 64)
if err != nil {
return 0, err
}
dur = time.Duration(val) * time.Millisecond
}
return dur, nil
}
// AgentConfig specifies the configuration options for creation of an Agent.
type AgentConfig struct {
	// Bootstrap addresses: memcached (CCCP) and HTTP endpoints respectively.
	MemdAddrs []string
	HTTPAddrs []string

	BucketName  string
	UserAgent   string
	UseTLS      bool
	NetworkType string
	Auth        AuthProvider

	// TLSRootCAProvider supplies the CA pool used to verify server certs.
	TLSRootCAProvider func() *x509.CertPool

	UseMutationTokens      bool
	UseCompression         bool
	UseDurations           bool
	DisableDecompression   bool
	UseOutOfOrderResponses bool

	UseCollections bool

	// Compression thresholds: minimum document size in bytes, and the
	// maximum compressed/original ratio at which a document is still sent
	// compressed.
	CompressionMinSize  int
	CompressionMinRatio float64

	HTTPRedialPeriod time.Duration
	HTTPRetryDelay   time.Duration
	CccpMaxWait      time.Duration
	CccpPollPeriod   time.Duration

	ConnectTimeout   time.Duration
	KVConnectTimeout time.Duration

	KvPoolSize   int
	MaxQueueSize int

	HTTPMaxIdleConns          int
	HTTPMaxIdleConnsPerHost   int
	HTTPIdleConnectionTimeout time.Duration

	// Volatile: Tracer API is subject to change.
	Tracer           RequestTracer
	NoRootTraceSpans bool

	DefaultRetryStrategy RetryStrategy
	CircuitBreakerConfig CircuitBreakerConfig

	// Orphaned-response ("zombie") logging configuration.
	UseZombieLogger        bool
	ZombieLoggerInterval   time.Duration
	ZombieLoggerSampleSize int
}
// redacted returns a copy of the config suitable for logging, with system
// and meta data obscured when full log redaction is enabled.
func (config *AgentConfig) redacted() interface{} {
	newConfig := *config
	if isLogRedactionLevelFull() {
		// The slices here are still pointing at config's underlying arrays
		// so we need to make them not do that.
		redactAddrs := func(addrs []string) []string {
			out := append([]string(nil), addrs...)
			for i, addr := range out {
				out[i] = redactSystemData(addr)
			}
			return out
		}
		newConfig.HTTPAddrs = redactAddrs(newConfig.HTTPAddrs)
		newConfig.MemdAddrs = redactAddrs(newConfig.MemdAddrs)

		if newConfig.BucketName != "" {
			newConfig.BucketName = redactMetaData(newConfig.BucketName)
		}
	}
	return newConfig
}
// FromConnStr populates the AgentConfig with information from a
// Couchbase Connection String.
// Supported options are:
//   bootstrap_on (bool) - Specifies what protocol to bootstrap on (cccp, http).
//   ca_cert_path (string) - Specifies the path to a CA certificate.
//   network (string) - The network type to use.
//   kv_connect_timeout (duration) - Maximum period to attempt to connect to cluster in ms.
//   config_poll_interval (duration) - Period to wait between CCCP config polling in ms.
//   config_poll_timeout (duration) - Maximum period of time to wait for a CCCP request.
//   compression (bool) - Whether to enable network-wise compression of documents.
//   compression_min_size (int) - The minimal size of the document in bytes to consider compression.
//   compression_min_ratio (float64) - The minimal compress ratio (compressed / original) for the document to be sent compressed.
//   enable_server_durations (bool) - Whether to enable fetching server operation durations.
//   max_idle_http_connections (int) - Maximum number of idle http connections in the pool.
//   max_perhost_idle_http_connections (int) - Maximum number of idle http connections in the pool per host.
//   idle_http_connection_timeout (duration) - Maximum length of time for an idle connection to stay in the pool in ms.
//   orphaned_response_logging (bool) - Whether to enable orphaned response logging.
//   orphaned_response_logging_interval (duration) - How often to print the orphan log records.
//   orphaned_response_logging_sample_size (int) - The maximum number of orphan log records to track.
//   dcp_priority (int) - Specifies the priority to request from the Cluster when connecting for DCP.
//   enable_dcp_expiry (bool) - Whether to enable the feature to distinguish between explicit delete and expired delete on DCP.
//   http_redial_period (duration) - The maximum length of time for the HTTP poller to stay connected before reconnecting.
//   http_retry_delay (duration) - The length of time to wait between HTTP poller retries if connecting fails.
//   kv_pool_size (int) - The number of connections to create to each kv node.
//   max_queue_size (int) - The maximum number of requests that can be queued for sending per connection.
func (config *AgentConfig) FromConnStr(connStr string) error {
	baseSpec, err := connstr.Parse(connStr)
	if err != nil {
		return err
	}

	spec, err := connstr.Resolve(baseSpec)
	if err != nil {
		return err
	}

	// fetchOption returns the last value given for a connection-string
	// option, mirroring "last one wins" semantics for repeated options.
	fetchOption := func(name string) (string, bool) {
		optValue := spec.Options[name]
		if len(optValue) == 0 {
			return "", false
		}
		return optValue[len(optValue)-1], true
	}

	// Grab the resolved hostnames into a set of string arrays
	var httpHosts []string
	for _, specHost := range spec.HttpHosts {
		httpHosts = append(httpHosts, fmt.Sprintf("%s:%d", specHost.Host, specHost.Port))
	}

	var memdHosts []string
	for _, specHost := range spec.MemdHosts {
		memdHosts = append(memdHosts, fmt.Sprintf("%s:%d", specHost.Host, specHost.Port))
	}

	// Get bootstrap_on option to determine which, if any, of the bootstrap nodes should be cleared
	switch val, _ := fetchOption("bootstrap_on"); val {
	case "http":
		memdHosts = nil
		if len(httpHosts) == 0 {
			return errors.New("bootstrap_on=http but no HTTP hosts in connection string")
		}
	case "cccp":
		httpHosts = nil
		if len(memdHosts) == 0 {
			return errors.New("bootstrap_on=cccp but no CCCP/Memcached hosts in connection string")
		}
	case "both", "":
		// Do nothing; both host lists are kept.
	default:
		return errors.New("bootstrap_on={http,cccp,both}")
	}
	config.MemdAddrs = memdHosts
	config.HTTPAddrs = httpHosts

	if spec.UseSsl {
		cacertpaths := spec.Options["ca_cert_path"]

		if len(cacertpaths) > 0 {
			roots := x509.NewCertPool()

			for _, path := range cacertpaths {
				cacert, err := ioutil.ReadFile(path)
				if err != nil {
					return err
				}

				ok := roots.AppendCertsFromPEM(cacert)
				if !ok {
					return errInvalidCertificate
				}
			}

			config.TLSRootCAProvider = func() *x509.CertPool {
				return roots
			}
		}

		config.UseTLS = true
	}

	if spec.Bucket != "" {
		config.BucketName = spec.Bucket
	}

	if valStr, ok := fetchOption("network"); ok {
		if valStr == "default" {
			valStr = ""
		}

		config.NetworkType = valStr
	}

	if valStr, ok := fetchOption("kv_connect_timeout"); ok {
		val, err := parseDurationOrInt(valStr)
		if err != nil {
			return fmt.Errorf("kv_connect_timeout option must be a duration or a number")
		}
		config.KVConnectTimeout = val
	}

	if valStr, ok := fetchOption("config_poll_timeout"); ok {
		val, err := parseDurationOrInt(valStr)
		if err != nil {
			return fmt.Errorf("config_poll_timeout option must be a duration or a number")
		}
		config.CccpMaxWait = val
	}

	if valStr, ok := fetchOption("config_poll_interval"); ok {
		val, err := parseDurationOrInt(valStr)
		if err != nil {
			// BUG FIX: previously said "config pool interval".
			return fmt.Errorf("config_poll_interval option must be a duration or a number")
		}
		config.CccpPollPeriod = val
	}

	if valStr, ok := fetchOption("enable_mutation_tokens"); ok {
		val, err := strconv.ParseBool(valStr)
		if err != nil {
			return fmt.Errorf("enable_mutation_tokens option must be a boolean")
		}
		config.UseMutationTokens = val
	}

	if valStr, ok := fetchOption("compression"); ok {
		val, err := strconv.ParseBool(valStr)
		if err != nil {
			return fmt.Errorf("compression option must be a boolean")
		}
		config.UseCompression = val
	}

	if valStr, ok := fetchOption("compression_min_size"); ok {
		val, err := strconv.ParseInt(valStr, 10, 64)
		if err != nil {
			return fmt.Errorf("compression_min_size option must be an int")
		}
		config.CompressionMinSize = int(val)
	}

	if valStr, ok := fetchOption("compression_min_ratio"); ok {
		val, err := strconv.ParseFloat(valStr, 64)
		if err != nil {
			// BUG FIX: previously reported the wrong option name and type
			// ("compression_min_size option must be an int").
			return fmt.Errorf("compression_min_ratio option must be a float")
		}
		config.CompressionMinRatio = val
	}

	if valStr, ok := fetchOption("enable_server_durations"); ok {
		val, err := strconv.ParseBool(valStr)
		if err != nil {
			// BUG FIX: previously said "server_duration option".
			return fmt.Errorf("enable_server_durations option must be a boolean")
		}
		config.UseDurations = val
	}

	if valStr, ok := fetchOption("max_idle_http_connections"); ok {
		val, err := strconv.ParseInt(valStr, 10, 64)
		if err != nil {
			return fmt.Errorf("max_idle_http_connections option must be a number")
		}
		config.HTTPMaxIdleConns = int(val)
	}

	if valStr, ok := fetchOption("max_perhost_idle_http_connections"); ok {
		val, err := strconv.ParseInt(valStr, 10, 64)
		if err != nil {
			return fmt.Errorf("max_perhost_idle_http_connections option must be a number")
		}
		config.HTTPMaxIdleConnsPerHost = int(val)
	}

	if valStr, ok := fetchOption("idle_http_connection_timeout"); ok {
		val, err := parseDurationOrInt(valStr)
		if err != nil {
			return fmt.Errorf("idle_http_connection_timeout option must be a duration or a number")
		}
		config.HTTPIdleConnectionTimeout = val
	}

	if valStr, ok := fetchOption("orphaned_response_logging"); ok {
		val, err := strconv.ParseBool(valStr)
		if err != nil {
			return fmt.Errorf("orphaned_response_logging option must be a boolean")
		}
		config.UseZombieLogger = val
	}

	if valStr, ok := fetchOption("orphaned_response_logging_interval"); ok {
		val, err := parseDurationOrInt(valStr)
		if err != nil {
			// BUG FIX: a duration is also accepted, not just a number.
			return fmt.Errorf("orphaned_response_logging_interval option must be a duration or a number")
		}
		config.ZombieLoggerInterval = val
	}

	if valStr, ok := fetchOption("orphaned_response_logging_sample_size"); ok {
		val, err := strconv.ParseInt(valStr, 10, 64)
		if err != nil {
			return fmt.Errorf("orphaned_response_logging_sample_size option must be a number")
		}
		config.ZombieLoggerSampleSize = int(val)
	}

	// This option is experimental
	if valStr, ok := fetchOption("http_redial_period"); ok {
		val, err := parseDurationOrInt(valStr)
		if err != nil {
			return fmt.Errorf("http_redial_period option must be a duration or a number")
		}
		config.HTTPRedialPeriod = val
	}

	// This option is experimental
	if valStr, ok := fetchOption("http_retry_delay"); ok {
		val, err := parseDurationOrInt(valStr)
		if err != nil {
			return fmt.Errorf("http_retry_delay option must be a duration or a number")
		}
		config.HTTPRetryDelay = val
	}

	// This option is experimental
	if valStr, ok := fetchOption("kv_pool_size"); ok {
		val, err := strconv.ParseInt(valStr, 10, 64)
		if err != nil {
			return fmt.Errorf("kv_pool_size option must be a number")
		}
		config.KvPoolSize = int(val)
	}

	// This option is experimental
	if valStr, ok := fetchOption("max_queue_size"); ok {
		val, err := strconv.ParseInt(valStr, 10, 64)
		if err != nil {
			return fmt.Errorf("max_queue_size option must be a number")
		}
		config.MaxQueueSize = int(val)
	}

	return nil
}

272
vendor/github.com/couchbase/gocbcore/v9/agent_ops.go generated vendored Normal file
View File

@ -0,0 +1,272 @@
package gocbcore
import "github.com/couchbase/gocbcore/v9/memd"
// GetCallback is invoked upon completion of a Get operation.
type GetCallback func(*GetResult, error)

// Get retrieves a document.
func (agent *Agent) Get(opts GetOptions, cb GetCallback) (PendingOp, error) {
	return agent.crud.Get(opts, cb)
}

// GetAndTouchCallback is invoked upon completion of a GetAndTouch operation.
type GetAndTouchCallback func(*GetAndTouchResult, error)

// GetAndTouch retrieves a document and updates its expiry.
func (agent *Agent) GetAndTouch(opts GetAndTouchOptions, cb GetAndTouchCallback) (PendingOp, error) {
	return agent.crud.GetAndTouch(opts, cb)
}

// GetAndLockCallback is invoked upon completion of a GetAndLock operation.
type GetAndLockCallback func(*GetAndLockResult, error)

// GetAndLock retrieves a document and locks it.
func (agent *Agent) GetAndLock(opts GetAndLockOptions, cb GetAndLockCallback) (PendingOp, error) {
	return agent.crud.GetAndLock(opts, cb)
}

// GetReplicaCallback is invoked upon completion of a GetReplica operation.
type GetReplicaCallback func(*GetReplicaResult, error)

// GetOneReplica retrieves a document from a replica server.
func (agent *Agent) GetOneReplica(opts GetOneReplicaOptions, cb GetReplicaCallback) (PendingOp, error) {
	return agent.crud.GetOneReplica(opts, cb)
}

// TouchCallback is invoked upon completion of a Touch operation.
type TouchCallback func(*TouchResult, error)

// Touch updates the expiry for a document.
func (agent *Agent) Touch(opts TouchOptions, cb TouchCallback) (PendingOp, error) {
	return agent.crud.Touch(opts, cb)
}

// UnlockCallback is invoked upon completion of an Unlock operation.
type UnlockCallback func(*UnlockResult, error)

// Unlock unlocks a locked document.
func (agent *Agent) Unlock(opts UnlockOptions, cb UnlockCallback) (PendingOp, error) {
	return agent.crud.Unlock(opts, cb)
}

// DeleteCallback is invoked upon completion of a Delete operation.
type DeleteCallback func(*DeleteResult, error)

// Delete removes a document.
func (agent *Agent) Delete(opts DeleteOptions, cb DeleteCallback) (PendingOp, error) {
	return agent.crud.Delete(opts, cb)
}

// StoreCallback is invoked upon completion of an Add, Set or Replace operation.
type StoreCallback func(*StoreResult, error)

// Add stores a document as long as it does not already exist.
func (agent *Agent) Add(opts AddOptions, cb StoreCallback) (PendingOp, error) {
	return agent.crud.Add(opts, cb)
}

// Set stores a document.
func (agent *Agent) Set(opts SetOptions, cb StoreCallback) (PendingOp, error) {
	return agent.crud.Set(opts, cb)
}

// Replace replaces the value of a Couchbase document with another value.
func (agent *Agent) Replace(opts ReplaceOptions, cb StoreCallback) (PendingOp, error) {
	return agent.crud.Replace(opts, cb)
}

// AdjoinCallback is invoked upon completion of an Append or Prepend operation.
type AdjoinCallback func(*AdjoinResult, error)

// Append appends some bytes to a document.
func (agent *Agent) Append(opts AdjoinOptions, cb AdjoinCallback) (PendingOp, error) {
	return agent.crud.Append(opts, cb)
}

// Prepend prepends some bytes to a document.
func (agent *Agent) Prepend(opts AdjoinOptions, cb AdjoinCallback) (PendingOp, error) {
	return agent.crud.Prepend(opts, cb)
}

// CounterCallback is invoked upon completion of an Increment or Decrement operation.
type CounterCallback func(*CounterResult, error)

// Increment increments the unsigned integer value in a document.
func (agent *Agent) Increment(opts CounterOptions, cb CounterCallback) (PendingOp, error) {
	return agent.crud.Increment(opts, cb)
}

// Decrement decrements the unsigned integer value in a document.
func (agent *Agent) Decrement(opts CounterOptions, cb CounterCallback) (PendingOp, error) {
	return agent.crud.Decrement(opts, cb)
}

// GetRandomCallback is invoked upon completion of a GetRandom operation.
type GetRandomCallback func(*GetRandomResult, error)

// GetRandom retrieves the key and value of a random document stored within Couchbase Server.
func (agent *Agent) GetRandom(opts GetRandomOptions, cb GetRandomCallback) (PendingOp, error) {
	return agent.crud.GetRandom(opts, cb)
}

// GetMetaCallback is invoked upon completion of a GetMeta operation.
type GetMetaCallback func(*GetMetaResult, error)

// GetMeta retrieves a document along with some internal Couchbase meta-data.
func (agent *Agent) GetMeta(opts GetMetaOptions, cb GetMetaCallback) (PendingOp, error) {
	return agent.crud.GetMeta(opts, cb)
}

// SetMetaCallback is invoked upon completion of a SetMeta operation.
type SetMetaCallback func(*SetMetaResult, error)

// SetMeta stores a document along with setting some internal Couchbase meta-data.
func (agent *Agent) SetMeta(opts SetMetaOptions, cb SetMetaCallback) (PendingOp, error) {
	return agent.crud.SetMeta(opts, cb)
}

// DeleteMetaCallback is invoked upon completion of a DeleteMeta operation.
type DeleteMetaCallback func(*DeleteMetaResult, error)

// DeleteMeta deletes a document along with setting some internal Couchbase meta-data.
func (agent *Agent) DeleteMeta(opts DeleteMetaOptions, cb DeleteMetaCallback) (PendingOp, error) {
	return agent.crud.DeleteMeta(opts, cb)
}

// StatsCallback is invoked upon completion of a Stats operation.
type StatsCallback func(*StatsResult, error)

// Stats retrieves statistics information from the server. Note that as this
// function is an aggregator across numerous servers, there are no guarantees
// about the consistency of the results. Occasionally, some nodes may not be
// represented in the results, or there may be conflicting information between
// multiple nodes (a vbucket active on two separate nodes at once).
func (agent *Agent) Stats(opts StatsOptions, cb StatsCallback) (PendingOp, error) {
	return agent.stats.Stats(opts, cb)
}

// ObserveCallback is invoked upon completion of an Observe operation.
type ObserveCallback func(*ObserveResult, error)

// Observe retrieves the current CAS and persistence state for a document.
func (agent *Agent) Observe(opts ObserveOptions, cb ObserveCallback) (PendingOp, error) {
	return agent.observe.Observe(opts, cb)
}

// ObserveVbCallback is invoked upon completion of an ObserveVb operation.
type ObserveVbCallback func(*ObserveVbResult, error)

// ObserveVb retrieves the persistence state sequence numbers for a particular VBucket
// and includes additional details not included by the basic version.
func (agent *Agent) ObserveVb(opts ObserveVbOptions, cb ObserveVbCallback) (PendingOp, error) {
	return agent.observe.ObserveVb(opts, cb)
}

// SubDocOp defines a per-operation structure to be passed to MutateIn
// or LookupIn for performing many sub-document operations.
type SubDocOp struct {
	Op    memd.SubDocOpType
	Flags memd.SubdocFlag
	Path  string
	Value []byte
}

// LookupInCallback is invoked upon completion of a LookupIn operation.
type LookupInCallback func(*LookupInResult, error)

// LookupIn performs a multiple-lookup sub-document operation on a document.
func (agent *Agent) LookupIn(opts LookupInOptions, cb LookupInCallback) (PendingOp, error) {
	return agent.crud.LookupIn(opts, cb)
}

// MutateInCallback is invoked upon completion of a MutateIn operation.
type MutateInCallback func(*MutateInResult, error)

// MutateIn performs a multiple-mutation sub-document operation on a document.
func (agent *Agent) MutateIn(opts MutateInOptions, cb MutateInCallback) (PendingOp, error) {
	return agent.crud.MutateIn(opts, cb)
}

// N1QLQueryCallback is invoked upon completion of a N1QLQuery operation.
type N1QLQueryCallback func(*N1QLRowReader, error)

// N1QLQuery executes a N1QL query.
func (agent *Agent) N1QLQuery(opts N1QLQueryOptions, cb N1QLQueryCallback) (PendingOp, error) {
	return agent.n1ql.N1QLQuery(opts, cb)
}

// PreparedN1QLQuery executes a prepared N1QL query.
func (agent *Agent) PreparedN1QLQuery(opts N1QLQueryOptions, cb N1QLQueryCallback) (PendingOp, error) {
	return agent.n1ql.PreparedN1QLQuery(opts, cb)
}

// AnalyticsQueryCallback is invoked upon completion of an AnalyticsQuery operation.
type AnalyticsQueryCallback func(*AnalyticsRowReader, error)

// AnalyticsQuery executes an analytics query.
func (agent *Agent) AnalyticsQuery(opts AnalyticsQueryOptions, cb AnalyticsQueryCallback) (PendingOp, error) {
	return agent.analytics.AnalyticsQuery(opts, cb)
}

// SearchQueryCallback is invoked upon completion of a SearchQuery operation.
type SearchQueryCallback func(*SearchRowReader, error)

// SearchQuery executes a Search query.
func (agent *Agent) SearchQuery(opts SearchQueryOptions, cb SearchQueryCallback) (PendingOp, error) {
	return agent.search.SearchQuery(opts, cb)
}

// ViewQueryCallback is invoked upon completion of a ViewQuery operation.
type ViewQueryCallback func(*ViewQueryRowReader, error)

// ViewQuery executes a view query.
func (agent *Agent) ViewQuery(opts ViewQueryOptions, cb ViewQueryCallback) (PendingOp, error) {
	return agent.views.ViewQuery(opts, cb)
}

// DoHTTPRequestCallback is invoked upon completion of a DoHTTPRequest operation.
type DoHTTPRequestCallback func(*HTTPResponse, error)

// DoHTTPRequest will perform an HTTP request against one of the HTTP
// services which are available within the SDK.
func (agent *Agent) DoHTTPRequest(req *HTTPRequest, cb DoHTTPRequestCallback) (PendingOp, error) {
	return agent.http.DoHTTPRequest(req, cb)
}

// GetCollectionManifestCallback is invoked upon completion of a GetCollectionManifest operation.
type GetCollectionManifestCallback func(*GetCollectionManifestResult, error)

// GetCollectionManifest fetches the current server manifest. This function will not update the client's collection
// id cache.
func (agent *Agent) GetCollectionManifest(opts GetCollectionManifestOptions, cb GetCollectionManifestCallback) (PendingOp, error) {
	return agent.collections.GetCollectionManifest(opts, cb)
}

// GetCollectionIDCallback is invoked upon completion of a GetCollectionID operation.
type GetCollectionIDCallback func(*GetCollectionIDResult, error)

// GetCollectionID fetches the collection id and manifest id that the collection belongs to, given a scope name
// and collection name. This function will also prime the client's collection id cache.
func (agent *Agent) GetCollectionID(scopeName string, collectionName string, opts GetCollectionIDOptions, cb GetCollectionIDCallback) (PendingOp, error) {
	return agent.collections.GetCollectionID(scopeName, collectionName, opts, cb)
}

// PingCallback is invoked upon completion of a PingKv operation.
type PingCallback func(*PingResult, error)

// Ping pings all of the servers we are connected to and returns
// a report regarding the pings that were performed.
func (agent *Agent) Ping(opts PingOptions, cb PingCallback) (PendingOp, error) {
	return agent.diagnostics.Ping(opts, cb)
}

// Diagnostics returns diagnostics information about the client.
// Mainly containing a list of open connections and their current
// states.
func (agent *Agent) Diagnostics(opts DiagnosticsOptions) (*DiagnosticInfo, error) {
	return agent.diagnostics.Diagnostics(opts)
}

// WaitUntilReadyCallback is invoked upon completion of a WaitUntilReady operation.
type WaitUntilReadyCallback func(*WaitUntilReadyResult, error)

250
vendor/github.com/couchbase/gocbcore/v9/agentgroup.go generated vendored Normal file
View File

@ -0,0 +1,250 @@
package gocbcore
import (
"errors"
"sync"
"time"
)
// AgentGroup represents a collection of agents that can be used for performing operations
// against a cluster. It holds an internal special agent type which does not create its own
// memcached connections but registers itself for cluster config updates on all agents that
// are created through it.
type AgentGroup struct {
	// agentsLock guards boundAgents.
	agentsLock  sync.Mutex
	boundAgents map[string]*Agent
	// clusterAgent holds no memcached connections but can be used for cluster level (i.e. http) operations.
	// It sets its own internal state by listening to cluster config updates on underlying agents.
	clusterAgent *clusterAgent

	config *AgentGroupConfig
}
// CreateAgentGroup will return a new AgentGroup with a base config of the config provided.
// Volatile: AgentGroup is subject to change or removal.
func CreateAgentGroup(config *AgentGroupConfig) (*AgentGroup, error) {
	logInfof("SDK Version: gocbcore/%s", goCbCoreVersionStr)
	logInfof("Creating new agent group: %+v", config)

	// Create an initial agent from the base config; it is bound under the
	// config's bucket name (possibly "" for a global, bucketless agent).
	c := config.toAgentConfig()
	agent, err := CreateAgent(c)
	if err != nil {
		return nil, err
	}

	ag := &AgentGroup{
		config:      config,
		boundAgents: make(map[string]*Agent),
	}

	// The cluster agent performs HTTP-level (cluster scope) operations only;
	// it learns topology by subscribing to config updates from bound agents.
	ag.clusterAgent = createClusterAgent(&clusterAgentConfig{
		HTTPAddrs:                 config.HTTPAddrs,
		UserAgent:                 config.UserAgent,
		UseTLS:                    config.UseTLS,
		Auth:                      config.Auth,
		TLSRootCAProvider:         config.TLSRootCAProvider,
		HTTPMaxIdleConns:          config.HTTPMaxIdleConns,
		HTTPMaxIdleConnsPerHost:   config.HTTPMaxIdleConnsPerHost,
		HTTPIdleConnectionTimeout: config.HTTPIdleConnectionTimeout,
		Tracer:                    config.Tracer,
		NoRootTraceSpans:          config.NoRootTraceSpans,
		DefaultRetryStrategy:      config.DefaultRetryStrategy,
		CircuitBreakerConfig:      config.CircuitBreakerConfig,
	})
	ag.clusterAgent.RegisterWith(agent.cfgManager)

	ag.boundAgents[config.BucketName] = agent

	return ag, nil
}
// OpenBucket will attempt to open a new bucket against the cluster.
// If an agent using the specified bucket name already exists then this will not open a new connection.
func (ag *AgentGroup) OpenBucket(bucketName string) error {
	if bucketName == "" {
		return wrapError(errInvalidArgument, "bucket name cannot be empty")
	}

	existing := ag.GetAgent(bucketName)
	if existing != nil {
		return nil
	}

	config := ag.config.toAgentConfig()
	config.BucketName = bucketName

	// NOTE(review): the existence check above and the map insert below are not
	// under one lock, so two concurrent OpenBucket calls for the same bucket
	// could each create an agent, with one silently overwritten and never
	// closed. Confirm whether callers are expected to serialize OpenBucket.
	agent, err := CreateAgent(config)
	if err != nil {
		return err
	}

	ag.clusterAgent.RegisterWith(agent.cfgManager)

	ag.agentsLock.Lock()
	ag.boundAgents[bucketName] = agent
	ag.agentsLock.Unlock()

	// With at least one bucket-bound agent available, the global (bucketless)
	// agent created at group creation time is no longer needed.
	ag.maybeCloseGlobalAgent()

	return nil
}
// GetAgent will return the agent, if any, corresponding to the bucket name
// specified, or nil when no agent is bound to that bucket.
func (ag *AgentGroup) GetAgent(bucketName string) *Agent {
	if bucketName == "" {
		// We don't allow access to the global level agent. We close that agent on OpenBucket so we don't want
		// to return an agent that we then later close. Doing so would only lead to pain.
		return nil
	}

	ag.agentsLock.Lock()
	defer ag.agentsLock.Unlock()

	// A missing key yields the map's zero value, which is already nil.
	return ag.boundAgents[bucketName]
}
// Close will close all underlying agents. The first error encountered (if
// any) is returned, but every agent is still closed.
func (ag *AgentGroup) Close() error {
	var firstError error
	ag.agentsLock.Lock()
	for _, agent := range ag.boundAgents {
		// Detach the cluster agent from this agent's config stream before
		// closing it.
		ag.clusterAgent.UnregisterWith(agent.cfgManager)
		if err := agent.Close(); err != nil && firstError == nil {
			firstError = err
		}
	}
	ag.agentsLock.Unlock()

	// The cluster agent is closed last, after all bound agents are gone.
	if err := ag.clusterAgent.Close(); err != nil && firstError == nil {
		firstError = err
	}
	return firstError
}
// N1QLQuery executes a N1QL query against a random connected agent.
// If no agent is connected then this will block until one is available or the deadline is reached.
func (ag *AgentGroup) N1QLQuery(opts N1QLQueryOptions, cb N1QLQueryCallback) (PendingOp, error) {
	return ag.clusterAgent.N1QLQuery(opts, cb)
}

// PreparedN1QLQuery executes a prepared N1QL query against a random connected agent.
// If no agent is connected then this will block until one is available or the deadline is reached.
func (ag *AgentGroup) PreparedN1QLQuery(opts N1QLQueryOptions, cb N1QLQueryCallback) (PendingOp, error) {
	return ag.clusterAgent.PreparedN1QLQuery(opts, cb)
}

// AnalyticsQuery executes an analytics query against a random connected agent.
// If no agent is connected then this will block until one is available or the deadline is reached.
func (ag *AgentGroup) AnalyticsQuery(opts AnalyticsQueryOptions, cb AnalyticsQueryCallback) (PendingOp, error) {
	return ag.clusterAgent.AnalyticsQuery(opts, cb)
}

// SearchQuery executes a Search query against a random connected agent.
// If no agent is connected then this will block until one is available or the deadline is reached.
func (ag *AgentGroup) SearchQuery(opts SearchQueryOptions, cb SearchQueryCallback) (PendingOp, error) {
	return ag.clusterAgent.SearchQuery(opts, cb)
}

// ViewQuery executes a view query against a random connected agent.
// If no agent is connected then this will block until one is available or the deadline is reached.
func (ag *AgentGroup) ViewQuery(opts ViewQueryOptions, cb ViewQueryCallback) (PendingOp, error) {
	return ag.clusterAgent.ViewQuery(opts, cb)
}

// DoHTTPRequest will perform an HTTP request against one of the HTTP
// services which are available within the SDK, using a random connected agent.
// If no agent is connected then this will block until one is available or the deadline is reached.
func (ag *AgentGroup) DoHTTPRequest(req *HTTPRequest, cb DoHTTPRequestCallback) (PendingOp, error) {
	return ag.clusterAgent.DoHTTPRequest(req, cb)
}

// WaitUntilReady returns whether or not the AgentGroup can ping the requested services.
func (ag *AgentGroup) WaitUntilReady(deadline time.Time, opts WaitUntilReadyOptions,
	cb WaitUntilReadyCallback) (PendingOp, error) {
	return ag.clusterAgent.WaitUntilReady(deadline, opts, cb)
}

// Ping pings all of the servers we are connected to and returns
// a report regarding the pings that were performed.
func (ag *AgentGroup) Ping(opts PingOptions, cb PingCallback) (PendingOp, error) {
	return ag.clusterAgent.Ping(opts, cb)
}
// Diagnostics returns diagnostics information about the client.
// Mainly containing a list of open connections and their current
// states. The per-agent reports are merged into a single overall report.
func (ag *AgentGroup) Diagnostics(opts DiagnosticsOptions) (*DiagnosticInfo, error) {
	var agents []*Agent
	ag.agentsLock.Lock()
	// There's no point in trying to get diagnostics from clusterAgent as it has no kv connections.
	// In fact it doesn't even expose a Diagnostics function.
	for _, agent := range ag.boundAgents {
		agents = append(agents, agent)
	}
	ag.agentsLock.Unlock()

	if len(agents) == 0 {
		return nil, errors.New("no agents available")
	}

	// Collect a report from each agent; the first failure is remembered but
	// remaining agents are still queried.
	var firstError error
	var diags []*DiagnosticInfo
	for _, agent := range agents {
		report, err := agent.diagnostics.Diagnostics(opts)
		if err != nil && firstError == nil {
			firstError = err
			continue
		}

		diags = append(diags, report)
	}

	// Only fail outright when no agent produced a report at all.
	if len(diags) == 0 {
		return nil, firstError
	}

	// Merge the reports: concatenate connections, keep the highest config
	// revision, and derive an overall state from how many agents are online.
	var overallReport DiagnosticInfo
	var connected int
	var expected int
	for _, report := range diags {
		expected++
		overallReport.MemdConns = append(overallReport.MemdConns, report.MemdConns...)
		if report.State == ClusterStateOnline {
			connected++
		}

		if report.ConfigRev > overallReport.ConfigRev {
			overallReport.ConfigRev = report.ConfigRev
		}
	}

	if connected == expected {
		overallReport.State = ClusterStateOnline
	} else if connected > 0 {
		overallReport.State = ClusterStateDegraded
	} else {
		overallReport.State = ClusterStateOffline
	}

	return &overallReport, nil
}
// maybeCloseGlobalAgent closes and removes the global (bucketless) agent that
// was created at group creation time, if it still exists. It is a no-op when
// no such agent is bound.
func (ag *AgentGroup) maybeCloseGlobalAgent() {
	ag.agentsLock.Lock()
	// Close and delete the global level agent that we created on Connect.
	agent := ag.boundAgents[""]
	if agent == nil {
		ag.agentsLock.Unlock()
		return
	}
	logDebugf("Shutting down global level agent")
	delete(ag.boundAgents, "")
	ag.agentsLock.Unlock()

	// Unregister and close outside the lock; Close may block while tearing
	// down connections.
	ag.clusterAgent.UnregisterWith(agent.cfgManager)
	if err := agent.Close(); err != nil {
		logDebugf("Failed to close agent: %s", err)
	}
}

View File

@ -0,0 +1,55 @@
package gocbcore
// AgentGroupConfig specifies the configuration options for creation of an AgentGroup.
type AgentGroupConfig struct {
	// AgentConfig is embedded: all agent-level options apply to every
	// bucket-bound agent that the group creates.
	AgentConfig
}
// redacted returns a log-safe copy of the config, delegating to the
// embedded AgentConfig's redaction.
func (config *AgentGroupConfig) redacted() interface{} {
	return config.AgentConfig.redacted()
}
// FromConnStr populates the AgentGroupConfig with information from a
// Couchbase Connection String. See AgentConfig for supported options.
func (config *AgentGroupConfig) FromConnStr(connStr string) error {
	return config.AgentConfig.FromConnStr(connStr)
}
// toAgentConfig produces a standalone AgentConfig carrying a field-by-field
// copy of this group config.
//
// NOTE(review): this list must be kept in sync with AgentConfig — any field
// added there and not mirrored here is silently dropped when creating
// per-bucket agents. BucketName is copied as-is and is overridden by callers
// that open specific buckets (verify against OpenBucket's usage).
func (config *AgentGroupConfig) toAgentConfig() *AgentConfig {
	return &AgentConfig{
		MemdAddrs:                 config.MemdAddrs,
		HTTPAddrs:                 config.HTTPAddrs,
		BucketName:                config.BucketName,
		UserAgent:                 config.UserAgent,
		UseTLS:                    config.UseTLS,
		NetworkType:               config.NetworkType,
		Auth:                      config.Auth,
		TLSRootCAProvider:         config.TLSRootCAProvider,
		UseMutationTokens:         config.UseMutationTokens,
		UseCompression:            config.UseCompression,
		UseDurations:              config.UseDurations,
		DisableDecompression:      config.DisableDecompression,
		UseOutOfOrderResponses:    config.UseOutOfOrderResponses,
		UseCollections:            config.UseCollections,
		CompressionMinSize:        config.CompressionMinSize,
		CompressionMinRatio:       config.CompressionMinRatio,
		HTTPRedialPeriod:          config.HTTPRedialPeriod,
		HTTPRetryDelay:            config.HTTPRetryDelay,
		CccpMaxWait:               config.CccpMaxWait,
		CccpPollPeriod:            config.CccpPollPeriod,
		ConnectTimeout:            config.ConnectTimeout,
		KVConnectTimeout:          config.KVConnectTimeout,
		KvPoolSize:                config.KvPoolSize,
		MaxQueueSize:              config.MaxQueueSize,
		HTTPMaxIdleConns:          config.HTTPMaxIdleConns,
		HTTPMaxIdleConnsPerHost:   config.HTTPMaxIdleConnsPerHost,
		HTTPIdleConnectionTimeout: config.HTTPIdleConnectionTimeout,
		Tracer:                    config.Tracer,
		NoRootTraceSpans:          config.NoRootTraceSpans,
		DefaultRetryStrategy:      config.DefaultRetryStrategy,
		CircuitBreakerConfig:      config.CircuitBreakerConfig,
		UseZombieLogger:           config.UseZombieLogger,
		ZombieLoggerInterval:      config.ZombieLoggerInterval,
		ZombieLoggerSampleSize:    config.ZombieLoggerSampleSize,
	}
}

View File

@ -0,0 +1,285 @@
package gocbcore
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"time"
)
// AnalyticsRowReader providers access to the rows of a analytics query
type AnalyticsRowReader struct {
	// streamer incrementally parses the "results" array of the response body.
	streamer *queryStreamer
}
// NextRow reads the next rows bytes from the stream.
// It returns nil once the stream is exhausted or an error occurs (check Err).
func (q *AnalyticsRowReader) NextRow() []byte {
	return q.streamer.NextRow()
}
// Err returns any errors that occurred during streaming.
//
// Uses a pointer receiver for consistency with the type's other methods
// (NextRow, MetaData, Close); the original value receiver was the lone
// exception and split the method set for no benefit.
func (q *AnalyticsRowReader) Err() error {
	return q.streamer.Err()
}
// MetaData fetches the non-row bytes streamed in the response.
// It is only valid to call once all rows have been consumed.
func (q *AnalyticsRowReader) MetaData() ([]byte, error) {
	return q.streamer.MetaData()
}
// Close immediately shuts down the connection
func (q *AnalyticsRowReader) Close() error {
	return q.streamer.Close()
}
// AnalyticsQueryOptions represents the various options available for an analytics query.
type AnalyticsQueryOptions struct {
	// Payload is the full JSON request body; "statement", "client_context_id"
	// and "readonly" are read back out of it by AnalyticsQuery.
	Payload []byte
	// Priority is sent as the Analytics-Priority HTTP header.
	Priority      int
	RetryStrategy RetryStrategy
	// Deadline bounds the whole operation including retries; the remaining
	// time is re-injected into the payload as "timeout" on every attempt.
	Deadline time.Time

	// Volatile: Tracer API is subject to change.
	TraceContext RequestSpanContext
}
// wrapAnalyticsError builds an *AnalyticsError around err, annotated with the
// statement and, when available, the request's endpoint/context/retry data.
// A nil err is replaced with a generic "analytics error".
func wrapAnalyticsError(req *httpRequest, statement string, err error) *AnalyticsError {
	if err == nil {
		err = errors.New("analytics error")
	}

	wrapped := &AnalyticsError{
		InnerError: err,
		Statement:  statement,
	}
	if req != nil {
		wrapped.Endpoint = req.Endpoint
		wrapped.ClientContextID = req.UniqueID
		wrapped.RetryAttempts = req.RetryAttempts()
		wrapped.RetryReasons = req.RetryReasons()
	}
	return wrapped
}
// jsonAnalyticsError is the wire format of a single error entry in an
// analytics service response body.
type jsonAnalyticsError struct {
	Code uint32 `json:"code"`
	Msg  string `json:"msg"`
}

// jsonAnalyticsErrorResponse is the wire format of an analytics error
// response. Errors carries no tag; encoding/json matches the "errors"
// key case-insensitively.
type jsonAnalyticsErrorResponse struct {
	Errors []jsonAnalyticsError
}
// parseAnalyticsError decodes the error payload of a non-200 analytics
// response and maps the first reported error code onto one of the package's
// sentinel errors. The resulting *AnalyticsError always carries every decoded
// error description; a body that cannot be read or parsed simply yields the
// generic wrapped error.
func parseAnalyticsError(req *httpRequest, statement string, resp *HTTPResponse) *AnalyticsError {
	var baseErr error
	var errorDescs []AnalyticsErrorDesc

	if respBody, readErr := ioutil.ReadAll(resp.Body); readErr == nil {
		var respParse jsonAnalyticsErrorResponse
		if parseErr := json.Unmarshal(respBody, &respParse); parseErr == nil {
			for _, jsonErr := range respParse.Errors {
				errorDescs = append(errorDescs, AnalyticsErrorDesc{
					Code:    jsonErr.Code,
					Message: jsonErr.Msg,
				})
			}
		}
	}

	if len(errorDescs) >= 1 {
		errCode := errorDescs[0].Code

		// Coarse classification by thousands-group first...
		switch errCode / 1000 {
		case 25:
			baseErr = errInternalServerFailure
		case 20:
			baseErr = errAuthenticationFailure
		case 24:
			baseErr = errCompilationFailure
		}

		// ...then specific codes override the group mapping.
		switch errCode {
		case 23000, 23003:
			baseErr = errTemporaryFailure
		case 24000:
			baseErr = errParsingFailure
		case 24047:
			baseErr = errIndexNotFound
		case 24048:
			baseErr = errIndexExists
		case 23007:
			baseErr = errJobQueueFull
		case 24025, 24044, 24045:
			baseErr = errDatasetNotFound
		case 24034:
			baseErr = errDataverseNotFound
		case 24040:
			baseErr = errDatasetExists
		case 24039:
			baseErr = errDataverseExists
		case 24006:
			baseErr = errLinkNotFound
		}
	}

	errOut := wrapAnalyticsError(req, statement, baseErr)
	errOut.Errors = errorDescs
	return errOut
}
// analyticsQueryComponent issues analytics queries over the HTTP component.
type analyticsQueryComponent struct {
	httpComponent *httpComponent
	tracer        *tracerComponent
}

// newAnalyticsQueryComponent wires an analytics component onto the given
// HTTP component and tracer.
func newAnalyticsQueryComponent(httpComponent *httpComponent, tracer *tracerComponent) *analyticsQueryComponent {
	return &analyticsQueryComponent{
		httpComponent: httpComponent,
		tracer:        tracer,
	}
}
// AnalyticsQuery executes an analytics query.
//
// The query runs asynchronously: the returned PendingOp can cancel it, and cb
// receives either an AnalyticsRowReader or an *AnalyticsError. Each retry
// attempt rewrites the payload's "timeout" field with the time remaining
// before opts.Deadline. Only error codes 23000/23003/23007 (temporary
// failures) are considered retryable; the retry strategy decides whether and
// when to retry.
func (aqc *analyticsQueryComponent) AnalyticsQuery(opts AnalyticsQueryOptions, cb AnalyticsQueryCallback) (PendingOp, error) {
	tracer := aqc.tracer.CreateOpTrace("AnalyticsQuery", opts.TraceContext)
	defer tracer.Finish()

	// The payload must be JSON: statement/context-id/readonly are read back
	// out of it, and "timeout" is re-injected per attempt.
	var payloadMap map[string]interface{}
	err := json.Unmarshal(opts.Payload, &payloadMap)
	if err != nil {
		return nil, wrapAnalyticsError(nil, "", wrapError(err, "expected a JSON payload"))
	}

	statement := getMapValueString(payloadMap, "statement", "")
	clientContextID := getMapValueString(payloadMap, "client_context_id", "")
	readOnly := getMapValueBool(payloadMap, "readonly", false)

	ctx, cancel := context.WithCancel(context.Background())
	ireq := &httpRequest{
		Service: CbasService,
		Method:  "POST",
		Path:    "/query/service",
		Headers: map[string]string{
			// The analytics service reads execution priority from this header.
			"Analytics-Priority": fmt.Sprintf("%d", opts.Priority),
		},
		Body: opts.Payload,
		// Read-only queries are safe to transparently retry at the HTTP layer.
		IsIdempotent:     readOnly,
		UniqueID:         clientContextID,
		Deadline:         opts.Deadline,
		RetryStrategy:    opts.RetryStrategy,
		RootTraceContext: tracer.RootContext(),
		Context:          ctx,
		CancelFunc:       cancel,
	}

	start := time.Now()
	go func() {
	ExecuteLoop:
		for {
			{ // Produce an updated payload with the appropriate timeout
				timeoutLeft := time.Until(ireq.Deadline)
				payloadMap["timeout"] = timeoutLeft.String()

				newPayload, err := json.Marshal(payloadMap)
				if err != nil {
					cancel()
					cb(nil, wrapAnalyticsError(nil, "", wrapError(err, "failed to produce payload")))
					return
				}
				ireq.Body = newPayload
			}

			resp, err := aqc.httpComponent.DoInternalHTTPRequest(ireq, false)
			if err != nil {
				cancel()
				// execHTTPRequest will handle retrying due to in-flight socket close based
				// on whether or not IsIdempotent is set on the httpRequest
				cb(nil, wrapAnalyticsError(ireq, statement, err))
				return
			}

			if resp.StatusCode != 200 {
				analyticsErr := parseAnalyticsError(ireq, statement, resp)

				// Only the known temporary-failure codes are retryable.
				var retryReason RetryReason
				if len(analyticsErr.Errors) >= 1 {
					firstErrDesc := analyticsErr.Errors[0]

					if firstErrDesc.Code == 23000 {
						retryReason = AnalyticsTemporaryFailureRetryReason
					} else if firstErrDesc.Code == 23003 {
						retryReason = AnalyticsTemporaryFailureRetryReason
					} else if firstErrDesc.Code == 23007 {
						retryReason = AnalyticsTemporaryFailureRetryReason
					}
				}

				if retryReason == nil {
					cancel()
					// analyticsErr is already wrapped here
					cb(nil, analyticsErr)
					return
				}

				shouldRetry, retryTime := retryOrchMaybeRetry(ireq, retryReason)
				if !shouldRetry {
					cancel()
					// analyticsErr is already wrapped here
					cb(nil, analyticsErr)
					return
				}

				// Sleep until the retry time, unless the overall deadline
				// fires first, in which case surface a timeout.
				select {
				case <-time.After(time.Until(retryTime)):
					continue ExecuteLoop
				case <-time.After(time.Until(ireq.Deadline)):
					cancel()
					err := &TimeoutError{
						InnerError:       errUnambiguousTimeout,
						OperationID:      "http",
						Opaque:           ireq.Identifier(),
						TimeObserved:     time.Since(start),
						RetryReasons:     ireq.retryReasons,
						RetryAttempts:    ireq.retryCount,
						LastDispatchedTo: ireq.Endpoint,
					}
					cb(nil, wrapAnalyticsError(ireq, statement, err))
					return
				}
			}

			// Success: hand the caller a streamer over the "results" array.
			streamer, err := newQueryStreamer(resp.Body, "results")
			if err != nil {
				cancel()
				cb(nil, wrapAnalyticsError(ireq, statement, err))
				return
			}

			cb(&AnalyticsRowReader{
				streamer: streamer,
			}, nil)
			return
		}
	}()

	return ireq, nil
}

80
vendor/github.com/couchbase/gocbcore/v9/auth.go generated vendored Normal file
View File

@ -0,0 +1,80 @@
package gocbcore
import "crypto/tls"
// UserPassPair represents a username and password pair.
type UserPassPair struct {
	Username string
	Password string
}

// AuthCredsRequest represents an authentication details request from the agent.
type AuthCredsRequest struct {
	Service  ServiceType
	Endpoint string
}

// AuthCertRequest represents a certificate details request from the agent.
type AuthCertRequest struct {
	Service  ServiceType
	Endpoint string
}

// AuthProvider is an interface to allow the agent to fetch authentication
// credentials on-demand from the application.
type AuthProvider interface {
	// SupportsTLS reports whether this provider can authenticate TLS connections.
	SupportsTLS() bool
	// SupportsNonTLS reports whether this provider can authenticate plaintext connections.
	SupportsNonTLS() bool
	// Certificate returns the client certificate to present, or nil for none.
	Certificate(req AuthCertRequest) (*tls.Certificate, error)
	// Credentials returns the username/password pairs for the given service/endpoint.
	Credentials(req AuthCredsRequest) ([]UserPassPair, error)
}
// getSingleAuthCreds fetches credentials from the provider and requires
// exactly one pair; anything else is errInvalidCredentials.
func getSingleAuthCreds(auth AuthProvider, req AuthCredsRequest) (UserPassPair, error) {
	creds, err := auth.Credentials(req)
	switch {
	case err != nil:
		return UserPassPair{}, err
	case len(creds) != 1:
		return UserPassPair{}, errInvalidCredentials
	default:
		return creds[0], nil
	}
}
// getKvAuthCreds fetches the single username/password pair used for the
// memcached (KV) service at the given endpoint.
func getKvAuthCreds(auth AuthProvider, endpoint string) (UserPassPair, error) {
	return getSingleAuthCreds(auth, AuthCredsRequest{
		Service:  MemdService,
		Endpoint: endpoint,
	})
}
// PasswordAuthProvider provides a standard AuthProvider implementation
// for use with a standard username/password pair (for example, RBAC).
type PasswordAuthProvider struct {
	Username string
	Password string
}

// SupportsNonTLS specifies whether this authenticator supports non-TLS connections.
func (auth PasswordAuthProvider) SupportsNonTLS() bool {
	return true
}

// SupportsTLS specifies whether this authenticator supports TLS connections.
func (auth PasswordAuthProvider) SupportsTLS() bool {
	return true
}

// Certificate directly returns a certificate chain to present for the connection.
// Password auth never presents a client certificate, so this is always nil.
func (auth PasswordAuthProvider) Certificate(req AuthCertRequest) (*tls.Certificate, error) {
	return nil, nil
}

// Credentials directly returns the username/password from the provider.
// The same pair is used for every service and endpoint.
func (auth PasswordAuthProvider) Credentials(req AuthCredsRequest) ([]UserPassPair, error) {
	return []UserPassPair{{
		Username: auth.Username,
		Password: auth.Password,
	}}, nil
}

142
vendor/github.com/couchbase/gocbcore/v9/authclient.go generated vendored Normal file
View File

@ -0,0 +1,142 @@
package gocbcore
import (
"crypto/sha1" // nolint: gosec
"crypto/sha256"
"crypto/sha512"
"hash"
"time"
"github.com/couchbase/gocbcore/v9/memd"
scram "github.com/couchbase/gocbcore/v9/scram"
)
// AuthMechanism represents a type of auth that can be performed.
type AuthMechanism string

// The supported SASL mechanisms, in the string form used on the wire when
// listing/selecting mechanisms with the server.
const (
	// PlainAuthMechanism represents that PLAIN auth should be performed.
	PlainAuthMechanism = AuthMechanism("PLAIN")

	// ScramSha1AuthMechanism represents that SCRAM SHA1 auth should be performed.
	ScramSha1AuthMechanism = AuthMechanism("SCRAM_SHA1")

	// ScramSha256AuthMechanism represents that SCRAM SHA256 auth should be performed.
	ScramSha256AuthMechanism = AuthMechanism("SCRAM_SHA256")

	// ScramSha512AuthMechanism represents that SCRAM SHA512 auth should be performed.
	ScramSha512AuthMechanism = AuthMechanism("SCRAM_SHA512")
)
// AuthClient exposes an interface for performing authentication on a
// connected Couchbase K/V client.
type AuthClient interface {
	// Address returns the endpoint this client is connected to.
	Address() string
	// SupportsFeature reports whether the connection negotiated the HELLO feature.
	SupportsFeature(feature memd.HelloFeature) bool

	// SaslListMechs asynchronously fetches the mechanisms the server supports.
	SaslListMechs(deadline time.Time, cb func(mechs []AuthMechanism, err error)) error
	// SaslAuth starts a SASL exchange with mechanism k and initial payload v.
	SaslAuth(k, v []byte, deadline time.Time, cb func(b []byte, err error)) error
	// SaslStep continues a multi-step SASL exchange.
	SaslStep(k, v []byte, deadline time.Time, cb func(err error)) error
}

// SaslListMechsCompleted is used to contain the result and/or error from a SaslListMechs operation.
type SaslListMechsCompleted struct {
	Err   error
	Mechs []AuthMechanism
}
// SaslAuthPlain performs PLAIN SASL authentication against an AuthClient.
// cb is invoked with the outcome once the server responds; the returned error
// covers only failures to dispatch the request.
func SaslAuthPlain(username, password string, client AuthClient, deadline time.Time, cb func(err error)) error {
	// Build PLAIN auth data: authzid NUL authcid NUL passwd (empty authzid).
	userBuf := []byte(username)
	passBuf := []byte(password)
	authData := make([]byte, 1+len(userBuf)+1+len(passBuf))
	authData[0] = 0
	copy(authData[1:], userBuf)
	authData[1+len(userBuf)] = 0
	copy(authData[1+len(userBuf)+1:], passBuf)

	// Execute PLAIN authentication. The server payload is unused on success,
	// so the callback only forwards the error state. (The original
	// err-check-then-return-nil tail was equivalent to returning directly.)
	return client.SaslAuth([]byte(PlainAuthMechanism), authData, deadline, func(b []byte, err error) {
		if err != nil {
			cb(err)
			return
		}

		cb(nil)
	})
}
// saslAuthScram drives a two-step SCRAM exchange (named by saslName, hashed
// with newHash) against the client. continueCb fires after the first round
// trip succeeds and a second step has been dispatched; completedCb fires with
// the final outcome. The returned error covers only dispatch failure of the
// first step.
func saslAuthScram(saslName []byte, newHash func() hash.Hash, username, password string, client AuthClient,
	deadline time.Time, continueCb func(), completedCb func(err error)) error {
	scramMgr := scram.NewClient(newHash, username, password)

	// Perform the initial SASL step
	scramMgr.Step(nil)
	err := client.SaslAuth(saslName, scramMgr.Out(), deadline, func(b []byte, err error) {
		// StatusAuthContinue is the expected "keep going" signal, not a failure.
		if err != nil && !isErrorStatus(err, memd.StatusAuthContinue) {
			completedCb(err)
			return
		}

		if !scramMgr.Step(b) {
			// The client considers the exchange finished; if it recorded no
			// error the server accepted auth early, which we log but accept.
			err = scramMgr.Err()
			if err != nil {
				completedCb(err)
				return
			}

			logErrorf("Local auth client finished before server accepted auth")
			completedCb(nil)
			return
		}

		err = client.SaslStep(saslName, scramMgr.Out(), deadline, completedCb)
		if err != nil {
			completedCb(err)
			return
		}

		continueCb()
	})
	if err != nil {
		return err
	}

	return nil
}
// SaslAuthScramSha1 performs SCRAM-SHA1 SASL authentication against an AuthClient.
func SaslAuthScramSha1(username, password string, client AuthClient, deadline time.Time, continueCb func(), completedCb func(err error)) error {
	return saslAuthScram([]byte("SCRAM-SHA1"), sha1.New, username, password, client, deadline, continueCb, completedCb)
}

// SaslAuthScramSha256 performs SCRAM-SHA256 SASL authentication against an AuthClient.
func SaslAuthScramSha256(username, password string, client AuthClient, deadline time.Time, continueCb func(), completedCb func(err error)) error {
	return saslAuthScram([]byte("SCRAM-SHA256"), sha256.New, username, password, client, deadline, continueCb, completedCb)
}

// SaslAuthScramSha512 performs SCRAM-SHA512 SASL authentication against an AuthClient.
func SaslAuthScramSha512(username, password string, client AuthClient, deadline time.Time, continueCb func(), completedCb func(err error)) error {
	return saslAuthScram([]byte("SCRAM-SHA512"), sha512.New, username, password, client, deadline, continueCb, completedCb)
}
// saslMethod dispatches to the SASL implementation matching method.
// Unknown mechanisms yield errNoSupportedMechanisms. Note that PLAIN has no
// continue step, so continueCb is ignored for it.
func saslMethod(method AuthMechanism, username, password string, client AuthClient, deadline time.Time, continueCb func(), completedCb func(err error)) error {
	switch method {
	case PlainAuthMechanism:
		return SaslAuthPlain(username, password, client, deadline, completedCb)
	case ScramSha1AuthMechanism:
		return SaslAuthScramSha1(username, password, client, deadline, continueCb, completedCb)
	case ScramSha256AuthMechanism:
		return SaslAuthScramSha256(username, password, client, deadline, continueCb, completedCb)
	case ScramSha512AuthMechanism:
		return SaslAuthScramSha512(username, password, client, deadline, continueCb, completedCb)
	default:
		return errNoSupportedMechanisms
	}
}

75
vendor/github.com/couchbase/gocbcore/v9/cbcrc.go generated vendored Normal file
View File

@ -0,0 +1,75 @@
package gocbcore
// crc32tab is the standard CRC-32 (IEEE 802.3, reflected polynomial
// 0xEDB88320) lookup table used by cbCrc.
var crc32tab = []uint32{
	0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
	0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
	0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
	0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
	0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
	0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
	0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
	0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
	0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
	0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
	0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
	0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
	0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
	0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
	0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
	0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
	0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
	0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
	0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
	0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
	0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
	0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
	0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
	0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
	0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
	0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
	0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
	0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
	0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
	0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
	0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
	0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
	0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
	0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
	0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
	0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
	0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
	0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
	0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
	0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
	0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
	0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
	0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
	0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
	0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
	0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
	0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
	0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
	0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
	0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
	0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
	0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
	0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
	0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
	0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
	0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
	0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
	0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
	0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
	0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
	0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
	0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
	0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
	0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d}

// cbCrc hashes a key for vbucket mapping: it is the top 16 bits of the
// finalized CRC-32/IEEE of the key (i.e. equal to ChecksumIEEE(key) >> 16).
func cbCrc(key []byte) uint32 {
	crc := ^uint32(0)
	for _, b := range key {
		crc = (crc >> 8) ^ crc32tab[(crc^uint32(b))&0xff]
	}
	return (^crc) >> 16
}

View File

@ -0,0 +1,214 @@
package gocbcore
import (
"math/rand"
"sync"
"time"
"github.com/couchbase/gocbcore/v9/memd"
)
// cccpConfigController polls cluster configs over memcached (CCCP) and feeds
// new configs into the config management component.
type cccpConfigController struct {
	muxer              *kvMux
	cfgMgr             *configManagementComponent
	confCccpPollPeriod time.Duration
	confCccpMaxWait    time.Duration

	// Used exclusively for testing to overcome GOCBC-780. It allows a test to pause the cccp looper preventing
	// unwanted requests from being sent to the mock once it has been setup for error map testing.
	looperPauseSig chan bool

	looperStopSig chan struct{}
	looperDoneSig chan struct{}

	// fetchErr records the most recent poll failure; guarded by errLock.
	fetchErr error
	errLock  sync.Mutex
}

// newCCCPConfigController builds a controller; DoLoop must be started by the caller.
func newCCCPConfigController(props cccpPollerProperties, muxer *kvMux, cfgMgr *configManagementComponent) *cccpConfigController {
	return &cccpConfigController{
		muxer:              muxer,
		cfgMgr:             cfgMgr,
		confCccpPollPeriod: props.confCccpPollPeriod,
		confCccpMaxWait:    props.confCccpMaxWait,

		looperPauseSig: make(chan bool),
		looperStopSig:  make(chan struct{}),
		looperDoneSig:  make(chan struct{}),
	}
}

// cccpPollerProperties carries the poll-period and per-request wait settings.
type cccpPollerProperties struct {
	confCccpPollPeriod time.Duration
	confCccpMaxWait    time.Duration
}
// Error returns the most recent config-fetch failure, or nil.
func (ccc *cccpConfigController) Error() error {
	ccc.errLock.Lock()
	defer ccc.errLock.Unlock()
	return ccc.fetchErr
}

// setError records err as the latest fetch outcome (nil clears it).
func (ccc *cccpConfigController) setError(err error) {
	ccc.errLock.Lock()
	defer ccc.errLock.Unlock()
	ccc.fetchErr = err
}
// Pause suspends (true) or resumes (false) polling; it blocks until the
// looper receives the signal.
func (ccc *cccpConfigController) Pause(paused bool) {
	ccc.looperPauseSig <- paused
}

// Stop signals the looper to exit; it must not be called twice without a Reset.
func (ccc *cccpConfigController) Stop() {
	close(ccc.looperStopSig)
}

// Done returns the channel closed when the looper has fully exited.
func (ccc *cccpConfigController) Done() chan struct{} {
	return ccc.looperDoneSig
}

// Reset re-arms the stop/done channels so the looper can be started again.
func (ccc *cccpConfigController) Reset() {
	ccc.looperStopSig = make(chan struct{})
	ccc.looperDoneSig = make(chan struct{})
}
// DoLoop polls a (rotating) node for cluster configs every confCccpPollPeriod
// until Stop is called, pushing each newly parsed config into cfgMgr. It
// returns an error when CCCP polling cannot proceed (no nodes available, or
// the bucket type does not support CCCP) so the caller can fall back to HTTP
// config polling; a clean shutdown returns nil after closing Done.
func (ccc *cccpConfigController) DoLoop() error {
	tickTime := ccc.confCccpPollPeriod
	paused := false

	logDebugf("CCCP Looper starting.")
	nodeIdx := -1
	// The first time that we loop we want to skip any sleep so that we can try get a config and bootstrapped ASAP.
	firstLoop := true

Looper:
	for {
		if !firstLoop {
			// Wait for either the agent to be shut down, or our tick time to expire
			select {
			case <-ccc.looperStopSig:
				break Looper
			case pause := <-ccc.looperPauseSig:
				paused = pause
			case <-time.After(tickTime):
			}
		}
		firstLoop = false

		if paused {
			continue
		}

		iter, err := ccc.muxer.PipelineSnapshot()
		if err != nil {
			// If we have an error it indicates the client is shut down.
			break
		}

		numNodes := iter.NumPipelines()
		if numNodes == 0 {
			logDebugf("CCCPPOLL: No nodes available to poll, return upstream")
			return errNoCCCPHosts
		}

		// Re-randomize when the index is unset or no longer in range. The
		// original check used `nodeIdx > numNodes`, which let nodeIdx ==
		// numNodes slip through when the pipeline count shrank between polls
		// and produced an out-of-range start index for Iterate.
		if nodeIdx < 0 || nodeIdx >= numNodes {
			nodeIdx = rand.Intn(numNodes)
		}

		var foundConfig *cfgBucket
		var foundErr error
		iter.Iterate(nodeIdx, func(pipeline *memdPipeline) bool {
			nodeIdx = (nodeIdx + 1) % numNodes
			cccpBytes, err := ccc.getClusterConfig(pipeline)
			if err != nil {
				logDebugf("CCCPPOLL: Failed to retrieve CCCP config. %v", err)
				if isPollingFallbackError(err) {
					// This error is indicative of a memcached bucket which we can't handle so return the error.
					logDebugf("CCCPPOLL: CCCP not supported, returning error upstream.")
					foundErr = err
					return true
				}
				ccc.setError(err)
				return false
			}
			ccc.setError(nil)

			logDebugf("CCCPPOLL: Got Block: %v", string(cccpBytes))

			hostName, err := hostFromHostPort(pipeline.Address())
			if err != nil {
				logErrorf("CCCPPOLL: Failed to parse source address. %v", err)
				return false
			}

			bk, err := parseConfig(cccpBytes, hostName)
			if err != nil {
				logDebugf("CCCPPOLL: Failed to parse CCCP config. %v", err)
				return false
			}

			foundConfig = bk
			return true
		})
		if foundErr != nil {
			return foundErr
		}

		if foundConfig == nil {
			logDebugf("CCCPPOLL: Failed to retrieve config from any node.")
			continue
		}

		logDebugf("CCCPPOLL: Received new config")
		ccc.cfgMgr.OnNewConfig(foundConfig)
	}

	close(ccc.looperDoneSig)
	return nil
}
// getClusterConfig issues a GetClusterConfig request on the pipeline and
// waits up to confCccpMaxWait for the response. On timeout it first checks
// the pipeline's clients for an underlying connection error to surface the
// real cause, otherwise cancels with an ambiguous-timeout error. The named
// results are filled in by the request callback before signal fires.
func (ccc *cccpConfigController) getClusterConfig(pipeline *memdPipeline) (cfgOut []byte, errOut error) {
	signal := make(chan struct{}, 1)
	req := &memdQRequest{
		Packet: memd.Packet{
			Magic:   memd.CmdMagicReq,
			Command: memd.CmdGetClusterConfig,
		},
		Callback: func(resp *memdQResponse, _ *memdQRequest, err error) {
			if resp != nil {
				cfgOut = resp.Packet.Value
			}
			errOut = err
			signal <- struct{}{}
		},
		// Fail fast: DoLoop handles rotation to the next node itself.
		RetryStrategy: newFailFastRetryStrategy(),
	}
	err := pipeline.SendRequest(req)
	if err != nil {
		return nil, err
	}

	timeoutTmr := AcquireTimer(ccc.confCccpMaxWait)
	select {
	case <-signal:
		ReleaseTimer(timeoutTmr, false)
		return
	case <-timeoutTmr.C:
		ReleaseTimer(timeoutTmr, true)

		// We've timed out so lets check underlying connections to see if they're responsible.
		clients := pipeline.Clients()
		for _, cli := range clients {
			err := cli.Error()
			if err != nil {
				req.cancelWithCallback(err)
				// The cancel path still runs Callback, which sends on signal.
				<-signal
				return
			}
		}
		req.cancelWithCallback(errAmbiguousTimeout)
		<-signal
		return
	}
}

View File

@ -0,0 +1,208 @@
package gocbcore
import (
"errors"
"sync/atomic"
"time"
)
// Circuit breaker states. Closed means traffic flows normally; open means
// requests are rejected; half-open means a canary probe is in flight.
const (
	circuitBreakerStateDisabled uint32 = iota
	circuitBreakerStateClosed
	circuitBreakerStateHalfOpen
	circuitBreakerStateOpen
)

// circuitBreaker is the internal interface shared by the noop and lazy
// breaker implementations.
type circuitBreaker interface {
	// AllowsRequest reports whether a request may be dispatched right now.
	AllowsRequest() bool
	// MarkSuccessful records a successful request.
	MarkSuccessful()
	// MarkFailure records a failed request.
	MarkFailure()
	// State returns one of the circuitBreakerState* constants.
	State() uint32
	// Reset returns the breaker to the closed state with cleared counters.
	Reset()
	// CanaryTimeout is the deadline allotted to a canary probe.
	CanaryTimeout() time.Duration
	// CompletionCallback decides whether an error counts as a breaker failure.
	CompletionCallback(error) bool
}

// CircuitBreakerCallback is the callback used by the circuit breaker to determine if an error should count toward
// the circuit breaker failure count.
type CircuitBreakerCallback func(error) bool
// CircuitBreakerConfig is the set of configuration settings for configuring circuit breakers.
// If Enabled is false then a noop circuit breaker will be used, otherwise a lazy circuit
// breaker.
type CircuitBreakerConfig struct {
	Enabled bool
	// VolumeThreshold is the minimum number of operations in the rolling
	// window before the error percentage is evaluated (default 20).
	VolumeThreshold int64
	// ErrorThresholdPercentage is the failure percentage at which the
	// breaker opens (default 50).
	ErrorThresholdPercentage float64
	// SleepWindow is how long the breaker stays open before a canary is
	// attempted (default 5s).
	SleepWindow time.Duration
	// RollingWindow is the period over which counters accumulate before
	// being reset (default 1m).
	RollingWindow time.Duration
	// CompletionCallback decides whether an error counts as a failure
	// (default: everything except timeouts).
	CompletionCallback CircuitBreakerCallback
	// CanaryTimeout is the deadline for canary probes (default 5s).
	CanaryTimeout time.Duration
}
// noopCircuitBreaker satisfies circuitBreaker but never trips; used when
// circuit breaking is disabled.
type noopCircuitBreaker struct {
}

// newNoopCircuitBreaker creates a breaker that allows everything.
func newNoopCircuitBreaker() *noopCircuitBreaker {
	return &noopCircuitBreaker{}
}

// AllowsRequest always permits dispatch.
func (ncb *noopCircuitBreaker) AllowsRequest() bool {
	return true
}

// MarkSuccessful is a no-op.
func (ncb *noopCircuitBreaker) MarkSuccessful() {
}

// MarkFailure is a no-op.
func (ncb *noopCircuitBreaker) MarkFailure() {
}

// State always reports the disabled state.
func (ncb *noopCircuitBreaker) State() uint32 {
	return circuitBreakerStateDisabled
}

// Reset is a no-op.
func (ncb *noopCircuitBreaker) Reset() {
}

// CompletionCallback treats every error as countable (irrelevant here).
func (ncb *noopCircuitBreaker) CompletionCallback(error) bool {
	return true
}

// CanaryTimeout is zero; no canaries are ever sent.
func (ncb *noopCircuitBreaker) CanaryTimeout() time.Duration {
	return 0
}
// lazyCircuitBreaker is the standard breaker: it opens once the failure
// percentage inside a rolling window crosses the configured threshold, and
// probes with a canary after the sleep window elapses.
type lazyCircuitBreaker struct {
	// The int64 fields below are manipulated with sync/atomic. They are
	// placed first so they stay 8-byte aligned on 32-bit platforms, where
	// atomic 64-bit operations on misaligned addresses panic (the original
	// layout put a uint32 before them, leaving them 4-byte aligned there).
	windowStart int64
	total       int64
	failed      int64
	openedAt    int64

	// state holds one of the circuitBreakerState* constants (atomic uint32).
	state uint32

	// Configuration, set once at construction and read-only afterwards.
	sleepWindow              int64
	rollingWindow            int64
	volumeThreshold          int64
	errorPercentageThreshold float64
	canaryTimeout            time.Duration
	sendCanaryFn             func()
	completionCallback       CircuitBreakerCallback
}
// newLazyCircuitBreaker builds a breaker from config, filling in defaults
// (volume 20, error 50%, sleep 5s, rolling window 1m, canary timeout 5s,
// and a completion callback that ignores timeouts). canaryFn is invoked
// asynchronously when a half-open probe should be sent.
func newLazyCircuitBreaker(config CircuitBreakerConfig, canaryFn func()) *lazyCircuitBreaker {
	if config.VolumeThreshold == 0 {
		config.VolumeThreshold = 20
	}
	if config.ErrorThresholdPercentage == 0 {
		config.ErrorThresholdPercentage = 50
	}
	if config.SleepWindow == 0 {
		config.SleepWindow = 5 * time.Second
	}
	if config.RollingWindow == 0 {
		config.RollingWindow = 1 * time.Minute
	}
	if config.CanaryTimeout == 0 {
		config.CanaryTimeout = 5 * time.Second
	}
	if config.CompletionCallback == nil {
		config.CompletionCallback = func(err error) bool {
			return !errors.Is(err, ErrTimeout)
		}
	}

	breaker := &lazyCircuitBreaker{
		// Durations are compared against UnixNano() values, so store their
		// nanosecond counts directly. (The original multiplied by
		// time.Nanosecond, i.e. by 1 — a confusing no-op.)
		sleepWindow:              int64(config.SleepWindow),
		rollingWindow:            int64(config.RollingWindow),
		volumeThreshold:          config.VolumeThreshold,
		errorPercentageThreshold: config.ErrorThresholdPercentage,
		canaryTimeout:            config.CanaryTimeout,
		sendCanaryFn:             canaryFn,
		completionCallback:       config.CompletionCallback,
	}
	breaker.Reset()
	return breaker
}
// Reset closes the breaker and zeroes all counters, restarting the rolling
// window at the current time.
func (lcb *lazyCircuitBreaker) Reset() {
	now := time.Now().UnixNano()
	atomic.StoreUint32(&lcb.state, circuitBreakerStateClosed)
	atomic.StoreInt64(&lcb.total, 0)
	atomic.StoreInt64(&lcb.failed, 0)
	atomic.StoreInt64(&lcb.openedAt, 0)
	atomic.StoreInt64(&lcb.windowStart, now)
}

// State returns the current circuitBreakerState* value.
func (lcb *lazyCircuitBreaker) State() uint32 {
	return atomic.LoadUint32(&lcb.state)
}
// AllowsRequest reports whether a request may be dispatched. Closed allows
// everything; open/half-open reject, but once the sleep window has elapsed a
// single goroutine wins the CAS from open to half-open and fires the canary.
// Note the canary itself is sent asynchronously and this call still returns
// false for the triggering request.
func (lcb *lazyCircuitBreaker) AllowsRequest() bool {
	state := lcb.State()
	if state == circuitBreakerStateClosed {
		return true
	}

	elapsed := (time.Now().UnixNano() - atomic.LoadInt64(&lcb.openedAt)) > lcb.sleepWindow
	if elapsed && atomic.CompareAndSwapUint32(&lcb.state, circuitBreakerStateOpen, circuitBreakerStateHalfOpen) {
		// If we're outside of the sleep window and the circuit is open then send a canary.
		go lcb.sendCanaryFn()
	}
	return false
}
// MarkSuccessful records a success. A success while half-open (the canary
// succeeded) closes the breaker and resets all counters; otherwise it just
// bumps the total inside the (possibly refreshed) rolling window.
func (lcb *lazyCircuitBreaker) MarkSuccessful() {
	if atomic.CompareAndSwapUint32(&lcb.state, circuitBreakerStateHalfOpen, circuitBreakerStateClosed) {
		logDebugf("Moving circuit breaker to closed")
		lcb.Reset()
		return
	}

	lcb.maybeResetRollingWindow()
	atomic.AddInt64(&lcb.total, 1)
}

// MarkFailure records a failure. A failure while half-open (the canary
// failed) re-opens the breaker immediately; otherwise it bumps both counters
// and may open the circuit if the failure percentage crosses the threshold.
func (lcb *lazyCircuitBreaker) MarkFailure() {
	now := time.Now().UnixNano()
	if atomic.CompareAndSwapUint32(&lcb.state, circuitBreakerStateHalfOpen, circuitBreakerStateOpen) {
		logDebugf("Moving circuit breaker from half open to open")
		atomic.StoreInt64(&lcb.openedAt, now)
		return
	}

	lcb.maybeResetRollingWindow()
	atomic.AddInt64(&lcb.total, 1)
	atomic.AddInt64(&lcb.failed, 1)
	lcb.maybeOpenCircuit()
}
// CanaryTimeout returns the deadline allotted to canary probes.
func (lcb *lazyCircuitBreaker) CanaryTimeout() time.Duration {
	return lcb.canaryTimeout
}

// CompletionCallback reports whether err should count as a breaker failure.
func (lcb *lazyCircuitBreaker) CompletionCallback(err error) bool {
	return lcb.completionCallback(err)
}
// maybeOpenCircuit opens the breaker when the volume threshold has been met
// and the failure percentage reaches the configured threshold.
func (lcb *lazyCircuitBreaker) maybeOpenCircuit() {
	if atomic.LoadInt64(&lcb.total) < lcb.volumeThreshold {
		return
	}

	currentPercentage := (float64(atomic.LoadInt64(&lcb.failed)) / float64(atomic.LoadInt64(&lcb.total))) * 100
	if currentPercentage >= lcb.errorPercentageThreshold {
		logDebugf("Moving circuit breaker to open")
		atomic.StoreUint32(&lcb.state, circuitBreakerStateOpen)
		atomic.StoreInt64(&lcb.openedAt, time.Now().UnixNano())
	}
}

// maybeResetRollingWindow zeroes the counters once the rolling window has
// elapsed, starting a fresh window at the current time.
func (lcb *lazyCircuitBreaker) maybeResetRollingWindow() {
	now := time.Now().UnixNano()
	if (now - atomic.LoadInt64(&lcb.windowStart)) <= lcb.rollingWindow {
		return
	}

	atomic.StoreInt64(&lcb.windowStart, now)
	atomic.StoreInt64(&lcb.total, 0)
	atomic.StoreInt64(&lcb.failed, 0)
}

223
vendor/github.com/couchbase/gocbcore/v9/clusteragent.go generated vendored Normal file
View File

@ -0,0 +1,223 @@
package gocbcore
import (
"fmt"
"sync"
"time"
)
// clusterAgent services cluster-level (non-KV) operations — query, analytics,
// search, views, diagnostics — shared across the bucket-bound agents of an
// AgentGroup. It receives route configs from those agents and fans them out
// to its own watchers.
type clusterAgent struct {
	tlsConfig            *dynTLSConfig
	defaultRetryStrategy RetryStrategy

	httpMux *httpMux

	tracer      *tracerComponent
	http        *httpComponent
	diagnostics *diagnosticsComponent
	n1ql        *n1qlQueryComponent
	analytics   *analyticsQueryComponent
	search      *searchQueryComponent
	views       *viewQueryComponent

	// revLock guards revID, the highest config revision applied so far.
	revLock sync.Mutex
	revID   int64

	// configWatchLock guards configWatchers.
	configWatchLock sync.Mutex
	configWatchers  []routeConfigWatcher
}
// createClusterAgent wires up a clusterAgent from config: TLS, HTTP client and
// mux, tracing, the HTTP-service query components, and a diagnostics component
// without KV hooks. It seeds the HTTP mux with an initial route config built
// from the configured HTTP addresses (revID -1 so any real config supersedes it).
func createClusterAgent(config *clusterAgentConfig) *clusterAgent {
	var tlsConfig *dynTLSConfig
	if config.UseTLS {
		tlsConfig = createTLSConfig(config.Auth, config.TLSRootCAProvider)
	}

	httpCli := createHTTPClient(config.HTTPMaxIdleConns, config.HTTPMaxIdleConnsPerHost,
		config.HTTPIdleConnectionTimeout, tlsConfig)

	tracer := config.Tracer
	if tracer == nil {
		tracer = noopTracer{}
	}
	tracerCmpt := newTracerComponent(tracer, "", config.NoRootTraceSpans)

	c := &clusterAgent{
		tlsConfig: tlsConfig,
		tracer:    tracerCmpt,

		defaultRetryStrategy: config.DefaultRetryStrategy,
	}
	if c.defaultRetryStrategy == nil {
		c.defaultRetryStrategy = newFailFastRetryStrategy()
	}

	circuitBreakerConfig := config.CircuitBreakerConfig
	auth := config.Auth
	userAgent := config.UserAgent

	// Scheme follows whether TLS was configured above.
	var httpEpList []string
	for _, hostPort := range config.HTTPAddrs {
		if c.tlsConfig == nil {
			httpEpList = append(httpEpList, fmt.Sprintf("http://%s", hostPort))
		} else {
			httpEpList = append(httpEpList, fmt.Sprintf("https://%s", hostPort))
		}
	}

	c.httpMux = newHTTPMux(circuitBreakerConfig, c)
	c.http = newHTTPComponent(
		httpComponentProps{
			UserAgent:            userAgent,
			DefaultRetryStrategy: c.defaultRetryStrategy,
		},
		httpCli,
		c.httpMux,
		auth,
		c.tracer,
	)
	c.n1ql = newN1QLQueryComponent(c.http, c, c.tracer)
	c.analytics = newAnalyticsQueryComponent(c.http, c.tracer)
	c.search = newSearchQueryComponent(c.http, c.tracer)
	c.views = newViewQueryComponent(c.http, c.tracer)
	// diagnostics at this level will never need to hook KV. There are no persistent connections
	// so Diagnostics calls should be blocked. Ping and WaitUntilReady will only try HTTP services.
	c.diagnostics = newDiagnosticsComponent(nil, c.httpMux, c.http, "", c.defaultRetryStrategy, nil)

	// Kick everything off.
	cfg := &routeConfig{
		mgmtEpList: httpEpList,
		revID:      -1,
	}

	c.httpMux.OnNewRouteConfig(cfg)

	return c
}
// RegisterWith subscribes this cluster agent to route configs from cfgMgr.
func (agent *clusterAgent) RegisterWith(cfgMgr configManager) {
	cfgMgr.AddConfigWatcher(agent)
}

// UnregisterWith removes this cluster agent's subscription from cfgMgr.
func (agent *clusterAgent) UnregisterWith(cfgMgr configManager) {
	cfgMgr.RemoveConfigWatcher(agent)
}

// AddConfigWatcher registers a downstream watcher to receive configs this
// agent applies.
func (agent *clusterAgent) AddConfigWatcher(watcher routeConfigWatcher) {
	agent.configWatchLock.Lock()
	agent.configWatchers = append(agent.configWatchers, watcher)
	agent.configWatchLock.Unlock()
}
// RemoveConfigWatcher removes a previously registered watcher. If the watcher
// appears more than once the last occurrence is removed; if it is not
// registered the call is a no-op.
func (agent *clusterAgent) RemoveConfigWatcher(watcher routeConfigWatcher) {
	agent.configWatchLock.Lock()
	// -1 sentinel distinguishes "not found" from index 0; the original left
	// idx at its zero value and removed the first watcher whenever the
	// target was absent (and had an unreachable `idx == len` branch).
	idx := -1
	for i, w := range agent.configWatchers {
		if w == watcher {
			idx = i
		}
	}
	if idx >= 0 {
		agent.configWatchers = append(agent.configWatchers[:idx], agent.configWatchers[idx+1:]...)
	}
	agent.configWatchLock.Unlock()
}
// OnNewRouteConfig applies a new route config to this agent and fans it out
// to all registered watchers, provided it is newer than the last revision
// this agent has already applied.
func (agent *clusterAgent) OnNewRouteConfig(cfg *routeConfig) {
	agent.revLock.Lock()
	// This could be coming from multiple agents so we need to make sure that it's up to date with what we've seen.
	if cfg.revID <= agent.revID {
		agent.revLock.Unlock()
		return
	}

	logDebugf("Cluster agent applying config rev id: %d\n", cfg.revID)

	agent.revID = cfg.revID
	agent.revLock.Unlock()

	// Snapshot the watcher slice under the lock so the callbacks below run
	// without holding configWatchLock (a watcher may re-enter this component).
	agent.configWatchLock.Lock()
	watchers := agent.configWatchers
	agent.configWatchLock.Unlock()

	for _, watcher := range watchers {
		watcher.OnNewRouteConfig(cfg)
	}
}
// N1QLQuery runs the given N1QL statement against a random connected agent,
// invoking cb once the query service responds.
func (agent *clusterAgent) N1QLQuery(opts N1QLQueryOptions, cb N1QLQueryCallback) (PendingOp, error) {
	return agent.n1ql.N1QLQuery(opts, cb)
}
// PreparedN1QLQuery runs the given N1QL statement through the
// prepared-statement path against a random connected agent.
func (agent *clusterAgent) PreparedN1QLQuery(opts N1QLQueryOptions, cb N1QLQueryCallback) (PendingOp, error) {
	return agent.n1ql.PreparedN1QLQuery(opts, cb)
}
// AnalyticsQuery runs the given analytics statement against a random
// connected agent.
func (agent *clusterAgent) AnalyticsQuery(opts AnalyticsQueryOptions, cb AnalyticsQueryCallback) (PendingOp, error) {
	return agent.analytics.AnalyticsQuery(opts, cb)
}
// SearchQuery runs the given full-text search request against a random
// connected agent.
func (agent *clusterAgent) SearchQuery(opts SearchQueryOptions, cb SearchQueryCallback) (PendingOp, error) {
	return agent.search.SearchQuery(opts, cb)
}
// ViewQuery runs the given view request against a random connected agent.
func (agent *clusterAgent) ViewQuery(opts ViewQueryOptions, cb ViewQueryCallback) (PendingOp, error) {
	return agent.views.ViewQuery(opts, cb)
}
// DoHTTPRequest issues req against one of the HTTP services available
// within the SDK, via a random connected agent.
func (agent *clusterAgent) DoHTTPRequest(req *HTTPRequest, cb DoHTTPRequestCallback) (PendingOp, error) {
	return agent.http.DoHTTPRequest(req, cb)
}
// Ping pings all of the servers we are connected to and returns a report
// regarding the pings that were performed. Only HTTP-based services may be
// pinged at this level; memd and capi are rejected up front.
func (agent *clusterAgent) Ping(opts PingOptions, cb PingCallback) (PendingOp, error) {
	for _, srv := range opts.ServiceTypes {
		switch srv {
		case MemdService:
			return nil, wrapError(errInvalidArgument, "memd service is not valid for use with clusterAgent")
		case CapiService:
			return nil, wrapError(errInvalidArgument, "capi service is not valid for use with clusterAgent")
		}
	}

	if len(opts.ServiceTypes) == 0 {
		// Default to every HTTP service, tolerating services the cluster
		// does not expose.
		opts.ServiceTypes = []ServiceType{CbasService, FtsService, N1qlService, MgmtService}
		opts.ignoreMissingServices = true
	}

	return agent.diagnostics.Ping(opts, cb)
}
// WaitUntilReady returns whether or not the Agent has seen a valid cluster
// config. Only HTTP-based services may be waited on at this level; memd and
// capi are rejected up front.
func (agent *clusterAgent) WaitUntilReady(deadline time.Time, opts WaitUntilReadyOptions, cb WaitUntilReadyCallback) (PendingOp, error) {
	for _, srv := range opts.ServiceTypes {
		switch srv {
		case MemdService:
			return nil, wrapError(errInvalidArgument, "memd service is not valid for use with clusterAgent")
		case CapiService:
			return nil, wrapError(errInvalidArgument, "capi service is not valid for use with clusterAgent")
		}
	}

	if len(opts.ServiceTypes) == 0 {
		opts.ServiceTypes = []ServiceType{CbasService, FtsService, N1qlService, MgmtService}
	}

	return agent.diagnostics.WaitUntilReady(deadline, opts, cb)
}
// Close shuts down the agent, closing the underlying http client so that no
// goroutines are left open. It does not deregister the agent from any
// configuration providers; callers must do that first.
func (agent *clusterAgent) Close() error {
	agent.http.Close()
	return nil
}

View File

@ -0,0 +1,41 @@
package gocbcore
import (
"crypto/x509"
"time"
)
// clusterAgentConfig holds the options used to build a clusterAgent
// (cluster-level HTTP services only; no KV/memd settings here).
type clusterAgentConfig struct {
	HTTPAddrs []string // host:port pairs for the HTTP services
	UserAgent string
	UseTLS    bool
	Auth      AuthProvider

	// TLSRootCAProvider supplies the CA pool used to verify server certs.
	TLSRootCAProvider func() *x509.CertPool

	HTTPMaxIdleConns          int
	HTTPMaxIdleConnsPerHost   int
	HTTPIdleConnectionTimeout time.Duration

	// Volatile: Tracer API is subject to change.
	Tracer           RequestTracer
	NoRootTraceSpans bool

	DefaultRetryStrategy RetryStrategy
	CircuitBreakerConfig CircuitBreakerConfig
}
// redacted returns a copy of the config suitable for logging, with
// system-identifying data (the HTTP addresses) redacted when full log
// redaction is enabled.
func (config *clusterAgentConfig) redacted() interface{} {
	newConfig := *config
	if isLogRedactionLevelFull() {
		// Build a fresh slice so we never write through to the original
		// config's backing array.
		redactedAddrs := make([]string, len(newConfig.HTTPAddrs))
		for i, addr := range newConfig.HTTPAddrs {
			redactedAddrs[i] = redactSystemData(addr)
		}
		newConfig.HTTPAddrs = redactedAddrs
	}

	return newConfig
}

117
vendor/github.com/couchbase/gocbcore/v9/collections.go generated vendored Normal file
View File

@ -0,0 +1,117 @@
package gocbcore
import (
"encoding/json"
"strconv"
)
const (
	// unknownCid marks a collection whose ID has not yet been resolved.
	unknownCid = uint32(0xFFFFFFFF)
	// pendingCid marks a collection whose ID fetch is currently in flight.
	pendingCid = uint32(0xFFFFFFFE)
)
// ManifestCollection is the representation of a collection within a manifest.
type ManifestCollection struct {
	UID  uint32
	Name string
}

// UnmarshalJSON is a custom implementation of json unmarshaling.
// The server transmits collection UIDs as hexadecimal strings.
func (item *ManifestCollection) UnmarshalJSON(data []byte) error {
	var raw struct {
		UID  string `json:"uid"`
		Name string `json:"name"`
	}
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}

	uid, err := strconv.ParseUint(raw.UID, 16, 32)
	if err != nil {
		return err
	}

	item.UID = uint32(uid)
	item.Name = raw.Name
	return nil
}
// ManifestScope is the representation of a scope within a manifest.
type ManifestScope struct {
	UID         uint32
	Name        string
	Collections []ManifestCollection
}

// UnmarshalJSON is a custom implementation of json unmarshaling.
// The server transmits scope UIDs as hexadecimal strings.
func (item *ManifestScope) UnmarshalJSON(data []byte) error {
	var raw struct {
		UID         string               `json:"uid"`
		Name        string               `json:"name"`
		Collections []ManifestCollection `json:"collections"`
	}
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}

	uid, err := strconv.ParseUint(raw.UID, 16, 32)
	if err != nil {
		return err
	}

	item.UID = uint32(uid)
	item.Name = raw.Name
	item.Collections = raw.Collections
	return nil
}
// Manifest is the representation of a collections manifest.
type Manifest struct {
	UID    uint64
	Scopes []ManifestScope
}

// UnmarshalJSON is a custom implementation of json unmarshaling.
// The server transmits the manifest UID as a hexadecimal string.
func (item *Manifest) UnmarshalJSON(data []byte) error {
	var raw struct {
		UID    string          `json:"uid"`
		Scopes []ManifestScope `json:"scopes"`
	}
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}

	uid, err := strconv.ParseUint(raw.UID, 16, 64)
	if err != nil {
		return err
	}

	item.UID = uid
	item.Scopes = raw.Scopes
	return nil
}
// GetCollectionManifestOptions are the options available to the GetCollectionManifest command.
type GetCollectionManifestOptions struct {
	// Volatile: Tracer API is subject to change.
	TraceContext  RequestSpanContext
	RetryStrategy RetryStrategy
}

// GetCollectionIDOptions are the options available to the GetCollectionID command.
type GetCollectionIDOptions struct {
	RetryStrategy RetryStrategy

	// Volatile: Tracer API is subject to change.
	TraceContext RequestSpanContext
}

// GetCollectionIDResult encapsulates the result of a GetCollectionID operation.
type GetCollectionIDResult struct {
	// ManifestID is the UID of the manifest the ID was resolved against.
	ManifestID uint64
	// CollectionID is the resolved numeric ID for the collection.
	CollectionID uint32
}

// GetCollectionManifestResult encapsulates the result of a GetCollectionManifest operation.
type GetCollectionManifestResult struct {
	// Manifest is the raw (JSON) manifest payload returned by the server.
	Manifest []byte
}

View File

@ -0,0 +1,443 @@
package gocbcore
import (
"encoding/binary"
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/couchbase/gocbcore/v9/memd"
)
// createKey builds the idMap lookup key for a scope/collection pair.
func (cidMgr *collectionsComponent) createKey(scopeName, collectionName string) string {
	return scopeName + "." + collectionName
}
// collectionsComponent maps (scope, collection) names to collection IDs and
// transparently resolves or refreshes those IDs while dispatching requests.
type collectionsComponent struct {
	idMap                map[string]*collectionIDCache // keyed by createKey(scope, collection)
	mapLock              sync.Mutex                    // guards idMap
	dispatcher           dispatcher
	maxQueueSize         int
	tracer               tracerManager
	defaultRetryStrategy RetryStrategy
	cfgMgr               configManager

	// pendingOpQueue is used when collections are enabled but we've not yet seen a cluster config to confirm
	// whether or not collections are supported.
	pendingOpQueue *memdOpQueue
	configSeen     uint32 // accessed atomically; set once the first config arrives
}

// collectionIDProps holds the construction options for newCollectionIDManager.
type collectionIDProps struct {
	MaxQueueSize         int
	DefaultRetryStrategy RetryStrategy
}
// newCollectionIDManager builds a collectionsComponent, registers it for
// config updates, and installs it as the dispatcher's post-complete error
// handler so that unknown-collection failures can trigger retries.
func newCollectionIDManager(props collectionIDProps, dispatcher dispatcher, tracer tracerManager,
	cfgMgr configManager) *collectionsComponent {
	cidMgr := &collectionsComponent{
		dispatcher:           dispatcher,
		idMap:                make(map[string]*collectionIDCache),
		maxQueueSize:         props.MaxQueueSize,
		tracer:               tracer,
		defaultRetryStrategy: props.DefaultRetryStrategy,
		cfgMgr:               cfgMgr,
		pendingOpQueue:       newMemdOpQueue(),
	}

	cfgMgr.AddConfigWatcher(cidMgr)
	dispatcher.SetPostCompleteErrorHandler(cidMgr.handleOpRoutingResp)

	return cidMgr
}
// OnNewRouteConfig runs its body exactly once, on the first config seen
// (the CAS on configSeen guards against races). It determines whether the
// cluster supports collections and then flushes any operations that were
// queued while that was still unknown.
func (cidMgr *collectionsComponent) OnNewRouteConfig(cfg *routeConfig) {
	if !atomic.CompareAndSwapUint32(&cidMgr.configSeen, 0, 1) {
		// Another config already won the race; nothing left to do.
		return
	}

	colsSupported := cfg.ContainsBucketCapability("collections")
	cidMgr.cfgMgr.RemoveConfigWatcher(cidMgr)
	cidMgr.pendingOpQueue.Close()
	cidMgr.pendingOpQueue.Drain(func(request *memdQRequest) {
		// Anything in this queue is here because collections were present so if we definitely don't support collections
		// then fail them.
		if !colsSupported {
			request.tryCallback(nil, errCollectionsUnsupported)
			return
		}
		cidMgr.requeue(request)
	})
}
// handleCollectionUnknown asks the request's retry strategy whether an
// unknown-collection failure should be retried and, if so, schedules a
// requeue after the indicated delay. It reports whether a retry was
// scheduled.
func (cidMgr *collectionsComponent) handleCollectionUnknown(req *memdQRequest) bool {
	// We cannot retry requests with no collection information.
	// This also prevents the GetCollectionID requests from being automatically retried.
	if req.CollectionName == "" && req.ScopeName == "" {
		return false
	}

	shouldRetry, retryTime := retryOrchMaybeRetry(req, KVCollectionOutdatedRetryReason)
	if shouldRetry {
		// Requeue asynchronously once the retry delay has elapsed.
		go func() {
			time.Sleep(time.Until(retryTime))
			cidMgr.requeue(req)
		}()
	}

	return shouldRetry
}
// handleOpRoutingResp is installed as the dispatcher's post-complete error
// handler. It intercepts "collection not found" failures and retries them
// where possible; it returns true when the request has been re-queued and
// the caller should not complete it.
func (cidMgr *collectionsComponent) handleOpRoutingResp(resp *memdQResponse, req *memdQRequest, err error) (bool, error) {
	if errors.Is(err, ErrCollectionNotFound) && cidMgr.handleCollectionUnknown(req) {
		return true, nil
	}

	return false, err
}
// GetCollectionManifest fetches the bucket's raw collections manifest from
// the server, invoking cb with the (JSON) payload or an error.
func (cidMgr *collectionsComponent) GetCollectionManifest(opts GetCollectionManifestOptions, cb GetCollectionManifestCallback) (PendingOp, error) {
	tracer := cidMgr.tracer.CreateOpTrace("GetCollectionManifest", opts.TraceContext)

	handler := func(resp *memdQResponse, req *memdQRequest, err error) {
		if err != nil {
			cb(nil, err)
			tracer.Finish()
			return
		}

		// The manifest is returned verbatim in the response value.
		res := GetCollectionManifestResult{
			Manifest: resp.Value,
		}

		tracer.Finish()
		cb(&res, nil)
	}

	if opts.RetryStrategy == nil {
		opts.RetryStrategy = cidMgr.defaultRetryStrategy
	}

	req := &memdQRequest{
		Packet: memd.Packet{
			Magic:    memd.CmdMagicReq,
			Command:  memd.CmdCollectionsGetManifest,
			Datatype: 0,
			Cas:      0,
			Extras:   nil,
			Key:      nil,
			Value:    nil,
		},
		Callback:         handler,
		RetryStrategy:    opts.RetryStrategy,
		RootTraceContext: opts.TraceContext,
	}

	return cidMgr.dispatcher.DispatchDirect(req)
}
// GetCollectionID does not trigger retries on unknown collection. This is because the request sets the scope and collection
// name in the key rather than in the corresponding fields.
// On success it also records the resolved ID in this component's cache.
func (cidMgr *collectionsComponent) GetCollectionID(scopeName string, collectionName string, opts GetCollectionIDOptions,
	cb GetCollectionIDCallback) (PendingOp, error) {
	tracer := cidMgr.tracer.CreateOpTrace("GetCollectionID", opts.TraceContext)

	handler := func(resp *memdQResponse, req *memdQRequest, err error) {
		// Ensure a cache entry exists for this pair before we report the result.
		cidCache, ok := cidMgr.get(scopeName, collectionName)
		if !ok {
			cidCache = cidMgr.newCollectionIDCache(scopeName, collectionName)
			cidMgr.add(cidCache, scopeName, collectionName)
		}

		if err != nil {
			tracer.Finish()
			cb(nil, err)
			return
		}

		// Response extras layout: 8 bytes manifest UID, then 4 bytes collection ID.
		manifestID := binary.BigEndian.Uint64(resp.Extras[0:])
		collectionID := binary.BigEndian.Uint32(resp.Extras[8:])

		cidCache.lock.Lock()
		cidCache.setID(collectionID)
		cidCache.lock.Unlock()

		res := GetCollectionIDResult{
			ManifestID:   manifestID,
			CollectionID: collectionID,
		}

		tracer.Finish()
		cb(&res, nil)
	}

	if opts.RetryStrategy == nil {
		opts.RetryStrategy = cidMgr.defaultRetryStrategy
	}

	// The server expects "scope.collection" in the key; blank components mean
	// the default scope/collection.
	keyScopeName := scopeName
	if keyScopeName == "" {
		keyScopeName = "_default"
	}
	keyCollectionName := collectionName
	if keyCollectionName == "" {
		keyCollectionName = "_default"
	}

	req := &memdQRequest{
		Packet: memd.Packet{
			Magic:    memd.CmdMagicReq,
			Command:  memd.CmdCollectionsGetID,
			Datatype: 0,
			Cas:      0,
			Extras:   nil,
			Key:      []byte(fmt.Sprintf("%s.%s", keyScopeName, keyCollectionName)),
			Value:    nil,
			Vbucket:  0,
		},
		ReplicaIdx:       -1,
		RetryStrategy:    opts.RetryStrategy,
		RootTraceContext: opts.TraceContext,
	}

	req.Callback = handler

	return cidMgr.dispatcher.DispatchDirect(req)
}
// add stores a cache entry for the given scope/collection pair.
func (cidMgr *collectionsComponent) add(id *collectionIDCache, scopeName, collectionName string) {
	key := cidMgr.createKey(scopeName, collectionName)

	cidMgr.mapLock.Lock()
	defer cidMgr.mapLock.Unlock()
	cidMgr.idMap[key] = id
}
// get looks up the cache entry for the given scope/collection pair,
// reporting whether one exists.
func (cidMgr *collectionsComponent) get(scopeName, collectionName string) (*collectionIDCache, bool) {
	cidMgr.mapLock.Lock()
	defer cidMgr.mapLock.Unlock()

	// A missing key yields the nil zero value, so (cache, ok) is already
	// exactly what we want to return.
	cache, ok := cidMgr.idMap[cidMgr.createKey(scopeName, collectionName)]
	return cache, ok
}
// remove drops the cache entry for the given scope/collection pair, if any.
func (cidMgr *collectionsComponent) remove(scopeName, collectionName string) {
	// Fix: the original call passed arguments without any format verbs, so
	// the scope/collection were never rendered (and go vet's printf check
	// rejects it).
	logDebugf("Removing cache entry for %s.%s", scopeName, collectionName)
	cidMgr.mapLock.Lock()
	delete(cidMgr.idMap, cidMgr.createKey(scopeName, collectionName))
	cidMgr.mapLock.Unlock()
}
// newCollectionIDCache constructs an empty ID cache bound to this component
// for the given scope/collection pair. The caller is responsible for
// registering it via add.
func (cidMgr *collectionsComponent) newCollectionIDCache(scope, collection string) *collectionIDCache {
	cache := &collectionIDCache{
		scopeName:      scope,
		collectionName: collection,
		dispatcher:     cidMgr.dispatcher,
		parent:         cidMgr,
		maxQueueSize:   cidMgr.maxQueueSize,
	}

	return cache
}
// collectionIDCache tracks the resolved numeric ID for a single
// scope/collection pair, queueing requests while that ID is being
// (re)fetched.
type collectionIDCache struct {
	opQueue        *memdOpQueue // holds requests while id == pendingCid
	id             uint32       // unknownCid, pendingCid, or a resolved ID
	collectionName string
	scopeName      string
	parent         *collectionsComponent
	dispatcher     dispatcher
	lock           sync.Mutex // guards id and opQueue swaps
	maxQueueSize   int
}
// sendWithCid stamps the cached collection ID onto req and dispatches it.
func (cid *collectionIDCache) sendWithCid(req *memdQRequest) error {
	cid.lock.Lock()
	req.CollectionID = cid.id
	cid.lock.Unlock()

	_, err := cid.dispatcher.DispatchDirect(req)
	return err
}
// queueRequest parks req on this cache's pending queue until the in-flight
// collection ID refresh completes, honouring the queue size limit.
func (cid *collectionIDCache) queueRequest(req *memdQRequest) error {
	cid.lock.Lock()
	defer cid.lock.Unlock()
	return cid.opQueue.Push(req, cid.maxQueueSize)
}
// setID records a resolved (or sentinel) collection ID on the cache.
// Callers must hold cid.lock.
func (cid *collectionIDCache) setID(id uint32) {
	logDebugf("Setting cache ID to %d for %s.%s", id, cid.scopeName, cid.collectionName)
	cid.id = id
}
// refreshCid queues req on this cache and kicks off a GetCollectionID call
// to (re)resolve the collection's ID. On success all queued requests are
// re-dispatched with the fresh ID; on failure they are completed with the
// error (or, for an unknown collection, retried where the strategy allows).
func (cid *collectionIDCache) refreshCid(req *memdQRequest) error {
	err := cid.opQueue.Push(req, cid.maxQueueSize)
	if err != nil {
		return err
	}

	logDebugf("Refreshing collection ID for %s.%s", req.ScopeName, req.CollectionName)
	_, err = cid.parent.GetCollectionID(req.ScopeName, req.CollectionName, GetCollectionIDOptions{TraceContext: req.RootTraceContext},
		func(result *GetCollectionIDResult, err error) {
			if err != nil {
				if errors.Is(err, ErrCollectionNotFound) {
					// The collection is unknown so we need to mark the cid unknown and attempt to retry the request.
					// Retrying the request will requeue it in the cid manager so either it will pick up the unknown cid
					// and cause a refresh or another request will and this one will get queued within the cache.
					// Either the collection will eventually come online or this request will timeout.
					logDebugf("Collection %s.%s not found, attempting retry", req.ScopeName, req.CollectionName)
					cid.lock.Lock()
					cid.setID(unknownCid)
					cid.lock.Unlock()
					if cid.opQueue.Remove(req) {
						if cid.parent.handleCollectionUnknown(req) {
							return
						}
					} else {
						// Fix: the original call passed arguments with no format
						// verbs, so neither value was rendered (go vet printf
						// failure).
						logDebugf("Request %d (%s) no longer existed in op queue, possibly cancelled?",
							req.Opaque, req.CollectionName)
					}
				} else {
					logDebugf("Collection ID refresh failed: %v", err)
				}

				// There was an error getting this collection ID so lets remove the cache from the manager and try to
				// callback on all of the queued requests.
				cid.parent.remove(req.ScopeName, req.CollectionName)
				cid.opQueue.Close()
				cid.opQueue.Drain(func(request *memdQRequest) {
					request.tryCallback(nil, err)
				})
				return
			}

			// We successfully got the cid, the GetCollectionID itself will have handled setting the ID on this cache,
			// so lets reset the op queue and requeue all of our requests.
			logDebugf("Collection %s.%s refresh succeeded, requeuing requests", req.ScopeName, req.CollectionName)
			cid.lock.Lock()
			opQueue := cid.opQueue
			cid.opQueue = newMemdOpQueue()
			cid.lock.Unlock()

			opQueue.Close()
			opQueue.Drain(func(request *memdQRequest) {
				request.CollectionID = result.CollectionID
				cid.dispatcher.RequeueDirect(request, false)
			})
		},
	)

	return err
}
// dispatch routes req according to the cache's current state:
// unknown ID -> mark pending and trigger a refresh; pending -> queue the
// request behind the in-flight refresh; resolved -> send immediately.
func (cid *collectionIDCache) dispatch(req *memdQRequest) error {
	cid.lock.Lock()
	// if the cid is unknown then mark the request pending and refresh cid first
	// if it's pending then queue the request
	// otherwise send the request
	switch cid.id {
	case unknownCid:
		logDebugf("Collection %s.%s unknown, refreshing id", req.ScopeName, req.CollectionName)
		cid.setID(pendingCid)
		cid.opQueue = newMemdOpQueue()

		// We attempt to send the refresh inside of the lock, that way we haven't released the lock and allowed an op
		// to get queued if we need to move the status back to unknown. Without doing this it's possible for one or
		// more op(s) to sneak into the queue and then no more requests come in and those sit in the queue until they
		// timeout because nothing is triggering the cid refresh.
		err := cid.refreshCid(req)
		if err != nil {
			// We've failed to send the cid refresh so we need to set it back to unknown otherwise it'll never
			// get updated.
			cid.setID(unknownCid)
			cid.lock.Unlock()
			return err
		}
		cid.lock.Unlock()
		return nil
	case pendingCid:
		logDebugf("Collection %s.%s pending, queueing request", req.ScopeName, req.CollectionName)
		cid.lock.Unlock()
		return cid.queueRequest(req)
	default:
		cid.lock.Unlock()
		return cid.sendWithCid(req)
	}
}
// Dispatch routes req through the collections machinery: requests that need
// no collection resolution go straight to the dispatcher, requests arriving
// before the first config are parked, and everything else goes through the
// per-collection ID cache.
func (cidMgr *collectionsComponent) Dispatch(req *memdQRequest) (PendingOp, error) {
	noCollection := req.CollectionName == "" && req.ScopeName == ""
	defaultCollection := req.CollectionName == "_default" && req.ScopeName == "_default"
	collectionIDPresent := req.CollectionID > 0

	// If the user didn't enable collections then we can just not bother with any collections logic.
	if !cidMgr.dispatcher.CollectionsEnabled() {
		if !(noCollection || defaultCollection) || collectionIDPresent {
			return nil, errCollectionsUnsupported
		}
		_, err := cidMgr.dispatcher.DispatchDirect(req)
		if err != nil {
			return nil, err
		}

		return req, nil
	}

	// Default-collection and pre-resolved requests need no ID lookup.
	if noCollection || defaultCollection || collectionIDPresent {
		return cidMgr.dispatcher.DispatchDirect(req)
	}

	// Before the first config arrives we can't tell whether the cluster
	// supports collections, so park the request (OnNewRouteConfig drains it).
	if atomic.LoadUint32(&cidMgr.configSeen) == 0 {
		logDebugf("Collections are enabled but we've not yet seen a config so queueing request")
		err := cidMgr.pendingOpQueue.Push(req, cidMgr.maxQueueSize)
		if err != nil {
			return nil, err
		}

		return req, nil
	}

	if !cidMgr.dispatcher.SupportsCollections() {
		return nil, errCollectionsUnsupported
	}

	cidCache, ok := cidMgr.get(req.ScopeName, req.CollectionName)
	if !ok {
		cidCache = cidMgr.newCollectionIDCache(req.ScopeName, req.CollectionName)
		cidCache.setID(unknownCid)
		cidMgr.add(cidCache, req.ScopeName, req.CollectionName)
	}
	err := cidCache.dispatch(req)
	if err != nil {
		return nil, err
	}

	return req, nil
}
// requeue re-dispatches a previously failed/queued request through the
// collection ID cache, first invalidating any resolved ID so it gets
// refreshed. Dispatch failures complete the request via its callback.
func (cidMgr *collectionsComponent) requeue(req *memdQRequest) {
	cidCache, ok := cidMgr.get(req.ScopeName, req.CollectionName)
	if !ok {
		cidCache = cidMgr.newCollectionIDCache(req.ScopeName, req.CollectionName)
		cidCache.setID(unknownCid)
		cidMgr.add(cidCache, req.ScopeName, req.CollectionName)
	}
	cidCache.lock.Lock()
	// Only invalidate a resolved ID; unknown/pending states already imply a
	// refresh is (or will be) in flight.
	if cidCache.id != unknownCid && cidCache.id != pendingCid {
		cidCache.setID(unknownCid)
	}
	cidCache.lock.Unlock()

	err := cidCache.dispatch(req)
	if err != nil {
		req.tryCallback(nil, err)
	}
}

111
vendor/github.com/couchbase/gocbcore/v9/commonflags.go generated vendored Normal file
View File

@ -0,0 +1,111 @@
package gocbcore
const (
	// Legacy flag format for JSON data.
	lfJSON = 0

	// Common flags mask
	cfMask = 0xFF000000
	// Common flags mask for data format
	cfFmtMask = 0x0F000000
	// Common flags mask for compression mode.
	cfCmprMask = 0xE0000000

	// Common flag format for sdk-private data.
	cfFmtPrivate = 1 << 24 // nolint: deadcode,varcheck,unused
	// Common flag format for JSON data.
	cfFmtJSON = 2 << 24
	// Common flag format for binary data.
	cfFmtBinary = 3 << 24
	// Common flag format for string data.
	cfFmtString = 4 << 24

	// Common flags compression for disabled compression.
	cfCmprNone = 0 << 29
)

// DataType represents the type of data for a value
type DataType uint32

// CompressionType indicates the type of compression for a value
type CompressionType uint32

const (
	// UnknownType indicates the values type is unknown.
	UnknownType = DataType(0)

	// JSONType indicates the value is JSON data.
	JSONType = DataType(1)

	// BinaryType indicates the value is binary data.
	BinaryType = DataType(2)

	// StringType indicates the value is string data.
	StringType = DataType(3)
)

const (
	// UnknownCompression indicates that the compression type is unknown.
	UnknownCompression = CompressionType(0)

	// NoCompression indicates that no compression is being used.
	NoCompression = CompressionType(1)
)

// EncodeCommonFlags encodes a data type and compression type into a flags
// value using the common flags specification.
func EncodeCommonFlags(valueType DataType, compression CompressionType) uint32 {
	var flags uint32

	switch valueType {
	case JSONType:
		flags |= cfFmtJSON
	case BinaryType:
		flags |= cfFmtBinary
	case StringType:
		flags |= cfFmtString
	case UnknownType:
		// Unknown types contribute no format bits.
	}

	switch compression {
	case NoCompression:
		// cfCmprNone is zero, so there are no bits to set.
	case UnknownCompression:
		// Unknown compression contributes no bits either.
	}

	return flags
}

// DecodeCommonFlags decodes a flags value into a data type and compression type
// using the common flags specification.
func DecodeCommonFlags(flags uint32) (DataType, CompressionType) {
	if flags&cfMask == 0 {
		// No common-flags bits at all: this is a legacy flags value. Only
		// legacy JSON maps onto the common scheme; anything else is unknown.
		if flags != lfJSON {
			return UnknownType, UnknownCompression
		}
		flags = cfFmtJSON
	}

	valueType := UnknownType
	switch flags & cfFmtMask {
	case cfFmtBinary:
		valueType = BinaryType
	case cfFmtString:
		valueType = StringType
	case cfFmtJSON:
		valueType = JSONType
	}

	compression := UnknownCompression
	if flags&cfCmprMask == cfCmprNone {
		compression = NoCompression
	}

	return valueType, compression
}

314
vendor/github.com/couchbase/gocbcore/v9/config.go generated vendored Normal file
View File

@ -0,0 +1,314 @@
package gocbcore
import (
"encoding/json"
"fmt"
"net"
"strings"
)
// A Node is a computer in a cluster running the couchbase software.
type cfgNode struct {
	ClusterCompatibility int                `json:"clusterCompatibility"`
	ClusterMembership    string             `json:"clusterMembership"`
	CouchAPIBase         string             `json:"couchApiBase"`
	Hostname             string             `json:"hostname"`
	InterestingStats     map[string]float64 `json:"interestingStats,omitempty"`
	MCDMemoryAllocated   float64            `json:"mcdMemoryAllocated"`
	MCDMemoryReserved    float64            `json:"mcdMemoryReserved"`
	MemoryFree           float64            `json:"memoryFree"`
	MemoryTotal          float64            `json:"memoryTotal"`
	OS                   string             `json:"os"`
	Ports                map[string]int     `json:"ports"`
	Status               string             `json:"status"`
	Uptime               int                `json:"uptime,string"`
	Version              string             `json:"version"`
	ThisNode             bool               `json:"thisNode,omitempty"`
}

// cfgNodeServices lists a node's advertised service ports, in both
// plaintext and TLS ("...Ssl") variants. A zero port means not advertised.
type cfgNodeServices struct {
	Kv      uint16 `json:"kv"`
	Capi    uint16 `json:"capi"`
	Mgmt    uint16 `json:"mgmt"`
	N1ql    uint16 `json:"n1ql"`
	Fts     uint16 `json:"fts"`
	Cbas    uint16 `json:"cbas"`
	KvSsl   uint16 `json:"kvSSL"`
	CapiSsl uint16 `json:"capiSSL"`
	MgmtSsl uint16 `json:"mgmtSSL"`
	N1qlSsl uint16 `json:"n1qlSSL"`
	FtsSsl  uint16 `json:"ftsSSL"`
	CbasSsl uint16 `json:"cbasSSL"`
}

// cfgNodeAltAddress describes an alternate address (and optional alternate
// ports) for a node, keyed by network type in cfgNodeExt.AltAddresses.
type cfgNodeAltAddress struct {
	Ports    *cfgNodeServices `json:"ports,omitempty"`
	Hostname string           `json:"hostname"`
}

// cfgNodeExt is the "nodesExt" representation of a node: service ports plus
// any alternate addresses.
type cfgNodeExt struct {
	Services     cfgNodeServices              `json:"services"`
	Hostname     string                       `json:"hostname"`
	AltAddresses map[string]cfgNodeAltAddress `json:"alternateAddresses"`
}

// VBucketServerMap is the a mapping of vbuckets to nodes.
type cfgVBucketServerMap struct {
	HashAlgorithm string   `json:"hashAlgorithm"`
	NumReplicas   int      `json:"numReplicas"`
	ServerList    []string `json:"serverList"`
	VBucketMap    [][]int  `json:"vBucketMap"`
}

// Bucket is the primary entry point for most data operations.
type cfgBucket struct {
	Rev                 int64 `json:"rev"`
	SourceHostname      string
	Capabilities        []string `json:"bucketCapabilities"`
	CapabilitiesVersion string   `json:"bucketCapabilitiesVer"`
	Name                string   `json:"name"`
	NodeLocator         string   `json:"nodeLocator"`
	URI                 string   `json:"uri"`
	StreamingURI        string   `json:"streamingUri"`
	UUID                string   `json:"uuid"`
	DDocs               struct {
		URI string `json:"uri"`
	} `json:"ddocs,omitempty"`

	// These are used for JSON IO, but isn't used for processing
	// since it needs to be swapped out safely.
	VBucketServerMap       cfgVBucketServerMap `json:"vBucketServerMap"`
	Nodes                  []cfgNode           `json:"nodes"`
	NodesExt               []cfgNodeExt        `json:"nodesExt,omitempty"`
	ClusterCapabilitiesVer []int               `json:"clusterCapabilitiesVer,omitempty"`
	ClusterCapabilities    map[string][]string `json:"clusterCapabilities,omitempty"`
}
// BuildRouteConfig converts a raw bucket config into a routeConfig: it
// classifies the bucket type from the node locator, assembles per-service
// endpoint lists (preferring nodesExt where present), and builds the
// vbucket or ketama map as appropriate.
func (cfg *cfgBucket) BuildRouteConfig(useSsl bool, networkType string, firstConnect bool) *routeConfig {
	var kvServerList []string
	var capiEpList []string
	var mgmtEpList []string
	var n1qlEpList []string
	var ftsEpList []string
	var cbasEpList []string
	var bktType bucketType

	// The node locator tells us what kind of bucket this config describes.
	switch cfg.NodeLocator {
	case "ketama":
		bktType = bktTypeMemcached
	case "vbucket":
		bktType = bktTypeCouchbase
	default:
		if cfg.UUID == "" {
			bktType = bktTypeNone
		} else {
			logDebugf("Invalid nodeLocator %s", cfg.NodeLocator)
			bktType = bktTypeInvalid
		}
	}

	if cfg.NodesExt != nil {
		lenNodes := len(cfg.Nodes)
		for i, node := range cfg.NodesExt {
			hostname := node.Hostname
			ports := node.Services

			// For non-default networks, swap in the alternate address (and
			// alternate ports where provided); skip nodes without one.
			if networkType != "default" {
				if altAddr, ok := node.AltAddresses[networkType]; ok {
					hostname = altAddr.Hostname
					if altAddr.Ports != nil {
						ports = *altAddr.Ports
					}
				} else {
					if !firstConnect {
						logDebugf("Invalid config network type %s", networkType)
					}
					continue
				}
			}

			hostname = getHostname(hostname, cfg.SourceHostname)

			endpoints := endpointsFromPorts(useSsl, ports, cfg.Name, hostname)
			if endpoints.kvServer != "" {
				if bktType > bktTypeInvalid && i >= lenNodes {
					// nodesExt can list nodes still warming up; don't route KV to them.
					logDebugf("KV node present in nodesext but not in nodes for %s", endpoints.kvServer)
				} else {
					kvServerList = append(kvServerList, endpoints.kvServer)
				}
			}
			if endpoints.capiEp != "" {
				capiEpList = append(capiEpList, endpoints.capiEp)
			}
			if endpoints.mgmtEp != "" {
				mgmtEpList = append(mgmtEpList, endpoints.mgmtEp)
			}
			if endpoints.n1qlEp != "" {
				n1qlEpList = append(n1qlEpList, endpoints.n1qlEp)
			}
			if endpoints.ftsEp != "" {
				ftsEpList = append(ftsEpList, endpoints.ftsEp)
			}
			if endpoints.cbasEp != "" {
				cbasEpList = append(cbasEpList, endpoints.cbasEp)
			}
		}
	} else {
		// Legacy path: no nodesExt in the config, fall back to nodes.
		if useSsl {
			logErrorf("Received config without nodesExt while SSL is enabled. Generating invalid config.")
			return &routeConfig{}
		}

		if bktType == bktTypeCouchbase {
			kvServerList = cfg.VBucketServerMap.ServerList
		}

		for _, node := range cfg.Nodes {
			if node.CouchAPIBase != "" {
				// Slice off the UUID as Go's HTTP client cannot handle being passed URL-Encoded path values.
				capiEp := strings.SplitN(node.CouchAPIBase, "%2B", 2)[0]

				capiEpList = append(capiEpList, capiEp)
			}

			if node.Hostname != "" {
				mgmtEpList = append(mgmtEpList, fmt.Sprintf("http://%s", node.Hostname))
			}

			if bktType == bktTypeMemcached {
				// Get the data port. No VBucketServerMap.
				host, err := hostFromHostPort(node.Hostname)
				if err != nil {
					logErrorf("Encountered invalid memcached host/port string. Ignoring node.")
					continue
				}

				curKvHost := fmt.Sprintf("%s:%d", host, node.Ports["direct"])
				kvServerList = append(kvServerList, curKvHost)
			}
		}
	}

	rc := &routeConfig{
		revID:                  cfg.Rev,
		uuid:                   cfg.UUID,
		name:                   cfg.Name,
		kvServerList:           kvServerList,
		capiEpList:             capiEpList,
		mgmtEpList:             mgmtEpList,
		n1qlEpList:             n1qlEpList,
		ftsEpList:              ftsEpList,
		cbasEpList:             cbasEpList,
		bktType:                bktType,
		clusterCapabilities:    cfg.ClusterCapabilities,
		clusterCapabilitiesVer: cfg.ClusterCapabilitiesVer,
		bucketCapabilities:     cfg.Capabilities,
		bucketCapabilitiesVer:  cfg.CapabilitiesVersion,
	}

	// Build the key->node mapping appropriate for the bucket type.
	if bktType == bktTypeCouchbase {
		vbMap := cfg.VBucketServerMap.VBucketMap
		numReplicas := cfg.VBucketServerMap.NumReplicas
		rc.vbMap = newVbucketMap(vbMap, numReplicas)
	} else if bktType == bktTypeMemcached {
		rc.ketamaMap = newKetamaContinuum(kvServerList)
	}

	return rc
}
// serverEps collects one node's per-service endpoints; an empty string
// means the node does not expose that service.
type serverEps struct {
	kvServer string // host:port (no scheme; memd, not HTTP)
	capiEp   string
	mgmtEp   string
	n1qlEp   string
	ftsEp    string
	cbasEp   string
}
// getHostname resolves a node's effective hostname: an empty hostname means
// "the node we connected to", and bare IPv6 literals are bracket-wrapped for
// the rest of the system.
func getHostname(hostname, sourceHostname string) string {
	if hostname == "" {
		// Note that the SourceHostname will already be IPv6 wrapped.
		return sourceHostname
	}

	// We need to detect an IPv6 address here and wrap it in the appropriate
	// [] block to indicate its IPv6 for the rest of the system.
	if strings.Contains(hostname, ":") {
		return "[" + hostname + "]"
	}

	return hostname
}
// endpointsFromPorts converts a node's advertised service ports into
// concrete endpoint strings, using either the TLS or plaintext port set.
// Services with a zero port are omitted.
func endpointsFromPorts(useSsl bool, ports cfgNodeServices, name, hostname string) *serverEps {
	lists := &serverEps{}

	if useSsl {
		// Fix: each SSL endpoint is now guarded on its own SSL port. The
		// previous code checked the plaintext port (ports.Capi etc.) while
		// emitting the SSL port, which could produce ":0" endpoints for
		// services advertising only a plaintext port — inconsistent with the
		// KvSsl guard above it.
		if ports.KvSsl > 0 {
			lists.kvServer = fmt.Sprintf("%s:%d", hostname, ports.KvSsl)
		}
		if ports.CapiSsl > 0 {
			lists.capiEp = fmt.Sprintf("https://%s:%d/%s", hostname, ports.CapiSsl, name)
		}
		if ports.MgmtSsl > 0 {
			lists.mgmtEp = fmt.Sprintf("https://%s:%d", hostname, ports.MgmtSsl)
		}
		if ports.N1qlSsl > 0 {
			lists.n1qlEp = fmt.Sprintf("https://%s:%d", hostname, ports.N1qlSsl)
		}
		if ports.FtsSsl > 0 {
			lists.ftsEp = fmt.Sprintf("https://%s:%d", hostname, ports.FtsSsl)
		}
		if ports.CbasSsl > 0 {
			lists.cbasEp = fmt.Sprintf("https://%s:%d", hostname, ports.CbasSsl)
		}
	} else {
		if ports.Kv > 0 {
			lists.kvServer = fmt.Sprintf("%s:%d", hostname, ports.Kv)
		}
		if ports.Capi > 0 {
			lists.capiEp = fmt.Sprintf("http://%s:%d/%s", hostname, ports.Capi, name)
		}
		if ports.Mgmt > 0 {
			lists.mgmtEp = fmt.Sprintf("http://%s:%d", hostname, ports.Mgmt)
		}
		if ports.N1ql > 0 {
			lists.n1qlEp = fmt.Sprintf("http://%s:%d", hostname, ports.N1ql)
		}
		if ports.Fts > 0 {
			lists.ftsEp = fmt.Sprintf("http://%s:%d", hostname, ports.Fts)
		}
		if ports.Cbas > 0 {
			lists.cbasEp = fmt.Sprintf("http://%s:%d", hostname, ports.Cbas)
		}
	}

	return lists
}
// hostFromHostPort extracts the host part of a host:port string, re-wrapping
// IPv6 literals in brackets (net.SplitHostPort strips them).
func hostFromHostPort(hostport string) (string, error) {
	host, _, err := net.SplitHostPort(hostport)
	if err != nil {
		return "", err
	}

	if !strings.Contains(host, ":") {
		return host, nil
	}

	// If this is an IPv6 address, we need to rewrap it in [].
	return "[" + host + "]", nil
}
// parseConfig unmarshals a raw cluster config blob, substituting the
// server-side $HOST placeholder with the host the config was fetched from
// and recording that host on the result.
func parseConfig(config []byte, srcHost string) (*cfgBucket, error) {
	configStr := strings.Replace(string(config), "$HOST", srcHost, -1)

	bk := new(cfgBucket)
	if err := json.Unmarshal([]byte(configStr), bk); err != nil {
		return nil, err
	}

	bk.SourceHostname = srcHost
	return bk, nil
}

View File

@ -0,0 +1,184 @@
package gocbcore
import (
"sync"
)
// configManagementComponent receives raw bucket configs, converts them to
// route configs, and fans valid updates out to registered watchers.
type configManagementComponent struct {
	useSSL      bool
	networkType string

	currentConfig *routeConfig // last config successfully applied

	cfgChangeWatchers []routeConfigWatcher
	watchersLock      sync.Mutex // guards cfgChangeWatchers

	srcServers []string // seed addresses used to pick a network type

	seenConfig bool // true once any valid config has been applied
}

// configManagerProperties holds the construction options for newConfigManager.
type configManagerProperties struct {
	UseSSL       bool
	NetworkType  string
	SrcMemdAddrs []string
	SrcHTTPAddrs []string
}

// routeConfigWatcher is implemented by components that want to be notified
// of new route configs.
type routeConfigWatcher interface {
	OnNewRouteConfig(cfg *routeConfig)
}

// configManager is the registration interface exposed to watchers.
type configManager interface {
	AddConfigWatcher(watcher routeConfigWatcher)
	RemoveConfigWatcher(watcher routeConfigWatcher)
}
// newConfigManager builds a configManagementComponent from the given
// properties, seeded with a sentinel config (revID -1) so that any real
// config is accepted as newer.
func newConfigManager(props configManagerProperties) *configManagementComponent {
	// Fix: combine the address lists into a freshly allocated slice.
	// `append(props.SrcMemdAddrs, props.SrcHTTPAddrs...)` could write the
	// HTTP addresses into the caller's SrcMemdAddrs backing array when it
	// had spare capacity.
	srcServers := make([]string, 0, len(props.SrcMemdAddrs)+len(props.SrcHTTPAddrs))
	srcServers = append(srcServers, props.SrcMemdAddrs...)
	srcServers = append(srcServers, props.SrcHTTPAddrs...)

	return &configManagementComponent{
		useSSL:      props.UseSSL,
		networkType: props.NetworkType,
		srcServers:  srcServers,
		currentConfig: &routeConfig{
			revID: -1,
		},
	}
}
// OnNewConfig converts a raw bucket config into a route config, validates
// it, applies it, and notifies all registered watchers. Invalid or stale
// configs are dropped.
func (cm *configManagementComponent) OnNewConfig(cfg *cfgBucket) {
	var routeCfg *routeConfig
	if cm.seenConfig {
		routeCfg = cfg.BuildRouteConfig(cm.useSSL, cm.networkType, false)
	} else {
		// First config: this path also settles which network type to use.
		routeCfg = cm.buildFirstRouteConfig(cfg)
		logDebugf("Using network type %s for connections", cm.networkType)
	}
	if !routeCfg.IsValid() {
		logDebugf("Routing data is not valid, skipping update: \n%s", routeCfg.DebugString())
		return
	}

	// There's something wrong with this route config so don't send it to the watchers.
	if !cm.updateRouteConfig(routeCfg) {
		return
	}

	logDebugf("Sending out mux routing data (update)...")
	logDebugf("New Routing Data:\n%s", routeCfg.DebugString())

	cm.seenConfig = true

	// We can end up deadlocking if we iterate whilst in the lock and a watcher decides to remove itself.
	cm.watchersLock.Lock()
	watchers := make([]routeConfigWatcher, len(cm.cfgChangeWatchers))
	copy(watchers, cm.cfgChangeWatchers)
	cm.watchersLock.Unlock()

	for _, watcher := range watchers {
		watcher.OnNewRouteConfig(routeCfg)
	}
}
// AddConfigWatcher registers a watcher to be notified whenever a new
// route config is accepted.
func (cm *configManagementComponent) AddConfigWatcher(watcher routeConfigWatcher) {
	cm.watchersLock.Lock()
	defer cm.watchersLock.Unlock()
	cm.cfgChangeWatchers = append(cm.cfgChangeWatchers, watcher)
}
// RemoveConfigWatcher unregisters a previously added route config watcher.
// If the same watcher was registered multiple times only the last matching
// entry is removed; if it was never registered the call is a no-op.
//
// Bug fix: the previous implementation left idx at its zero value when the
// watcher was not found, which removed an unrelated watcher (element 0);
// its `idx == len(...)` branch was unreachable since range indices are
// always < len. We now track "not found" explicitly with -1.
func (cm *configManagementComponent) RemoveConfigWatcher(watcher routeConfigWatcher) {
	cm.watchersLock.Lock()
	defer cm.watchersLock.Unlock()

	// Locate the last occurrence of the watcher, -1 when absent.
	idx := -1
	for i, w := range cm.cfgChangeWatchers {
		if w == watcher {
			idx = i
		}
	}
	if idx == -1 {
		// Watcher was never registered (or already removed): nothing to do.
		return
	}

	cm.cfgChangeWatchers = append(cm.cfgChangeWatchers[:idx], cm.cfgChangeWatchers[idx+1:]...)
}
// updateRouteConfig decides whether cfg should replace the current route
// config, returning true when it was accepted (and stored).
//
// We should never be receiving concurrent updates and nothing should be accessing
// our internal route config so we shouldn't need to lock here.
func (cm *configManagementComponent) updateRouteConfig(cfg *routeConfig) bool {
	oldCfg := cm.currentConfig
	// Check some basic things to ensure consistency!
	// Only compare against the old config once a real one has been seen
	// (the initial placeholder has revID -1).
	if oldCfg.revID > -1 {
		// A vbucket map appearing/disappearing between configs signals an
		// inconsistent config stream.
		if (cfg.vbMap == nil) != (oldCfg.vbMap == nil) {
			logErrorf("Received a configuration with a different number of vbuckets. Ignoring.")
			return false
		}
		if cfg.vbMap != nil && cfg.vbMap.NumVbuckets() != oldCfg.vbMap.NumVbuckets() {
			logErrorf("Received a configuration with a different number of vbuckets. Ignoring.")
			return false
		}
	}
	// Check that the new config data is newer than the current one, in the case where we've done a select bucket
	// against an existing connection then the revisions could be the same. In that case the configuration still
	// needs to be applied.
	if cfg.revID == 0 {
		logDebugf("Unversioned configuration data, switching.")
	} else if cfg.bktType != oldCfg.bktType {
		logDebugf("Configuration data changed bucket type, switching.")
	} else if cfg.revID == oldCfg.revID {
		logDebugf("Ignoring configuration with identical revision number")
		return false
	} else if cfg.revID < oldCfg.revID {
		logDebugf("Ignoring new configuration as it has an older revision id")
		return false
	}
	cm.currentConfig = cfg
	return true
}
// buildFirstRouteConfig builds the very first route config and, when the
// network type is "auto" (or unset), resolves which network type to use
// for all subsequent configs by matching the seed servers against the
// endpoints advertised by the config. It mutates cm.networkType as a side
// effect.
func (cm *configManagementComponent) buildFirstRouteConfig(config *cfgBucket) *routeConfig {
	// An explicit, non-auto network type short-circuits detection.
	if cm.networkType != "" && cm.networkType != "auto" {
		return config.BuildRouteConfig(cm.useSSL, cm.networkType, true)
	}
	defaultRouteConfig := config.BuildRouteConfig(cm.useSSL, "default", true)
	// Iterate over all of the source servers and check if any addresses match as default or external network types
	for _, srcServer := range cm.srcServers {
		// First we check if the source server is from the defaults list
		srcInDefaultConfig := false
		for _, endpoint := range defaultRouteConfig.kvServerList {
			if endpoint == srcServer {
				srcInDefaultConfig = true
			}
		}
		for _, endpoint := range defaultRouteConfig.mgmtEpList {
			if endpoint == srcServer {
				srcInDefaultConfig = true
			}
		}
		if srcInDefaultConfig {
			cm.networkType = "default"
			return defaultRouteConfig
		}
		// Next lets see if we have an external config, if so, default to that
		externalRouteCfg := config.BuildRouteConfig(cm.useSSL, "external", true)
		if externalRouteCfg.IsValid() {
			cm.networkType = "external"
			return externalRouteCfg
		}
	}
	// If all else fails, default to the implicit default config
	cm.networkType = "default"
	return defaultRouteConfig
}
// NetworkType returns the network type in use by this component — either
// the configured value or the one resolved from the first config (see
// buildFirstRouteConfig).
func (cm *configManagementComponent) NetworkType() string {
	return cm.networkType
}

View File

@ -0,0 +1,95 @@
package gocbcore
// ConfigSnapshot is a snapshot of the underlying configuration currently in use.
type ConfigSnapshot struct {
	state *kvMuxState // the mux state captured by this snapshot
}
// RevID returns the config revision for this snapshot.
func (pi ConfigSnapshot) RevID() int64 {
	return pi.state.revID
}
// KeyToVbucket translates a particular key to its assigned vbucket.
// Returns errUnsupportedOperation when the config has no vbucket map
// (e.g. when routing is ketama-based instead).
func (pi ConfigSnapshot) KeyToVbucket(key []byte) (uint16, error) {
	if pi.state.vbMap == nil {
		return 0, errUnsupportedOperation
	}
	return pi.state.vbMap.VbucketByKey(key), nil
}
// KeyToServer translates a particular key to its assigned server index.
// The vbucket map is preferred when present, falling back to the ketama
// map; with neither available an internal client error is returned.
func (pi ConfigSnapshot) KeyToServer(key []byte, replicaIdx uint32) (int, error) {
	if vbMap := pi.state.vbMap; vbMap != nil {
		idx, err := vbMap.NodeByKey(key, replicaIdx)
		if err != nil {
			return 0, err
		}
		return idx, nil
	}
	if ketama := pi.state.ketamaMap; ketama != nil {
		idx, err := ketama.NodeByKey(key)
		if err != nil {
			return 0, err
		}
		return idx, nil
	}
	return 0, errCliInternalError
}
// VbucketToServer returns the server index for a particular vbucket.
// Returns errUnsupportedOperation when the config has no vbucket map.
func (pi ConfigSnapshot) VbucketToServer(vbID uint16, replicaIdx uint32) (int, error) {
	vbMap := pi.state.vbMap
	if vbMap == nil {
		return 0, errUnsupportedOperation
	}

	idx, err := vbMap.NodeByVbucket(vbID, replicaIdx)
	if err != nil {
		return 0, err
	}
	return idx, nil
}
// VbucketsOnServer returns the list of VBuckets for a server.
// Returns errUnsupportedOperation when the config has no vbucket map.
func (pi ConfigSnapshot) VbucketsOnServer(index int) ([]uint16, error) {
	if pi.state.vbMap == nil {
		return nil, errUnsupportedOperation
	}
	return pi.state.vbMap.VbucketsOnServer(index)
}
// NumVbuckets returns the number of VBuckets configured on the
// connected cluster.
// Returns errUnsupportedOperation when the config has no vbucket map.
func (pi ConfigSnapshot) NumVbuckets() (int, error) {
	if pi.state.vbMap == nil {
		return 0, errUnsupportedOperation
	}
	return pi.state.vbMap.NumVbuckets(), nil
}
// NumReplicas returns the number of replicas configured on the
// connected cluster.
// Returns errUnsupportedOperation when the config has no vbucket map.
func (pi ConfigSnapshot) NumReplicas() (int, error) {
	if pi.state.vbMap == nil {
		return 0, errUnsupportedOperation
	}
	return pi.state.vbMap.NumReplicas(), nil
}
// NumServers returns the number of servers accessible for K/V.
// This is derived from the number of pipelines in the mux state; the
// error return exists for interface symmetry and is always nil.
func (pi ConfigSnapshot) NumServers() (int, error) {
	return pi.state.NumPipelines(), nil
}
// BucketUUID returns the UUID of the bucket we are connected to.
func (pi ConfigSnapshot) BucketUUID() string {
	return pi.state.uuid
}

View File

@ -0,0 +1,45 @@
# Couchbase Connection Strings for Go
This library allows you to parse and resolve Couchbase Connection Strings in Go.
This is used by the Couchbase Go SDK, as well as various tools throughout the
Couchbase infrastructure.
## Using the Library
To parse a connection string, simply call `Parse` with your connection string.
You will receive a `ConnSpec` structure representing the connection string:
```go
type Address struct {
Host string
Port int
}
type ConnSpec struct {
Scheme string
Addresses []Address
Bucket string
Options map[string][]string
}
```
Once you have a parsed connection string, you can also use our resolver to take
the `ConnSpec` and resolve any DNS SRV records as well as generate a list of
endpoints for the Couchbase server. You will receive a `ResolvedConnSpec`
structure in return:
```go
type ResolvedConnSpec struct {
UseSsl bool
MemdHosts []Address
HttpHosts []Address
Bucket string
Options map[string][]string
}
```
## License
Copyright 2020 Couchbase Inc.
Licensed under the Apache License, Version 2.0.

View File

@ -0,0 +1,317 @@
package connstr
import (
"errors"
"fmt"
"net"
"net/url"
"regexp"
"strconv"
"strings"
)
// Default ports used when a connection string does not specify one
// explicitly; the SSL variants apply when the "couchbases" scheme is used.
const (
	// DefaultHttpPort is the default HTTP port to use to connect to Couchbase Server.
	DefaultHttpPort = 8091
	// DefaultSslHttpPort is the default HTTPS port to use to connect to Couchbase Server.
	DefaultSslHttpPort = 18091
	// DefaultMemdPort is the default memd port to use to connect to Couchbase Server.
	DefaultMemdPort = 11210
	// DefaultSslMemdPort is the default memd SSL port to use to connect to Couchbase Server.
	DefaultSslMemdPort = 11207
)
// hostIsIpAddress reports whether host is an IP literal: either a
// bracketed IPv6 address or anything net.ParseIP accepts (IPv4 or
// unbracketed IPv6).
func hostIsIpAddress(host string) bool {
	if strings.HasPrefix(host, "[") {
		// Bracketed hosts are IPv6 literals.
		return true
	}
	return net.ParseIP(host) != nil
}

// Address represents a host:port pair. A Port of -1 means no port was
// specified.
type Address struct {
	Host string
	Port int
}

// ConnSpec describes a connection specification.
type ConnSpec struct {
	Scheme    string
	Addresses []Address
	Bucket    string
	Options   map[string][]string
}

// srvRecord returns the (service, proto, name) triple for a DNS SRV
// lookup and whether SRV resolution applies to this spec at all.
func (spec ConnSpec) srvRecord() (string, string, string, bool) {
	// Only `couchbase`-type schemes allow SRV records.
	if spec.Scheme != "couchbase" && spec.Scheme != "couchbases" {
		return "", "", "", false
	}
	// Must have only a single host, with no port specified.
	if len(spec.Addresses) != 1 || spec.Addresses[0].Port != -1 {
		return "", "", "", false
	}
	// SRV names must be hostnames, not IP literals.
	if hostIsIpAddress(spec.Addresses[0].Host) {
		return "", "", "", false
	}
	return spec.Scheme, "tcp", spec.Addresses[0].Host, true
}

// SrvRecordName returns the record name for the ConnSpec, or "" when SRV
// resolution does not apply to it.
func (spec ConnSpec) SrvRecordName() (recordName string) {
	scheme, proto, host, isValid := spec.srvRecord()
	if !isValid {
		return ""
	}
	return fmt.Sprintf("_%s._%s.%s", scheme, proto, host)
}

// GetOption returns all values recorded for the named option, or nil when
// the option was not specified.
func (spec ConnSpec) GetOption(name string) []string {
	if opt, ok := spec.Options[name]; ok {
		return opt
	}
	return nil
}

// GetOptionString returns the first value recorded for the named option,
// or "" when the option was not specified.
func (spec ConnSpec) GetOptionString(name string) string {
	opts := spec.GetOption(name)
	if len(opts) > 0 {
		return opts[0]
	}
	return ""
}

// The connection-string matchers are compiled once at package init rather
// than on every Parse call; regexp compilation is comparatively expensive
// and the patterns are constant.
var (
	connStrPartMatcher = regexp.MustCompile(`((.*):\/\/)?(([^\/?:]*)(:([^\/?:@]*))?@)?([^\/?]*)(\/([^\?]*))?(\?(.*))?`)
	connStrHostMatcher = regexp.MustCompile(`((\[[^\]]+\]+)|([^;\,\:]+))(:([0-9]*))?(;\,)?`)
)

// Parse parses the connection string into a ConnSpec.
// The overall shape is [scheme://][user[:pass]@]hosts[/bucket][?options],
// with hosts being a comma/semicolon separated list of host[:port].
func Parse(connStr string) (out ConnSpec, err error) {
	// The pattern matches any input (every group is optional), so parts is
	// never nil.
	parts := connStrPartMatcher.FindStringSubmatch(connStr)
	if parts[2] != "" {
		out.Scheme = parts[2]
		switch out.Scheme {
		case "couchbase", "couchbases", "http":
			// Recognized schemes.
		default:
			err = errors.New("bad scheme")
			return
		}
	}
	if parts[7] != "" {
		hosts := connStrHostMatcher.FindAllStringSubmatch(parts[7], -1)
		for _, hostInfo := range hosts {
			address := Address{
				Host: hostInfo[1],
				Port: -1, // -1 marks "no port specified"
			}
			if hostInfo[5] != "" {
				address.Port, err = strconv.Atoi(hostInfo[5])
				if err != nil {
					return
				}
			}
			out.Addresses = append(out.Addresses, address)
		}
	}
	if parts[9] != "" {
		// Bucket names may be URL-escaped in the connection string.
		out.Bucket, err = url.QueryUnescape(parts[9])
		if err != nil {
			return
		}
	}
	if parts[11] != "" {
		out.Options, err = url.ParseQuery(parts[11])
		if err != nil {
			return
		}
	}
	return
}

// String reassembles the ConnSpec into connection-string form. Addresses
// without a port (Port < 0) are emitted as a bare host.
func (spec ConnSpec) String() string {
	var out string
	if spec.Scheme != "" {
		out += fmt.Sprintf("%s://", spec.Scheme)
	}
	for i, address := range spec.Addresses {
		if i > 0 {
			out += ","
		}
		if address.Port >= 0 {
			out += fmt.Sprintf("%s:%d", address.Host, address.Port)
		} else {
			out += address.Host
		}
	}
	if spec.Bucket != "" {
		out += "/"
		out += spec.Bucket
	}
	urlOptions := url.Values(spec.Options)
	if len(urlOptions) > 0 {
		out += "?" + urlOptions.Encode()
	}
	return out
}
// ResolvedConnSpec is the result of resolving a ConnSpec.
type ResolvedConnSpec struct {
	UseSsl    bool      // true when an SSL scheme ("couchbases") was used
	MemdHosts []Address // memcached (K/V) endpoints
	HttpHosts []Address // HTTP endpoints
	Bucket    string
	Options   map[string][]string
}
// Resolve parses a ConnSpec into a ResolvedConnSpec, performing a DNS SRV
// lookup where applicable and filling in default hosts/ports when the
// spec omits them. Note that this may perform network I/O (net.LookupSRV).
func Resolve(connSpec ConnSpec) (out ResolvedConnSpec, err error) {
	defaultPort := 0
	hasExplicitScheme := false
	isHttpScheme := false
	useSsl := false
	switch connSpec.Scheme {
	case "couchbase":
		defaultPort = DefaultMemdPort
		hasExplicitScheme = true
		isHttpScheme = false
		useSsl = false
	case "couchbases":
		defaultPort = DefaultSslMemdPort
		hasExplicitScheme = true
		isHttpScheme = false
		useSsl = true
	case "http":
		defaultPort = DefaultHttpPort
		hasExplicitScheme = true
		isHttpScheme = true
		useSsl = false
	case "":
		// No scheme behaves like http but is treated as implicit below.
		defaultPort = DefaultHttpPort
		hasExplicitScheme = false
		isHttpScheme = true
		useSsl = false
	default:
		err = errors.New("bad scheme")
		return
	}
	var srvRecords []*net.SRV
	srvScheme, srvProto, srvHost, srvIsValid := connSpec.srvRecord()
	if srvIsValid {
		// Best-effort lookup: err is deliberately shadowed here so a failed
		// SRV lookup falls through to the non-SRV paths below.
		_, addrs, err := net.LookupSRV(srvScheme, srvProto, srvHost)
		if err == nil && len(addrs) > 0 {
			srvRecords = addrs
		}
	}
	if srvRecords != nil {
		// SRV results provide memd endpoints directly (trailing dot trimmed
		// from the DNS target).
		for _, srv := range srvRecords {
			out.MemdHosts = append(out.MemdHosts, Address{
				Host: strings.TrimSuffix(srv.Target, "."),
				Port: int(srv.Port),
			})
		}
	} else if len(connSpec.Addresses) == 0 {
		// No hosts specified at all: assume localhost with default ports.
		if useSsl {
			out.MemdHosts = append(out.MemdHosts, Address{
				Host: "127.0.0.1",
				Port: DefaultSslMemdPort,
			})
			out.HttpHosts = append(out.HttpHosts, Address{
				Host: "127.0.0.1",
				Port: DefaultSslHttpPort,
			})
		} else {
			out.MemdHosts = append(out.MemdHosts, Address{
				Host: "127.0.0.1",
				Port: DefaultMemdPort,
			})
			out.HttpHosts = append(out.HttpHosts, Address{
				Host: "127.0.0.1",
				Port: DefaultHttpPort,
			})
		}
	} else {
		for _, address := range connSpec.Addresses {
			hasExplicitPort := address.Port > 0
			// Without a scheme we cannot tell what service a custom port
			// refers to.
			if !hasExplicitScheme && hasExplicitPort && address.Port != defaultPort {
				err = errors.New("ambiguous port without scheme")
				return
			}
			if hasExplicitScheme && !isHttpScheme && address.Port == DefaultHttpPort {
				err = errors.New("couchbase://host:8091 not supported for couchbase:// scheme. Use couchbase://host")
				return
			}
			if address.Port <= 0 || address.Port == defaultPort || address.Port == DefaultHttpPort {
				// Default (or absent) port: emit both memd and HTTP
				// endpoints with their standard ports.
				if useSsl {
					out.MemdHosts = append(out.MemdHosts, Address{
						Host: address.Host,
						Port: DefaultSslMemdPort,
					})
					out.HttpHosts = append(out.HttpHosts, Address{
						Host: address.Host,
						Port: DefaultSslHttpPort,
					})
				} else {
					out.MemdHosts = append(out.MemdHosts, Address{
						Host: address.Host,
						Port: DefaultMemdPort,
					})
					out.HttpHosts = append(out.HttpHosts, Address{
						Host: address.Host,
						Port: DefaultHttpPort,
					})
				}
			} else {
				// Custom port: the scheme decides which service it names.
				if !isHttpScheme {
					out.MemdHosts = append(out.MemdHosts, Address{
						Host: address.Host,
						Port: address.Port,
					})
				} else {
					out.HttpHosts = append(out.HttpHosts, Address{
						Host: address.Host,
						Port: address.Port,
					})
				}
			}
		}
	}
	out.UseSsl = useSsl
	out.Bucket = connSpec.Bucket
	out.Options = connSpec.Options
	return
}

88
vendor/github.com/couchbase/gocbcore/v9/constants.go generated vendored Normal file
View File

@ -0,0 +1,88 @@
package gocbcore
const (
	// goCbCoreVersionStr is the version string for this gocbcore release.
	goCbCoreVersionStr = "v9.0.4"
)

// bucketType identifies the kind of bucket a config describes.
type bucketType int

const (
	bktTypeNone                 = -1
	bktTypeInvalid   bucketType = 0
	bktTypeCouchbase            = iota // == 2 (iota counts from the top of the block)
	bktTypeMemcached            = iota // == 3
)

// ServiceType specifies a particular Couchbase service type.
type ServiceType int

const (
	// MemdService represents a memcached service.
	MemdService = ServiceType(1)
	// MgmtService represents a management service (typically ns_server).
	MgmtService = ServiceType(2)
	// CapiService represents a CouchAPI service (typically for views).
	CapiService = ServiceType(3)
	// N1qlService represents a N1QL service (typically for query).
	N1qlService = ServiceType(4)
	// FtsService represents a full-text-search service.
	FtsService = ServiceType(5)
	// CbasService represents an analytics service.
	CbasService = ServiceType(6)
)

// DcpAgentPriority specifies the priority level for a dcp stream
type DcpAgentPriority uint8

const (
	// DcpAgentPriorityLow sets the priority for the dcp stream to low
	DcpAgentPriorityLow = DcpAgentPriority(0)

	// DcpAgentPriorityMed sets the priority for the dcp stream to medium
	DcpAgentPriorityMed = DcpAgentPriority(1)

	// DcpAgentPriorityHigh sets the priority for the dcp stream to high
	DcpAgentPriorityHigh = DcpAgentPriority(2)
)

// durabilityLevelStatus tracks whether the connected server supports
// durability levels (unknown until discovered).
type durabilityLevelStatus uint32

const (
	durabilityLevelStatusUnknown     = durabilityLevelStatus(0x00)
	durabilityLevelStatusSupported   = durabilityLevelStatus(0x01)
	durabilityLevelStatusUnsupported = durabilityLevelStatus(0x02)
)

// createAsDeletedStatus tracks whether the connected server supports the
// create-as-deleted document flag (unknown until discovered).
type createAsDeletedStatus uint32

const (
	createAsDeletedStatusUnknown     = createAsDeletedStatus(0x00)
	createAsDeletedStatusSupported   = createAsDeletedStatus(0x01)
	createAsDeletedStatusUnsupported = createAsDeletedStatus(0x02)
)

// ClusterCapability represents a capability that the cluster supports
type ClusterCapability uint32

const (
	// ClusterCapabilityEnhancedPreparedStatements represents that the cluster supports enhanced prepared statements.
	ClusterCapabilityEnhancedPreparedStatements = ClusterCapability(0x01)
)

// DCPBackfillOrder represents the order in which vBuckets will be backfilled by the cluster.
type DCPBackfillOrder uint8

const (
	// DCPBackfillOrderRoundRobin means that all the requested vBuckets will be backfilled together where each vBucket
	// has some data backfilled before moving on to the next. This is the default behaviour.
	DCPBackfillOrderRoundRobin DCPBackfillOrder = iota + 1

	// DCPBackfillOrderSequential means that all the data for the first vBucket will be streamed before advancing onto
	// the next vBucket.
	DCPBackfillOrderSequential
)

19
vendor/github.com/couchbase/gocbcore/v9/crud.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
package gocbcore
// Cas represents a unique revision of a document. This can be used
// to perform optimistic locking.
type Cas uint64

// VbUUID represents a unique identifier for a particular vbucket history.
type VbUUID uint64

// SeqNo is a sequential mutation number indicating the order and precise
// position of a write that has occurred.
type SeqNo uint64

// MutationToken represents a particular mutation within the cluster.
type MutationToken struct {
	VbID   uint16 // the vbucket the mutation occurred in
	VbUUID VbUUID // identifies the vbucket's history branch
	SeqNo  SeqNo  // the mutation's position within that history
}

Some files were not shown because too many files have changed in this diff Show More