Godep updating dependencies

This commit is contained in:
Armon Dadgar 2015-06-30 12:07:00 -07:00
parent e025c33ab9
commit 19b127f9f2
78 changed files with 27672 additions and 8 deletions

25
Godeps/Godeps.json generated
View file

@ -1,9 +1,6 @@
{
"ImportPath": "github.com/hashicorp/vault",
"GoVersion": "go1.4.2",
"Packages": [
"./..."
],
"Deps": [
{
"ImportPath": "github.com/armon/go-metrics",
@ -67,16 +64,24 @@
"Comment": "v1-14-g406aa05",
"Rev": "406aa05eb8272fb8aa201e410afa6f9fdcb2bf68"
},
{
"ImportPath": "github.com/go-ldap/ldap",
"Comment": "v1-14-g406aa05",
"Rev": "406aa05eb8272fb8aa201e410afa6f9fdcb2bf68"
},
{
"ImportPath": "github.com/go-sql-driver/mysql",
"Comment": "v1.2-112-gfb72997",
"Rev": "fb7299726d2e68745a8805b14f2ff44b5c2cfa84"
},
{
"ImportPath": "github.com/gocql/gocql",
"Comment": "1st_gen_framing-187-g80e812a",
"Rev": "80e812acf0ab386dd34271acc10d22514c0a67ba"
},
{
"ImportPath": "github.com/golang/groupcache/lru",
"Rev": "604ed5785183e59ae2789449d89e73f3a2a77987"
},
{
"ImportPath": "github.com/golang/snappy/snappy",
"Rev": "eaa750b9bf4dcb7cb20454be850613b66cda3273"
},
{
"ImportPath": "github.com/google/go-github/github",
"Rev": "fccd5bb66f985db0a0d150342ca0a9529a23488a"
@ -196,6 +201,10 @@
"ImportPath": "gopkg.in/asn1-ber.v1",
"Comment": "v1",
"Rev": "9eae18c3681ae3d3c677ac2b80a8fe57de45fc09"
},
{
"ImportPath": "speter.net/go/exp/math/dec/inf",
"Rev": "42ca6cd68aa922bc3f32f1e056e61b65945d9ad7"
}
]
}

View file

@ -0,0 +1,4 @@
gocql-fuzz
fuzz-corpus
fuzz-work
gocql.test

View file

@ -0,0 +1,42 @@
language: go
sudo: false
cache:
directories:
- $HOME/.ccm/repository
matrix:
fast_finish: true
env:
global:
- GOMAXPROCS=2
matrix:
- CASS=1.2.19 AUTH=false
- CASS=2.0.14 AUTH=false
- CASS=2.1.5 AUTH=false
- CASS=2.1.5 AUTH=true
go:
- 1.3
- 1.4
install:
- pip install --user cql PyYAML six
- go get golang.org/x/tools/cmd/vet
- go get golang.org/x/tools/cmd/cover
- git clone https://github.com/pcmanus/ccm.git
- pushd ccm
- ./setup.py install --user
- popd
- go get .
script:
- set -e
- go test -v -tags unit
- PATH=$PATH:$HOME/.local/bin bash -x integration.sh $CASS $AUTH
- go vet .
notifications:
- email: false

51
Godeps/_workspace/src/github.com/gocql/gocql/AUTHORS generated vendored Normal file
View file

@ -0,0 +1,51 @@
# This source file refers to The gocql Authors for copyright purposes.
Christoph Hack <christoph@tux21b.org>
Jonathan Rudenberg <jonathan@titanous.com>
Thorsten von Eicken <tve@rightscale.com>
Matt Robenolt <mattr@disqus.com>
Phillip Couto <phillip.couto@stemstudios.com>
Niklas Korz <korz.niklask@gmail.com>
Nimi Wariboko Jr <nimi@channelmeter.com>
Ghais Issa <ghais.issa@gmail.com>
Sasha Klizhentas <klizhentas@gmail.com>
Konstantin Cherkasov <k.cherkasoff@gmail.com>
Ben Hood <0x6e6562@gmail.com>
Pete Hopkins <phopkins@gmail.com>
Chris Bannister <c.bannister@gmail.com>
Maxim Bublis <b@codemonkey.ru>
Alex Zorin <git@zor.io>
Kasper Middelboe Petersen <me@phant.dk>
Harpreet Sawhney <harpreet.sawhney@gmail.com>
Charlie Andrews <charlieandrews.cwa@gmail.com>
Stanislavs Koikovs <stanislavs.koikovs@gmail.com>
Dan Forest <bonjour@dan.tf>
Miguel Serrano <miguelvps@gmail.com>
Stefan Radomski <gibheer@zero-knowledge.org>
Josh Wright <jshwright@gmail.com>
Jacob Rhoden <jacob.rhoden@gmail.com>
Ben Frye <benfrye@gmail.com>
Fred McCann <fred@sharpnoodles.com>
Dan Simmons <dan@simmons.io>
Muir Manders <muir@retailnext.net>
Sankar P <sankar.curiosity@gmail.com>
Julien Da Silva <julien.dasilva@gmail.com>
Dan Kennedy <daniel@firstcs.co.uk>
Nick Dhupia<nick.dhupia@gmail.com>
Yasuharu Goto <matope.ono@gmail.com>
Jeremy Schlatter <jeremy.schlatter@gmail.com>
Matthias Kadenbach <matthias.kadenbach@gmail.com>
Dean Elbaz <elbaz.dean@gmail.com>
Mike Berman <evencode@gmail.com>
Dmitriy Fedorenko <c0va23@gmail.com>
Zach Marcantel <zmarcantel@gmail.com>
James Maloney <jamessagan@gmail.com>
Ashwin Purohit <purohit@gmail.com>
Dan Kinder <dkinder.is.me@gmail.com>
Oliver Beattie <oliver@obeattie.com>
Justin Corpron <justin@retailnext.com>
Miles Delahunty <miles.delahunty@gmail.com>
Zach Badgett <zach.badgett@gmail.com>
Maciek Sakrejda <maciek@heroku.com>
Jeff Mitchell <jeffrey.mitchell@gmail.com>
Baptiste Fontaine <b@ptistefontaine.fr>

View file

@ -0,0 +1,78 @@
# Contributing to gocql
**TL;DR** - this manifesto sets out the bare minimum requirements for submitting a patch to gocql.
This guide outlines the process of landing patches in gocql and the general approach to maintaining the code base.
## Background
The goal of the gocql project is to provide a stable and robust CQL driver for Golang. gocql is a community driven project that is coordinated by a small team of core developers.
## Minimum Requirement Checklist
The following is a check list of requirements that need to be satisfied in order for us to merge your patch:
* You should raise a pull request to gocql/gocql on Github
* The pull request has a title that clearly summarizes the purpose of the patch
* The motivation behind the patch is clearly defined in the pull request summary
* Your name and email have been added to the `AUTHORS` file (for copyright purposes)
* The patch will merge cleanly
* The test coverage does not fall below the critical threshold (currently 64%)
* The merge commit passes the regression test suite on Travis
* `go fmt` has been applied to the submitted code
* Functional changes (i.e. new features or changed behavior) are appropriately documented, either as a godoc or in the README (non-functional changes such as bug fixes may not require documentation)
If there are any requirements that can't be reasonably satisfied, please state this either on the pull request or as part of discussion on the mailing list. Where appropriate, the core team may apply discretion and make an exception to these requirements.
## Beyond The Checklist
In addition to stating the hard requirements, there are a bunch of things that we consider when assessing changes to the library. These soft requirements are helpful pointers of how to get a patch landed quicker and with less fuss.
### General QA Approach
The gocql team needs to consider the ongoing maintainability of the library at all times. Patches that look like they will introduce maintenance issues for the team will not be accepted.
Your patch will get merged quicker if you have decent test cases that provide test coverage for the new behavior you wish to introduce.
Unit tests are good, integration tests are even better. An example of a unit test is `marshal_test.go` - this tests the serialization code in isolation. `cassandra_test.go` is an integration test suite that is executed against every version of Cassandra that gocql supports as part of the CI process on Travis.
That said, the point of writing tests is to provide a safety net to catch regressions, so there is no need to go overboard with tests. Remember that the more tests you write, the more code we will have to maintain. So there's a balance to strike there.
### When It's Too Difficult To Automate Testing
There are legitimate examples of where it is infeasible to write a regression test for a change. Never fear, we will still consider the patch and quite possibly accept the change without a test. The gocql team takes a pragmatic approach to testing. At the end of the day, you could be addressing an issue that is too difficult to reproduce in a test suite, but still occurs in a real production app. In this case, your production app is the test case, and we will have to trust that your change is good.
Examples of pull requests that have been accepted without tests include:
* https://github.com/gocql/gocql/pull/181 - this patch would otherwise require a multi-node cluster to be booted as part of the CI build
* https://github.com/gocql/gocql/pull/179 - this bug can only be reproduced under heavy load in certain circumstances
### Sign Off Procedure
Generally speaking, a pull request can get merged by any one of the core gocql team. If your change is minor, chances are that one team member will just go ahead and merge it there and then. As stated earlier, suitable test coverage will increase the likelihood that a single reviewer will assess and merge your change. If your change has no test coverage, or looks like it may have wider implications for the health and stability of the library, the reviewer may elect to refer the change to another team member to achieve consensus before proceeding. Therefore, the tighter and cleaner your patch is, the quicker it will go through the review process.
### Supported Features
gocql is a low level wire driver for Cassandra CQL. By and large, we would like to keep the functional scope of the library as narrow as possible. We think that gocql should be tight and focused, and we will be naturally sceptical of things that could just as easily be implemented in a higher layer. Inevitably you will come across something that could be implemented in a higher layer, save for a minor change to the core API. In this instance, please strike up a conversation with the gocql team. Chances are we will understand what you are trying to achieve and will try to accommodate this in a maintainable way.
### Longer Term Evolution
There are some long term plans for gocql that have to be taken into account when assessing changes. That said, gocql is ultimately a community driven project and we don't have a massive development budget, so sometimes the long term view might need to be de-prioritized ahead of short term changes.
## Officially Supported Server Versions
Currently, the officially supported versions of the Cassandra server include:
* 1.2.18
* 2.0.9
Chances are that gocql will work with many other versions. If you would like us to support a particular version of Cassandra, please start a conversation about what version you'd like us to consider. We are more likely to accept a new version if you help out by extending the regression suite to cover the new version to be supported.
## The Core Dev Team
The core development team includes:
* tux21b
* phillipCouto
* Zariel
* 0x6e6562

27
Godeps/_workspace/src/github.com/gocql/gocql/LICENSE generated vendored Normal file
View file

@ -0,0 +1,27 @@
Copyright (c) 2012 The gocql Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

202
Godeps/_workspace/src/github.com/gocql/gocql/README.md generated vendored Normal file
View file

@ -0,0 +1,202 @@
gocql
=====
[![Join the chat at https://gitter.im/gocql/gocql](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/gocql/gocql?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Build Status](https://travis-ci.org/gocql/gocql.png?branch=master)](https://travis-ci.org/gocql/gocql)
[![GoDoc](http://godoc.org/github.com/gocql/gocql?status.png)](http://godoc.org/github.com/gocql/gocql)
Package gocql implements a fast and robust Cassandra client for the
Go programming language.
Project Website: http://gocql.github.io/<br>
API documentation: http://godoc.org/github.com/gocql/gocql<br>
Discussions: https://groups.google.com/forum/#!forum/gocql
Production Stability
---------
The underlying framing code was rewritten as part of [#339](https://github.com/gocql/gocql/pull/339) and as such may have
unforeseen bugs. If you run into a bug related to wire framing, please raise a ticket and we will try to resolve this as soon as we can. If you require a stable version to pin your production app against, we have tagged the previous stable version in source code, so you can build against this. The tag is called 1st_gen_framing ([180456fef0a3c6d02c51dc7211f49b55e9315867](https://github.com/gocql/gocql/commit/180456fef0a3c6d02c51dc7211f49b55e9315867)). This note will be removed as the new generation framing code base matures.
Supported Versions
------------------
The following matrix shows the versions of Go and Cassandra that are tested with the integration test suite as part of the CI build:
Go/Cassandra | 1.2.19 | 2.0.14 | 2.1.5
-------------| -------| ------| ---------
1.3 | yes | yes | yes
1.4 | yes | yes | yes
Sunsetting Model
----------------
In general, the gocql team will focus on supporting the current and previous versions of Golang. gocql may still work with older versions of Golang, but official support for these versions will have been sunset.
Installation
------------
go get github.com/gocql/gocql
Features
--------
* Modern Cassandra client using the native transport
* Automatic type conversions between Cassandra and Go
* Support for all common types including sets, lists and maps
* Custom types can implement a `Marshaler` and `Unmarshaler` interface
* Strict type conversions without any loss of precision
* Built-In support for UUIDs (version 1 and 4)
* Support for logged, unlogged and counter batches
* Cluster management
* Automatic reconnect on connection failures with exponential falloff
* Round robin distribution of queries to different hosts
* Round robin distribution of queries to different connections on a host
* Each connection can execute up to n concurrent queries (whereby n is the limit set by the protocol version the client chooses to use)
* Optional automatic discovery of nodes
* Optional support for periodic node discovery via system.peers
* Policy based connection pool with token aware and round-robin policy implementations
* Support for password authentication
* Iteration over paged results with configurable page size
* Support for TLS/SSL
* Optional frame compression (using snappy)
* Automatic query preparation
* Support for query tracing
* Experimental support for [binary protocol version 3](https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v3.spec)
* Support for up to 32768 streams
* Support for tuple types
* Support for client side timestamps by default
* Support for UDTs via a custom marshaller or struct tags
* An API to access the schema metadata of a given keyspace
Please visit the [Roadmap](https://github.com/gocql/gocql/wiki/Roadmap) page to see what is on the horizon.
Important Default Keyspace Changes
----------------------------------
gocql no longer supports executing "use <keyspace>" statements to simplify the library. The user still has the
ability to define the default keyspace for connections but now the keyspace can only be defined before a
session is created. Queries can still access keyspaces by indicating the keyspace in the query:
```sql
SELECT * FROM example2.table;
```
Example of correct usage:
```go
cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3")
cluster.Keyspace = "example"
...
session, err := cluster.CreateSession()
```
Example of incorrect usage:
```go
cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3")
cluster.Keyspace = "example"
...
session, err := cluster.CreateSession()
if err = session.Query("use example2").Exec(); err != nil {
log.Fatal(err)
}
```
This will result in an err being returned from the session.Query line as the user is trying to execute a "use"
statement.
Example
-------
```go
/* Before you execute the program, Launch `cqlsh` and execute:
create keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };
create table example.tweet(timeline text, id UUID, text text, PRIMARY KEY(id));
create index on example.tweet(timeline);
*/
package main
import (
"fmt"
"log"
"github.com/gocql/gocql"
)
func main() {
// connect to the cluster
cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3")
cluster.Keyspace = "example"
cluster.Consistency = gocql.Quorum
session, _ := cluster.CreateSession()
defer session.Close()
// insert a tweet
if err := session.Query(`INSERT INTO tweet (timeline, id, text) VALUES (?, ?, ?)`,
"me", gocql.TimeUUID(), "hello world").Exec(); err != nil {
log.Fatal(err)
}
var id gocql.UUID
var text string
/* Search for a specific set of records whose 'timeline' column matches
* the value 'me'. The secondary index that we created earlier will be
* used for optimizing the search */
if err := session.Query(`SELECT id, text FROM tweet WHERE timeline = ? LIMIT 1`,
"me").Consistency(gocql.One).Scan(&id, &text); err != nil {
log.Fatal(err)
}
fmt.Println("Tweet:", id, text)
// list all tweets
iter := session.Query(`SELECT id, text FROM tweet WHERE timeline = ?`, "me").Iter()
for iter.Scan(&id, &text) {
fmt.Println("Tweet:", id, text)
}
if err := iter.Close(); err != nil {
log.Fatal(err)
}
}
```
Data Binding
------------
There are various ways to bind application level data structures to CQL statements:
* You can write the data binding by hand, as outlined in the Tweet example. This provides you with the greatest flexibility, but it does mean that you need to keep your application code in sync with your Cassandra schema.
* You can dynamically marshal an entire query result into an `[]map[string]interface{}` using the `SliceMap()` API. This returns a slice of row maps keyed by CQL column names. This method requires no special interaction with the gocql API, but it does require your application to be able to deal with a key value view of your data.
* As a refinement on the `SliceMap()` API you can also call `MapScan()` which returns `map[string]interface{}` instances in a row by row fashion.
* The `Bind()` API provides a client app with a low level mechanism to introspect query meta data and extract appropriate field values from application level data structures.
* Building on top of the gocql driver, [cqlr](https://github.com/relops/cqlr) adds the ability to auto-bind a CQL iterator to a struct or to bind a struct to an INSERT statement.
* Another external project that layers on top of gocql is [cqlc](http://relops.com/cqlc) which generates gocql compliant code from your Cassandra schema so that you can write type safe CQL statements in Go with a natural query syntax.
* [gocassa](https://github.com/hailocab/gocassa) is an external project that layers on top of gocql to provide convenient query building and data binding.
* [gocqltable](https://github.com/elvtechnology/gocqltable) provides an ORM-style convenience layer to make CRUD operations with gocql easier.
Ecosphere
---------
The following community maintained tools are known to integrate with gocql:
* [migrate](https://github.com/mattes/migrate) is a migration handling tool written in Go with Cassandra support.
* [negronicql](https://github.com/mikebthun/negronicql) is gocql middleware for Negroni.
* [cqlr](https://github.com/relops/cqlr) adds the ability to auto-bind a CQL iterator to a struct or to bind a struct to an INSERT statement.
* [cqlc](http://relops.com/cqlc) which generates gocql compliant code from your Cassandra schema so that you can write type safe CQL statements in Go with a natural query syntax.
* [gocassa](https://github.com/hailocab/gocassa) provides query building, adds data binding, and provides easy-to-use "recipe" tables for common query use-cases.
* [gocqltable](https://github.com/elvtechnology/gocqltable) is a wrapper around gocql that aims to simplify common operations whilst working the library.
Other Projects
--------------
* [gocqldriver](https://github.com/tux21b/gocqldriver) is the predecessor of gocql based on Go's "database/sql" package. This project isn't maintained anymore, because Cassandra wasn't a good fit for the traditional "database/sql" API. Use this package instead.
SEO
---
For some reason, when you google `golang cassandra`, this project doesn't feature very highly in the result list. But if you google `go cassandra`, then we're a bit higher up the list. So this is note to try to convince Google that Golang is an alias for Go.
License
-------
> Copyright (c) 2012-2015 The gocql Authors. All rights reserved.
> Use of this source code is governed by a BSD-style
> license that can be found in the LICENSE file.

View file

@ -0,0 +1,60 @@
// +build all integration
package gocql
import (
"strings"
"testing"
)
// TestProto1BatchInsert exercises executing a batch written as a single
// textual "BEGIN BATCH ... APPLY BATCH" statement against a live session.
func TestProto1BatchInsert(t *testing.T) {
	session := createSession(t)
	// Close the session even if one of the t.Fatal calls below fires;
	// the original deferred Close only after CREATE TABLE succeeded,
	// leaking the session when that step failed.
	defer session.Close()

	if err := session.Query("CREATE TABLE large (id int primary key)").Exec(); err != nil {
		t.Fatal("create table:", err)
	}

	begin := "BEGIN BATCH"
	end := "APPLY BATCH"
	query := "INSERT INTO large (id) VALUES (?)"
	fullQuery := strings.Join([]string{begin, query, end}, "\n")
	args := []interface{}{5}
	if err := session.Query(fullQuery, args...).Consistency(Quorum).Exec(); err != nil {
		t.Fatal(err)
	}
}
// TestShouldPrepareFunction table-tests Query.shouldPrepare: per the table,
// batches, INSERT/UPDATE/DELETE statements are expected to be prepared,
// while DDL such as CREATE TABLE is not.
func TestShouldPrepareFunction(t *testing.T) {
	// Each case pairs a raw CQL statement with the expected shouldPrepare result.
	var shouldPrepareTests = []struct {
		Stmt   string
		Result bool
	}{
		{`
BEGIN BATCH
INSERT INTO users (userID, password)
VALUES ('smith', 'secret')
APPLY BATCH
;
`, true},
		{`INSERT INTO users (userID, password, name) VALUES ('user2', 'ch@ngem3b', 'second user')`, true},
		{`BEGIN COUNTER BATCH UPDATE stats SET views = views + 1 WHERE pageid = 1 APPLY BATCH`, true},
		{`delete name from users where userID = 'smith';`, true},
		{` UPDATE users SET password = 'secret' WHERE userID = 'smith' `, true},
		{`CREATE TABLE users (
user_name varchar PRIMARY KEY,
password varchar,
gender varchar,
session_token varchar,
state varchar,
birth_year bigint
);`, false},
	}

	for _, test := range shouldPrepareTests {
		// Construct a Query directly around the statement; no session needed.
		q := &Query{stmt: test.Stmt}
		if got := q.shouldPrepare(); got != test.Result {
			t.Fatalf("%q: got %v, expected %v\n", test.Stmt, got, test.Result)
		}
	}
}

File diff suppressed because it is too large Load diff

111
Godeps/_workspace/src/github.com/gocql/gocql/cluster.go generated vendored Normal file
View file

@ -0,0 +1,111 @@
// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocql
import (
"errors"
"sync"
"time"
"github.com/golang/groupcache/lru"
)
// defaultMaxPreparedStmts is the cache size used when
// ClusterConfig.MaxPreparedStmts is not set (see NewCluster).
const defaultMaxPreparedStmts = 1000

// stmtsLRU is the package-global reference to the prepared statements LRU.
var stmtsLRU preparedLRU

// preparedLRU is the prepared statement cache: an LRU of prepared
// statements guarded by the embedded mutex.
type preparedLRU struct {
	sync.Mutex
	lru *lru.Cache
}
// Max adjusts the maximum size of the cache and cleans up the oldest records
// if the new max is lower than the previous value. Not concurrency safe:
// callers must hold the embedded mutex themselves if needed.
func (p *preparedLRU) Max(max int) {
	// Evict oldest entries until the cache fits within the new bound.
	for p.lru.Len() > max {
		p.lru.RemoveOldest()
	}
	p.lru.MaxEntries = max
}
// initStmtsLRU ensures the package-level prepared statement cache exists
// and is bounded by max: a fresh LRU is created on first use, otherwise
// the existing cache is resized via Max.
func initStmtsLRU(max int) {
	if stmtsLRU.lru == nil {
		stmtsLRU.lru = lru.New(max)
		return
	}
	stmtsLRU.Max(max)
}
// DiscoveryConfig holds the periodic node discovery settings.
// To enable periodic node discovery enable DiscoverHosts in ClusterConfig.
type DiscoveryConfig struct {
	// If not empty will filter all discovered hosts to a single Data Centre (default: "")
	DcFilter string
	// If not empty will filter all discovered hosts to a single Rack (default: "")
	RackFilter string
	// The interval to check for new hosts (default: 30s)
	Sleep time.Duration
}
// ClusterConfig is a struct to configure the default cluster implementation
// of gocql. It has a variety of attributes that can be used to modify the
// behavior to fit the most common use cases. Applications that require a
// different setup must implement their own cluster.
type ClusterConfig struct {
	Hosts             []string          // addresses for the initial connections
	CQLVersion        string            // CQL version (default: 3.0.0)
	ProtoVersion      int               // version of the native protocol (default: 2)
	Timeout           time.Duration     // connection timeout (default: 600ms)
	Port              int               // port (default: 9042)
	Keyspace          string            // initial keyspace (optional)
	NumConns          int               // number of connections per host (default: 2)
	NumStreams        int               // number of streams per connection (default: max per protocol, either 128 or 32768)
	Consistency       Consistency       // default consistency level (default: Quorum)
	Compressor        Compressor        // compression algorithm (default: nil)
	Authenticator     Authenticator     // authenticator (default: nil)
	RetryPolicy       RetryPolicy       // Default retry policy to use for queries (default: 0)
	SocketKeepalive   time.Duration     // The keepalive period to use, enabled if > 0 (default: 0)
	ConnPoolType      NewPoolFunc       // The function used to create the connection pool for the session (default: NewSimplePool)
	DiscoverHosts     bool              // If set, gocql will attempt to automatically discover other members of the Cassandra cluster (default: false)
	MaxPreparedStmts  int               // Sets the maximum cache size for prepared statements globally for gocql (default: 1000)
	MaxRoutingKeyInfo int               // Sets the maximum cache size for query info about statements for each session (default: 1000)
	PageSize          int               // Default page size to use for created sessions (default: 0)
	SerialConsistency SerialConsistency // Sets the consistency for the serial part of queries, values can be either SERIAL or LOCAL_SERIAL (default: unset)
	Discovery         DiscoveryConfig
	SslOpts           *SslOptions
	DefaultTimestamp  bool // Sends a client side timestamp for all requests which overrides the timestamp at which it arrives at the server. (default: true, only enabled for protocol 3 and above)
}
// NewCluster generates a new config for the default cluster implementation,
// pre-populated with the documented defaults for the given seed hosts.
func NewCluster(hosts ...string) *ClusterConfig {
	return &ClusterConfig{
		Hosts:             hosts,
		CQLVersion:        "3.0.0",
		ProtoVersion:      2,
		Timeout:           600 * time.Millisecond,
		Port:              9042,
		NumConns:          2,
		Consistency:       Quorum,
		ConnPoolType:      NewSimplePool,
		DiscoverHosts:     false,
		MaxPreparedStmts:  defaultMaxPreparedStmts,
		MaxRoutingKeyInfo: 1000,
		DefaultTimestamp:  true,
	}
}
// CreateSession initializes the cluster based on this config and returns a
// session object that can be used to interact with the database.
// A copy of cfg is handed to NewSession, so mutating cfg afterwards does
// not affect the created session.
func (cfg *ClusterConfig) CreateSession() (*Session, error) {
	return NewSession(*cfg)
}
var (
	// ErrNoHosts indicates the config contained no seed hosts.
	ErrNoHosts = errors.New("no hosts provided")
	// ErrNoConnectionsStarted indicates no connections could be made
	// while creating the session.
	ErrNoConnectionsStarted = errors.New("no connections were made when creating the session")
	// ErrHostQueryFailed indicates the Hosts list could not be populated.
	ErrHostQueryFailed = errors.New("unable to populate Hosts")
)

View file

@ -0,0 +1,28 @@
package gocql
import (
"github.com/golang/snappy/snappy"
)
// Compressor is implemented by frame compression codecs. Name returns the
// identifier advertised to the server in the COMPRESSION startup option;
// Encode and Decode transform raw frame bodies.
type Compressor interface {
	Name() string
	Encode(data []byte) ([]byte, error)
	Decode(data []byte) ([]byte, error)
}
// SnappyCompressor implements the Compressor interface and can be used to
// compress incoming and outgoing frames. The snappy compression algorithm
// aims for very high speeds and reasonable compression.
type SnappyCompressor struct{}

// Name returns the identifier advertised to the server ("snappy").
func (s SnappyCompressor) Name() string {
	return "snappy"
}

// Encode snappy-compresses data into a freshly allocated buffer.
func (s SnappyCompressor) Encode(data []byte) ([]byte, error) {
	return snappy.Encode(nil, data)
}

// Decode decompresses a snappy-encoded buffer into a new slice.
func (s SnappyCompressor) Decode(data []byte) ([]byte, error) {
	return snappy.Decode(nil, data)
}

View file

@ -0,0 +1,40 @@
// +build all unit
package gocql
import (
"bytes"
"github.com/golang/snappy/snappy"
"testing"
)
// TestSnappyCompressor checks SnappyCompressor against the snappy package
// directly: the compressor name must be "snappy" and Encode/Decode must
// produce exactly the library's output.
func TestSnappyCompressor(t *testing.T) {
	c := SnappyCompressor{}
	if c.Name() != "snappy" {
		t.Fatalf("expected name to be 'snappy', got %v", c.Name())
	}

	str := "My Test String"

	// Test Encoding: compare against snappy.Encode on the same input.
	// bytes.Equal replaces the less idiomatic bytes.Compare(...) != 0.
	if expected, err := snappy.Encode(nil, []byte(str)); err != nil {
		t.Fatalf("failed to encode '%v' with error %v", str, err)
	} else if res, err := c.Encode([]byte(str)); err != nil {
		t.Fatalf("failed to encode '%v' with error %v", str, err)
	} else if !bytes.Equal(expected, res) {
		t.Fatal("failed to match the expected encoded value with the result encoded value.")
	}

	val, err := c.Encode([]byte(str))
	if err != nil {
		t.Fatalf("failed to encode '%v' with error '%v'", str, err)
	}

	// Test Decoding: compare against snappy.Decode on the encoded value.
	if expected, err := snappy.Decode(nil, val); err != nil {
		t.Fatalf("failed to decode '%v' with error %v", val, err)
	} else if res, err := c.Decode(val); err != nil {
		t.Fatalf("failed to decode '%v' with error %v", val, err)
	} else if !bytes.Equal(expected, res) {
		t.Fatal("failed to match the expected decoded value with the result decoded value.")
	}
}

796
Godeps/_workspace/src/github.com/gocql/gocql/conn.go generated vendored Normal file
View file

@ -0,0 +1,796 @@
// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocql
import (
"bufio"
"crypto/tls"
"errors"
"fmt"
"io"
"log"
"net"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
// JoinHostPort is a utility to return an address string usable by
// gocql.Conn to form a connection with a host. If addr already contains
// a port it is returned (trimmed) unchanged; otherwise the given default
// port is appended via net.JoinHostPort.
func JoinHostPort(addr string, port int) string {
	host := strings.TrimSpace(addr)
	if _, _, err := net.SplitHostPort(host); err == nil {
		return host
	}
	return net.JoinHostPort(host, strconv.Itoa(port))
}
// Authenticator drives the authentication handshake. Challenge receives
// server-supplied data and returns a response plus an optional follow-up
// Authenticator for further rounds; Success receives the final payload.
type Authenticator interface {
	Challenge(req []byte) (resp []byte, auth Authenticator, err error)
	Success(data []byte) error
}

// PasswordAuthenticator authenticates with a username and password
// against org.apache.cassandra.auth.PasswordAuthenticator.
type PasswordAuthenticator struct {
	Username string
	Password string
}
// Challenge validates that the server requested the Cassandra password
// authenticator and answers with the payload
// \x00<username>\x00<password>. No follow-up authenticator is needed.
func (p PasswordAuthenticator) Challenge(req []byte) ([]byte, Authenticator, error) {
	if string(req) != "org.apache.cassandra.auth.PasswordAuthenticator" {
		return nil, nil, fmt.Errorf("unexpected authenticator %q", req)
	}
	resp := make([]byte, 0, 2+len(p.Username)+len(p.Password))
	resp = append(resp, 0)
	resp = append(resp, p.Username...)
	resp = append(resp, 0)
	resp = append(resp, p.Password...)
	return resp, nil, nil
}
// Success implements Authenticator; password authentication ignores the
// final payload and always succeeds.
func (p PasswordAuthenticator) Success(data []byte) error {
	return nil
}
// SslOptions configures TLS use for connections; it embeds tls.Config.
type SslOptions struct {
	tls.Config

	// CertPath and KeyPath are optional depending on server
	// config, but both fields must be omitted to avoid using a
	// client certificate
	CertPath string
	KeyPath  string
	CaPath   string // optional depending on server config

	// If you want to verify the hostname and server cert (like a wildcard for cass cluster) then you should turn this on
	// This option is basically the inverse of InsecureSkipVerify
	// See InsecureSkipVerify in http://golang.org/pkg/crypto/tls/ for more info
	EnableHostVerification bool
}
// ConnConfig carries the settings used when dialing and starting up a
// single connection (see Connect).
type ConnConfig struct {
	ProtoVersion  int           // native protocol version; clamped to 2 by Connect if out of range
	CQLVersion    string        // sent as CQL_VERSION during startup
	Timeout       time.Duration // dial timeout and per-op read/write deadline
	NumStreams    int           // stream count; clamped by Connect to the protocol maximum
	Compressor    Compressor
	Authenticator Authenticator
	Keepalive     time.Duration // TCP keepalive period, enabled if > 0
	tlsConfig     *tls.Config   // non-nil enables TLS dialing
}

// ConnErrorHandler is notified when a connection encounters an error.
// NOTE(review): closed presumably reports whether the connection was
// already closed as a result — confirm at the call sites.
type ConnErrorHandler interface {
	HandleError(conn *Conn, err error, closed bool)
}
// How many timeouts we will allow to occur before the connection is closed
// and restarted. This is to prevent a single query timeout from killing a connection
// which may be serving more queries just fine.
// Default is 10, should not be changed concurrently with queries.
var TimeoutLimit int64 = 10

// Conn is a single connection to a Cassandra node. It can be used to execute
// queries, but users are usually advised to use a more reliable, higher
// level API.
type Conn struct {
	conn    net.Conn
	r       *bufio.Reader // buffered reader over conn
	timeout time.Duration // per-operation read/write deadline, enabled if > 0

	headerBuf []byte // scratch buffer sized to one frame header (8 or 9 bytes)

	uniq  chan int  // pool of free stream ids, seeded in Connect
	calls []callReq // per-stream in-flight call state, indexed by stream id

	errorHandler ConnErrorHandler
	compressor   Compressor
	auth         Authenticator
	addr         string // remote address, cached from conn.RemoteAddr()

	version         uint8 // protocol version in use
	currentKeyspace string
	started         bool // set once the startup handshake has completed

	// closed is nonzero once the connection is closed.
	// NOTE(review): presumably flipped atomically given the int32 type — confirm.
	closed int32
	quit   chan struct{}

	// timeouts counts query timeouts, compared against TimeoutLimit.
	// NOTE(review): presumably updated atomically given the int64 type — confirm.
	timeouts int64
}
// Connect establishes a connection to a Cassandra node.
// You must also call the Serve method before you can execute any queries.
// It dials (optionally over TLS), clamps the protocol version and stream
// count to supported ranges, seeds the stream-id pool, starts the serve
// goroutine, and performs the startup handshake before returning.
func Connect(addr string, cfg ConnConfig, errorHandler ConnErrorHandler) (*Conn, error) {
	var (
		err  error
		conn net.Conn
	)

	dialer := &net.Dialer{
		Timeout: cfg.Timeout,
	}

	// Dial over TLS only when a TLS config was supplied.
	if cfg.tlsConfig != nil {
		// the TLS config is safe to be reused by connections but it must not
		// be modified after being used.
		conn, err = tls.DialWithDialer(dialer, "tcp", addr, cfg.tlsConfig)
	} else {
		conn, err = dialer.Dial("tcp", addr)
	}

	if err != nil {
		return nil, err
	}

	// going to default to proto 2
	if cfg.ProtoVersion < protoVersion1 || cfg.ProtoVersion > protoVersion3 {
		log.Printf("unsupported protocol version: %d using 2\n", cfg.ProtoVersion)
		cfg.ProtoVersion = 2
	}

	// Frame header size and the stream-id space depend on the protocol
	// version: proto 3+ uses 9-byte headers and up to 32768 streams.
	headerSize := 8
	maxStreams := 128
	if cfg.ProtoVersion > protoVersion2 {
		maxStreams = 32768
		headerSize = 9
	}

	if cfg.NumStreams <= 0 || cfg.NumStreams > maxStreams {
		cfg.NumStreams = maxStreams
	}

	c := &Conn{
		conn:         conn,
		r:            bufio.NewReader(conn),
		uniq:         make(chan int, cfg.NumStreams),
		calls:        make([]callReq, cfg.NumStreams),
		timeout:      cfg.Timeout,
		version:      uint8(cfg.ProtoVersion),
		addr:         conn.RemoteAddr().String(),
		errorHandler: errorHandler,
		compressor:   cfg.Compressor,
		auth:         cfg.Authenticator,
		headerBuf:    make([]byte, headerSize),
		quit:         make(chan struct{}),
	}

	if cfg.Keepalive > 0 {
		c.setKeepalive(cfg.Keepalive)
	}

	// Seed the stream-id pool and allocate one response channel per stream.
	for i := 0; i < cfg.NumStreams; i++ {
		c.calls[i].resp = make(chan error, 1)
		c.uniq <- i
	}

	go c.serve()

	// Run the startup handshake; tear the socket down on failure.
	if err := c.startup(&cfg); err != nil {
		conn.Close()
		return nil, err
	}
	c.started = true

	return c, nil
}
// Write sends p on the underlying connection, arming the configured write
// deadline first when a timeout is set.
func (c *Conn) Write(p []byte) (int, error) {
	if c.timeout > 0 {
		deadline := time.Now().Add(c.timeout)
		c.conn.SetWriteDeadline(deadline)
	}
	return c.conn.Write(p)
}
// Read fills p from the buffered connection, retrying temporary network
// errors up to maxAttempts times. Each attempt re-arms the read deadline
// when a timeout is configured; ReadFull resumes at the bytes already read.
func (c *Conn) Read(p []byte) (n int, err error) {
	const maxAttempts = 5

	for i := 0; i < maxAttempts; i++ {
		var nn int
		if c.timeout > 0 {
			c.conn.SetReadDeadline(time.Now().Add(c.timeout))
		}

		nn, err = io.ReadFull(c.r, p[n:])
		n += nn

		if err == nil {
			break
		}

		// only retry errors the net package marks as temporary
		if verr, ok := err.(net.Error); !ok || !verr.Temporary() {
			break
		}
	}

	return
}
// startup performs the STARTUP handshake: it advertises the CQL version and
// optional compression, then expects either READY or an AUTHENTICATE
// request, delegating the latter to authenticateHandshake.
func (c *Conn) startup(cfg *ConnConfig) error {
	m := map[string]string{
		"CQL_VERSION": cfg.CQLVersion,
	}

	if c.compressor != nil {
		m["COMPRESSION"] = c.compressor.Name()
	}

	frame, err := c.exec(&writeStartupFrame{opts: m}, nil)
	if err != nil {
		return err
	}

	switch v := frame.(type) {
	case error:
		return v
	case *readyFrame:
		return nil
	case *authenticateFrame:
		return c.authenticateHandshake(v)
	default:
		return NewErrProtocol("Unknown type of response to startup frame: %s", v)
	}
}
// authenticateHandshake drives the challenge/response exchange the server
// may request after STARTUP. It sends the initial response produced by the
// configured Authenticator and then loops, answering server challenges
// until the server reports success or an error.
func (c *Conn) authenticateHandshake(authFrame *authenticateFrame) error {
	if c.auth == nil {
		return fmt.Errorf("authentication required (using %q)", authFrame.class)
	}

	resp, challenger, err := c.auth.Challenge([]byte(authFrame.class))
	if err != nil {
		return err
	}

	req := &writeAuthResponseFrame{data: resp}

	for {
		frame, err := c.exec(req, nil)
		if err != nil {
			return err
		}

		switch v := frame.(type) {
		case error:
			return v
		case *authSuccessFrame:
			if challenger != nil {
				return challenger.Success(v.data)
			}
			return nil
		case *authChallengeFrame:
			// Guard against a nil challenger: previously this dereferenced
			// challenger unconditionally and would panic if the Authenticator
			// returned no challenger but the server kept challenging.
			if challenger == nil {
				return fmt.Errorf("gocql: server sent authentication challenge but no challenger is available")
			}

			resp, challenger, err = challenger.Challenge(v.data)
			if err != nil {
				return err
			}

			req = &writeAuthResponseFrame{
				data: resp,
			}
		default:
			return fmt.Errorf("unknown frame response during authentication: %v", v)
		}
	}
}
// serve is the read loop for the connection: it keeps receiving frames and
// dispatching them to waiting callers until a receive fails, at which point
// the connection is torn down with the offending error. It runs for the
// lifetime of the connection, usually in its own goroutine.
func (c *Conn) serve() {
	var err error
	for err == nil {
		err = c.recv()
	}
	c.closeWithError(err)
}
// recv reads exactly one frame off the wire and routes it to the caller
// waiting on that stream. It is driven in a loop by serve and is not safe
// for concurrent use.
func (c *Conn) recv() error {
	// not safe for concurrent reads

	// read a full header, ignore timeouts, as this is being ran in a loop
	// TODO: TCP level deadlines? or just query level deadlines?
	if c.timeout > 0 {
		c.conn.SetReadDeadline(time.Time{})
	}

	// were just reading headers over and over and copy bodies
	head, err := readHeader(c.r, c.headerBuf)
	if err != nil {
		return err
	}

	// NOTE(review): head.stream indexes c.calls without a bounds check; an
	// out-of-range stream id from the server would panic here — confirm
	// readHeader validates it.
	call := &c.calls[head.stream]
	err = call.framer.readFrame(&head)
	if err != nil {
		// only net errors should cause the connection to be closed. Though
		// cassandra returning corrupt frames will be returned here as well.
		if _, ok := err.(net.Error); ok {
			return err
		}
	}

	// atomically claim the right to deliver; if the caller is no longer
	// waiting (it timed out and its deferred store reset waiting to 0), only
	// recycle the stream.
	if !atomic.CompareAndSwapInt32(&call.waiting, 1, 0) {
		// the waiting thread timed out and is no longer waiting, the stream has
		// not yet been readded to the chan so it cant be used again,
		c.releaseStream(head.stream)
		return nil
	}

	// we either, return a response to the caller, the caller timedout, or the
	// connection has closed. Either way we should never block indefinatly here
	select {
	case call.resp <- err:
	case <-call.timeout:
		c.releaseStream(head.stream)
	case <-c.quit:
	}

	return nil
}
// callReq tracks one in-flight request on a stream.
type callReq struct {
	// could use a waitgroup but this allows us to do timeouts on the read/send
	resp    chan error    // recv delivers the read result here (buffered, len 1)
	framer  *framer       // framer owned by this call while the request is in flight
	waiting int32         // atomic: 1 while exec is waiting, -1 once the conn closes
	timeout chan struct{} // indicates to recv() that a call has timedout
}
// releaseStream returns the stream's framer to the pool and, if there is
// room, makes the stream id available for reuse. The non-blocking send means
// a stream may be dropped (never reused) rather than block the caller.
func (c *Conn) releaseStream(stream int) {
	call := &c.calls[stream]
	framerPool.Put(call.framer)
	call.framer = nil

	select {
	case c.uniq <- stream:
	default:
	}
}
// handleTimeout bumps the running timeout counter and tears the connection
// down with ErrTooManyTimeouts once it exceeds TimeoutLimit.
func (c *Conn) handleTimeout() {
	count := atomic.AddInt64(&c.timeouts, 1)
	if count > TimeoutLimit {
		c.closeWithError(ErrTooManyTimeouts)
	}
}
// exec claims a free stream, writes req on it and blocks until the matching
// response frame arrives, the query times out, or the connection shuts down.
// tracer, when non-nil, enables tracing for the request and receives the
// trace id from the response.
func (c *Conn) exec(req frameWriter, tracer Tracer) (frame, error) {
	// TODO: move tracer onto conn
	// block until a stream id is free or the connection is closed
	var stream int
	select {
	case stream = <-c.uniq:
	case <-c.quit:
		return nil, ErrConnectionClosed
	}

	call := &c.calls[stream]
	// resp is basically a waiting semaphore protecting the framer
	framer := newFramer(c, c, c.compressor, c.version)
	call.framer = framer
	call.timeout = make(chan struct{})

	if tracer != nil {
		framer.trace()
	}

	if !atomic.CompareAndSwapInt32(&call.waiting, 0, 1) {
		return nil, errors.New("gocql: stream is busy or closed")
	}
	defer atomic.StoreInt32(&call.waiting, 0)

	err := req.writeFrame(framer, stream)
	if err != nil {
		return nil, err
	}

	// NOTE(review): with a zero c.timeout, time.After(0) fires almost
	// immediately, which would make every request look timed out — confirm
	// callers always configure a positive timeout.
	select {
	case err := <-call.resp:
		// dont release the stream if detect a timeout as another request can reuse
		// that stream and get a response for the old request, which we have no
		// easy way of detecting.
		defer c.releaseStream(stream)
		if err != nil {
			return nil, err
		}
	case <-time.After(c.timeout):
		close(call.timeout)
		c.handleTimeout()
		return nil, ErrTimeoutNoResponse
	case <-c.quit:
		return nil, ErrConnectionClosed
	}

	if v := framer.header.version.version(); v != c.version {
		return nil, NewErrProtocol("unexpected protocol version in response: got %d expected %d", v, c.version)
	}

	frame, err := framer.parseFrame()
	if err != nil {
		return nil, err
	}

	// traceID is only populated when framer.trace() was called above, i.e.
	// when tracer is non-nil
	if len(framer.traceID) > 0 {
		tracer.Trace(framer.traceID)
	}

	return frame, nil
}
// prepareStatement prepares stmt on the server, deduplicating concurrent
// prepares of the same statement through the global statement LRU: the first
// caller issues the PREPARE while later callers wait on its result.
func (c *Conn) prepareStatement(stmt string, trace Tracer) (*resultPreparedFrame, error) {
	stmtsLRU.Lock()
	if stmtsLRU.lru == nil {
		initStmtsLRU(defaultMaxPreparedStmts)
	}

	// the cache key is scoped by node address and keyspace
	stmtCacheKey := c.addr + c.currentKeyspace + stmt

	if val, ok := stmtsLRU.lru.Get(stmtCacheKey); ok {
		stmtsLRU.Unlock()
		flight := val.(*inflightPrepare)
		flight.wg.Wait()

		return flight.info, flight.err
	}

	// publish the in-flight marker before releasing the lock so concurrent
	// callers wait on it instead of preparing again
	flight := new(inflightPrepare)
	flight.wg.Add(1)
	stmtsLRU.lru.Add(stmtCacheKey, flight)
	stmtsLRU.Unlock()

	prep := &writePrepareFrame{
		statement: stmt,
	}

	resp, err := c.exec(prep, trace)
	if err != nil {
		flight.err = err
		flight.wg.Done()
		return nil, err
	}

	switch x := resp.(type) {
	case *resultPreparedFrame:
		flight.info = x
	case error:
		flight.err = x
	default:
		flight.err = NewErrProtocol("Unknown type in response to prepare frame: %s", x)
	}
	flight.wg.Done()

	// a failed prepare must not stay cached
	if flight.err != nil {
		stmtsLRU.Lock()
		stmtsLRU.lru.Remove(stmtCacheKey)
		stmtsLRU.Unlock()
	}

	return flight.info, flight.err
}
// executeQuery runs qry on this connection: DML statements are transparently
// prepared and their values marshalled, the response frame is translated
// into an Iter, and paging state is wired up when the server indicates more
// pages. Unprepared-statement errors evict the cache entry and retry.
func (c *Conn) executeQuery(qry *Query) *Iter {
	params := queryParams{
		consistency: qry.cons,
	}

	// frame checks that it is not 0
	params.serialConsistency = qry.serialCons
	params.defaultTimestamp = qry.defaultTimestamp

	if len(qry.pageState) > 0 {
		params.pagingState = qry.pageState
	}
	if qry.pageSize > 0 {
		params.pageSize = qry.pageSize
	}

	var frame frameWriter
	if qry.shouldPrepare() {
		// Prepare all DML queries. Other queries can not be prepared.
		info, err := c.prepareStatement(qry.stmt, qry.trace)
		if err != nil {
			return &Iter{err: err}
		}

		// values come either directly from the query or from a user-supplied
		// binding callback
		var values []interface{}

		if qry.binding == nil {
			values = qry.values
		} else {
			binding := &QueryInfo{
				Id:   info.preparedID,
				Args: info.reqMeta.columns,
				Rval: info.respMeta.columns,
			}

			values, err = qry.binding(binding)
			if err != nil {
				return &Iter{err: err}
			}
		}

		if len(values) != len(info.reqMeta.columns) {
			return &Iter{err: ErrQueryArgLength}
		}

		params.values = make([]queryValues, len(values))
		for i := 0; i < len(values); i++ {
			val, err := Marshal(info.reqMeta.columns[i].TypeInfo, values[i])
			if err != nil {
				return &Iter{err: err}
			}

			v := &params.values[i]
			v.value = val
			// TODO: handle query binding names
		}

		frame = &writeExecuteFrame{
			preparedID: info.preparedID,
			params:     params,
		}
	} else {
		frame = &writeQueryFrame{
			statement: qry.stmt,
			params:    params,
		}
	}

	resp, err := c.exec(frame, qry.trace)
	if err != nil {
		return &Iter{err: err}
	}

	switch x := resp.(type) {
	case *resultVoidFrame:
		return &Iter{}
	case *resultRowsFrame:
		iter := &Iter{
			meta: x.meta,
			rows: x.rows,
		}

		if len(x.meta.pagingState) > 0 {
			// queue a next-page iterator that fires once the configured
			// prefetch fraction of this page has been consumed
			iter.next = &nextIter{
				qry: *qry,
				pos: int((1 - qry.prefetch) * float64(len(iter.rows))),
			}

			iter.next.qry.pageState = x.meta.pagingState
			if iter.next.pos < 1 {
				iter.next.pos = 1
			}
		}

		return iter
	case *resultKeyspaceFrame, *resultSchemaChangeFrame:
		return &Iter{}
	case *RequestErrUnprepared:
		// the node lost our prepared statement (e.g. it restarted): evict it
		// from the cache and retry, which re-prepares on the way through
		stmtsLRU.Lock()
		stmtCacheKey := c.addr + c.currentKeyspace + qry.stmt
		if _, ok := stmtsLRU.lru.Get(stmtCacheKey); ok {
			stmtsLRU.lru.Remove(stmtCacheKey)
			stmtsLRU.Unlock()
			return c.executeQuery(qry)
		}
		stmtsLRU.Unlock()
		return &Iter{err: x}
	case error:
		return &Iter{err: x}
	default:
		return &Iter{err: NewErrProtocol("Unknown type in response to execute query: %s", x)}
	}
}
// Pick satisfies the single-connection picker contract: it returns the
// receiver itself, or nil once the connection has been closed.
func (c *Conn) Pick(qry *Query) *Conn {
	if !c.Closed() {
		return c
	}
	return nil
}
// Closed reports whether closeWithError has already run on this connection.
func (c *Conn) Closed() bool {
	state := atomic.LoadInt32(&c.closed)
	return state == 1
}
// closeWithError transitions the connection to closed exactly once, marks
// every stream unusable, unblocks waiting callers with err, and notifies the
// error handler when the connection had completed its handshake.
func (c *Conn) closeWithError(err error) {
	// CAS makes shutdown idempotent
	if !atomic.CompareAndSwapInt32(&c.closed, 0, 1) {
		return
	}

	for id := 0; id < len(c.calls); id++ {
		req := &c.calls[id]
		// we need to send the error to all waiting queries, put the state
		// of this conn into not active so that it can not execute any queries.
		atomic.StoreInt32(&req.waiting, -1)
		if err != nil {
			// non-blocking: the resp channel is buffered and may already
			// hold a result
			select {
			case req.resp <- err:
			default:
			}
		}
	}

	// wake anyone selecting on c.quit, then tear down the socket
	close(c.quit)
	c.conn.Close()

	if c.started && err != nil {
		c.errorHandler.HandleError(c, err, true)
	}
}
// Close shuts the connection down without delivering an error to waiters or
// the error handler.
func (c *Conn) Close() {
	c.closeWithError(nil)
}
// Address returns the remote address this connection was established to.
func (c *Conn) Address() string {
	return c.addr
}
// AvailableStreams reports how many stream ids are currently free on this
// connection.
func (c *Conn) AvailableStreams() int {
	return len(c.uniq)
}
// UseKeyspace switches the connection's active keyspace by issuing a USE
// statement and records it for statement-cache scoping.
//
// NOTE(review): keyspace is interpolated into the statement inside double
// quotes but embedded quotes are not escaped — confirm callers only pass
// validated keyspace names.
func (c *Conn) UseKeyspace(keyspace string) error {
	q := &writeQueryFrame{statement: `USE "` + keyspace + `"`}
	q.params.consistency = Any

	resp, err := c.exec(q, nil)
	if err != nil {
		return err
	}

	switch x := resp.(type) {
	case *resultKeyspaceFrame:
	case error:
		return x
	default:
		return NewErrProtocol("unknown frame in response to USE: %v", x)
	}

	c.currentKeyspace = keyspace

	return nil
}
// executeBatch sends batch as a single BATCH frame, preparing any entries
// that carry bound values first. An unprepared-statement error evicts the
// stale cache entry and retries the whole batch once. Batches require
// protocol v2 or later.
func (c *Conn) executeBatch(batch *Batch) error {
	if c.version == protoVersion1 {
		return ErrUnsupported
	}

	n := len(batch.Entries)
	req := &writeBatchFrame{
		typ:               batch.Type,
		statements:        make([]batchStatment, n),
		consistency:       batch.Cons,
		serialConsistency: batch.serialCons,
		defaultTimestamp:  batch.defaultTimestamp,
	}

	// maps prepared ids back to statement text so an unprepared error can be
	// resolved to its cache key below
	stmts := make(map[string]string)

	for i := 0; i < n; i++ {
		entry := &batch.Entries[i]
		b := &req.statements[i]

		if len(entry.Args) > 0 || entry.binding != nil {
			info, err := c.prepareStatement(entry.Stmt, nil)
			if err != nil {
				return err
			}

			// args come either directly from the entry or from a
			// user-supplied binding callback
			var args []interface{}
			if entry.binding == nil {
				args = entry.Args
			} else {
				binding := &QueryInfo{
					Id:   info.preparedID,
					Args: info.reqMeta.columns,
					Rval: info.respMeta.columns,
				}
				args, err = entry.binding(binding)
				if err != nil {
					return err
				}
			}

			if len(args) != len(info.reqMeta.columns) {
				return ErrQueryArgLength
			}

			b.preparedID = info.preparedID
			stmts[string(info.preparedID)] = entry.Stmt

			b.values = make([]queryValues, len(info.reqMeta.columns))

			for j := 0; j < len(info.reqMeta.columns); j++ {
				val, err := Marshal(info.reqMeta.columns[j].TypeInfo, args[j])
				if err != nil {
					return err
				}

				b.values[j].value = val
				// TODO: add names
			}
		} else {
			b.statement = entry.Stmt
		}
	}

	// TODO: should batch support tracing?
	resp, err := c.exec(req, nil)
	if err != nil {
		return err
	}

	switch x := resp.(type) {
	case *resultVoidFrame:
		return nil
	case *RequestErrUnprepared:
		stmt, found := stmts[string(x.StatementId)]
		if !found {
			// an id we never sent: nothing to evict, surface the error
			return x
		}
		// evict the stale prepared statement and retry the batch, which
		// re-prepares on the way through. (The original tested `found`
		// twice in a row; folded into one branch.)
		stmtsLRU.Lock()
		stmtsLRU.lru.Remove(c.addr + c.currentKeyspace + stmt)
		stmtsLRU.Unlock()
		return c.executeBatch(batch)
	case error:
		return x
	default:
		return NewErrProtocol("Unknown type in response to batch statement: %s", x)
	}
}
// setKeepalive enables TCP keepalive with period d when the underlying
// transport is a plain TCP connection; other transports (e.g. TLS) are left
// untouched.
func (c *Conn) setKeepalive(d time.Duration) error {
	tc, ok := c.conn.(*net.TCPConn)
	if !ok {
		return nil
	}

	if err := tc.SetKeepAlivePeriod(d); err != nil {
		return err
	}
	return tc.SetKeepAlive(true)
}
// inflightPrepare is the statement-LRU entry for a prepare that is in
// progress: the first caller fills info/err and releases wg, later callers
// wait on wg and read the shared result.
type inflightPrepare struct {
	info *resultPreparedFrame
	err  error

	wg sync.WaitGroup
}
// Sentinel errors returned by Conn operations.
var (
	// ErrQueryArgLength is returned when the number of bound values does not
	// match the prepared statement's parameter count.
	ErrQueryArgLength = errors.New("gocql: query argument length mismatch")

	// ErrTimeoutNoResponse is returned when no response arrives within the
	// configured timeout. (Fixed "recieved" -> "received" typo in the text.)
	ErrTimeoutNoResponse = errors.New("gocql: no response received from cassandra within timeout period")

	// ErrTooManyTimeouts is returned once more than TimeoutLimit queries in
	// a row have timed out on the connection.
	ErrTooManyTimeouts = errors.New("gocql: too many query timeouts on the connection")

	// ErrConnectionClosed is returned for requests outstanding when the
	// connection shuts down.
	ErrConnectionClosed = errors.New("gocql: connection closed waiting for response")
)

View file

@ -0,0 +1,769 @@
// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build all unit
package gocql
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"net"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
)
// defaultProto is the protocol version the tests use unless a test pins a
// specific one.
const (
	defaultProto = protoVersion2
)
// TestJoinHostPort checks host/port joining for IPv4 and IPv6 literals and
// that an address already carrying a port is passed through unchanged.
func TestJoinHostPort(t *testing.T) {
	tests := map[string]string{
		"127.0.0.1:0": JoinHostPort("127.0.0.1", 0),
		"127.0.0.1:1": JoinHostPort("127.0.0.1:1", 9142),
		"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:0": JoinHostPort("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 0),
		"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:1": JoinHostPort("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:1", 9142),
	}
	for k, v := range tests {
		if k != v {
			t.Fatalf("expected '%v', got '%v'", k, v)
		}
	}
}
// TestSimple exercises the happy path: a session against a single test
// server answering one trivial query.
func TestSimple(t *testing.T) {
	srv := NewTestServer(t, defaultProto)
	defer srv.Stop()

	cluster := NewCluster(srv.Address)
	cluster.ProtoVersion = int(defaultProto)

	db, err := cluster.CreateSession()
	if err != nil {
		t.Errorf("0x%x: NewCluster: %v", defaultProto, err)
		return
	}

	if err := db.Query("void").Exec(); err != nil {
		t.Errorf("0x%x: %v", defaultProto, err)
	}
}
// TestSSLSimple runs a trivial query over TLS while presenting a client
// certificate.
func TestSSLSimple(t *testing.T) {
	srv := NewSSLTestServer(t, defaultProto)
	defer srv.Stop()

	db, err := createTestSslCluster(srv.Address, defaultProto, true).CreateSession()
	if err != nil {
		t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
	}

	if err := db.Query("void").Exec(); err != nil {
		t.Fatalf("0x%x: %v", defaultProto, err)
	}
}
// TestSSLSimpleNoClientCert runs a trivial query over TLS without a client
// certificate.
func TestSSLSimpleNoClientCert(t *testing.T) {
	srv := NewSSLTestServer(t, defaultProto)
	defer srv.Stop()

	db, err := createTestSslCluster(srv.Address, defaultProto, false).CreateSession()
	if err != nil {
		t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
	}

	if err := db.Query("void").Exec(); err != nil {
		t.Fatalf("0x%x: %v", defaultProto, err)
	}
}
// createTestSslCluster builds a ClusterConfig pointed at hosts with SSL
// enabled, optionally attaching the test client certificate and key.
func createTestSslCluster(hosts string, proto uint8, useClientCert bool) *ClusterConfig {
	opts := &SslOptions{
		CaPath:                 "testdata/pki/ca.crt",
		EnableHostVerification: false,
	}
	if useClientCert {
		opts.CertPath = "testdata/pki/gocql.crt"
		opts.KeyPath = "testdata/pki/gocql.key"
	}

	cluster := NewCluster(hosts)
	cluster.SslOpts = opts
	cluster.ProtoVersion = int(proto)
	return cluster
}
// TestClosed verifies that queries issued after Session.Close return
// ErrSessionClosed.
func TestClosed(t *testing.T) {
	t.Skip("Skipping the execution of TestClosed for now to try to concentrate on more important test failures on Travis")

	srv := NewTestServer(t, defaultProto)
	defer srv.Stop()

	cluster := NewCluster(srv.Address)
	cluster.ProtoVersion = int(defaultProto)

	session, err := cluster.CreateSession()
	if err != nil {
		t.Errorf("0x%x: NewCluster: %v", defaultProto, err)
		return
	}
	// Only defer the close once we know the session exists: deferring before
	// the error check risked a nil-pointer panic on a failed CreateSession.
	defer session.Close()

	if err := session.Query("void").Exec(); err != ErrSessionClosed {
		t.Errorf("0x%x: expected %#v, got %#v", defaultProto, ErrSessionClosed, err)
		return
	}
}
// newTestSession opens a session against a single test-server address using
// the given protocol version.
func newTestSession(addr string, proto uint8) (*Session, error) {
	c := NewCluster(addr)
	c.ProtoVersion = int(proto)
	return c.CreateSession()
}
// TestTimeout runs a query the server answers with an error and expects
// Exec to fail; a watchdog goroutine flags the test if nothing comes back
// within two seconds.
//
// NOTE(review): the watchdog goroutine is never cancelled; if the test
// finishes quickly it may call t.Errorf after the test has returned —
// confirm this is acceptable here.
func TestTimeout(t *testing.T) {
	srv := NewTestServer(t, defaultProto)
	defer srv.Stop()

	db, err := newTestSession(srv.Address, defaultProto)
	if err != nil {
		t.Errorf("NewCluster: %v", err)
		return
	}
	defer db.Close()

	go func() {
		<-time.After(2 * time.Second)
		t.Errorf("no timeout")
	}()

	if err := db.Query("kill").Exec(); err == nil {
		t.Errorf("expected error")
	}
}
// TestQueryRetry will test to make sure that gocql will execute
// the exact amount of retry queries designated by the user.
func TestQueryRetry(t *testing.T) {
	srv := NewTestServer(t, defaultProto)
	defer srv.Stop()

	db, err := newTestSession(srv.Address, defaultProto)
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()

	// NOTE(review): t.Fatalf from a non-test goroutine is unsupported by the
	// testing package (FailNow must run on the test goroutine); this
	// watchdog should probably use t.Error — confirm.
	go func() {
		<-time.After(5 * time.Second)
		t.Fatalf("no timeout")
	}()

	rt := &SimpleRetryPolicy{NumRetries: 1}

	qry := db.Query("kill").RetryPolicy(rt)
	if err := qry.Exec(); err == nil {
		t.Fatalf("expected error")
	}

	// the test server counts every "kill" query it sees
	requests := atomic.LoadInt64(&srv.nKillReq)
	attempts := qry.Attempts()
	if requests != int64(attempts) {
		t.Fatalf("expected requests %v to match query attemps %v", requests, attempts)
	}

	//Minus 1 from the requests variable since there is the initial query attempt
	if requests-1 != int64(rt.NumRetries) {
		t.Fatalf("failed to retry the query %v time(s). Query executed %v times", rt.NumRetries, requests-1)
	}
}
// TestSimplePoolRoundRobin runs concurrent queries against five test
// servers and asserts the default pool spread the load evenly across them.
func TestSimplePoolRoundRobin(t *testing.T) {
	servers := make([]*TestServer, 5)
	addrs := make([]string, len(servers))
	for n := 0; n < len(servers); n++ {
		servers[n] = NewTestServer(t, defaultProto)
		addrs[n] = servers[n].Address
		defer servers[n].Stop()
	}
	cluster := NewCluster(addrs...)
	cluster.ProtoVersion = defaultProto

	db, err := cluster.CreateSession()
	time.Sleep(1 * time.Second) // Sleep to allow the Cluster.fillPool to complete
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}

	var wg sync.WaitGroup
	wg.Add(5)
	for n := 0; n < 5; n++ {
		go func() {
			defer wg.Done()
			for j := 0; j < 5; j++ {
				if err := db.Query("void").Exec(); err != nil {
					// t.Fatal must not be called from a non-test goroutine
					// (it calls runtime.Goexit on the wrong goroutine);
					// record the failure and bail out of this worker instead.
					t.Error(err)
					return
				}
			}
		}()
	}
	wg.Wait()

	// compute the largest pairwise difference in per-server request counts
	diff := 0
	for n := 1; n < len(servers); n++ {
		d := 0
		if servers[n].nreq > servers[n-1].nreq {
			d = int(servers[n].nreq - servers[n-1].nreq)
		} else {
			d = int(servers[n-1].nreq - servers[n].nreq)
		}
		if d > diff {
			diff = d
		}
	}

	if diff > 0 {
		t.Errorf("Expected 0 difference in usage but was %d", diff)
	}
}
// TestConnClosing floods a session with error-producing queries and then
// checks that the pool recovers back to the configured connection count.
func TestConnClosing(t *testing.T) {
	t.Skip("Skipping until test can be ran reliably")

	srv := NewTestServer(t, protoVersion2)
	defer srv.Stop()

	db, err := NewCluster(srv.Address).CreateSession()
	if err != nil {
		t.Errorf("NewCluster: %v", err)
	}
	defer db.Close()

	numConns := db.cfg.NumConns
	count := db.cfg.NumStreams * numConns

	wg := &sync.WaitGroup{}
	wg.Add(count)
	for i := 0; i < count; i++ {
		go func(wg *sync.WaitGroup) {
			wg.Done()
			db.Query("kill").Exec()
		}(wg)
	}

	wg.Wait()

	time.Sleep(1 * time.Second) //Sleep so the fillPool can complete.
	pool := db.Pool.(ConnectionPool)
	conns := pool.Size()

	if conns != numConns {
		t.Errorf("Expected to have %d connections but have %d", numConns, conns)
	}
}
// TestStreams_Protocol1 verifies that a single protocol-v1 connection can
// serve a query on every one of its streams concurrently.
func TestStreams_Protocol1(t *testing.T) {
	srv := NewTestServer(t, protoVersion1)
	defer srv.Stop()

	// TODO: these are more like session tests and should instead operate
	// on a single Conn
	cluster := NewCluster(srv.Address)
	cluster.NumConns = 1
	cluster.ProtoVersion = 1

	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	var wg sync.WaitGroup
	for i := 0; i < db.cfg.NumStreams; i++ {
		// here were just validating that if we send NumStream request we get
		// a response for every stream and the lengths for the queries are set
		// correctly.
		wg.Add(1)
		go func() {
			defer wg.Done()

			if err := db.Query("void").Exec(); err != nil {
				t.Error(err)
			}
		}()
	}
	wg.Wait()
}
// TestStreams_Protocol2 sends one query per stream, sequentially, over a
// single protocol-v2 connection.
func TestStreams_Protocol2(t *testing.T) {
	srv := NewTestServer(t, protoVersion2)
	defer srv.Stop()

	// TODO: these are more like session tests and should instead operate
	// on a single Conn
	cluster := NewCluster(srv.Address)
	cluster.NumConns = 1
	cluster.ProtoVersion = 2

	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	for i := 0; i < db.cfg.NumStreams; i++ {
		// the test server processes each conn synchronously
		// here were just validating that if we send NumStream request we get
		// a response for every stream and the lengths for the queries are set
		// correctly.
		if err = db.Query("void").Exec(); err != nil {
			t.Fatal(err)
		}
	}
}
// TestStreams_Protocol3 sends one query per stream, sequentially, over a
// single protocol-v3 connection.
func TestStreams_Protocol3(t *testing.T) {
	srv := NewTestServer(t, protoVersion3)
	defer srv.Stop()

	// TODO: these are more like session tests and should instead operate
	// on a single Conn
	cluster := NewCluster(srv.Address)
	cluster.NumConns = 1
	cluster.ProtoVersion = 3

	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	for i := 0; i < db.cfg.NumStreams; i++ {
		// the test server processes each conn synchronously
		// here were just validating that if we send NumStream request we get
		// a response for every stream and the lengths for the queries are set
		// correctly.
		if err = db.Query("void").Exec(); err != nil {
			t.Fatal(err)
		}
	}
}
// BenchmarkProtocolV3 measures round-trip latency of a trivial query over a
// single protocol-v3 connection against the in-process test server.
func BenchmarkProtocolV3(b *testing.B) {
	srv := NewTestServer(b, protoVersion3)
	defer srv.Stop()

	// TODO: these are more like session tests and should instead operate
	// on a single Conn
	cluster := NewCluster(srv.Address)
	cluster.NumConns = 1
	cluster.ProtoVersion = 3

	db, err := cluster.CreateSession()
	if err != nil {
		b.Fatal(err)
	}
	defer db.Close()

	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		if err = db.Query("void").Exec(); err != nil {
			b.Fatal(err)
		}
	}
}
// TestRoundRobinConnPoolRoundRobin checks that the policy-based round-robin
// pool spreads queries evenly across five servers and drains on Close.
func TestRoundRobinConnPoolRoundRobin(t *testing.T) {
	// create 5 test servers
	servers := make([]*TestServer, 5)
	addrs := make([]string, len(servers))
	for n := 0; n < len(servers); n++ {
		servers[n] = NewTestServer(t, defaultProto)
		addrs[n] = servers[n].Address
		defer servers[n].Stop()
	}

	// create a new cluster using the policy-based round robin conn pool
	cluster := NewCluster(addrs...)
	cluster.ConnPoolType = NewRoundRobinConnPool

	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("failed to create a new session: %v", err)
	}

	// Sleep to allow the pool to fill
	time.Sleep(100 * time.Millisecond)

	// run concurrent queries against the pool, server usage should
	// be even
	var wg sync.WaitGroup
	wg.Add(5)
	for n := 0; n < 5; n++ {
		go func() {
			for j := 0; j < 5; j++ {
				if err := db.Query("void").Exec(); err != nil {
					t.Errorf("Query failed with error: %v", err)
				}
			}
			wg.Done()
		}()
	}
	wg.Wait()

	db.Close()

	// wait for the pool to drain
	time.Sleep(100 * time.Millisecond)
	size := db.Pool.Size()
	if size != 0 {
		t.Errorf("connection pool did not drain, still contains %d connections", size)
	}

	// verify that server usage is even
	diff := 0
	for n := 1; n < len(servers); n++ {
		d := 0
		if servers[n].nreq > servers[n-1].nreq {
			d = int(servers[n].nreq - servers[n-1].nreq)
		} else {
			d = int(servers[n-1].nreq - servers[n].nreq)
		}

		if d > diff {
			diff = d
		}
	}

	if diff > 0 {
		t.Errorf("expected 0 difference in usage but was %d", diff)
	}
}
// TestPolicyConnPoolSSL tests that the policy connection pool handles SSL
// correctly, including draining its connections on Close.
func TestPolicyConnPoolSSL(t *testing.T) {
	srv := NewSSLTestServer(t, defaultProto)
	defer srv.Stop()

	cluster := createTestSslCluster(srv.Address, defaultProto, true)
	cluster.ConnPoolType = NewRoundRobinConnPool

	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("failed to create new session: %v", err)
	}

	if err := db.Query("void").Exec(); err != nil {
		t.Errorf("query failed due to error: %v", err)
	}
	db.Close()

	// wait for the pool to drain
	time.Sleep(100 * time.Millisecond)
	size := db.Pool.Size()
	if size != 0 {
		t.Errorf("connection pool did not drain, still contains %d connections", size)
	}
}
// TestQueryTimeout issues a query the server never answers ("timeout"
// blocks until the server stops) and asserts the session surfaces
// ErrTimeoutNoResponse within the configured timeout.
func TestQueryTimeout(t *testing.T) {
	srv := NewTestServer(t, defaultProto)
	defer srv.Stop()

	cluster := NewCluster(srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 1 * time.Millisecond

	db, err := cluster.CreateSession()
	if err != nil {
		t.Errorf("NewCluster: %v", err)
	}
	defer db.Close()

	ch := make(chan error, 1)

	go func() {
		err := db.Query("timeout").Exec()
		if err != nil {
			ch <- err
			return
		}
		t.Errorf("err was nil, expected to get a timeout after %v", db.cfg.Timeout)
	}()

	select {
	case err := <-ch:
		if err != ErrTimeoutNoResponse {
			t.Fatalf("expected to get %v for timeout got %v", ErrTimeoutNoResponse, err)
		}
	case <-time.After(10*time.Millisecond + db.cfg.Timeout):
		// ensure that the query goroutines have been scheduled
		t.Fatalf("query did not timeout after %v", db.cfg.Timeout)
	}
}
// TestQueryTimeoutReuseStream makes sure a follow-up query still succeeds
// after a timed-out "slow" query when only one connection and one stream
// are available.
func TestQueryTimeoutReuseStream(t *testing.T) {
	srv := NewTestServer(t, defaultProto)
	defer srv.Stop()

	cluster := NewCluster(srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 1 * time.Millisecond
	cluster.NumConns = 1
	cluster.NumStreams = 1

	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()

	// this query times out; its stream must not poison the next query
	db.Query("slow").Exec()

	err = db.Query("void").Exec()
	if err != nil {
		t.Fatal(err)
	}
}
// TestQueryTimeoutClose closes the session while a query is blocked and
// expects the waiter to be released with ErrConnectionClosed.
func TestQueryTimeoutClose(t *testing.T) {
	srv := NewTestServer(t, defaultProto)
	defer srv.Stop()

	cluster := NewCluster(srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 1000 * time.Millisecond
	cluster.NumConns = 1
	cluster.NumStreams = 1

	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}

	ch := make(chan error)
	go func() {
		err := db.Query("timeout").Exec()
		ch <- err
	}()
	// ensure that the above goroutine gets scheduled
	time.Sleep(50 * time.Millisecond)

	db.Close()
	select {
	case err = <-ch:
	case <-time.After(1 * time.Second):
		t.Fatal("timedout waiting to get a response once cluster is closed")
	}

	if err != ErrConnectionClosed {
		t.Fatalf("expected to get %v got %v", ErrConnectionClosed, err)
	}
}
// NewTestServer starts a plain-TCP fake Cassandra server on a random
// loopback port speaking the given protocol version, and begins serving
// immediately.
func NewTestServer(t testing.TB, protocol uint8) *TestServer {
	laddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}

	listen, err := net.ListenTCP("tcp", laddr)
	if err != nil {
		t.Fatal(err)
	}

	// protocol v3 uses the 9-byte frame header
	size := 8
	if protocol > protoVersion2 {
		size = 9
	}

	srv := &TestServer{
		Address:    listen.Addr().String(),
		listen:     listen,
		t:          t,
		protocol:   protocol,
		headerSize: size,
		quit:       make(chan struct{}),
	}
	go srv.serve()
	return srv
}
// NewSSLTestServer starts a TLS-wrapped fake Cassandra server on a random
// loopback port using the test PKI material under testdata/pki.
func NewSSLTestServer(t testing.TB, protocol uint8) *TestServer {
	pem, err := ioutil.ReadFile("testdata/pki/ca.crt")
	if err != nil {
		// previously this error was silently dropped, letting a missing CA
		// file surface later as a confusing parse failure
		t.Fatalf("could not read ca cert: %v", err)
	}
	certPool := x509.NewCertPool()
	if !certPool.AppendCertsFromPEM(pem) {
		t.Errorf("Failed parsing or appending certs")
	}
	mycert, err := tls.LoadX509KeyPair("testdata/pki/cassandra.crt", "testdata/pki/cassandra.key")
	if err != nil {
		t.Errorf("could not load cert")
	}
	config := &tls.Config{
		Certificates: []tls.Certificate{mycert},
		RootCAs:      certPool,
	}
	listen, err := tls.Listen("tcp", "127.0.0.1:0", config)
	if err != nil {
		t.Fatal(err)
	}

	// protocol v3 uses the 9-byte frame header
	headerSize := 8
	if protocol > protoVersion2 {
		headerSize = 9
	}

	srv := &TestServer{
		Address:    listen.Addr().String(),
		listen:     listen,
		t:          t,
		protocol:   protocol,
		headerSize: headerSize,
		quit:       make(chan struct{}),
	}
	go srv.serve()
	return srv
}
// TestServer is a minimal in-process fake of a Cassandra node used by the
// connection tests; it answers a handful of canned queries.
type TestServer struct {
	Address    string
	t          testing.TB
	nreq       uint64 // accessed atomically: frames received
	listen     net.Listener
	nKillReq   int64 // accessed atomically: "kill" queries seen
	compressor Compressor

	protocol   byte
	headerSize int // 8 for protocol <= 2, 9 for protocol 3

	quit chan struct{} // closed by Stop to release blocked handlers
}
// serve accepts connections until the listener closes, reading frames off
// each connection in a per-connection goroutine and processing every
// request in its own goroutine.
func (srv *TestServer) serve() {
	defer srv.listen.Close()
	for {
		conn, err := srv.listen.Accept()
		if err != nil {
			break
		}

		go func(conn net.Conn) {
			defer conn.Close()
			for {
				framer, err := srv.readFrame(conn)
				if err != nil {
					// EOF is the normal end of a client connection
					if err == io.EOF {
						return
					}

					srv.t.Error(err)
					return
				}

				atomic.AddUint64(&srv.nreq, 1)

				go srv.process(framer)
			}
		}(conn)
	}
}
// Stop closes the listener and closes quit so blocked handlers (e.g. the
// "timeout" query) are released.
func (srv *TestServer) Stop() {
	srv.listen.Close()
	close(srv.quit)
}
// process answers a single request frame with a canned response chosen by
// opcode and, for queries, by the first word of the query text.
func (srv *TestServer) process(f *framer) {
	head := f.header
	if head == nil {
		srv.t.Error("process frame with a nil header")
		return
	}

	switch head.op {
	case opStartup:
		f.writeHeader(0, opReady, head.stream)
	case opOptions:
		f.writeHeader(0, opSupported, head.stream)
		f.writeShort(0)
	case opQuery:
		query := f.readLongString()
		first := query
		if n := strings.Index(query, " "); n > 0 {
			first = first[:n]
		}
		switch strings.ToLower(first) {
		case "kill":
			atomic.AddInt64(&srv.nKillReq, 1)
			f.writeHeader(0, opError, head.stream)
			f.writeInt(0x1001)
			f.writeString("query killed")
		case "use":
			// NOTE(review): unlike the other cases this writes no result
			// header before the body — confirm whether that is intentional.
			f.writeInt(resultKindKeyspace)
			f.writeString(strings.TrimSpace(query[3:]))
		case "void":
			f.writeHeader(0, opResult, head.stream)
			f.writeInt(resultKindVoid)
		case "timeout":
			// simulate a hung query: block until the server is stopped
			<-srv.quit
			return
		case "slow":
			// answer after a delay from another goroutine
			go func() {
				f.writeHeader(0, opResult, head.stream)
				f.writeInt(resultKindVoid)
				f.wbuf[0] = srv.protocol | 0x80
				select {
				case <-srv.quit:
				case <-time.After(50 * time.Millisecond):
					f.finishWrite()
				}
			}()
			return
		default:
			f.writeHeader(0, opResult, head.stream)
			f.writeInt(resultKindVoid)
		}
	default:
		f.writeHeader(0, opError, head.stream)
		f.writeInt(0)
		f.writeString("not supported")
	}

	// flag the frame as a response for the negotiated protocol version
	f.wbuf[0] = srv.protocol | 0x80

	if err := f.finishWrite(); err != nil {
		srv.t.Error(err)
	}
}
// readFrame reads one frame from conn and validates that it is a request
// frame carrying the server's protocol version.
func (srv *TestServer) readFrame(conn net.Conn) (*framer, error) {
	buf := make([]byte, srv.headerSize)
	head, err := readHeader(conn, buf)
	if err != nil {
		return nil, err
	}

	framer := newFramer(conn, conn, nil, srv.protocol)

	err = framer.readFrame(&head)
	if err != nil {
		return nil, err
	}

	// should be a request frame
	if head.version.response() {
		return nil, fmt.Errorf("expected to read a request frame got version: %v", head.version)
	} else if head.version.version() != srv.protocol {
		return nil, fmt.Errorf("expected to read protocol version 0x%x got 0x%x", srv.protocol, head.version.version())
	}

	return framer, nil
}

View file

@ -0,0 +1,895 @@
// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocql
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net"
"sync"
"time"
)
/*ConnectionPool represents the interface gocql will use to work with a collection of connections.
Purpose
The connection pool in gocql opens and closes connections as well as selects an available connection
for gocql to execute a query against. The pool is also responsible for handling connection errors that
are caught by the connection experiencing the error.
A connection pool should make a copy of the variables used from the ClusterConfig provided to the pool
upon creation. ClusterConfig is a pointer and can be modified after the creation of the pool. This can
lead to issues with variables being modified outside the expectations of the ConnectionPool type.
Example of Single Connection Pool:
type SingleConnection struct {
conn *Conn
cfg *ClusterConfig
}
func NewSingleConnection(cfg *ClusterConfig) ConnectionPool {
addr := JoinHostPort(cfg.Hosts[0], cfg.Port)
connCfg := ConnConfig{
ProtoVersion: cfg.ProtoVersion,
CQLVersion: cfg.CQLVersion,
Timeout: cfg.Timeout,
NumStreams: cfg.NumStreams,
Compressor: cfg.Compressor,
Authenticator: cfg.Authenticator,
Keepalive: cfg.SocketKeepalive,
}
pool := SingleConnection{cfg:cfg}
pool.conn = Connect(addr,connCfg,pool)
return &pool
}
func (s *SingleConnection) HandleError(conn *Conn, err error, closed bool) {
if closed {
connCfg := ConnConfig{
ProtoVersion: cfg.ProtoVersion,
CQLVersion: cfg.CQLVersion,
Timeout: cfg.Timeout,
NumStreams: cfg.NumStreams,
Compressor: cfg.Compressor,
Authenticator: cfg.Authenticator,
Keepalive: cfg.SocketKeepalive,
}
s.conn = Connect(conn.Address(),connCfg,s)
}
}
func (s *SingleConnection) Pick(qry *Query) *Conn {
if s.conn.isClosed {
return nil
}
return s.conn
}
func (s *SingleConnection) Size() int {
return 1
}
func (s *SingleConnection) Close() {
s.conn.Close()
}
This is a very simple example of a type that exposes the connection pool interface. To assign
this type as the connection pool to use you would assign it to the ClusterConfig like so:
cluster := NewCluster("127.0.0.1")
cluster.ConnPoolType = NewSingleConnection
...
session, err := cluster.CreateSession()
To see a more complete example of a ConnectionPool implementation please see the SimplePool type.
*/
// ConnectionPool is the interface a connection pool must satisfy to be used
// by gocql; see the package comment above for the full contract and an
// example implementation.
type ConnectionPool interface {
	// SetHosts is embedded so the pool is informed of host/topology updates.
	SetHosts
	// Pick returns a connection to run the given query on, or nil when no
	// connection is available.
	Pick(*Query) *Conn
	// Size reports how many connections the pool currently holds.
	Size() int
	// Close shuts down the pool and its connections.
	Close()
}
// SetHosts is the interface to implement to receive the current set of hosts
// in the cluster ring.
type SetHosts interface {
	SetHosts(hosts []HostInfo)
}
// SetPartitioner is the interface to implement to receive the partitioner
// value reported by the cluster.
type SetPartitioner interface {
	SetPartitioner(partitioner string)
}
// NewPoolFunc is the type used by ClusterConfig to create a pool of a
// specific type.
type NewPoolFunc func(*ClusterConfig) (ConnectionPool, error)
// SimplePool is the current implementation of the connection pool inside
// gocql. This pool is meant to be a simple default used by gocql so users
// can get up and running quickly.
type SimplePool struct {
	cfg      *ClusterConfig
	hostPool *RoundRobin
	connPool map[string]*RoundRobin
	conns    map[*Conn]struct{}
	keyspace string

	hostMu sync.RWMutex
	// this is the set of current hosts which the pool will attempt to connect to
	hosts map[string]*HostInfo

	// protects hostPool, connPool, conns, quit
	mu sync.Mutex

	// single-token channel used to debounce concurrent fillPool calls
	cFillingPool chan int

	quit     bool
	quitWait chan bool
	quitOnce sync.Once

	tlsConfig *tls.Config
}
// setupTLSConfig builds a *tls.Config from the given SslOptions.
//
// NOTE: the passed-in SslOptions is mutated (RootCAs, Certificates and
// InsecureSkipVerify are filled in) and its embedded tls.Config is what is
// returned.
func setupTLSConfig(sslOpts *SslOptions) (*tls.Config, error) {
	// ca cert is optional
	if sslOpts.CaPath != "" {
		if sslOpts.RootCAs == nil {
			sslOpts.RootCAs = x509.NewCertPool()
		}

		pem, err := ioutil.ReadFile(sslOpts.CaPath)
		if err != nil {
			return nil, fmt.Errorf("connectionpool: unable to open CA certs: %v", err)
		}

		if !sslOpts.RootCAs.AppendCertsFromPEM(pem) {
			// message fixed: previously read "failed parsing or CA certs"
			return nil, errors.New("connectionpool: failed parsing CA certs")
		}
	}

	// client certs are optional but the cert and key paths go together
	if sslOpts.CertPath != "" || sslOpts.KeyPath != "" {
		mycert, err := tls.LoadX509KeyPair(sslOpts.CertPath, sslOpts.KeyPath)
		if err != nil {
			return nil, fmt.Errorf("connectionpool: unable to load X509 key pair: %v", err)
		}
		sslOpts.Certificates = append(sslOpts.Certificates, mycert)
	}

	// host verification is opt-in; skip verification unless enabled
	sslOpts.InsecureSkipVerify = !sslOpts.EnableHostVerification

	return &sslOpts.Config, nil
}
// NewSimplePool is the function used by gocql to create the simple connection
// pool. This is the default if no other pool type is specified.
func NewSimplePool(cfg *ClusterConfig) (ConnectionPool, error) {
	pool := &SimplePool{
		cfg:          cfg,
		hostPool:     NewRoundRobin(),
		connPool:     make(map[string]*RoundRobin),
		conns:        make(map[*Conn]struct{}),
		quitWait:     make(chan bool),
		cFillingPool: make(chan int, 1),
		keyspace:     cfg.Keyspace,
		hosts:        make(map[string]*HostInfo),
	}

	for _, host := range cfg.Hosts {
		// seed hosts have unknown topology
		// TODO: Handle populating this during SetHosts
		pool.hosts[host] = &HostInfo{Peer: host}
	}

	if cfg.SslOpts != nil {
		config, err := setupTLSConfig(cfg.SslOpts)
		if err != nil {
			return nil, err
		}
		pool.tlsConfig = config
	}

	// Try each seed host in order; as soon as one connects, hand the rest of
	// the work off to pool.fillPool() in the background.
	for _, host := range cfg.Hosts {
		if pool.connect(JoinHostPort(host, cfg.Port)) == nil {
			pool.cFillingPool <- 1
			go pool.fillPool()
			break
		}
	}

	return pool, nil
}
// connect dials addr using the pool's connection settings and, on success,
// adds the resulting connection to the pool. The pool itself is registered as
// the connection's error handler.
func (c *SimplePool) connect(addr string) error {
	cfg := ConnConfig{
		ProtoVersion:  c.cfg.ProtoVersion,
		CQLVersion:    c.cfg.CQLVersion,
		Timeout:       c.cfg.Timeout,
		NumStreams:    c.cfg.NumStreams,
		Compressor:    c.cfg.Compressor,
		Authenticator: c.cfg.Authenticator,
		Keepalive:     c.cfg.SocketKeepalive,
		tlsConfig:     c.tlsConfig,
	}

	conn, err := Connect(addr, cfg, c)
	if err != nil {
		log.Printf("connect: failed to connect to %q: %v", addr, err)
		return err
	}

	return c.addConn(conn)
}
// addConn registers conn with the pool, creating the per-host round robin on
// first use. If the pool has already been shut down the connection is closed
// and discarded. Returns an error only when switching to the pool's keyspace
// fails.
func (c *SimplePool) addConn(conn *Conn) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.quit {
		conn.Close()
		return nil
	}

	// Set the connection's keyspace if any before adding it to the pool
	if c.keyspace != "" {
		if err := conn.UseKeyspace(c.keyspace); err != nil {
			log.Printf("error setting connection keyspace. %v", err)
			conn.Close()
			return err
		}
	}

	connPool := c.connPool[conn.Address()]
	if connPool == nil {
		connPool = NewRoundRobin()
		c.connPool[conn.Address()] = connPool
		c.hostPool.AddNode(connPool)
	}

	connPool.AddNode(conn)
	c.conns[conn] = struct{}{}

	return nil
}
// fillPool manages the pool of connections making sure that each host has the
// correct amount of connections defined. Also the method will test a host
// with one connection instead of flooding the host with the number of
// connections defined in the cluster config.
func (c *SimplePool) fillPool() {
	// Debounce large amounts of requests to fill pool: only the goroutine
	// holding the token from cFillingPool proceeds; everyone else gives up
	// after 1ms.
	select {
	case <-time.After(1 * time.Millisecond):
		return
	case <-c.cFillingPool:
		defer func() { c.cFillingPool <- 1 }()
	}

	c.mu.Lock()
	isClosed := c.quit
	c.mu.Unlock()
	// Exit if cluster(session) is closed
	if isClosed {
		return
	}

	c.hostMu.RLock()

	// Walk through list of defined hosts
	var wg sync.WaitGroup
	for host := range c.hosts {
		addr := JoinHostPort(host, c.cfg.Port)

		numConns := 1
		// See if the host already has connections in the pool
		c.mu.Lock()
		conns, ok := c.connPool[addr]
		c.mu.Unlock()

		if ok {
			// if the host has enough connections just exit
			numConns = conns.Size()
			if numConns >= c.cfg.NumConns {
				continue
			}
		} else {
			// See if the host is reachable (probe with a single connection)
			if err := c.connect(addr); err != nil {
				continue
			}
		}

		// This is reached if the host is responsive and needs more connections
		// Create connections for host synchronously to mitigate flooding the host.
		wg.Add(1)
		go func(a string, conns int) {
			defer wg.Done()
			for ; conns < c.cfg.NumConns; conns++ {
				c.connect(a)
			}
		}(addr, numConns)
	}

	c.hostMu.RUnlock()

	// Wait until we're finished connecting to each host before returning
	wg.Wait()
}
// removeConnLocked closes conn and removes it from all pool bookkeeping,
// dropping the host's round robin once its last connection is gone.
// Should only be called if c.mu is locked.
func (c *SimplePool) removeConnLocked(conn *Conn) {
	conn.Close()
	connPool := c.connPool[conn.addr]
	if connPool == nil {
		return
	}
	connPool.RemoveNode(conn)
	if connPool.Size() == 0 {
		c.hostPool.RemoveNode(connPool)
		delete(c.connPool, conn.addr)
	}
	delete(c.conns, conn)
}
// removeConn acquires the pool mutex and removes conn via removeConnLocked.
func (c *SimplePool) removeConn(conn *Conn) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.removeConnLocked(conn)
}
// HandleError is called by a Connection object to report to the pool that an
// error has occurred. Logic is then executed within the pool to clean up the
// erroneous connection and try to top off the pool.
func (c *SimplePool) HandleError(conn *Conn, err error, closed bool) {
	if !closed {
		// ignore all non-fatal errors
		return
	}
	c.removeConn(conn)
	c.mu.Lock()
	poolClosed := c.quit
	c.mu.Unlock()
	if !poolClosed {
		go c.fillPool() // top off pool.
	}
}
// Pick selects a connection to be used by the query. When the pool is empty
// an attempt is made to (re)populate it before delegating to the host round
// robin.
func (c *SimplePool) Pick(qry *Query) *Conn {
	// Check if connections are available
	c.mu.Lock()
	conns := len(c.conns)
	c.mu.Unlock()

	if conns == 0 {
		// try to populate the pool before returning.
		c.fillPool()
	}

	return c.hostPool.Pick(qry)
}
// Size returns the number of connections currently active in the pool.
// (Receiver renamed from p to c for consistency with the other SimplePool
// methods.)
func (c *SimplePool) Size() int {
	c.mu.Lock()
	conns := len(c.conns)
	c.mu.Unlock()
	return conns
}
// Close kills the pool and all associated connections. Safe to call multiple
// times; the shutdown body runs exactly once via quitOnce.
func (c *SimplePool) Close() {
	c.quitOnce.Do(func() {
		c.mu.Lock()
		defer c.mu.Unlock()
		c.quit = true
		close(c.quitWait)
		for conn := range c.conns {
			c.removeConnLocked(conn)
		}
	})
}
// SetHosts replaces the pool's view of the ring: hosts missing from the new
// list are removed (with their connections), new hosts are added, and the
// pool is topped off afterwards.
func (c *SimplePool) SetHosts(hosts []HostInfo) {
	c.hostMu.Lock()
	// start by assuming every known host is stale
	toRemove := make(map[string]struct{})
	for k := range c.hosts {
		toRemove[k] = struct{}{}
	}

	for _, host := range hosts {
		host := host // copy: &host below must not alias the loop variable

		delete(toRemove, host.Peer)
		// we already have it
		if _, ok := c.hosts[host.Peer]; ok {
			// TODO: Check rack, dc, token range is consistent, trigger topology change
			// update stored host
			continue
		}

		c.hosts[host.Peer] = &host
	}

	// can we hold c.mu whilst iterating this loop?
	for addr := range toRemove {
		c.removeHostLocked(addr)
	}
	c.hostMu.Unlock()

	c.fillPool()
}
// removeHostLocked removes addr from the host set and closes any connections
// to it. Caller must hold c.hostMu; c.mu is acquired here.
func (c *SimplePool) removeHostLocked(addr string) {
	if _, ok := c.hosts[addr]; !ok {
		return
	}
	delete(c.hosts, addr)

	c.mu.Lock()
	defer c.mu.Unlock()

	if _, ok := c.connPool[addr]; !ok {
		return
	}

	for conn := range c.conns {
		if conn.Address() == addr {
			c.removeConnLocked(conn)
		}
	}
}
// NewRoundRobinConnPool creates a connection pool which selects hosts by
// round-robin, and then selects a connection for that host by round-robin.
func NewRoundRobinConnPool(cfg *ClusterConfig) (ConnectionPool, error) {
	hostPolicy := NewRoundRobinHostPolicy()
	return NewPolicyConnPool(cfg, hostPolicy, NewRoundRobinConnPolicy)
}
// NewTokenAwareConnPool creates a connection pool which selects hosts by a
// token aware policy (with round-robin fallback), and then selects a
// connection for that host by round-robin.
func NewTokenAwareConnPool(cfg *ClusterConfig) (ConnectionPool, error) {
	fallback := NewRoundRobinHostPolicy()
	return NewPolicyConnPool(cfg, NewTokenAwareHostPolicy(fallback), NewRoundRobinConnPolicy)
}
// policyConnPool is a connection pool that delegates host selection to a
// HostSelectionPolicy and maintains one hostConnPool per known host.
type policyConnPool struct {
	port     int
	numConns int
	connCfg  ConnConfig
	keyspace string

	// mu guards hostConnPools
	mu            sync.RWMutex
	hostPolicy    HostSelectionPolicy
	connPolicy    func() ConnSelectionPolicy
	hostConnPools map[string]*hostConnPool
}
// NewPolicyConnPool creates a policy based connection pool. This func isn't
// meant to be directly used as a NewPoolFunc in ClusterConfig, instead a func
// should be created which satisfies the NewPoolFunc type, which calls this
// func with the desired hostPolicy and connPolicy; see NewRoundRobinConnPool
// or NewTokenAwareConnPool for examples.
func NewPolicyConnPool(
	cfg *ClusterConfig,
	hostPolicy HostSelectionPolicy,
	connPolicy func() ConnSelectionPolicy,
) (ConnectionPool, error) {
	var err error
	var tlsConfig *tls.Config

	if cfg.SslOpts != nil {
		tlsConfig, err = setupTLSConfig(cfg.SslOpts)
		if err != nil {
			return nil, err
		}
	}

	// create the pool
	pool := &policyConnPool{
		port:     cfg.Port,
		numConns: cfg.NumConns,
		connCfg: ConnConfig{
			ProtoVersion:  cfg.ProtoVersion,
			CQLVersion:    cfg.CQLVersion,
			Timeout:       cfg.Timeout,
			NumStreams:    cfg.NumStreams,
			Compressor:    cfg.Compressor,
			Authenticator: cfg.Authenticator,
			Keepalive:     cfg.SocketKeepalive,
			tlsConfig:     tlsConfig,
		},
		keyspace:      cfg.Keyspace,
		hostPolicy:    hostPolicy,
		connPolicy:    connPolicy,
		hostConnPools: map[string]*hostConnPool{},
	}

	// seed with the configured hosts; topology details are unknown here
	hosts := make([]HostInfo, len(cfg.Hosts))
	for i, hostAddr := range cfg.Hosts {
		hosts[i].Peer = hostAddr
	}

	pool.SetHosts(hosts)

	return pool, nil
}
// SetHosts reconciles the per-host pools against the new host list: pools are
// created for new hosts, kept for surviving hosts, and closed for hosts no
// longer present. The host policy is updated with the new list as well.
func (p *policyConnPool) SetHosts(hosts []HostInfo) {
	p.mu.Lock()

	// assume every existing pool is stale until seen in the new list
	toRemove := make(map[string]struct{})
	for addr := range p.hostConnPools {
		toRemove[addr] = struct{}{}
	}

	// TODO connect to hosts in parallel, but wait for pools to be
	// created before returning

	for i := range hosts {
		pool, exists := p.hostConnPools[hosts[i].Peer]
		if !exists {
			// create a connection pool for the host
			pool = newHostConnPool(
				hosts[i].Peer,
				p.port,
				p.numConns,
				p.connCfg,
				p.keyspace,
				p.connPolicy(),
			)
			p.hostConnPools[hosts[i].Peer] = pool
		} else {
			// still have this host, so don't remove it
			delete(toRemove, hosts[i].Peer)
		}
	}

	for addr := range toRemove {
		pool := p.hostConnPools[addr]
		delete(p.hostConnPools, addr)
		pool.Close()
	}

	// update the policy
	p.hostPolicy.SetHosts(hosts)

	p.mu.Unlock()
}
// SetPartitioner forwards the cluster's partitioner name to the host policy.
func (p *policyConnPool) SetPartitioner(partitioner string) {
	p.hostPolicy.SetPartitioner(partitioner)
}
// Size returns the total number of connections held across all per-host
// pools.
func (p *policyConnPool) Size() int {
	p.mu.RLock()
	defer p.mu.RUnlock()

	total := 0
	for _, hostPool := range p.hostConnPools {
		total += hostPool.Size()
	}
	return total
}
// Pick asks the host policy for candidate hosts in order and returns the
// first connection a host's pool can provide, or nil when the policy runs
// out of hosts.
func (p *policyConnPool) Pick(qry *Query) *Conn {
	nextHost := p.hostPolicy.Pick(qry)

	p.mu.RLock()
	var host *HostInfo
	var conn *Conn
	for conn == nil {
		host = nextHost()
		if host == nil {
			break
		}
		conn = p.hostConnPools[host.Peer].Pick(qry)
	}
	p.mu.RUnlock()
	return conn
}
// Close empties the host policy and shuts down every per-host pool.
func (p *policyConnPool) Close() {
	p.mu.Lock()

	// remove the hosts from the policy
	p.hostPolicy.SetHosts([]HostInfo{})

	// close the pools
	for addr, pool := range p.hostConnPools {
		delete(p.hostConnPools, addr)
		pool.Close()
	}
	p.mu.Unlock()
}
// hostConnPool is a connection pool for a single host.
// Connection selection is based on a provided ConnSelectionPolicy
type hostConnPool struct {
	host     string
	port     int
	addr     string // cached JoinHostPort(host, port)
	size     int    // target number of connections
	connCfg  ConnConfig
	keyspace string
	policy   ConnSelectionPolicy
	// protection for conns, closed, filling
	mu      sync.RWMutex
	conns   []*Conn
	closed  bool
	filling bool
}
// newHostConnPool constructs a pool for a single host and synchronously
// performs the initial fill before returning it.
func newHostConnPool(
	host string,
	port int,
	size int,
	connCfg ConnConfig,
	keyspace string,
	policy ConnSelectionPolicy,
) *hostConnPool {
	pool := &hostConnPool{
		host:     host,
		port:     port,
		addr:     JoinHostPort(host, port),
		size:     size,
		connCfg:  connCfg,
		keyspace: keyspace,
		policy:   policy,
		conns:    make([]*Conn, 0, size),
		filling:  false,
		closed:   false,
	}

	// fill the pool with the initial connections before returning
	pool.fill()

	return pool
}
// Pick a connection from this connection pool for the given query. Returns
// nil when the pool is closed or currently empty; in the empty case a
// background refill is kicked off first.
func (pool *hostConnPool) Pick(qry *Query) *Conn {
	pool.mu.RLock()
	if pool.closed {
		pool.mu.RUnlock()
		return nil
	}

	empty := len(pool.conns) == 0
	pool.mu.RUnlock()

	if empty {
		// try to fill the empty pool
		go pool.fill()
		return nil
	}

	return pool.policy.Pick(qry)
}
// Size returns the number of connections currently active in the pool.
func (pool *hostConnPool) Size() int {
	pool.mu.RLock()
	n := len(pool.conns)
	pool.mu.RUnlock()
	return n
}
// Close the connection pool: mark it closed and drain the connections
// asynchronously.
func (pool *hostConnPool) Close() {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	if pool.closed {
		return
	}
	pool.closed = true

	// drain, but don't wait
	go pool.drain()
}
// fill tops the pool up to its target size. It takes the read lock to cheaply
// rule out work, re-checks everything under the write lock, and then connects
// outside any lock. At most one goroutine fills at a time (the filling flag,
// cleared later by fillingStopped).
func (pool *hostConnPool) fill() {
	pool.mu.RLock()
	// avoid filling a closed pool, or concurrent filling
	if pool.closed || pool.filling {
		pool.mu.RUnlock()
		return
	}

	// determine the filling work to be done
	startCount := len(pool.conns)
	fillCount := pool.size - startCount

	// avoid filling a full (or overfull) pool
	if fillCount <= 0 {
		pool.mu.RUnlock()
		return
	}

	// switch from read to write lock
	pool.mu.RUnlock()
	pool.mu.Lock()

	// double check everything since the lock was released
	startCount = len(pool.conns)
	fillCount = pool.size - startCount
	if pool.closed || pool.filling || fillCount <= 0 {
		// looks like another goroutine already beat this
		// goroutine to the filling
		pool.mu.Unlock()
		return
	}

	// ok fill the pool
	pool.filling = true

	// allow others to access the pool while filling
	pool.mu.Unlock()
	// only this goroutine should make calls to fill/empty the pool at this
	// point until after this routine or its subordinates calls
	// fillingStopped

	// fill only the first connection synchronously
	if startCount == 0 {
		err := pool.connect()
		pool.logConnectErr(err)
		if err != nil {
			// probably unreachable host
			go pool.fillingStopped()
			return
		}

		// filled one
		fillCount--

		// connect all connections to this host in sync
		for fillCount > 0 {
			err := pool.connect()
			pool.logConnectErr(err)

			// decrement, even on error
			fillCount--
		}

		go pool.fillingStopped()
		return
	}

	// fill the rest of the pool asynchronously
	go func() {
		for fillCount > 0 {
			err := pool.connect()
			pool.logConnectErr(err)

			// decrement, even on error
			fillCount--
		}

		// mark the end of filling
		pool.fillingStopped()
	}()
}
// logConnectErr logs unexpected connection errors. dial/read *net.OpError
// values are suppressed since they are typical during a node outage and
// would spam the log.
func (pool *hostConnPool) logConnectErr(err error) {
	if err == nil {
		return
	}
	if opErr, ok := err.(*net.OpError); ok && (opErr.Op == "dial" || opErr.Op == "read") {
		// connection refused; typical during a node outage, avoid log spam
		return
	}
	// unexpected error
	log.Printf("error: failed to connect to %s due to error: %v", pool.addr, err)
}
// fillingStopped transitions the pool back to a not-filling state.
func (pool *hostConnPool) fillingStopped() {
	// wait for some time to avoid back-to-back filling
	// this provides some time between failed attempts
	// to fill the pool for the host to recover
	time.Sleep(time.Duration(rand.Int31n(100)+31) * time.Millisecond)

	pool.mu.Lock()
	pool.filling = false
	pool.mu.Unlock()
}
// connect creates a new connection to the host and adds it to the pool. The
// pool's keyspace is selected before the connection becomes visible to Pick;
// a connection made after the pool closed is discarded without error.
func (pool *hostConnPool) connect() error {
	// try to connect
	conn, err := Connect(pool.addr, pool.connCfg, pool)
	if err != nil {
		return err
	}

	if pool.keyspace != "" {
		// set the keyspace
		if err := conn.UseKeyspace(pool.keyspace); err != nil {
			conn.Close()
			return err
		}
	}

	// add the Conn to the pool
	pool.mu.Lock()
	defer pool.mu.Unlock()

	if pool.closed {
		conn.Close()
		return nil
	}

	pool.conns = append(pool.conns, conn)
	pool.policy.SetConns(pool.conns)
	return nil
}
// HandleError handles any error reported by a Conn owned by this pool. A
// fatally closed connection is removed from the pool and a background refill
// is started; non-fatal errors are ignored.
func (pool *hostConnPool) HandleError(conn *Conn, err error, closed bool) {
	if !closed {
		// still an open connection, so continue using it
		return
	}

	pool.mu.Lock()
	defer pool.mu.Unlock()

	if pool.closed {
		// pool closed
		return
	}

	// find the connection index
	for i, candidate := range pool.conns {
		if candidate == conn {
			// remove the connection, not preserving order
			pool.conns[i], pool.conns = pool.conns[len(pool.conns)-1], pool.conns[:len(pool.conns)-1]

			// update the policy
			pool.policy.SetConns(pool.conns)

			// lost a connection, so fill the pool
			go pool.fill()
			break
		}
	}
}
// drain removes and closes all connections from the pool. The policy's view
// is emptied before the connections themselves are closed.
func (pool *hostConnPool) drain() {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	// empty the pool
	conns := pool.conns
	pool.conns = pool.conns[:0]

	// update the policy
	pool.policy.SetConns(pool.conns)

	// close the connections
	for _, conn := range conns {
		conn.Close()
	}
}

9
Godeps/_workspace/src/github.com/gocql/gocql/doc.go generated vendored Normal file
View file

@ -0,0 +1,9 @@
// Copyright (c) 2012-2015 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gocql implements a fast and robust Cassandra driver for the
// Go programming language.
package gocql
// TODO(tux21b): write more docs.

88
Godeps/_workspace/src/github.com/gocql/gocql/errors.go generated vendored Normal file
View file

@ -0,0 +1,88 @@
package gocql
import "fmt"
// Numeric error codes carried in server error frames (see errorFrame.code).
const (
	errServer        = 0x0000
	errProtocol      = 0x000A
	errCredentials   = 0x0100
	errUnavailable   = 0x1000
	errOverloaded    = 0x1001
	errBootstrapping = 0x1002
	errTruncate      = 0x1003
	errWriteTimeout  = 0x1100
	errReadTimeout   = 0x1200
	errSyntax        = 0x2000
	errUnauthorized  = 0x2100
	errInvalid       = 0x2200
	errConfig        = 0x2300
	errAlreadyExists = 0x2400
	errUnprepared    = 0x2500
)
// RequestError is implemented by errors parsed from server error frames; it
// exposes the numeric code and server-provided message.
type RequestError interface {
	Code() int
	Message() string
	Error() string
}
// errorFrame is the base representation of a server error response: the frame
// header plus the numeric code and message.
type errorFrame struct {
	frameHeader

	code    int
	message string
}

// Code returns the numeric error code.
func (e errorFrame) Code() int {
	return e.code
}

// Message returns the server-provided error message.
func (e errorFrame) Message() string {
	return e.message
}

// Error implements the error interface using the server message.
func (e errorFrame) Error() string {
	return e.Message()
}

// String renders the code and message for debugging.
func (e errorFrame) String() string {
	return fmt.Sprintf("[error code=%x message=%q]", e.code, e.message)
}
// RequestErrUnavailable reports an unavailable error along with the requested
// consistency and the required/alive replica counts.
type RequestErrUnavailable struct {
	errorFrame
	Consistency Consistency
	Required    int
	Alive       int
}

func (e *RequestErrUnavailable) String() string {
	return fmt.Sprintf("[request_error_unavailable consistency=%s required=%d alive=%d]", e.Consistency, e.Required, e.Alive)
}

// RequestErrWriteTimeout carries the details of a write timeout error.
type RequestErrWriteTimeout struct {
	errorFrame
	Consistency Consistency
	Received    int
	BlockFor    int
	WriteType   string
}

// RequestErrReadTimeout carries the details of a read timeout error.
type RequestErrReadTimeout struct {
	errorFrame
	Consistency Consistency
	Received    int
	BlockFor    int
	DataPresent byte
}

// RequestErrAlreadyExists names the keyspace/table that already existed.
type RequestErrAlreadyExists struct {
	errorFrame
	Keyspace string
	Table    string
}

// RequestErrUnprepared carries the id of the unknown prepared statement.
type RequestErrUnprepared struct {
	errorFrame
	StatementId []byte
}

View file

@ -0,0 +1,29 @@
// +build all integration
package gocql
import (
"testing"
)
// TestErrorsParse verifies that creating the same table twice yields a
// *RequestErrAlreadyExists with the offending table name parsed out.
func TestErrorsParse(t *testing.T) {
	session := createSession(t)
	defer session.Close()

	if err := createTable(session, `CREATE TABLE errors_parse (id int primary key)`); err != nil {
		t.Fatal("create:", err)
	}

	if err := createTable(session, `CREATE TABLE errors_parse (id int primary key)`); err == nil {
		t.Fatal("Should have gotten already exists error from cassandra server.")
	} else {
		switch e := err.(type) {
		case *RequestErrAlreadyExists:
			if e.Table != "errors_parse" {
				t.Fatal("Failed to parse error response from cassandra for ErrAlreadyExists.")
			}
		default:
			t.Fatal("Failed to parse error response from cassandra for ErrAlreadyExists.")
		}
	}
}

1480
Godeps/_workspace/src/github.com/gocql/gocql/frame.go generated vendored Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,98 @@
package gocql
import (
"bytes"
"testing"
)
// TestFuzzBugs replays crash inputs found by go-fuzz. Each input must be
// rejected with an error at some stage (header read, frame read, or parse);
// an input surviving all three stages fails the test.
func TestFuzzBugs(t *testing.T) {
	// these inputs are found using go-fuzz (https://github.com/dvyukov/go-fuzz)
	// and should cause a panic unless fixed.
	tests := [][]byte{
		[]byte("00000\xa0000"),
		[]byte("\x8000\x0e\x00\x00\x00\x000"),
		[]byte("\x8000\x00\x00\x00\x00\t0000000000"),
		[]byte("\xa0\xff\x01\xae\xefqE\xf2\x1a"),
		[]byte("\x8200\b\x00\x00\x00c\x00\x00\x00\x02000\x01\x00\x00\x00\x03" +
			"\x00\n0000000000\x00\x14000000" +
			"00000000000000\x00\x020000" +
			"\x00\a000000000\x00\x050000000" +
			"\xff0000000000000000000" +
			"0000000"),
		[]byte("\x82\xe600\x00\x00\x00\x000"),
		[]byte("\x8200\b\x00\x00\x00\b0\x00\x00\x00\x040000"),
		[]byte("\x8200\x00\x00\x00\x00\x100\x00\x00\x12\x00\x00\x0000000" +
			"00000"),
		[]byte("\x83000\b\x00\x00\x00\x14\x00\x00\x00\x020000000" +
			"000000000"),
		[]byte("\x83000\b\x00\x00\x000\x00\x00\x00\x04\x00\x1000000" +
			"00000000000000e00000" +
			"000\x800000000000000000" +
			"0000000000000"),
	}

	for i, test := range tests {
		t.Logf("test %d input: %q", i, test)

		var bw bytes.Buffer

		r := bytes.NewReader(test)
		head, err := readHeader(r, make([]byte, 9))
		if err != nil {
			continue
		}

		framer := newFramer(r, &bw, nil, byte(head.version))
		err = framer.readFrame(&head)
		if err != nil {
			continue
		}

		_, err = framer.parseFrame()
		if err != nil {
			continue
		}

		t.Errorf("(%d) expected to fail for input %q", i, test)
	}
}
// TestFrameWriteTooLong checks that writing a body larger than maxFrameSize
// is rejected with ErrFrameTooBig.
func TestFrameWriteTooLong(t *testing.T) {
	w := &bytes.Buffer{}
	framer := newFramer(nil, w, nil, 2)

	framer.writeHeader(0, opStartup, 1)
	framer.writeBytes(make([]byte, maxFrameSize+1))
	err := framer.finishWrite()
	if err != ErrFrameTooBig {
		t.Fatalf("expected to get %v got %v", ErrFrameTooBig, err)
	}
}
// TestFrameReadTooLong checks that an oversized frame is rejected with
// ErrFrameTooBig and that the reader is left positioned at the next frame
// header.
func TestFrameReadTooLong(t *testing.T) {
	r := &bytes.Buffer{}
	r.Write(make([]byte, maxFrameSize+1))
	// write a new header right after this frame to verify that we can read it
	r.Write([]byte{0x02, 0x00, 0x00, opReady, 0x00, 0x00, 0x00, 0x00})

	framer := newFramer(r, nil, nil, 2)

	head := frameHeader{
		version: 2,
		op:      opReady,
		length:  r.Len() - 8,
	}

	err := framer.readFrame(&head)
	if err != ErrFrameTooBig {
		t.Fatalf("expected to get %v got %v", ErrFrameTooBig, err)
	}

	head, err = readHeader(r, make([]byte, 8))
	if err != nil {
		t.Fatal(err)
	}
	if head.op != opReady {
		t.Fatalf("expected to get header %v got %v", opReady, head.op)
	}
}

33
Godeps/_workspace/src/github.com/gocql/gocql/fuzz.go generated vendored Normal file
View file

@ -0,0 +1,33 @@
// +build gofuzz
package gocql
import "bytes"
// Fuzz is the go-fuzz entry point. It runs data through header reading, frame
// reading and frame parsing; inputs rejected by any stage return 0, inputs
// that parse return a non-zero priority.
func Fuzz(data []byte) int {
	var bw bytes.Buffer

	r := bytes.NewReader(data)

	head, err := readHeader(r, make([]byte, 9))
	if err != nil {
		return 0
	}

	framer := newFramer(r, &bw, nil, byte(head.version))
	err = framer.readFrame(&head)
	if err != nil {
		return 0
	}

	frame, err := framer.parseFrame()
	if err != nil {
		return 0
	}

	if frame != nil {
		return 1
	}

	return 2
}

180
Godeps/_workspace/src/github.com/gocql/gocql/helpers.go generated vendored Normal file
View file

@ -0,0 +1,180 @@
// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocql
import (
"math/big"
"reflect"
"strings"
"time"
"speter.net/go/exp/math/dec/inf"
)
// RowData pairs a result set's column names with matching scan destination
// values (see Iter.RowData, which allocates the destinations).
type RowData struct {
	Columns []string
	Values  []interface{}
}
// goType maps a CQL type to the Go type gocql uses for it by default.
// Returns nil for types with no known mapping.
func goType(t TypeInfo) reflect.Type {
	switch t.Type() {
	case TypeVarchar, TypeAscii, TypeInet:
		return reflect.TypeOf(*new(string))
	case TypeBigInt, TypeCounter:
		return reflect.TypeOf(*new(int64))
	case TypeTimestamp:
		return reflect.TypeOf(*new(time.Time))
	case TypeBlob:
		return reflect.TypeOf(*new([]byte))
	case TypeBoolean:
		return reflect.TypeOf(*new(bool))
	case TypeFloat:
		return reflect.TypeOf(*new(float32))
	case TypeDouble:
		return reflect.TypeOf(*new(float64))
	case TypeInt:
		return reflect.TypeOf(*new(int))
	case TypeDecimal:
		return reflect.TypeOf(*new(*inf.Dec))
	case TypeUUID, TypeTimeUUID:
		return reflect.TypeOf(*new(UUID))
	case TypeList, TypeSet:
		return reflect.SliceOf(goType(t.(CollectionType).Elem))
	case TypeMap:
		return reflect.MapOf(goType(t.(CollectionType).Key), goType(t.(CollectionType).Elem))
	case TypeVarint:
		return reflect.TypeOf(*new(*big.Int))
	case TypeTuple:
		// what can we do here? all there is to do is to make a list of interface{}
		tuple := t.(TupleTypeInfo)
		return reflect.TypeOf(make([]interface{}, len(tuple.Elems)))
	default:
		return nil
	}
}
// dereference returns the value i points to, or i itself when it is not a
// pointer (reflect.Indirect is a no-op for non-pointers).
func dereference(i interface{}) interface{} {
	v := reflect.ValueOf(i)
	return reflect.Indirect(v).Interface()
}
// getApacheCassandraType translates a Cassandra marshal class name (with
// apacheCassandraTypePrefix stripped) into the corresponding gocql Type,
// falling back to TypeCustom for unrecognized classes.
func getApacheCassandraType(class string) Type {
	switch strings.TrimPrefix(class, apacheCassandraTypePrefix) {
	case "AsciiType":
		return TypeAscii
	case "LongType":
		return TypeBigInt
	case "BytesType":
		return TypeBlob
	case "BooleanType":
		return TypeBoolean
	case "CounterColumnType":
		return TypeCounter
	case "DecimalType":
		return TypeDecimal
	case "DoubleType":
		return TypeDouble
	case "FloatType":
		return TypeFloat
	case "Int32Type":
		return TypeInt
	case "DateType", "TimestampType":
		return TypeTimestamp
	case "UUIDType":
		return TypeUUID
	case "UTF8Type":
		return TypeVarchar
	case "IntegerType":
		return TypeVarint
	case "TimeUUIDType":
		return TypeTimeUUID
	case "InetAddressType":
		return TypeInet
	case "MapType":
		return TypeMap
	case "ListType":
		return TypeList
	case "SetType":
		return TypeSet
	case "TupleType":
		return TypeTuple
	default:
		return TypeCustom
	}
}
// rowMap copies the row's values into m keyed by column name. Slice values
// are copied rather than aliased so the map entries are independent of the
// source slices.
func (r *RowData) rowMap(m map[string]interface{}) {
	for i, column := range r.Columns {
		val := dereference(r.Values[i])
		if valVal := reflect.ValueOf(val); valVal.Kind() == reflect.Slice {
			valCopy := reflect.MakeSlice(valVal.Type(), valVal.Len(), valVal.Cap())
			reflect.Copy(valCopy, valVal)
			m[column] = valCopy.Interface()
		} else {
			m[column] = val
		}
	}
}
// RowData builds a RowData describing the iterator's columns, with Values
// populated by newly allocated destinations produced from each column's
// TypeInfo. Returns the iterator's error, if any.
func (iter *Iter) RowData() (RowData, error) {
	if iter.err != nil {
		return RowData{}, iter.err
	}

	cols := iter.Columns()
	// pre-size both slices: the column count is known up front
	columns := make([]string, 0, len(cols))
	values := make([]interface{}, 0, len(cols))

	for _, column := range cols {
		columns = append(columns, column.Name)
		values = append(values, column.TypeInfo.New())
	}

	return RowData{
		Columns: columns,
		Values:  values,
	}, nil
}
// SliceMap is a helper function to make the API easier to use. It returns all
// remaining rows of the query in the form of []map[string]interface{},
// one map per row keyed by column name.
func (iter *Iter) SliceMap() ([]map[string]interface{}, error) {
	if iter.err != nil {
		return nil, iter.err
	}

	// Not checking for the error because we just did
	rowData, _ := iter.RowData()
	dataToReturn := make([]map[string]interface{}, 0)
	for iter.Scan(rowData.Values...) {
		m := make(map[string]interface{})
		rowData.rowMap(m)
		dataToReturn = append(dataToReturn, m)
	}
	if iter.err != nil {
		return nil, iter.err
	}
	return dataToReturn, nil
}
// MapScan takes a map[string]interface{} and populates it with a row
// that is returned from cassandra. Existing entries in m are used as the scan
// destinations for their columns; other columns use destinations allocated by
// RowData. Returns false when no row was scanned or the iterator has errored.
func (iter *Iter) MapScan(m map[string]interface{}) bool {
	if iter.err != nil {
		return false
	}

	// Not checking for the error because we just did
	rowData, _ := iter.RowData()

	// substitute any caller-provided destinations by column name
	for i, col := range rowData.Columns {
		if dest, ok := m[col]; ok {
			rowData.Values[i] = dest
		}
	}

	if iter.Scan(rowData.Values...) {
		rowData.rowMap(m)
		return true
	}
	return false
}

View file

@ -0,0 +1,119 @@
package gocql
import (
"log"
"net"
"time"
)
// HostInfo describes a node in the ring as read from the system.local and
// system.peers tables (see ringDescriber.GetHosts).
type HostInfo struct {
	Peer       string
	DataCenter string
	Rack       string
	HostId     string
	Tokens     []string
}
// ringDescriber polls system.peers at a specific interval to find new hosts.
// The previous result is kept so that an unreachable cluster does not wipe
// the host list (see GetHosts).
type ringDescriber struct {
	dcFilter        string
	rackFilter      string
	prevHosts       []HostInfo
	prevPartitioner string
	session         *Session
	closeChan       chan bool
}
// GetHosts queries system.local and system.peers on a single node and returns
// the filtered set of ring members plus the partitioner in use. When no
// connection is available the previously discovered hosts/partitioner are
// returned unchanged.
func (r *ringDescriber) GetHosts() (
	hosts []HostInfo,
	partitioner string,
	err error,
) {
	// we need conn to be the same because we need to query system.peers and system.local
	// on the same node to get the whole cluster
	conn := r.session.Pool.Pick(nil)
	if conn == nil {
		return r.prevHosts, r.prevPartitioner, nil
	}

	query := r.session.Query("SELECT data_center, rack, host_id, tokens, partitioner FROM system.local")
	iter := conn.executeQuery(query)

	host := HostInfo{}
	iter.Scan(&host.DataCenter, &host.Rack, &host.HostId, &host.Tokens, &partitioner)

	if err = iter.Close(); err != nil {
		return nil, "", err
	}

	addr, _, err := net.SplitHostPort(conn.Address())
	if err != nil {
		// this should not happen, ever, as this is the address that was dialed by conn, here
		// a panic makes sense, please report a bug if it occurs.
		panic(err)
	}

	host.Peer = addr

	hosts = []HostInfo{host}

	query = r.session.Query("SELECT peer, data_center, rack, host_id, tokens FROM system.peers")
	iter = conn.executeQuery(query)

	host = HostInfo{}
	for iter.Scan(&host.Peer, &host.DataCenter, &host.Rack, &host.HostId, &host.Tokens) {
		if r.matchFilter(&host) {
			hosts = append(hosts, host)
		}
		// reset for the next row so stale fields don't leak between scans
		host = HostInfo{}
	}

	if err = iter.Close(); err != nil {
		return nil, "", err
	}

	r.prevHosts = hosts
	r.prevPartitioner = partitioner

	return hosts, partitioner, nil
}
// matchFilter reports whether host passes the describer's optional
// data-center and rack filters; an empty filter matches everything.
func (r *ringDescriber) matchFilter(host *HostInfo) bool {
	dcOK := r.dcFilter == "" || r.dcFilter == host.DataCenter
	rackOK := r.rackFilter == "" || r.rackFilter == host.Rack
	return dcOK && rackOK
}
// run periodically refreshes the ring topology every sleep interval
// (defaulting to 30 seconds) until closeChan is signalled, pushing discovered
// hosts and the partitioner into the session's pool. (Receiver renamed from
// h to r for consistency with the other ringDescriber methods.)
func (r *ringDescriber) run(sleep time.Duration) {
	if sleep == 0 {
		sleep = 30 * time.Second
	}

	for {
		select {
		case <-time.After(sleep):
			// if we have 0 hosts this will return the previous list of hosts to
			// attempt to reconnect to the cluster otherwise we would never find
			// downed hosts again, could possibly have an optimisation to only
			// try to add new hosts if GetHosts didnt error and the hosts didnt change.
			hosts, partitioner, err := r.GetHosts()
			if err != nil {
				log.Println("RingDescriber: unable to get ring topology:", err)
			} else {
				r.session.Pool.SetHosts(hosts)
				if v, ok := r.session.Pool.(SetPartitioner); ok {
					v.SetPartitioner(partitioner)
				}
			}
		case <-r.closeChan:
			return
		}
	}
}

View file

@ -0,0 +1,83 @@
#!/bin/bash

set -e

# run_tests <cassandra-version> <auth:true|false>
# Boots a ccm-managed Cassandra cluster of the requested version (single node
# when auth is enabled) and runs the gocql test suite against it.
function run_tests() {
	local clusterSize=3
	local version=$1
	local auth=$2

	if [ "$auth" = true ]; then
		clusterSize=1
	fi

	local keypath="$(pwd)/testdata/pki"

	local conf=(
		"client_encryption_options.enabled: true"
		"client_encryption_options.keystore: $keypath/.keystore"
		"client_encryption_options.keystore_password: cassandra"
		"client_encryption_options.require_client_auth: true"
		"client_encryption_options.truststore: $keypath/.truststore"
		"client_encryption_options.truststore_password: cassandra"
		"concurrent_reads: 2"
		"concurrent_writes: 2"
		"rpc_server_type: sync"
		"rpc_min_threads: 2"
		"rpc_max_threads: 2"
		"write_request_timeout_in_ms: 5000"
		"read_request_timeout_in_ms: 5000"
	)

	ccm remove test || true

	ccm create test -v binary:$version -n $clusterSize -d --vnodes --jvm_arg="-Xmx256m -XX:NewSize=100m"
	ccm updateconf "${conf[@]}"

	if [ "$auth" = true ]
	then
		ccm updateconf 'authenticator: PasswordAuthenticator' 'authorizer: CassandraAuthorizer'
		rm -rf $HOME/.ccm/test/node1/data/system_auth
	fi

	ccm start -v
	ccm status
	ccm node1 nodetool status

	# pick the CQL protocol version matching the Cassandra release
	local proto=2
	if [[ $version == 1.2.* ]]; then
		proto=1
	elif [[ $version == 2.1.* ]]; then
		proto=3
	fi

	if [ "$auth" = true ]
	then
		sleep 30s
		go test -v . -timeout 15s -run=TestAuthentication -tags integration -runssl -runauth -proto=$proto -cluster=$(ccm liveset) -clusterSize=$clusterSize -autowait=1000ms
	else
		go test -timeout 5m -tags integration -cover -v -runssl -proto=$proto -rf=3 -cluster=$(ccm liveset) -clusterSize=$clusterSize -autowait=2000ms -compressor=snappy ./... | tee results.txt
		if [ ${PIPESTATUS[0]} -ne 0 ]; then
			echo "--- FAIL: ccm status follows:"
			ccm status
			ccm node1 nodetool status
			ccm node1 showlog > status.log
			cat status.log
			echo "--- FAIL: Received a non-zero exit code from the go test execution, please investigate this"
			exit 1
		fi
		cover=`cat results.txt | grep coverage: | grep -o "[0-9]\{1,3\}" | head -n 1`
		# message fixed: it previously claimed a 60 % threshold while checking 55
		if [[ $cover -lt "55" ]]; then
			echo "--- FAIL: expected coverage of at least 55 %, but coverage was $cover %"
			exit 1
		fi
	fi

	ccm remove
}

run_tests $1 $2

1621
Godeps/_workspace/src/github.com/gocql/gocql/marshal.go generated vendored Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,796 @@
// +build all unit
package gocql
import (
"bytes"
"math"
"math/big"
"net"
"reflect"
"strings"
"testing"
"time"
"speter.net/go/exp/math/dec/inf"
)
// marshalTests drives TestMarshal and TestUnmarshal. Each entry pairs a CQL
// type description (Info) with its expected wire encoding (Data) and a Go
// value (Value) that should marshal to — and unmarshal from — those bytes.
// Entries whose Value is nil are only exercised by TestUnmarshal (nil
// destination must produce an error).
var marshalTests = []struct {
	Info  TypeInfo
	Data  []byte
	Value interface{}
}{
	// text types: plain, named, and custom (Marshaler) string kinds
	{
		NativeType{proto: 2, typ: TypeVarchar},
		[]byte("hello world"),
		[]byte("hello world"),
	},
	{
		NativeType{proto: 2, typ: TypeVarchar},
		[]byte("hello world"),
		"hello world",
	},
	{
		NativeType{proto: 2, typ: TypeVarchar},
		[]byte(nil),
		[]byte(nil),
	},
	{
		NativeType{proto: 2, typ: TypeVarchar},
		[]byte("hello world"),
		MyString("hello world"),
	},
	{
		// CustomString upper-cases on marshal and lower-cases on unmarshal
		NativeType{proto: 2, typ: TypeVarchar},
		[]byte("HELLO WORLD"),
		CustomString("hello world"),
	},
	{
		NativeType{proto: 2, typ: TypeBlob},
		[]byte("hello\x00"),
		[]byte("hello\x00"),
	},
	{
		NativeType{proto: 2, typ: TypeBlob},
		[]byte(nil),
		[]byte(nil),
	},
	{
		NativeType{proto: 2, typ: TypeTimeUUID},
		[]byte{0x3d, 0xcd, 0x98, 0x0, 0xf3, 0xd9, 0x11, 0xbf, 0x86, 0xd4, 0xb8, 0xe8, 0x56, 0x2c, 0xc, 0xd0},
		func() UUID {
			x, _ := UUIDFromBytes([]byte{0x3d, 0xcd, 0x98, 0x0, 0xf3, 0xd9, 0x11, 0xbf, 0x86, 0xd4, 0xb8, 0xe8, 0x56, 0x2c, 0xc, 0xd0})
			return x
		}(),
	},
	// 32-bit integers, from both numeric and string Go values
	{
		NativeType{proto: 2, typ: TypeInt},
		[]byte("\x00\x00\x00\x00"),
		0,
	},
	{
		NativeType{proto: 2, typ: TypeInt},
		[]byte("\x01\x02\x03\x04"),
		int(16909060),
	},
	{
		NativeType{proto: 2, typ: TypeInt},
		[]byte("\x80\x00\x00\x00"),
		int32(math.MinInt32),
	},
	{
		NativeType{proto: 2, typ: TypeInt},
		[]byte("\x7f\xff\xff\xff"),
		int32(math.MaxInt32),
	},
	{
		NativeType{proto: 2, typ: TypeInt},
		[]byte("\x00\x00\x00\x00"),
		"0",
	},
	{
		NativeType{proto: 2, typ: TypeInt},
		[]byte("\x01\x02\x03\x04"),
		"16909060",
	},
	{
		NativeType{proto: 2, typ: TypeInt},
		[]byte("\x80\x00\x00\x00"),
		"-2147483648", // math.MinInt32
	},
	{
		NativeType{proto: 2, typ: TypeInt},
		[]byte("\x7f\xff\xff\xff"),
		"2147483647", // math.MaxInt32
	},
	// 64-bit integers, from both numeric and string Go values
	{
		NativeType{proto: 2, typ: TypeBigInt},
		[]byte("\x00\x00\x00\x00\x00\x00\x00\x00"),
		0,
	},
	{
		NativeType{proto: 2, typ: TypeBigInt},
		[]byte("\x01\x02\x03\x04\x05\x06\x07\x08"),
		72623859790382856,
	},
	{
		NativeType{proto: 2, typ: TypeBigInt},
		[]byte("\x80\x00\x00\x00\x00\x00\x00\x00"),
		int64(math.MinInt64),
	},
	{
		NativeType{proto: 2, typ: TypeBigInt},
		[]byte("\x7f\xff\xff\xff\xff\xff\xff\xff"),
		int64(math.MaxInt64),
	},
	{
		NativeType{proto: 2, typ: TypeBigInt},
		[]byte("\x00\x00\x00\x00\x00\x00\x00\x00"),
		"0",
	},
	{
		NativeType{proto: 2, typ: TypeBigInt},
		[]byte("\x01\x02\x03\x04\x05\x06\x07\x08"),
		"72623859790382856",
	},
	{
		NativeType{proto: 2, typ: TypeBigInt},
		[]byte("\x80\x00\x00\x00\x00\x00\x00\x00"),
		"-9223372036854775808", // math.MinInt64
	},
	{
		NativeType{proto: 2, typ: TypeBigInt},
		[]byte("\x7f\xff\xff\xff\xff\xff\xff\xff"),
		"9223372036854775807", // math.MaxInt64
	},
	{
		NativeType{proto: 2, typ: TypeBoolean},
		[]byte("\x00"),
		false,
	},
	{
		NativeType{proto: 2, typ: TypeBoolean},
		[]byte("\x01"),
		true,
	},
	{
		NativeType{proto: 2, typ: TypeFloat},
		[]byte("\x40\x49\x0f\xdb"),
		float32(3.14159265),
	},
	{
		NativeType{proto: 2, typ: TypeDouble},
		[]byte("\x40\x09\x21\xfb\x53\xc8\xd4\xf1"),
		float64(3.14159265),
	},
	// decimals: wire format is a 4-byte scale followed by a big-endian
	// two's-complement unscaled value
	{
		NativeType{proto: 2, typ: TypeDecimal},
		[]byte("\x00\x00\x00\x00\x00"),
		inf.NewDec(0, 0),
	},
	{
		NativeType{proto: 2, typ: TypeDecimal},
		[]byte("\x00\x00\x00\x00\x64"),
		inf.NewDec(100, 0),
	},
	{
		NativeType{proto: 2, typ: TypeDecimal},
		[]byte("\x00\x00\x00\x02\x19"),
		decimalize("0.25"),
	},
	{
		NativeType{proto: 2, typ: TypeDecimal},
		[]byte("\x00\x00\x00\x13\xD5\a;\x20\x14\xA2\x91"),
		decimalize("-0.0012095473475870063"), // From the iconara/cql-rb test suite
	},
	{
		NativeType{proto: 2, typ: TypeDecimal},
		[]byte("\x00\x00\x00\x13*\xF8\xC4\xDF\xEB]o"),
		decimalize("0.0012095473475870063"), // From the iconara/cql-rb test suite
	},
	{
		NativeType{proto: 2, typ: TypeDecimal},
		[]byte("\x00\x00\x00\x12\xF2\xD8\x02\xB6R\x7F\x99\xEE\x98#\x99\xA9V"),
		decimalize("-1042342234234.123423435647768234"), // From the iconara/cql-rb test suite
	},
	{
		NativeType{proto: 2, typ: TypeDecimal},
		[]byte("\x00\x00\x00\r\nJ\x04\"^\x91\x04\x8a\xb1\x18\xfe"),
		decimalize("1243878957943.1234124191998"), // From the datastax/python-driver test suite
	},
	{
		NativeType{proto: 2, typ: TypeDecimal},
		[]byte("\x00\x00\x00\x06\xe5\xde]\x98Y"),
		decimalize("-112233.441191"), // From the datastax/python-driver test suite
	},
	{
		NativeType{proto: 2, typ: TypeDecimal},
		[]byte("\x00\x00\x00\x14\x00\xfa\xce"),
		decimalize("0.00000000000000064206"), // From the datastax/python-driver test suite
	},
	{
		NativeType{proto: 2, typ: TypeDecimal},
		[]byte("\x00\x00\x00\x14\xff\x052"),
		decimalize("-0.00000000000000064206"), // From the datastax/python-driver test suite
	},
	{
		NativeType{proto: 2, typ: TypeDecimal},
		[]byte("\xff\xff\xff\x9c\x00\xfa\xce"),
		inf.NewDec(64206, -100), // From the datastax/python-driver test suite
	},
	// timestamps: milliseconds since the Unix epoch
	{
		NativeType{proto: 2, typ: TypeTimestamp},
		[]byte("\x00\x00\x01\x40\x77\x16\xe1\xb8"),
		time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC),
	},
	{
		NativeType{proto: 2, typ: TypeTimestamp},
		[]byte("\x00\x00\x01\x40\x77\x16\xe1\xb8"),
		int64(1376387523000),
	},
	// collections (list/set/map) with both populated and nil Go values
	{
		CollectionType{
			NativeType: NativeType{proto: 2, typ: TypeList},
			Elem:       NativeType{proto: 2, typ: TypeInt},
		},
		[]byte("\x00\x02\x00\x04\x00\x00\x00\x01\x00\x04\x00\x00\x00\x02"),
		[]int{1, 2},
	},
	{
		CollectionType{
			NativeType: NativeType{proto: 2, typ: TypeList},
			Elem:       NativeType{proto: 2, typ: TypeInt},
		},
		[]byte("\x00\x02\x00\x04\x00\x00\x00\x01\x00\x04\x00\x00\x00\x02"),
		[2]int{1, 2},
	},
	{
		CollectionType{
			NativeType: NativeType{proto: 2, typ: TypeSet},
			Elem:       NativeType{proto: 2, typ: TypeInt},
		},
		[]byte("\x00\x02\x00\x04\x00\x00\x00\x01\x00\x04\x00\x00\x00\x02"),
		[]int{1, 2},
	},
	{
		CollectionType{
			NativeType: NativeType{proto: 2, typ: TypeSet},
			Elem:       NativeType{proto: 2, typ: TypeInt},
		},
		[]byte(nil),
		[]int(nil),
	},
	{
		CollectionType{
			NativeType: NativeType{proto: 2, typ: TypeMap},
			Key:        NativeType{proto: 2, typ: TypeVarchar},
			Elem:       NativeType{proto: 2, typ: TypeInt},
		},
		[]byte("\x00\x01\x00\x03foo\x00\x04\x00\x00\x00\x01"),
		map[string]int{"foo": 1},
	},
	{
		CollectionType{
			NativeType: NativeType{proto: 2, typ: TypeMap},
			Key:        NativeType{proto: 2, typ: TypeVarchar},
			Elem:       NativeType{proto: 2, typ: TypeInt},
		},
		[]byte(nil),
		map[string]int(nil),
	},
	// maximum-length (0xFFFF) collection elements
	{
		CollectionType{
			NativeType: NativeType{proto: 2, typ: TypeList},
			Elem:       NativeType{proto: 2, typ: TypeVarchar},
		},
		bytes.Join([][]byte{
			[]byte("\x00\x01\xFF\xFF"),
			bytes.Repeat([]byte("X"), 65535)}, []byte("")),
		[]string{strings.Repeat("X", 65535)},
	},
	{
		CollectionType{
			NativeType: NativeType{proto: 2, typ: TypeMap},
			Key:        NativeType{proto: 2, typ: TypeVarchar},
			Elem:       NativeType{proto: 2, typ: TypeVarchar},
		},
		bytes.Join([][]byte{
			[]byte("\x00\x01\xFF\xFF"),
			bytes.Repeat([]byte("X"), 65535),
			[]byte("\xFF\xFF"),
			bytes.Repeat([]byte("Y"), 65535)}, []byte("")),
		map[string]string{
			strings.Repeat("X", 65535): strings.Repeat("Y", 65535),
		},
	},
	// varints: arbitrary-precision two's-complement integers
	{
		NativeType{proto: 2, typ: TypeVarint},
		[]byte("\x00"),
		0,
	},
	{
		NativeType{proto: 2, typ: TypeVarint},
		[]byte("\x37\xE2\x3C\xEC"),
		int32(937573612),
	},
	{
		NativeType{proto: 2, typ: TypeVarint},
		[]byte("\x37\xE2\x3C\xEC"),
		big.NewInt(937573612),
	},
	{
		NativeType{proto: 2, typ: TypeVarint},
		[]byte("\x03\x9EV \x15\f\x03\x9DK\x18\xCDI\\$?\a["),
		bigintize("1231312312331283012830129382342342412123"), // From the iconara/cql-rb test suite
	},
	{
		NativeType{proto: 2, typ: TypeVarint},
		[]byte("\xC9v\x8D:\x86"),
		big.NewInt(-234234234234), // From the iconara/cql-rb test suite
	},
	{
		NativeType{proto: 2, typ: TypeVarint},
		[]byte("f\x1e\xfd\xf2\xe3\xb1\x9f|\x04_\x15"),
		bigintize("123456789123456789123456789"), // From the datastax/python-driver test suite
	},
	// inet: IPv4 and IPv6, from both net.IP and string Go values
	{
		NativeType{proto: 2, typ: TypeInet},
		[]byte("\x7F\x00\x00\x01"),
		net.ParseIP("127.0.0.1").To4(),
	},
	{
		NativeType{proto: 2, typ: TypeInet},
		[]byte("\xFF\xFF\xFF\xFF"),
		net.ParseIP("255.255.255.255").To4(),
	},
	{
		NativeType{proto: 2, typ: TypeInet},
		[]byte("\x7F\x00\x00\x01"),
		"127.0.0.1",
	},
	{
		NativeType{proto: 2, typ: TypeInet},
		[]byte("\xFF\xFF\xFF\xFF"),
		"255.255.255.255",
	},
	{
		NativeType{proto: 2, typ: TypeInet},
		[]byte("\x21\xDA\x00\xd3\x00\x00\x2f\x3b\x02\xaa\x00\xff\xfe\x28\x9c\x5a"),
		"21da:d3:0:2f3b:2aa:ff:fe28:9c5a",
	},
	{
		NativeType{proto: 2, typ: TypeInet},
		[]byte("\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x02\xb3\xff\xfe\x1e\x83\x29"),
		"fe80::202:b3ff:fe1e:8329",
	},
	{
		NativeType{proto: 2, typ: TypeInet},
		[]byte("\x21\xDA\x00\xd3\x00\x00\x2f\x3b\x02\xaa\x00\xff\xfe\x28\x9c\x5a"),
		net.ParseIP("21da:d3:0:2f3b:2aa:ff:fe28:9c5a"),
	},
	{
		NativeType{proto: 2, typ: TypeInet},
		[]byte("\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x02\xb3\xff\xfe\x1e\x83\x29"),
		net.ParseIP("fe80::202:b3ff:fe1e:8329"),
	},
	// nil interface value: unmarshal into nil must error
	{
		NativeType{proto: 2, typ: TypeInt},
		[]byte(nil),
		nil,
	},
	// pointer destinations: non-nil pointers round-trip, typed nil
	// pointers marshal to empty/nil data
	{
		NativeType{proto: 2, typ: TypeVarchar},
		[]byte("nullable string"),
		func() *string {
			value := "nullable string"
			return &value
		}(),
	},
	{
		NativeType{proto: 2, typ: TypeVarchar},
		[]byte{},
		(*string)(nil),
	},
	{
		NativeType{proto: 2, typ: TypeInt},
		[]byte("\x7f\xff\xff\xff"),
		func() *int {
			var value int = math.MaxInt32
			return &value
		}(),
	},
	{
		NativeType{proto: 2, typ: TypeInt},
		[]byte(nil),
		(*int)(nil),
	},
	{
		NativeType{proto: 2, typ: TypeTimeUUID},
		[]byte{0x3d, 0xcd, 0x98, 0x0, 0xf3, 0xd9, 0x11, 0xbf, 0x86, 0xd4, 0xb8, 0xe8, 0x56, 0x2c, 0xc, 0xd0},
		&UUID{0x3d, 0xcd, 0x98, 0x0, 0xf3, 0xd9, 0x11, 0xbf, 0x86, 0xd4, 0xb8, 0xe8, 0x56, 0x2c, 0xc, 0xd0},
	},
	{
		NativeType{proto: 2, typ: TypeTimeUUID},
		[]byte{},
		(*UUID)(nil),
	},
	{
		NativeType{proto: 2, typ: TypeTimestamp},
		[]byte("\x00\x00\x01\x40\x77\x16\xe1\xb8"),
		func() *time.Time {
			t := time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC)
			return &t
		}(),
	},
	{
		NativeType{proto: 2, typ: TypeTimestamp},
		[]byte(nil),
		(*time.Time)(nil),
	},
	{
		NativeType{proto: 2, typ: TypeBoolean},
		[]byte("\x00"),
		func() *bool {
			b := false
			return &b
		}(),
	},
	{
		NativeType{proto: 2, typ: TypeBoolean},
		[]byte("\x01"),
		func() *bool {
			b := true
			return &b
		}(),
	},
	{
		NativeType{proto: 2, typ: TypeBoolean},
		[]byte(nil),
		(*bool)(nil),
	},
	{
		NativeType{proto: 2, typ: TypeFloat},
		[]byte("\x40\x49\x0f\xdb"),
		func() *float32 {
			f := float32(3.14159265)
			return &f
		}(),
	},
	{
		NativeType{proto: 2, typ: TypeFloat},
		[]byte(nil),
		(*float32)(nil),
	},
	{
		NativeType{proto: 2, typ: TypeDouble},
		[]byte("\x40\x09\x21\xfb\x53\xc8\xd4\xf1"),
		func() *float64 {
			d := float64(3.14159265)
			return &d
		}(),
	},
	{
		NativeType{proto: 2, typ: TypeDouble},
		[]byte(nil),
		(*float64)(nil),
	},
	{
		NativeType{proto: 2, typ: TypeInet},
		[]byte("\x7F\x00\x00\x01"),
		func() *net.IP {
			ip := net.ParseIP("127.0.0.1").To4()
			return &ip
		}(),
	},
	{
		NativeType{proto: 2, typ: TypeInet},
		[]byte(nil),
		(*net.IP)(nil),
	},
	{
		CollectionType{
			NativeType: NativeType{proto: 2, typ: TypeList},
			Elem:       NativeType{proto: 2, typ: TypeInt},
		},
		[]byte("\x00\x02\x00\x04\x00\x00\x00\x01\x00\x04\x00\x00\x00\x02"),
		func() *[]int {
			l := []int{1, 2}
			return &l
		}(),
	},
	{
		CollectionType{
			NativeType: NativeType{proto: 2, typ: TypeList},
			Elem:       NativeType{proto: 2, typ: TypeInt},
		},
		[]byte(nil),
		(*[]int)(nil),
	},
	{
		CollectionType{
			NativeType: NativeType{proto: 2, typ: TypeMap},
			Key:        NativeType{proto: 2, typ: TypeVarchar},
			Elem:       NativeType{proto: 2, typ: TypeInt},
		},
		[]byte("\x00\x01\x00\x03foo\x00\x04\x00\x00\x00\x01"),
		func() *map[string]int {
			m := map[string]int{"foo": 1}
			return &m
		}(),
	},
	{
		CollectionType{
			NativeType: NativeType{proto: 2, typ: TypeMap},
			Key:        NativeType{proto: 2, typ: TypeVarchar},
			Elem:       NativeType{proto: 2, typ: TypeInt},
		},
		[]byte(nil),
		(*map[string]int)(nil),
	},
	{
		NativeType{proto: 2, typ: TypeVarchar},
		[]byte("HELLO WORLD"),
		func() *CustomString {
			customString := CustomString("hello world")
			return &customString
		}(),
	},
	{
		NativeType{proto: 2, typ: TypeVarchar},
		[]byte(nil),
		(*CustomString)(nil),
	},
}
// decimalize converts a decimal string literal into an *inf.Dec for use in
// test fixtures. Parse failures are deliberately ignored (fixtures are
// assumed well-formed); a malformed input yields a nil result.
func decimalize(s string) *inf.Dec {
	dec := new(inf.Dec)
	dec, _ = dec.SetString(s)
	return dec
}
// bigintize converts a base-10 integer literal into a *big.Int for use in
// test fixtures. Parse failures are deliberately ignored (fixtures are
// assumed well-formed); a malformed input yields a nil result.
func bigintize(s string) *big.Int {
	value := new(big.Int)
	value, _ = value.SetString(s, 10)
	return value
}
// TestMarshal encodes every fixture in marshalTests with Marshal and checks
// the produced wire bytes against the expected encoding.
func TestMarshal(t *testing.T) {
	for i, tc := range marshalTests {
		encoded, err := Marshal(tc.Info, tc.Value)
		if err != nil {
			t.Errorf("marshalTest[%d]: %v", i, err)
			continue
		}
		if !bytes.Equal(encoded, tc.Data) {
			t.Errorf("marshalTest[%d]: expected %q, got %q (%#v)", i, tc.Data, encoded, tc.Value)
		}
	}
}
// TestUnmarshal decodes every fixture's wire bytes into a freshly allocated
// destination of the fixture's type and checks the result; fixtures with a
// nil Value instead assert that unmarshaling into nil fails.
func TestUnmarshal(t *testing.T) {
	for i, tc := range marshalTests {
		if tc.Value == nil {
			// a nil destination must be rejected
			if err := Unmarshal(tc.Info, tc.Data, tc.Value); err == nil {
				t.Errorf("unmarshalTest[%d]: %#v not return error.", i, tc.Value)
			}
			continue
		}
		dest := reflect.New(reflect.TypeOf(tc.Value))
		if err := Unmarshal(tc.Info, tc.Data, dest.Interface()); err != nil {
			t.Errorf("unmarshalTest[%d]: %v", i, err)
			continue
		}
		if !reflect.DeepEqual(dest.Elem().Interface(), tc.Value) {
			t.Errorf("unmarshalTest[%d]: expected %#v, got %#v.", i, tc.Value, dest.Elem().Interface())
		}
	}
}
// TestMarshalVarint round-trips varint values through Marshal/Unmarshal,
// checking the exact two's-complement wire bytes on the way out and the
// recovered *big.Int on the way back.
func TestMarshalVarint(t *testing.T) {
	varintTests := []struct {
		Value       interface{} // input handed to Marshal (various Go integer kinds)
		Marshaled   []byte      // expected wire encoding
		Unmarshaled *big.Int    // expected value after unmarshaling Marshaled
	}{
		{
			Value:       int8(0),
			Marshaled:   []byte("\x00"),
			Unmarshaled: big.NewInt(0),
		},
		{
			// unsigned 255 needs a leading zero byte to stay positive
			Value:       uint8(255),
			Marshaled:   []byte("\x00\xFF"),
			Unmarshaled: big.NewInt(255),
		},
		{
			Value:       int8(-1),
			Marshaled:   []byte("\xFF"),
			Unmarshaled: big.NewInt(-1),
		},
		{
			Value:       big.NewInt(math.MaxInt32),
			Marshaled:   []byte("\x7F\xFF\xFF\xFF"),
			Unmarshaled: big.NewInt(math.MaxInt32),
		},
		{
			Value:       big.NewInt(int64(math.MaxInt32) + 1),
			Marshaled:   []byte("\x00\x80\x00\x00\x00"),
			Unmarshaled: big.NewInt(int64(math.MaxInt32) + 1),
		},
		{
			Value:       big.NewInt(math.MinInt32),
			Marshaled:   []byte("\x80\x00\x00\x00"),
			Unmarshaled: big.NewInt(math.MinInt32),
		},
		{
			Value:       big.NewInt(int64(math.MinInt32) - 1),
			Marshaled:   []byte("\xFF\x7F\xFF\xFF\xFF"),
			Unmarshaled: big.NewInt(int64(math.MinInt32) - 1),
		},
		{
			Value:       math.MinInt64,
			Marshaled:   []byte("\x80\x00\x00\x00\x00\x00\x00\x00"),
			Unmarshaled: big.NewInt(math.MinInt64),
		},
		{
			Value:       uint64(math.MaxInt64) + 1,
			Marshaled:   []byte("\x00\x80\x00\x00\x00\x00\x00\x00\x00"),
			Unmarshaled: bigintize("9223372036854775808"),
		},
		{
			Value:       bigintize("2361183241434822606848"), // 2**71
			Marshaled:   []byte("\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00"),
			Unmarshaled: bigintize("2361183241434822606848"),
		},
		{
			Value:       bigintize("-9223372036854775809"), // -2**63 - 1
			Marshaled:   []byte("\xFF\x7F\xFF\xFF\xFF\xFF\xFF\xFF\xFF"),
			Unmarshaled: bigintize("-9223372036854775809"),
		},
	}

	for i, test := range varintTests {
		// marshal and compare the exact wire bytes
		data, err := Marshal(NativeType{proto: 2, typ: TypeVarint}, test.Value)
		if err != nil {
			t.Errorf("error marshaling varint: %v (test #%d)", err, i)
		}
		if !bytes.Equal(test.Marshaled, data) {
			t.Errorf("marshaled varint mismatch: expected %v, got %v (test #%d)", test.Marshaled, data, i)
		}

		// unmarshal the expected bytes and compare numerically
		binder := new(big.Int)
		err = Unmarshal(NativeType{proto: 2, typ: TypeVarint}, test.Marshaled, binder)
		if err != nil {
			t.Errorf("error unmarshaling varint: %v (test #%d)", err, i)
		}
		if test.Unmarshaled.Cmp(binder) != 0 {
			t.Errorf("unmarshaled varint mismatch: expected %v, got %v (test #%d)", test.Unmarshaled, binder, i)
		}
	}
}
// equalStringSlice reports whether the two slices have the same length and
// identical elements in the same order. Two nil/empty slices are equal.
func equalStringSlice(leftList, rightList []string) bool {
	if len(leftList) != len(rightList) {
		return false
	}
	for i, left := range leftList {
		if left != rightList[i] {
			return false
		}
	}
	return true
}
// TestMarshalList round-trips several []string values through a
// list<varchar> encoding and verifies each comes back element-for-element
// identical.
func TestMarshalList(t *testing.T) {
	typeInfo := CollectionType{
		NativeType: NativeType{proto: 2, typ: TypeList},
		Elem:       NativeType{proto: 2, typ: TypeVarchar},
	}

	sourceLists := [][]string{
		{"valueA"},
		{"valueA", "valueB"},
		{"valueB"},
	}

	// encode every source list up front
	listDatas := make([][]byte, 0, len(sourceLists))
	for _, list := range sourceLists {
		listData, marshalErr := Marshal(typeInfo, list)
		if nil != marshalErr {
			t.Errorf("Error marshal %+v of type %+v: %s", list, typeInfo, marshalErr)
		}
		listDatas = append(listDatas, listData)
	}

	// decode each encoding, reusing one destination slice across iterations
	// exactly as the original test did
	var outputList []string
	outputLists := make([][]string, 0, len(listDatas))
	for _, listData := range listDatas {
		if unmarshalErr := Unmarshal(typeInfo, listData, &outputList); nil != unmarshalErr {
			t.Error(unmarshalErr)
		}
		outputLists = append(outputLists, outputList)
	}

	for index, sourceList := range sourceLists {
		if !equalStringSlice(sourceList, outputLists[index]) {
			t.Errorf("Lists %+v not equal to lists %+v, but should", sourceList, outputLists[index])
		}
	}
}
// CustomString is a string type with custom CQL (de)serialization used to
// verify that Marshal/Unmarshal honor the Marshaler/Unmarshaler interfaces:
// it upper-cases on marshal and lower-cases on unmarshal, so custom codec
// involvement is observable in the tests.
type CustomString string

func (c CustomString) MarshalCQL(info TypeInfo) ([]byte, error) {
	upper := strings.ToUpper(string(c))
	return []byte(upper), nil
}

func (c *CustomString) UnmarshalCQL(info TypeInfo, data []byte) error {
	lower := strings.ToLower(string(data))
	*c = CustomString(lower)
	return nil
}
// MyString is a named string type; marshalTests uses it to check that
// user-defined string types encode like plain strings.
type MyString string

// MyInt is a named int type for the same kind of coverage; its use is not
// visible in this chunk.
type MyInt int
// typeLookupTest maps Apache Cassandra marshaller class names (without the
// common package prefix) to the gocql Type each should resolve to; consumed
// by TestLookupCassType via testType.
var typeLookupTest = []struct {
	TypeName     string
	ExpectedType Type
}{
	{"AsciiType", TypeAscii},
	{"LongType", TypeBigInt},
	{"BytesType", TypeBlob},
	{"BooleanType", TypeBoolean},
	{"CounterColumnType", TypeCounter},
	{"DecimalType", TypeDecimal},
	{"DoubleType", TypeDouble},
	{"FloatType", TypeFloat},
	{"Int32Type", TypeInt},
	{"DateType", TypeTimestamp},
	{"TimestampType", TypeTimestamp},
	{"UUIDType", TypeUUID},
	{"UTF8Type", TypeVarchar},
	{"IntegerType", TypeVarint},
	{"TimeUUIDType", TypeTimeUUID},
	{"InetAddressType", TypeInet},
	{"MapType", TypeMap},
	{"ListType", TypeList},
	{"SetType", TypeSet},
	// anything unrecognized falls back to TypeCustom
	{"unknown", TypeCustom},
}
// testType asserts that the fully-qualified Cassandra class name formed from
// cassType resolves (via getApacheCassandraType) to expectedType.
func testType(t *testing.T, cassType string, expectedType Type) {
	computedType := getApacheCassandraType(apacheCassandraTypePrefix + cassType)
	if computedType == expectedType {
		return
	}
	t.Errorf("Cassandra custom type lookup for %s failed. Expected %s, got %s.", cassType, expectedType.String(), computedType.String())
}
// TestLookupCassType runs every entry of typeLookupTest through testType.
func TestLookupCassType(t *testing.T) {
	for _, entry := range typeLookupTest {
		testType(t, entry.TypeName, entry.ExpectedType)
	}
}
// MyPointerMarshaler implements MarshalCQL on a pointer receiver so the
// tests can verify Marshal dispatches through pointer-receiver Marshalers;
// it always encodes to the single byte 42.
type MyPointerMarshaler struct{}

func (m *MyPointerMarshaler) MarshalCQL(_ TypeInfo) ([]byte, error) {
	encoded := []byte{42}
	return encoded, nil
}
// TestMarshalPointer checks that marshaling a pointer-receiver Marshaler
// yields that type's custom encoding (the single byte 42).
func TestMarshalPointer(t *testing.T) {
	marshaler := &MyPointerMarshaler{}
	info := NativeType{proto: 2, typ: TypeInt}

	data, err := Marshal(info, marshaler)
	if err != nil {
		t.Errorf("Pointer marshaling failed. Error: %s", err)
	}
	if len(data) != 1 || data[0] != 42 {
		t.Errorf("Pointer marshaling failed. Expected %+v, got %+v", []byte{42}, data)
	}
}

View file

@ -0,0 +1,871 @@
// Copyright (c) 2015 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocql
import (
"encoding/hex"
"encoding/json"
"fmt"
"log"
"strconv"
"strings"
"sync"
)
// schema metadata for a keyspace, as queried from system.schema_keyspaces
// by getKeyspaceMetadata and enriched by compileMetadata
type KeyspaceMetadata struct {
	Name          string
	DurableWrites bool
	// replication strategy class and its options (decoded from the
	// strategy_options JSON column)
	StrategyClass   string
	StrategyOptions map[string]interface{}
	// tables in this keyspace, keyed by table name (filled in by
	// compileMetadata)
	Tables map[string]*TableMetadata
}
// schema metadata for a table (a.k.a. column family)
type TableMetadata struct {
	Keyspace string
	Name     string
	// raw marshaller class definitions from system.schema_columnfamilies;
	// decoded by parseType during metadata compilation
	KeyValidator     string
	Comparator       string
	DefaultValidator string
	// alias lists used by the V1 code path to reconstruct key, clustering
	// and value column names
	KeyAliases    []string
	ColumnAliases []string
	ValueAlias    string
	// derived by compileV1Metadata/compileV2Metadata: key columns ordered
	// by component index, plus all columns keyed by name
	PartitionKey      []*ColumnMetadata
	ClusteringColumns []*ColumnMetadata
	Columns           map[string]*ColumnMetadata
}
// schema metadata for a column
type ColumnMetadata struct {
	Keyspace string
	Table    string
	Name     string
	// position of the column within its partition or clustering key
	ComponentIndex int
	// one of PARTITION_KEY, CLUSTERING_KEY or REGULAR
	Kind string
	// raw marshaller class definition; Type and Order are derived from it
	// by compileMetadata via parseType
	Validator string
	Type      TypeInfo
	Order     ColumnOrder
	Index     ColumnIndexMetadata
}
// ColumnOrder is the ordering of the column with regard to its comparator:
// ASC for the natural order, DESC when the comparator is wrapped in
// ReversedType.
type ColumnOrder bool

const (
	ASC ColumnOrder = false
	// explicitly typed: the original `DESC = true` declared an untyped
	// bool constant, inconsistent with ASC
	DESC ColumnOrder = true
)
// ColumnIndexMetadata describes a secondary index on a column, populated
// from the index_name/index_type/index_options columns of
// system.schema_columns.
type ColumnIndexMetadata struct {
	Name string
	Type string
	// decoded from the index_options JSON column
	Options map[string]interface{}
}
// Column kind values as reported in the "type" column of
// system.schema_columns (V2+); the V1 code path assigns REGULAR itself.
const (
	PARTITION_KEY  = "partition_key"
	CLUSTERING_KEY = "clustering_key"
	REGULAR        = "regular"
)

// default alias values, used by compileV1Metadata when a table's alias
// lists do not cover every key/column position
const (
	DEFAULT_KEY_ALIAS    = "key"
	DEFAULT_COLUMN_ALIAS = "column"
	DEFAULT_VALUE_ALIAS  = "value"
)
// queries the cluster for schema information for a specific keyspace,
// caching results per keyspace name
type schemaDescriber struct {
	session *Session
	// mu serializes access to cache (and to refreshSchema)
	mu    sync.Mutex
	cache map[string]*KeyspaceMetadata
}
// creates a session bound schema describer which will query and cache
// keyspace metadata
func newSchemaDescriber(session *Session) *schemaDescriber {
	describer := &schemaDescriber{
		session: session,
		cache:   make(map[string]*KeyspaceMetadata),
	}
	return describer
}
// returns the cached KeyspaceMetadata held by the describer for the named
// keyspace, querying the cluster (and populating the cache) on a miss.
func (s *schemaDescriber) getSchema(keyspaceName string) (*KeyspaceMetadata, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	// TODO handle schema change events

	if metadata, found := s.cache[keyspaceName]; found {
		return metadata, nil
	}

	// cache miss: refresh the cache for this keyspace
	if err := s.refreshSchema(keyspaceName); err != nil {
		return nil, err
	}
	return s.cache[keyspaceName], nil
}
// forcibly updates the current KeyspaceMetadata held by the schema describer
// for a given named keyspace. Callers are expected to hold s.mu (getSchema
// does) — not verified here.
func (s *schemaDescriber) refreshSchema(keyspaceName string) error {
	// query the system keyspace for schema data
	// TODO retrieve concurrently
	// (the original declared `var err error` here, which the first `:=`
	// below made redundant; removed)
	keyspace, err := getKeyspaceMetadata(s.session, keyspaceName)
	if err != nil {
		return err
	}
	tables, err := getTableMetadata(s.session, keyspaceName)
	if err != nil {
		return err
	}
	columns, err := getColumnMetadata(s.session, keyspaceName)
	if err != nil {
		return err
	}

	// organize the schema data
	compileMetadata(s.session.cfg.ProtoVersion, keyspace, tables, columns)

	// update the cache
	s.cache[keyspaceName] = keyspace

	return nil
}
// "compiles" derived information about keyspace, table, and column metadata
// for a keyspace from the basic queried metadata objects returned by
// getKeyspaceMetadata, getTableMetadata, and getColumnMetadata respectively;
// Links the metadata objects together and derives the column composition of
// the partition key and clustering key for a table.
func compileMetadata(
	protoVersion int,
	keyspace *KeyspaceMetadata,
	tables []TableMetadata,
	columns []ColumnMetadata,
) {
	// index tables by name; iterating by index so &tables[i] points into
	// the caller's slice rather than at a loop-variable copy
	keyspace.Tables = make(map[string]*TableMetadata)
	for i := range tables {
		tables[i].Columns = make(map[string]*ColumnMetadata)

		keyspace.Tables[tables[i].Name] = &tables[i]
	}

	// add columns from the schema data
	for i := range columns {
		// decode the validator for TypeInfo and order
		validatorParsed := parseType(columns[i].Validator)
		columns[i].Type = validatorParsed.types[0]
		columns[i].Order = ASC
		if validatorParsed.reversed[0] {
			columns[i].Order = DESC
		}

		// attach the column to its owning table (again taking &columns[i]
		// so the map points into the slice)
		table := keyspace.Tables[columns[i].Table]
		table.Columns[columns[i].Name] = &columns[i]
	}

	// V1 must reconstruct key/clustering columns from alias data; V2+ has
	// the column kind available directly
	if protoVersion == 1 {
		compileV1Metadata(tables)
	} else {
		compileV2Metadata(tables)
	}
}
// Compiles derived information from TableMetadata which have had
// ColumnMetadata added already. V1 protocol does not return as much
// column metadata as V2+ (because V1 doesn't support the "type" column in the
// system.schema_columns table) so determining PartitionKey and ClusterColumns
// is more complex.
func compileV1Metadata(tables []TableMetadata) {
	for i := range tables {
		table := &tables[i]

		// decode the key validator
		keyValidatorParsed := parseType(table.KeyValidator)
		// decode the comparator
		comparatorParsed := parseType(table.Comparator)

		// the partition key length is the same as the number of types in the
		// key validator
		table.PartitionKey = make([]*ColumnMetadata, len(keyValidatorParsed.types))

		// V1 protocol only returns "regular" columns from
		// system.schema_columns (there is no type field for columns)
		// so the alias information is used to
		// create the partition key and clustering columns

		// construct the partition key from the alias
		for i := range table.PartitionKey {
			// name comes from the alias list when present, otherwise the
			// default ("key", "key2", "key3", ...)
			var alias string
			if len(table.KeyAliases) > i {
				alias = table.KeyAliases[i]
			} else if i == 0 {
				alias = DEFAULT_KEY_ALIAS
			} else {
				alias = DEFAULT_KEY_ALIAS + strconv.Itoa(i+1)
			}

			column := &ColumnMetadata{
				Keyspace:       table.Keyspace,
				Table:          table.Name,
				Name:           alias,
				Type:           keyValidatorParsed.types[i],
				Kind:           PARTITION_KEY,
				ComponentIndex: i,
			}

			table.PartitionKey[i] = column
			table.Columns[alias] = column
		}

		// determine the number of clustering columns
		size := len(comparatorParsed.types)
		if comparatorParsed.isComposite {
			// a trailing collections component, or a trailing varchar
			// component not covered by the alias list, is not a clustering
			// column
			if len(comparatorParsed.collections) != 0 ||
				(len(table.ColumnAliases) == size-1 &&
					comparatorParsed.types[size-1].Type() == TypeVarchar) {
				size = size - 1
			}
		} else {
			// non-composite comparator: clustering columns exist only when
			// there are aliases or no regular columns at all
			if !(len(table.ColumnAliases) != 0 || len(table.Columns) == 0) {
				size = 0
			}
		}

		table.ClusteringColumns = make([]*ColumnMetadata, size)

		for i := range table.ClusteringColumns {
			// name from the alias list or the default ("column", "column2", ...)
			var alias string
			if len(table.ColumnAliases) > i {
				alias = table.ColumnAliases[i]
			} else if i == 0 {
				alias = DEFAULT_COLUMN_ALIAS
			} else {
				alias = DEFAULT_COLUMN_ALIAS + strconv.Itoa(i+1)
			}

			order := ASC
			if comparatorParsed.reversed[i] {
				order = DESC
			}

			column := &ColumnMetadata{
				Keyspace:       table.Keyspace,
				Table:          table.Name,
				Name:           alias,
				Type:           comparatorParsed.types[i],
				Order:          order,
				Kind:           CLUSTERING_KEY,
				ComponentIndex: i,
			}

			table.ClusteringColumns[i] = column
			table.Columns[alias] = column
		}

		// when not every comparator component became a clustering column,
		// the table has a single compact value column
		if size != len(comparatorParsed.types)-1 {
			alias := DEFAULT_VALUE_ALIAS
			if len(table.ValueAlias) > 0 {
				alias = table.ValueAlias
			}
			// decode the default validator
			defaultValidatorParsed := parseType(table.DefaultValidator)
			column := &ColumnMetadata{
				Keyspace: table.Keyspace,
				Table:    table.Name,
				Name:     alias,
				Type:     defaultValidatorParsed.types[0],
				Kind:     REGULAR,
			}
			table.Columns[alias] = column
		}
	}
}
// The simpler compile case for V2+ protocol: every column already carries
// its kind and component index, so the key slices are sized from the kind
// counts and each column is slotted into its component position.
func compileV2Metadata(tables []TableMetadata) {
	for i := range tables {
		table := &tables[i]

		table.PartitionKey = make([]*ColumnMetadata, countColumnsOfKind(table.Columns, PARTITION_KEY))
		table.ClusteringColumns = make([]*ColumnMetadata, countColumnsOfKind(table.Columns, CLUSTERING_KEY))

		for _, column := range table.Columns {
			switch column.Kind {
			case PARTITION_KEY:
				table.PartitionKey[column.ComponentIndex] = column
			case CLUSTERING_KEY:
				table.ClusteringColumns[column.ComponentIndex] = column
			}
		}
	}
}
// returns the count of columns with the given "kind" value.
func countColumnsOfKind(columns map[string]*ColumnMetadata, kind string) int {
	n := 0
	for _, col := range columns {
		if col.Kind == kind {
			n++
		}
	}
	return n
}
// query only for the keyspace metadata for the specified keyspace from system.schema_keyspace
func getKeyspaceMetadata(
	session *Session,
	keyspaceName string,
) (*KeyspaceMetadata, error) {
	query := session.Query(
		`
SELECT durable_writes, strategy_class, strategy_options
FROM system.schema_keyspaces
WHERE keyspace_name = ?
`,
		keyspaceName,
	)
	// Set a routing key to avoid GetRoutingKey from computing the routing key
	// TODO use a separate connection (pool) for system keyspace queries.
	query.RoutingKey([]byte{})

	keyspace := &KeyspaceMetadata{Name: keyspaceName}

	// strategy_options is stored as a JSON blob and decoded below
	var strategyOptionsJSON []byte

	err := query.Scan(
		&keyspace.DurableWrites,
		&keyspace.StrategyClass,
		&strategyOptionsJSON,
	)
	if err != nil {
		return nil, fmt.Errorf("Error querying keyspace schema: %v", err)
	}

	err = json.Unmarshal(strategyOptionsJSON, &keyspace.StrategyOptions)
	if err != nil {
		return nil, fmt.Errorf(
			"Invalid JSON value '%s' as strategy_options for in keyspace '%s': %v",
			strategyOptionsJSON, keyspace.Name, err,
		)
	}

	return keyspace, nil
}
// query for only the table metadata in the specified keyspace from system.schema_columnfamilies
func getTableMetadata(
	session *Session,
	keyspaceName string,
) ([]TableMetadata, error) {
	query := session.Query(
		`
SELECT
columnfamily_name,
key_validator,
comparator,
default_validator,
key_aliases,
column_aliases,
value_alias
FROM system.schema_columnfamilies
WHERE keyspace_name = ?
`,
		keyspaceName,
	)
	// Set a routing key to avoid GetRoutingKey from computing the routing key
	// TODO use a separate connection (pool) for system keyspace queries.
	query.RoutingKey([]byte{})

	iter := query.Iter()

	tables := []TableMetadata{}
	// `table` is reused as the scan destination and copied into `tables`
	// after each row
	table := TableMetadata{Keyspace: keyspaceName}

	// alias lists are stored as JSON blobs and decoded below
	var keyAliasesJSON []byte
	var columnAliasesJSON []byte

	for iter.Scan(
		&table.Name,
		&table.KeyValidator,
		&table.Comparator,
		&table.DefaultValidator,
		&keyAliasesJSON,
		&columnAliasesJSON,
		&table.ValueAlias,
	) {
		var err error

		// decode the key aliases
		if keyAliasesJSON != nil {
			table.KeyAliases = []string{}
			err = json.Unmarshal(keyAliasesJSON, &table.KeyAliases)
			if err != nil {
				iter.Close()
				return nil, fmt.Errorf(
					"Invalid JSON value '%s' as key_aliases for in table '%s': %v",
					keyAliasesJSON, table.Name, err,
				)
			}
		}

		// decode the column aliases
		if columnAliasesJSON != nil {
			table.ColumnAliases = []string{}
			err = json.Unmarshal(columnAliasesJSON, &table.ColumnAliases)
			if err != nil {
				iter.Close()
				return nil, fmt.Errorf(
					"Invalid JSON value '%s' as column_aliases for in table '%s': %v",
					columnAliasesJSON, table.Name, err,
				)
			}
		}

		tables = append(tables, table)
		// reset the scan destination for the next row
		table = TableMetadata{Keyspace: keyspaceName}
	}

	// ErrNotFound simply means an empty result set, not a failure
	err := iter.Close()
	if err != nil && err != ErrNotFound {
		return nil, fmt.Errorf("Error querying table schema: %v", err)
	}

	return tables, nil
}
// query for only the column metadata in the specified keyspace from system.schema_columns
func getColumnMetadata(
	session *Session,
	keyspaceName string,
) ([]ColumnMetadata, error) {
	// Deal with differences in protocol versions: the statement text and
	// the scan closure vary between V1 and V2+
	var stmt string
	var scan func(*Iter, *ColumnMetadata, *[]byte) bool
	if session.cfg.ProtoVersion == 1 {
		// V1 does not support the type column, and all returned rows are
		// of kind "regular".
		stmt = `
SELECT
columnfamily_name,
column_name,
component_index,
validator,
index_name,
index_type,
index_options
FROM system.schema_columns
WHERE keyspace_name = ?
`
		scan = func(
			iter *Iter,
			column *ColumnMetadata,
			indexOptionsJSON *[]byte,
		) bool {
			// all columns returned by V1 are regular
			column.Kind = REGULAR
			return iter.Scan(
				&column.Table,
				&column.Name,
				&column.ComponentIndex,
				&column.Validator,
				&column.Index.Name,
				&column.Index.Type,
				&indexOptionsJSON,
			)
		}
	} else {
		// V2+ supports the type column
		stmt = `
SELECT
columnfamily_name,
column_name,
component_index,
validator,
index_name,
index_type,
index_options,
type
FROM system.schema_columns
WHERE keyspace_name = ?
`
		scan = func(
			iter *Iter,
			column *ColumnMetadata,
			indexOptionsJSON *[]byte,
		) bool {
			return iter.Scan(
				&column.Table,
				&column.Name,
				&column.ComponentIndex,
				&column.Validator,
				&column.Index.Name,
				&column.Index.Type,
				&indexOptionsJSON,
				&column.Kind,
			)
		}
	}

	// get the columns metadata
	columns := []ColumnMetadata{}
	// `column` is reused as the scan destination and copied into `columns`
	// after each row
	column := ColumnMetadata{Keyspace: keyspaceName}

	// index_options is stored as a JSON blob and decoded below
	var indexOptionsJSON []byte

	query := session.Query(stmt, keyspaceName)
	// Set a routing key to avoid GetRoutingKey from computing the routing key
	// TODO use a separate connection (pool) for system keyspace queries.
	query.RoutingKey([]byte{})
	iter := query.Iter()

	for scan(iter, &column, &indexOptionsJSON) {
		var err error

		// decode the index options
		if indexOptionsJSON != nil {
			err = json.Unmarshal(indexOptionsJSON, &column.Index.Options)
			if err != nil {
				iter.Close()
				return nil, fmt.Errorf(
					"Invalid JSON value '%s' as index_options for column '%s' in table '%s': %v",
					indexOptionsJSON,
					column.Name,
					column.Table,
					err,
				)
			}
		}

		columns = append(columns, column)
		// reset the scan destination for the next row
		column = ColumnMetadata{Keyspace: keyspaceName}
	}

	// ErrNotFound simply means an empty result set, not a failure
	err := iter.Close()
	if err != nil && err != ErrNotFound {
		return nil, fmt.Errorf("Error querying column schema: %v", err)
	}

	return columns, nil
}
// type definition parser state
type typeParser struct {
	input string
	// index: presumably the current scan position within input — the
	// low-level parsing helpers are not visible in this chunk; confirm there
	index int
}

// the type definition parser result
type typeParserResult struct {
	// true when the definition was a CompositeType with multiple components
	isComposite bool
	// one TypeInfo per component; reversed[i] is true when component i was
	// wrapped in ReversedType
	types    []TypeInfo
	reversed []bool
	// collection column types keyed by (hex-decoded) column name; only set
	// for composite definitions carrying a ColumnToCollectionType component
	collections map[string]TypeInfo
}
// Parse the type definition used for validator and comparator schema data
func parseType(def string) typeParserResult {
	return (&typeParser{input: def}).parse()
}
// fully-qualified Apache Cassandra marshaller class names recognized while
// interpreting validator/comparator definitions
const (
	REVERSED_TYPE   = "org.apache.cassandra.db.marshal.ReversedType"
	COMPOSITE_TYPE  = "org.apache.cassandra.db.marshal.CompositeType"
	COLLECTION_TYPE = "org.apache.cassandra.db.marshal.ColumnToCollectionType"
	LIST_TYPE       = "org.apache.cassandra.db.marshal.ListType"
	SET_TYPE        = "org.apache.cassandra.db.marshal.SetType"
	MAP_TYPE        = "org.apache.cassandra.db.marshal.MapType"
)
// represents a class specification in the type def AST
type typeParserClassNode struct {
	name   string
	params []typeParserParamNode
	// this is the segment of the input string that defined this node
	input string
}

// represents a class parameter in the type def AST
type typeParserParamNode struct {
	// optional parameter name (nil for positional parameters); used for
	// the hex-encoded column names of ColumnToCollectionType
	name  *string
	class typeParserClassNode
}
// parse builds the AST for t.input and interprets it into a
// typeParserResult; inputs that fail to parse are treated as a single
// custom type rather than an error.
func (t *typeParser) parse() typeParserResult {
	// parse the AST
	ast, ok := t.parseClassNode()
	if !ok {
		// treat this is a custom type
		return typeParserResult{
			isComposite: false,
			types: []TypeInfo{
				NativeType{
					typ:    TypeCustom,
					custom: t.input,
				},
			},
			reversed:    []bool{false},
			collections: nil,
		}
	}

	// interpret the AST
	if strings.HasPrefix(ast.name, COMPOSITE_TYPE) {
		count := len(ast.params)

		// look for a collections param: a trailing ColumnToCollectionType
		// component carries the table's collection columns and is not a
		// regular composite component
		last := ast.params[count-1]
		collections := map[string]TypeInfo{}
		if strings.HasPrefix(last.class.name, COLLECTION_TYPE) {
			count--

			for _, param := range last.class.params {
				// decode the name: collection column names are stored
				// hex-encoded
				var name string
				decoded, err := hex.DecodeString(*param.name)
				if err != nil {
					log.Printf(
						"Error parsing type '%s', contains collection name '%s' with an invalid format: %v",
						t.input,
						*param.name,
						err,
					)
					// just use the provided name
					name = *param.name
				} else {
					name = string(decoded)
				}
				collections[name] = param.class.asTypeInfo()
			}
		}

		// unwrap each remaining component, recording ReversedType wrappers
		types := make([]TypeInfo, count)
		reversed := make([]bool, count)

		for i, param := range ast.params[:count] {
			class := param.class
			reversed[i] = strings.HasPrefix(class.name, REVERSED_TYPE)
			if reversed[i] {
				class = class.params[0].class
			}
			types[i] = class.asTypeInfo()
		}

		return typeParserResult{
			isComposite: true,
			types:       types,
			reversed:    reversed,
			collections: collections,
		}
	} else {
		// not composite, so one type
		class := *ast
		reversed := strings.HasPrefix(class.name, REVERSED_TYPE)
		if reversed {
			class = class.params[0].class
		}
		typeInfo := class.asTypeInfo()
		return typeParserResult{
			isComposite: false,
			types:       []TypeInfo{typeInfo},
			reversed:    []bool{reversed},
		}
	}
}
// asTypeInfo converts a parsed class node into the driver's TypeInfo,
// recursing into element/key parameters for collection types.
func (class *typeParserClassNode) asTypeInfo() TypeInfo {
	switch {
	case strings.HasPrefix(class.name, LIST_TYPE):
		return CollectionType{
			NativeType: NativeType{typ: TypeList},
			Elem:       class.params[0].class.asTypeInfo(),
		}
	case strings.HasPrefix(class.name, SET_TYPE):
		return CollectionType{
			NativeType: NativeType{typ: TypeSet},
			Elem:       class.params[0].class.asTypeInfo(),
		}
	case strings.HasPrefix(class.name, MAP_TYPE):
		return CollectionType{
			NativeType: NativeType{typ: TypeMap},
			Key:        class.params[0].class.asTypeInfo(),
			Elem:       class.params[1].class.asTypeInfo(),
		}
	}

	// must be a simple type or custom type
	info := NativeType{typ: getApacheCassandraType(class.name)}
	if info.typ == TypeCustom {
		// add the entire class definition so callers can inspect it
		info.custom = class.input
	}
	return info
}
// CLASS := ID [ PARAMS ]
//
// parseClassNode parses one class specification (an identifier with an
// optional parenthesized parameter list) and reports whether one was found.
func (t *typeParser) parseClassNode() (*typeParserClassNode, bool) {
	t.skipWhitespace()

	start := t.index
	name, ok := t.nextIdentifier()
	if !ok {
		return nil, false
	}

	params, ok := t.parseParamNodes()
	if !ok {
		return nil, false
	}

	return &typeParserClassNode{
		name:   name,
		params: params,
		// record the exact slice of input that produced this node
		input: t.input[start:t.index],
	}, true
}
// PARAMS := "(" PARAM { "," PARAM } ")"
// PARAM := [ PARAM_NAME ":" ] CLASS
// PARAM_NAME := ID
//
// parseParamNodes parses the optional parenthesized parameter list of a
// class specification. ok is false on malformed input; in particular, input
// that ends before the closing ')' now returns ok=false instead of panicking
// with an index-out-of-range (the original indexed t.input[t.index] without
// bounds checks inside the loop).
func (t *typeParser) parseParamNodes() (params []typeParserParamNode, ok bool) {
	t.skipWhitespace()

	// the params are optional
	if t.index == len(t.input) || t.input[t.index] != '(' {
		return nil, true
	}

	params = []typeParserParamNode{}

	// consume the '('
	t.index++

	t.skipWhitespace()
	for t.index < len(t.input) && t.input[t.index] != ')' {
		// look for a named param, but if no colon, then we want to backup
		backupIndex := t.index

		// name will be a hex encoded version of a utf-8 string
		name, ok := t.nextIdentifier()
		if !ok {
			return nil, false
		}
		hasName := true

		// TODO handle '=>' used for DynamicCompositeType
		t.skipWhitespace()
		if t.index < len(t.input) && t.input[t.index] == ':' {
			// there is a name for this parameter
			// consume the ':'
			t.index++
			t.skipWhitespace()
		} else {
			// no name, backup
			hasName = false
			t.index = backupIndex
		}

		// parse the next full parameter
		classNode, ok := t.parseClassNode()
		if !ok {
			return nil, false
		}

		if hasName {
			params = append(
				params,
				typeParserParamNode{name: &name, class: *classNode},
			)
		} else {
			params = append(
				params,
				typeParserParamNode{class: *classNode},
			)
		}

		t.skipWhitespace()
		if t.index < len(t.input) && t.input[t.index] == ',' {
			// consume the comma
			t.index++
			t.skipWhitespace()
		}
	}

	// input ended before the closing ')': malformed definition
	if t.index == len(t.input) {
		return nil, false
	}

	// consume the ')'
	t.index++
	return params, true
}
// skipWhitespace advances the parse position past any run of whitespace.
func (t *typeParser) skipWhitespace() {
	for t.index < len(t.input) {
		if !isWhitespaceChar(t.input[t.index]) {
			return
		}
		t.index++
	}
}
// isWhitespaceChar reports whether c is one of the whitespace characters
// (space, newline, tab) tolerated inside a type definition.
func isWhitespaceChar(c byte) bool {
	switch c {
	case ' ', '\n', '\t':
		return true
	}
	return false
}
// ID := LETTER { LETTER }
// LETTER := "0"..."9" | "a"..."z" | "A"..."Z" | "-" | "+" | "." | "_" | "&"
//
// nextIdentifier consumes the identifier at the current position and returns
// it; found is false when no identifier character was present.
func (t *typeParser) nextIdentifier() (id string, found bool) {
	start := t.index
	for t.index < len(t.input) {
		if !isIdentifierChar(t.input[t.index]) {
			break
		}
		t.index++
	}
	if t.index == start {
		return "", false
	}
	return t.input[start:t.index], true
}
// isIdentifierChar reports whether c may appear in a type-definition
// identifier: ASCII alphanumerics plus '-', '+', '.', '_', and '&'.
func isIdentifierChar(c byte) bool {
	switch {
	case '0' <= c && c <= '9':
		return true
	case 'a' <= c && c <= 'z':
		return true
	case 'A' <= c && c <= 'Z':
		return true
	}
	switch c {
	case '-', '+', '.', '_', '&':
		return true
	}
	return false
}

View file

@ -0,0 +1,802 @@
// Copyright (c) 2015 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocql
import (
"strconv"
"testing"
)
// Tests V1 and V2 metadata "compilation" from example data which might be returned
// from metadata schema queries (see getKeyspaceMetadata, getTableMetadata, and getColumnMetadata)
func TestCompileMetadata(t *testing.T) {
	// V1 tests - these are all based on real examples from the integration test ccm cluster
	keyspace := &KeyspaceMetadata{
		Name: "V1Keyspace",
	}
	tables := []TableMetadata{
		TableMetadata{
			// This table, found in the system keyspace, has no key aliases or column aliases
			Keyspace:         "V1Keyspace",
			Name:             "Schema",
			KeyValidator:     "org.apache.cassandra.db.marshal.BytesType",
			Comparator:       "org.apache.cassandra.db.marshal.UTF8Type",
			DefaultValidator: "org.apache.cassandra.db.marshal.BytesType",
			KeyAliases:       []string{},
			ColumnAliases:    []string{},
			ValueAlias:       "",
		},
		TableMetadata{
			// This table, found in the system keyspace, has key aliases, column aliases, and a value alias.
			Keyspace:         "V1Keyspace",
			Name:             "hints",
			KeyValidator:     "org.apache.cassandra.db.marshal.UUIDType",
			Comparator:       "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.TimeUUIDType,org.apache.cassandra.db.marshal.Int32Type)",
			DefaultValidator: "org.apache.cassandra.db.marshal.BytesType",
			KeyAliases:       []string{"target_id"},
			ColumnAliases:    []string{"hint_id", "message_version"},
			ValueAlias:       "mutation",
		},
		TableMetadata{
			// This table, found in the system keyspace, has a comparator with collections, but no column aliases
			Keyspace:         "V1Keyspace",
			Name:             "peers",
			KeyValidator:     "org.apache.cassandra.db.marshal.InetAddressType",
			Comparator:       "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.ColumnToCollectionType(746f6b656e73:org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UTF8Type)))",
			DefaultValidator: "org.apache.cassandra.db.marshal.BytesType",
			KeyAliases:       []string{"peer"},
			ColumnAliases:    []string{},
			ValueAlias:       "",
		},
		TableMetadata{
			// This table, found in the system keyspace, has a column alias, but not a composite comparator
			Keyspace:         "V1Keyspace",
			Name:             "IndexInfo",
			KeyValidator:     "org.apache.cassandra.db.marshal.UTF8Type",
			Comparator:       "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.UTF8Type)",
			DefaultValidator: "org.apache.cassandra.db.marshal.BytesType",
			KeyAliases:       []string{"table_name"},
			ColumnAliases:    []string{"index_name"},
			ValueAlias:       "",
		},
		TableMetadata{
			// This table, found in the gocql_test keyspace following an integration test run, has a composite comparator with collections as well as a column alias
			Keyspace:         "V1Keyspace",
			Name:             "wiki_page",
			KeyValidator:     "org.apache.cassandra.db.marshal.UTF8Type",
			Comparator:       "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.TimeUUIDType,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.ColumnToCollectionType(74616773:org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UTF8Type),6174746163686d656e7473:org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.BytesType)))",
			DefaultValidator: "org.apache.cassandra.db.marshal.BytesType",
			KeyAliases:       []string{"title"},
			ColumnAliases:    []string{"revid"},
			ValueAlias:       "",
		},
		TableMetadata{
			// This is a made up example with multiple unnamed aliases
			Keyspace:         "V1Keyspace",
			Name:             "no_names",
			KeyValidator:     "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.UUIDType)",
			Comparator:       "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.Int32Type)",
			DefaultValidator: "org.apache.cassandra.db.marshal.BytesType",
			KeyAliases:       []string{},
			ColumnAliases:    []string{},
			ValueAlias:       "",
		},
	}
	columns := []ColumnMetadata{
		// Here are the regular columns from the peers table for testing regular columns
		ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "data_center", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type"},
		ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "host_id", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UUIDType"},
		ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "rack", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type"},
		ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "release_version", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type"},
		ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "rpc_address", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.InetAddressType"},
		ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "schema_version", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UUIDType"},
		ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "tokens", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UTF8Type)"},
	}
	// compile the V1 example data and verify the resulting keyspace structure
	compileMetadata(1, keyspace, tables, columns)
	assertKeyspaceMetadata(
		t,
		keyspace,
		&KeyspaceMetadata{
			Name: "V1Keyspace",
			Tables: map[string]*TableMetadata{
				"Schema": &TableMetadata{
					PartitionKey: []*ColumnMetadata{
						&ColumnMetadata{
							Name: "key",
							Type: NativeType{typ: TypeBlob},
						},
					},
					ClusteringColumns: []*ColumnMetadata{},
					Columns: map[string]*ColumnMetadata{
						"key": &ColumnMetadata{
							Name: "key",
							Type: NativeType{typ: TypeBlob},
							Kind: PARTITION_KEY,
						},
					},
				},
				"hints": &TableMetadata{
					PartitionKey: []*ColumnMetadata{
						&ColumnMetadata{
							Name: "target_id",
							Type: NativeType{typ: TypeUUID},
						},
					},
					ClusteringColumns: []*ColumnMetadata{
						&ColumnMetadata{
							Name:  "hint_id",
							Type:  NativeType{typ: TypeTimeUUID},
							Order: ASC,
						},
						&ColumnMetadata{
							Name:  "message_version",
							Type:  NativeType{typ: TypeInt},
							Order: ASC,
						},
					},
					Columns: map[string]*ColumnMetadata{
						"target_id": &ColumnMetadata{
							Name: "target_id",
							Type: NativeType{typ: TypeUUID},
							Kind: PARTITION_KEY,
						},
						"hint_id": &ColumnMetadata{
							Name:  "hint_id",
							Type:  NativeType{typ: TypeTimeUUID},
							Order: ASC,
							Kind:  CLUSTERING_KEY,
						},
						"message_version": &ColumnMetadata{
							Name:  "message_version",
							Type:  NativeType{typ: TypeInt},
							Order: ASC,
							Kind:  CLUSTERING_KEY,
						},
						"mutation": &ColumnMetadata{
							Name: "mutation",
							Type: NativeType{typ: TypeBlob},
							Kind: REGULAR,
						},
					},
				},
				"peers": &TableMetadata{
					PartitionKey: []*ColumnMetadata{
						&ColumnMetadata{
							Name: "peer",
							Type: NativeType{typ: TypeInet},
						},
					},
					ClusteringColumns: []*ColumnMetadata{},
					Columns: map[string]*ColumnMetadata{
						"peer": &ColumnMetadata{
							Name: "peer",
							Type: NativeType{typ: TypeInet},
							Kind: PARTITION_KEY,
						},
						"data_center":     &ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "data_center", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type", Type: NativeType{typ: TypeVarchar}},
						"host_id":         &ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "host_id", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UUIDType", Type: NativeType{typ: TypeUUID}},
						"rack":            &ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "rack", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type", Type: NativeType{typ: TypeVarchar}},
						"release_version": &ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "release_version", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type", Type: NativeType{typ: TypeVarchar}},
						"rpc_address":     &ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "rpc_address", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.InetAddressType", Type: NativeType{typ: TypeInet}},
						"schema_version":  &ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "schema_version", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UUIDType", Type: NativeType{typ: TypeUUID}},
						"tokens":          &ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "tokens", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UTF8Type)", Type: CollectionType{NativeType: NativeType{typ: TypeSet}}},
					},
				},
				"IndexInfo": &TableMetadata{
					PartitionKey: []*ColumnMetadata{
						&ColumnMetadata{
							Name: "table_name",
							Type: NativeType{typ: TypeVarchar},
						},
					},
					ClusteringColumns: []*ColumnMetadata{
						&ColumnMetadata{
							Name:  "index_name",
							Type:  NativeType{typ: TypeVarchar},
							Order: DESC,
						},
					},
					Columns: map[string]*ColumnMetadata{
						"table_name": &ColumnMetadata{
							Name: "table_name",
							Type: NativeType{typ: TypeVarchar},
							Kind: PARTITION_KEY,
						},
						"index_name": &ColumnMetadata{
							Name:  "index_name",
							Type:  NativeType{typ: TypeVarchar},
							Order: DESC,
							Kind:  CLUSTERING_KEY,
						},
						"value": &ColumnMetadata{
							Name: "value",
							Type: NativeType{typ: TypeBlob},
							Kind: REGULAR,
						},
					},
				},
				"wiki_page": &TableMetadata{
					PartitionKey: []*ColumnMetadata{
						&ColumnMetadata{
							Name: "title",
							Type: NativeType{typ: TypeVarchar},
						},
					},
					ClusteringColumns: []*ColumnMetadata{
						&ColumnMetadata{
							Name:  "revid",
							Type:  NativeType{typ: TypeTimeUUID},
							Order: ASC,
						},
					},
					Columns: map[string]*ColumnMetadata{
						"title": &ColumnMetadata{
							Name: "title",
							Type: NativeType{typ: TypeVarchar},
							Kind: PARTITION_KEY,
						},
						"revid": &ColumnMetadata{
							Name: "revid",
							Type: NativeType{typ: TypeTimeUUID},
							Kind: CLUSTERING_KEY,
						},
					},
				},
				"no_names": &TableMetadata{
					PartitionKey: []*ColumnMetadata{
						&ColumnMetadata{
							Name: "key",
							Type: NativeType{typ: TypeUUID},
						},
						&ColumnMetadata{
							Name: "key2",
							Type: NativeType{typ: TypeUUID},
						},
					},
					ClusteringColumns: []*ColumnMetadata{
						&ColumnMetadata{
							Name:  "column",
							Type:  NativeType{typ: TypeInt},
							Order: ASC,
						},
						&ColumnMetadata{
							Name:  "column2",
							Type:  NativeType{typ: TypeInt},
							Order: ASC,
						},
						&ColumnMetadata{
							Name:  "column3",
							Type:  NativeType{typ: TypeInt},
							Order: ASC,
						},
					},
					Columns: map[string]*ColumnMetadata{
						"key": &ColumnMetadata{
							Name: "key",
							Type: NativeType{typ: TypeUUID},
							Kind: PARTITION_KEY,
						},
						"key2": &ColumnMetadata{
							Name: "key2",
							Type: NativeType{typ: TypeUUID},
							Kind: PARTITION_KEY,
						},
						"column": &ColumnMetadata{
							Name:  "column",
							Type:  NativeType{typ: TypeInt},
							Order: ASC,
							Kind:  CLUSTERING_KEY,
						},
						"column2": &ColumnMetadata{
							Name:  "column2",
							Type:  NativeType{typ: TypeInt},
							Order: ASC,
							Kind:  CLUSTERING_KEY,
						},
						"column3": &ColumnMetadata{
							Name:  "column3",
							Type:  NativeType{typ: TypeInt},
							Order: ASC,
							Kind:  CLUSTERING_KEY,
						},
						"value": &ColumnMetadata{
							Name: "value",
							Type: NativeType{typ: TypeBlob},
							Kind: REGULAR,
						},
					},
				},
			},
		},
	)

	// V2 test - V2+ protocol is simpler so here are some toy examples to verify that the mapping works
	keyspace = &KeyspaceMetadata{
		Name: "V2Keyspace",
	}
	tables = []TableMetadata{
		TableMetadata{
			Keyspace: "V2Keyspace",
			Name:     "Table1",
		},
		TableMetadata{
			Keyspace: "V2Keyspace",
			Name:     "Table2",
		},
	}
	columns = []ColumnMetadata{
		ColumnMetadata{
			Keyspace:       "V2Keyspace",
			Table:          "Table1",
			Name:           "Key1",
			Kind:           PARTITION_KEY,
			ComponentIndex: 0,
			Validator:      "org.apache.cassandra.db.marshal.UTF8Type",
		},
		ColumnMetadata{
			Keyspace:       "V2Keyspace",
			Table:          "Table2",
			Name:           "Column1",
			Kind:           PARTITION_KEY,
			ComponentIndex: 0,
			Validator:      "org.apache.cassandra.db.marshal.UTF8Type",
		},
		ColumnMetadata{
			Keyspace:       "V2Keyspace",
			Table:          "Table2",
			Name:           "Column2",
			Kind:           CLUSTERING_KEY,
			ComponentIndex: 0,
			Validator:      "org.apache.cassandra.db.marshal.UTF8Type",
		},
		ColumnMetadata{
			Keyspace:       "V2Keyspace",
			Table:          "Table2",
			Name:           "Column3",
			Kind:           CLUSTERING_KEY,
			ComponentIndex: 1,
			Validator:      "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.UTF8Type)",
		},
		ColumnMetadata{
			Keyspace:  "V2Keyspace",
			Table:     "Table2",
			Name:      "Column4",
			Kind:      REGULAR,
			Validator: "org.apache.cassandra.db.marshal.UTF8Type",
		},
	}
	// compile the V2 example data and verify the resulting keyspace structure
	compileMetadata(2, keyspace, tables, columns)
	assertKeyspaceMetadata(
		t,
		keyspace,
		&KeyspaceMetadata{
			Name: "V2Keyspace",
			Tables: map[string]*TableMetadata{
				"Table1": &TableMetadata{
					PartitionKey: []*ColumnMetadata{
						&ColumnMetadata{
							Name: "Key1",
							Type: NativeType{typ: TypeVarchar},
						},
					},
					ClusteringColumns: []*ColumnMetadata{},
					Columns: map[string]*ColumnMetadata{
						"Key1": &ColumnMetadata{
							Name: "Key1",
							Type: NativeType{typ: TypeVarchar},
							Kind: PARTITION_KEY,
						},
					},
				},
				"Table2": &TableMetadata{
					PartitionKey: []*ColumnMetadata{
						&ColumnMetadata{
							Name: "Column1",
							Type: NativeType{typ: TypeVarchar},
						},
					},
					ClusteringColumns: []*ColumnMetadata{
						&ColumnMetadata{
							Name:  "Column2",
							Type:  NativeType{typ: TypeVarchar},
							Order: ASC,
						},
						&ColumnMetadata{
							Name:  "Column3",
							Type:  NativeType{typ: TypeVarchar},
							Order: DESC,
						},
					},
					Columns: map[string]*ColumnMetadata{
						"Column1": &ColumnMetadata{
							Name: "Column1",
							Type: NativeType{typ: TypeVarchar},
							Kind: PARTITION_KEY,
						},
						"Column2": &ColumnMetadata{
							Name:  "Column2",
							Type:  NativeType{typ: TypeVarchar},
							Order: ASC,
							Kind:  CLUSTERING_KEY,
						},
						"Column3": &ColumnMetadata{
							Name:  "Column3",
							Type:  NativeType{typ: TypeVarchar},
							Order: DESC,
							Kind:  CLUSTERING_KEY,
						},
						"Column4": &ColumnMetadata{
							Name: "Column4",
							Type: NativeType{typ: TypeVarchar},
							Kind: REGULAR,
						},
					},
				},
			},
		},
	)
}
// Helper function for asserting that actual metadata returned was as expected.
//
// Fixes: the Columns name-mismatch error previously printed at.Name (the
// table's name) instead of ac.Name (the actual column name), producing a
// misleading failure message. Also adds a nil guard on PartitionKey entries,
// mirroring the existing guard on ClusteringColumns entries.
func assertKeyspaceMetadata(t *testing.T, actual, expected *KeyspaceMetadata) {
	if len(expected.Tables) != len(actual.Tables) {
		t.Errorf("Expected len(%s.Tables) to be %v but was %v", expected.Name, len(expected.Tables), len(actual.Tables))
	}
	for keyT := range expected.Tables {
		et := expected.Tables[keyT]
		at, found := actual.Tables[keyT]

		if !found {
			t.Errorf("Expected %s.Tables[%s] but was not found", expected.Name, keyT)
		} else {
			if keyT != at.Name {
				t.Errorf("Expected %s.Tables[%s].Name to be %v but was %v", expected.Name, keyT, keyT, at.Name)
			}
			if len(et.PartitionKey) != len(at.PartitionKey) {
				t.Errorf("Expected len(%s.Tables[%s].PartitionKey) to be %v but was %v", expected.Name, keyT, len(et.PartitionKey), len(at.PartitionKey))
			} else {
				for i := range et.PartitionKey {
					// guard against nil entries before dereferencing
					if at.PartitionKey[i] == nil {
						t.Fatalf("Unexpected nil value: %s.Tables[%s].PartitionKey[%d]", expected.Name, keyT, i)
					}
					if et.PartitionKey[i].Name != at.PartitionKey[i].Name {
						t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].Name to be '%v' but was '%v'", expected.Name, keyT, i, et.PartitionKey[i].Name, at.PartitionKey[i].Name)
					}
					if expected.Name != at.PartitionKey[i].Keyspace {
						t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].Keyspace to be '%v' but was '%v'", expected.Name, keyT, i, expected.Name, at.PartitionKey[i].Keyspace)
					}
					if keyT != at.PartitionKey[i].Table {
						t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].Table to be '%v' but was '%v'", expected.Name, keyT, i, keyT, at.PartitionKey[i].Table)
					}
					if et.PartitionKey[i].Type.Type() != at.PartitionKey[i].Type.Type() {
						t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].Type.Type to be %v but was %v", expected.Name, keyT, i, et.PartitionKey[i].Type.Type(), at.PartitionKey[i].Type.Type())
					}
					if i != at.PartitionKey[i].ComponentIndex {
						t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].ComponentIndex to be %v but was %v", expected.Name, keyT, i, i, at.PartitionKey[i].ComponentIndex)
					}
					if PARTITION_KEY != at.PartitionKey[i].Kind {
						t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].Kind to be '%v' but was '%v'", expected.Name, keyT, i, PARTITION_KEY, at.PartitionKey[i].Kind)
					}
				}
			}
			if len(et.ClusteringColumns) != len(at.ClusteringColumns) {
				t.Errorf("Expected len(%s.Tables[%s].ClusteringColumns) to be %v but was %v", expected.Name, keyT, len(et.ClusteringColumns), len(at.ClusteringColumns))
			} else {
				for i := range et.ClusteringColumns {
					if at.ClusteringColumns[i] == nil {
						t.Fatalf("Unexpected nil value: %s.Tables[%s].ClusteringColumns[%d]", expected.Name, keyT, i)
					}
					if et.ClusteringColumns[i].Name != at.ClusteringColumns[i].Name {
						t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Name to be '%v' but was '%v'", expected.Name, keyT, i, et.ClusteringColumns[i].Name, at.ClusteringColumns[i].Name)
					}
					if expected.Name != at.ClusteringColumns[i].Keyspace {
						t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Keyspace to be '%v' but was '%v'", expected.Name, keyT, i, expected.Name, at.ClusteringColumns[i].Keyspace)
					}
					if keyT != at.ClusteringColumns[i].Table {
						t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Table to be '%v' but was '%v'", expected.Name, keyT, i, keyT, at.ClusteringColumns[i].Table)
					}
					if et.ClusteringColumns[i].Type.Type() != at.ClusteringColumns[i].Type.Type() {
						t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Type.Type to be %v but was %v", expected.Name, keyT, i, et.ClusteringColumns[i].Type.Type(), at.ClusteringColumns[i].Type.Type())
					}
					if i != at.ClusteringColumns[i].ComponentIndex {
						t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].ComponentIndex to be %v but was %v", expected.Name, keyT, i, i, at.ClusteringColumns[i].ComponentIndex)
					}
					if et.ClusteringColumns[i].Order != at.ClusteringColumns[i].Order {
						t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Order to be %v but was %v", expected.Name, keyT, i, et.ClusteringColumns[i].Order, at.ClusteringColumns[i].Order)
					}
					if CLUSTERING_KEY != at.ClusteringColumns[i].Kind {
						t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Kind to be '%v' but was '%v'", expected.Name, keyT, i, CLUSTERING_KEY, at.ClusteringColumns[i].Kind)
					}
				}
			}
			if len(et.Columns) != len(at.Columns) {
				eKeys := make([]string, 0, len(et.Columns))
				for key := range et.Columns {
					eKeys = append(eKeys, key)
				}
				aKeys := make([]string, 0, len(at.Columns))
				for key := range at.Columns {
					aKeys = append(aKeys, key)
				}
				t.Errorf("Expected len(%s.Tables[%s].Columns) to be %v (keys:%v) but was %v (keys:%v)", expected.Name, keyT, len(et.Columns), eKeys, len(at.Columns), aKeys)
			} else {
				for keyC := range et.Columns {
					ec := et.Columns[keyC]
					ac, found := at.Columns[keyC]

					if !found {
						t.Errorf("Expected %s.Tables[%s].Columns[%s] but was not found", expected.Name, keyT, keyC)
					} else {
						if keyC != ac.Name {
							// was at.Name (table name), which made the message useless
							t.Errorf("Expected %s.Tables[%s].Columns[%s].Name to be '%v' but was '%v'", expected.Name, keyT, keyC, keyC, ac.Name)
						}
						if expected.Name != ac.Keyspace {
							t.Errorf("Expected %s.Tables[%s].Columns[%s].Keyspace to be '%v' but was '%v'", expected.Name, keyT, keyC, expected.Name, ac.Keyspace)
						}
						if keyT != ac.Table {
							t.Errorf("Expected %s.Tables[%s].Columns[%s].Table to be '%v' but was '%v'", expected.Name, keyT, keyC, keyT, ac.Table)
						}
						if ec.Type.Type() != ac.Type.Type() {
							t.Errorf("Expected %s.Tables[%s].Columns[%s].Type.Type to be %v but was %v", expected.Name, keyT, keyC, ec.Type.Type(), ac.Type.Type())
						}
						if ec.Order != ac.Order {
							t.Errorf("Expected %s.Tables[%s].Columns[%s].Order to be %v but was %v", expected.Name, keyT, keyC, ec.Order, ac.Order)
						}
						if ec.Kind != ac.Kind {
							t.Errorf("Expected %s.Tables[%s].Columns[%s].Kind to be '%v' but was '%v'", expected.Name, keyT, keyC, ec.Kind, ac.Kind)
						}
					}
				}
			}
		}
	}
}
// Tests the cassandra type definition parser against native, reversed,
// collection, custom, and composite definitions.
func TestTypeParser(t *testing.T) {
	// native type
	assertParseNonCompositeType(
		t,
		"org.apache.cassandra.db.marshal.UTF8Type",
		assertTypeInfo{Type: TypeVarchar},
	)

	// reversed
	assertParseNonCompositeType(
		t,
		"org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.UUIDType)",
		assertTypeInfo{Type: TypeUUID, Reversed: true},
	)

	// set
	assertParseNonCompositeType(
		t,
		"org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.Int32Type)",
		assertTypeInfo{
			Type: TypeSet,
			Elem: &assertTypeInfo{Type: TypeInt},
		},
	)

	// list
	assertParseNonCompositeType(
		t,
		"org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.TimeUUIDType)",
		assertTypeInfo{
			Type: TypeList,
			Elem: &assertTypeInfo{Type: TypeTimeUUID},
		},
	)

	// map; note the deliberate extra whitespace, which the parser must skip
	assertParseNonCompositeType(
		t,
		" org.apache.cassandra.db.marshal.MapType( org.apache.cassandra.db.marshal.UUIDType , org.apache.cassandra.db.marshal.BytesType ) ",
		assertTypeInfo{
			Type: TypeMap,
			Key:  &assertTypeInfo{Type: TypeUUID},
			Elem: &assertTypeInfo{Type: TypeBlob},
		},
	)

	// custom types are passed through verbatim as the Custom string
	assertParseNonCompositeType(
		t,
		"org.apache.cassandra.db.marshal.UserType(sandbox,61646472657373,737472656574:org.apache.cassandra.db.marshal.UTF8Type,63697479:org.apache.cassandra.db.marshal.UTF8Type,7a6970:org.apache.cassandra.db.marshal.Int32Type)",
		assertTypeInfo{Type: TypeCustom, Custom: "org.apache.cassandra.db.marshal.UserType(sandbox,61646472657373,737472656574:org.apache.cassandra.db.marshal.UTF8Type,63697479:org.apache.cassandra.db.marshal.UTF8Type,7a6970:org.apache.cassandra.db.marshal.Int32Type)"},
	)
	assertParseNonCompositeType(
		t,
		"org.apache.cassandra.db.marshal.DynamicCompositeType(u=>org.apache.cassandra.db.marshal.UUIDType,d=>org.apache.cassandra.db.marshal.DateType,t=>org.apache.cassandra.db.marshal.TimeUUIDType,b=>org.apache.cassandra.db.marshal.BytesType,s=>org.apache.cassandra.db.marshal.UTF8Type,B=>org.apache.cassandra.db.marshal.BooleanType,a=>org.apache.cassandra.db.marshal.AsciiType,l=>org.apache.cassandra.db.marshal.LongType,i=>org.apache.cassandra.db.marshal.IntegerType,x=>org.apache.cassandra.db.marshal.LexicalUUIDType)",
		assertTypeInfo{Type: TypeCustom, Custom: "org.apache.cassandra.db.marshal.DynamicCompositeType(u=>org.apache.cassandra.db.marshal.UUIDType,d=>org.apache.cassandra.db.marshal.DateType,t=>org.apache.cassandra.db.marshal.TimeUUIDType,b=>org.apache.cassandra.db.marshal.BytesType,s=>org.apache.cassandra.db.marshal.UTF8Type,B=>org.apache.cassandra.db.marshal.BooleanType,a=>org.apache.cassandra.db.marshal.AsciiType,l=>org.apache.cassandra.db.marshal.LongType,i=>org.apache.cassandra.db.marshal.IntegerType,x=>org.apache.cassandra.db.marshal.LexicalUUIDType)"},
	)

	// composite defs
	assertParseCompositeType(
		t,
		"org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type)",
		[]assertTypeInfo{
			assertTypeInfo{Type: TypeVarchar},
		},
		nil,
	)
	assertParseCompositeType(
		t,
		"org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.DateType),org.apache.cassandra.db.marshal.UTF8Type)",
		[]assertTypeInfo{
			assertTypeInfo{Type: TypeTimestamp, Reversed: true},
			assertTypeInfo{Type: TypeVarchar},
		},
		nil,
	)
	// composite with a ColumnToCollectionType; "726f77735f6d6572676564" is hex for "rows_merged"
	assertParseCompositeType(
		t,
		"org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.ColumnToCollectionType(726f77735f6d6572676564:org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.LongType)))",
		[]assertTypeInfo{
			assertTypeInfo{Type: TypeVarchar},
		},
		map[string]assertTypeInfo{
			"rows_merged": assertTypeInfo{
				Type: TypeMap,
				Key:  &assertTypeInfo{Type: TypeInt},
				Elem: &assertTypeInfo{Type: TypeBigInt},
			},
		},
	)
}
// assertTypeInfo is an expected-data holder describing a parsed type:
// the base Type, whether it was reversed, collection element/key types,
// and the raw Custom string for custom types.
type assertTypeInfo struct {
	Type     Type
	Reversed bool
	Elem     *assertTypeInfo
	Key      *assertTypeInfo
	Custom   string
}
// Helper function for asserting that the type parser returns the expected
// result when parsing a definition that is not a composite.
func assertParseNonCompositeType(
	t *testing.T,
	def string,
	typeExpected assertTypeInfo,
) {
	result := parseType(def)

	if got := len(result.reversed); got != 1 {
		t.Errorf("%s expected %d reversed values but there were %d", def, 1, got)
	}

	assertParseNonCompositeTypes(t, def, []assertTypeInfo{typeExpected}, result.types)

	// a non-composite definition must not produce composite metadata
	if result.isComposite {
		t.Errorf("%s: Expected not composite", def)
	}
	if result.collections != nil {
		t.Errorf("%s: Expected nil collections: %v", def, result.collections)
	}
}
// Helper function for asserting that the type parser returns the expected
// results when parsing a composite definition, including any collection
// column types.
//
// Fixes a typo in the missing-collection error message: "%s.tcollections:"
// is now "%s.collections:", matching the other collection messages.
func assertParseCompositeType(
	t *testing.T,
	def string,
	typesExpected []assertTypeInfo,
	collectionsExpected map[string]assertTypeInfo,
) {
	result := parseType(def)

	if len(result.reversed) != len(typesExpected) {
		t.Errorf("%s expected %d reversed values but there were %d", def, len(typesExpected), len(result.reversed))
	}

	assertParseNonCompositeTypes(
		t,
		def,
		typesExpected,
		result.types,
	)

	// expect composite part of the result
	if !result.isComposite {
		t.Errorf("%s: Expected composite", def)
	}
	if result.collections == nil {
		t.Errorf("%s: Expected non-nil collections: %v", def, result.collections)
	}

	for name, typeExpected := range collectionsExpected {
		// check for an actual type for this name
		typeActual, found := result.collections[name]
		if !found {
			t.Errorf("%s.collections: Expected param named %s but there wasn't", def, name)
		} else {
			// remove the actual from the collection so we can detect extras
			delete(result.collections, name)

			// check the type
			assertParseNonCompositeTypes(
				t,
				def+"collections["+name+"]",
				[]assertTypeInfo{typeExpected},
				[]TypeInfo{typeActual},
			)
		}
	}
	if len(result.collections) != 0 {
		t.Errorf("%s.collections: Expected no more types in collections, but there was %v", def, result.collections)
	}
}
// Helper function for asserting that a slice of parsed TypeInfo matches the
// expected descriptions. Recurses into collection Elem/Key types; context is
// a human-readable path used to label failures.
func assertParseNonCompositeTypes(
	t *testing.T,
	context string,
	typesExpected []assertTypeInfo,
	typesActual []TypeInfo,
) {
	if len(typesActual) != len(typesExpected) {
		t.Errorf("%s: Expected %d types, but there were %d", context, len(typesExpected), len(typesActual))
	}

	for i := range typesExpected {
		typeExpected := typesExpected[i]
		typeActual := typesActual[i]

		// shadow copy the context for local modification
		context := context
		if len(typesExpected) > 1 {
			context = context + "[" + strconv.Itoa(i) + "]"
		}

		// check the type
		if typeActual.Type() != typeExpected.Type {
			t.Errorf("%s: Expected to parse Type to %s but was %s", context, typeExpected.Type, typeActual.Type())
		}
		// check the custom
		if typeActual.Custom() != typeExpected.Custom {
			t.Errorf("%s: Expected to parse Custom %s but was %s", context, typeExpected.Custom, typeActual.Custom())
		}

		// non-collection actuals yield the zero CollectionType (nil Elem/Key)
		collection, _ := typeActual.(CollectionType)
		// check the elem
		if typeExpected.Elem != nil {
			if collection.Elem == nil {
				t.Errorf("%s: Expected to parse Elem, but was nil ", context)
			} else {
				assertParseNonCompositeTypes(
					t,
					context+".Elem",
					[]assertTypeInfo{*typeExpected.Elem},
					[]TypeInfo{collection.Elem},
				)
			}
		} else if collection.Elem != nil {
			t.Errorf("%s: Expected to not parse Elem, but was %+v", context, collection.Elem)
		}

		// check the key
		if typeExpected.Key != nil {
			if collection.Key == nil {
				t.Errorf("%s: Expected to parse Key, but was nil ", context)
			} else {
				assertParseNonCompositeTypes(
					t,
					context+".Key",
					[]assertTypeInfo{*typeExpected.Key},
					[]TypeInfo{collection.Key},
				)
			}
		} else if collection.Key != nil {
			t.Errorf("%s: Expected to not parse Key, but was %+v", context, collection.Key)
		}
	}
}

View file

@ -0,0 +1,244 @@
// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//This file will be the future home for more policies
package gocql
import (
"log"
"sync"
"sync/atomic"
)
//RetryableQuery is an interface that represents a query or batch statement that
//exposes the correct functions for the retry policy logic to evaluate correctly.
type RetryableQuery interface {
Attempts() int
GetConsistency() Consistency
}
// RetryPolicy interface is used by gocql to determine if a query can be attempted
// again after a retryable error has been received. The interface allows gocql
// users to implement their own logic to determine if a query can be attempted
// again.
//
// See SimpleRetryPolicy as an example of implementing and using a RetryPolicy
// interface.
type RetryPolicy interface {
	// Attempt reports whether the given query should be tried again.
	Attempt(RetryableQuery) bool
}
// SimpleRetryPolicy has simple logic for attempting a query a fixed number of times.
//
// See below for examples of usage:
//
//	//Assign to the cluster
//	cluster.RetryPolicy = &gocql.SimpleRetryPolicy{NumRetries: 3}
//
//	//Assign to a query
//	query.RetryPolicy(&gocql.SimpleRetryPolicy{NumRetries: 1})
type SimpleRetryPolicy struct {
	// NumRetries is the number of times to retry a query.
	NumRetries int
}

// Attempt reports whether the query may be executed again: retries are
// permitted while the attempts made so far have not exceeded NumRetries.
func (s *SimpleRetryPolicy) Attempt(q RetryableQuery) bool {
	made := q.Attempts()
	return made <= s.NumRetries
}
// HostSelectionPolicy is an interface for selecting
// the most appropriate host to execute a given query.
type HostSelectionPolicy interface {
	// SetHosts and SetPartitioner are embedded interfaces (declared elsewhere
	// in this package) through which the driver feeds topology updates to
	// the policy.
	SetHosts
	SetPartitioner
	// Pick returns an iteration function over selected hosts
	Pick(*Query) NextHost
}

// NextHost is an iteration function over picked hosts; successive calls
// yield further candidates, and a nil result means no (more) hosts are
// available.
type NextHost func() *HostInfo
// roundRobinHostPolicy cycles through the known hosts in order,
// spreading queries evenly across them.
type roundRobinHostPolicy struct {
	hosts []HostInfo
	pos   uint32
	mu    sync.RWMutex
}

// NewRoundRobinHostPolicy is a round-robin load balancing policy.
func NewRoundRobinHostPolicy() HostSelectionPolicy {
	p := &roundRobinHostPolicy{hosts: []HostInfo{}}
	return p
}

// SetHosts replaces the set of hosts the policy rotates over.
func (r *roundRobinHostPolicy) SetHosts(hosts []HostInfo) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.hosts = hosts
}

// SetPartitioner is a no-op: round-robin selection ignores the partitioner.
func (r *roundRobinHostPolicy) SetPartitioner(partitioner string) {
}
// Pick returns a NextHost iterator that walks the hosts in round-robin
// order. Each iterator yields at most len(hosts) hosts before returning
// nil; the shared pos counter is advanced atomically so that concurrent
// iterators still distribute load evenly across hosts.
func (r *roundRobinHostPolicy) Pick(qry *Query) NextHost {
	// i is used to limit the number of attempts to find a host
	// to the number of hosts known to this policy
	var i uint32 = 0
	return func() *HostInfo {
		r.mu.RLock()
		if len(r.hosts) == 0 {
			r.mu.RUnlock()
			return nil
		}

		var host *HostInfo
		// always increment pos to evenly distribute traffic in case of
		// failures
		pos := atomic.AddUint32(&r.pos, 1)
		if int(i) < len(r.hosts) {
			// NOTE(review): i is per-iterator and not synchronized, so each
			// returned NextHost is assumed to be drained by a single
			// goroutine — confirm callers honor that.
			host = &r.hosts[(pos)%uint32(len(r.hosts))]
			i++
		}
		r.mu.RUnlock()
		return host
	}
}
// tokenAwareHostPolicy prefers the host owning the token of a query's
// routing key, delegating to a fallback policy whenever that host cannot
// be determined.
type tokenAwareHostPolicy struct {
	mu          sync.RWMutex
	hosts       []HostInfo
	partitioner string
	tokenRing   *tokenRing
	fallback    HostSelectionPolicy
}

// NewTokenAwareHostPolicy is a token aware host selection policy.
func NewTokenAwareHostPolicy(fallback HostSelectionPolicy) HostSelectionPolicy {
	return &tokenAwareHostPolicy{
		fallback: fallback,
		hosts:    []HostInfo{},
	}
}
// SetHosts records the cluster hosts, keeps the fallback policy in sync,
// and rebuilds the token ring.
func (t *tokenAwareHostPolicy) SetHosts(hosts []HostInfo) {
	t.mu.Lock()
	defer t.mu.Unlock()

	// always update the fallback
	t.fallback.SetHosts(hosts)
	t.hosts = hosts

	t.resetTokenRing()
}

// SetPartitioner records the cluster partitioner and rebuilds the token
// ring when it actually changed.
func (t *tokenAwareHostPolicy) SetPartitioner(partitioner string) {
	t.mu.Lock()
	defer t.mu.Unlock()

	if partitioner == t.partitioner {
		return
	}
	t.fallback.SetPartitioner(partitioner)
	t.partitioner = partitioner
	t.resetTokenRing()
}
// resetTokenRing rebuilds the token ring from the current partitioner and
// hosts. The caller must hold t.mu.
func (t *tokenAwareHostPolicy) resetTokenRing() {
	if t.partitioner == "" {
		// partitioner not yet known; nothing to build
		return
	}

	ring, err := newTokenRing(t.partitioner, t.hosts)
	if err != nil {
		// keep the old ring rather than replacing it with a broken one
		log.Printf("Unable to update the token ring due to error: %s", err)
		return
	}

	t.tokenRing = ring
}
// Pick returns an iterator that yields the token-owning host for the
// query's routing key first, then continues with the fallback policy's
// hosts (skipping the token-selected host once). Queries without a
// routing key — or for which the ring yields no host — are delegated
// entirely to the fallback policy.
func (t *tokenAwareHostPolicy) Pick(qry *Query) NextHost {
	if qry == nil {
		return t.fallback.Pick(qry)
	}

	routingKey, err := qry.GetRoutingKey()
	if err != nil {
		// cannot derive a routing key; let the fallback decide
		return t.fallback.Pick(qry)
	}
	if routingKey == nil {
		return t.fallback.Pick(qry)
	}

	var host *HostInfo

	t.mu.RLock()
	// TODO retrieve a list of hosts based on the replication strategy
	// NOTE(review): t.tokenRing is nil until SetPartitioner has run;
	// presumably GetHostForPartitionKey handles a nil receiver and returns
	// nil (the fallback path below) — confirm.
	host = t.tokenRing.GetHostForPartitionKey(routingKey)
	t.mu.RUnlock()

	if host == nil {
		return t.fallback.Pick(qry)
	}

	// scope these variables for the same lifetime as the iterator function
	var (
		hostReturned bool
		fallbackIter NextHost
	)
	return func() *HostInfo {
		// yield the token owner exactly once, before any fallback host
		if !hostReturned {
			hostReturned = true
			return host
		}

		// fallback; created on first use only
		if fallbackIter == nil {
			fallbackIter = t.fallback.Pick(qry)
		}

		fallbackHost := fallbackIter()

		// filter the token aware selected hosts from the fallback hosts
		if fallbackHost == host {
			fallbackHost = fallbackIter()
		}

		return fallbackHost
	}
}
// ConnSelectionPolicy is an interface for selecting an
// appropriate connection for executing a query
type ConnSelectionPolicy interface {
	// SetConns replaces the set of candidate connections.
	SetConns(conns []*Conn)
	// Pick returns a connection for the query, or nil when none exist.
	Pick(*Query) *Conn
}

// roundRobinConnPolicy hands out connections in rotation via an
// atomically incremented position counter shared by all callers.
type roundRobinConnPolicy struct {
	conns []*Conn
	pos   uint32
	mu    sync.RWMutex
}

// NewRoundRobinConnPolicy creates a round-robin connection selection policy.
func NewRoundRobinConnPolicy() ConnSelectionPolicy {
	return &roundRobinConnPolicy{}
}
// SetConns replaces the set of connections the policy rotates over.
func (r *roundRobinConnPolicy) SetConns(conns []*Conn) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.conns = conns
}

// Pick returns the next connection in round-robin order, or nil when the
// policy currently has no connections.
func (r *roundRobinConnPolicy) Pick(qry *Query) *Conn {
	next := atomic.AddUint32(&r.pos, 1)

	r.mu.RLock()
	defer r.mu.RUnlock()

	if n := len(r.conns); n > 0 {
		return r.conns[next%uint32(n)]
	}
	return nil
}

View file

@ -0,0 +1,125 @@
// Copyright (c) 2015 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocql
import "testing"
// Tests of the round-robin host selection policy implementation
//
// Interleaved iterators share one atomic position counter, so every call
// to any iterator advances the global rotation; the assertions below
// depend on that exact call order.
func TestRoundRobinHostPolicy(t *testing.T) {
	policy := NewRoundRobinHostPolicy()

	hosts := []HostInfo{
		HostInfo{HostId: "0"},
		HostInfo{HostId: "1"},
	}
	policy.SetHosts(hosts)

	// the first host selected is actually at [1], but this is ok for RR
	// interleaved iteration should always increment the host
	iterA := policy.Pick(nil)
	if actual := iterA(); actual != &hosts[1] {
		t.Errorf("Expected hosts[1] but was hosts[%s]", actual.HostId)
	}
	iterB := policy.Pick(nil)
	if actual := iterB(); actual != &hosts[0] {
		t.Errorf("Expected hosts[0] but was hosts[%s]", actual.HostId)
	}
	if actual := iterB(); actual != &hosts[1] {
		t.Errorf("Expected hosts[1] but was hosts[%s]", actual.HostId)
	}
	if actual := iterA(); actual != &hosts[0] {
		t.Errorf("Expected hosts[0] but was hosts[%s]", actual.HostId)
	}

	iterC := policy.Pick(nil)
	if actual := iterC(); actual != &hosts[1] {
		t.Errorf("Expected hosts[1] but was hosts[%s]", actual.HostId)
	}
	if actual := iterC(); actual != &hosts[0] {
		t.Errorf("Expected hosts[0] but was hosts[%s]", actual.HostId)
	}
}
// Tests of the token-aware host selection policy implementation with a
// round-robin host selection policy fallback.
//
// The fallback's rotation counter is shared across Pick calls, so the
// expected peers below depend on the exact sequence of calls.
func TestTokenAwareHostPolicy(t *testing.T) {
	policy := NewTokenAwareHostPolicy(NewRoundRobinHostPolicy())

	query := &Query{}

	// with no hosts set, the iterator must be non-nil but immediately exhausted
	iter := policy.Pick(nil)
	if iter == nil {
		t.Fatal("host iterator was nil")
	}
	actual := iter()
	if actual != nil {
		t.Fatalf("expected nil from iterator, but was %v", actual)
	}

	// set the hosts
	hosts := []HostInfo{
		HostInfo{Peer: "0", Tokens: []string{"00"}},
		HostInfo{Peer: "1", Tokens: []string{"25"}},
		HostInfo{Peer: "2", Tokens: []string{"50"}},
		HostInfo{Peer: "3", Tokens: []string{"75"}},
	}
	policy.SetHosts(hosts)

	// the token ring is not setup without the partitioner, but the fallback
	// should work
	if actual := policy.Pick(nil)(); actual.Peer != "1" {
		t.Errorf("Expected peer 1 but was %s", actual.Peer)
	}

	// routing key is ignored while the ring is missing: still round-robin
	query.RoutingKey([]byte("30"))
	if actual := policy.Pick(query)(); actual.Peer != "2" {
		t.Errorf("Expected peer 2 but was %s", actual.Peer)
	}

	policy.SetPartitioner("OrderedPartitioner")

	// now the token ring is configured
	query.RoutingKey([]byte("20"))
	iter = policy.Pick(query)
	// first host is the token owner for key "20"
	if actual := iter(); actual.Peer != "1" {
		t.Errorf("Expected peer 1 but was %s", actual.Peer)
	}
	// rest are round robin
	if actual := iter(); actual.Peer != "3" {
		t.Errorf("Expected peer 3 but was %s", actual.Peer)
	}
	if actual := iter(); actual.Peer != "0" {
		t.Errorf("Expected peer 0 but was %s", actual.Peer)
	}
	if actual := iter(); actual.Peer != "2" {
		t.Errorf("Expected peer 2 but was %s", actual.Peer)
	}
}
// Tests of the round-robin connection selection policy implementation
//
// Successive Pick calls share one atomic counter, so the expected
// connections alternate in call order.
func TestRoundRobinConnPolicy(t *testing.T) {
	policy := NewRoundRobinConnPolicy()

	conn0 := &Conn{}
	conn1 := &Conn{}
	conn := []*Conn{
		conn0,
		conn1,
	}
	policy.SetConns(conn)

	// the first conn selected is actually at [1], but this is ok for RR
	if actual := policy.Pick(nil); actual != conn1 {
		t.Error("Expected conn1")
	}
	if actual := policy.Pick(nil); actual != conn0 {
		t.Error("Expected conn0")
	}
	if actual := policy.Pick(nil); actual != conn1 {
		t.Error("Expected conn1")
	}
}

1019
Godeps/_workspace/src/github.com/gocql/gocql/session.go generated vendored Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,255 @@
// +build all integration
package gocql
import (
"fmt"
"testing"
)
// TestSessionAPI exercises the non-networked parts of the Session API:
// the consistency/page-size/prefetch/trace setters, query and batch
// construction, and the error paths taken when no connections exist or
// the session has been closed.
//
// Fixed: several misspelled failure messages ("expceted") and a missing
// space in the trace-writer message.
func TestSessionAPI(t *testing.T) {
	cfg := &ClusterConfig{}
	pool, err := NewSimplePool(cfg)
	if err != nil {
		t.Fatal(err)
	}

	s := &Session{
		Pool: pool,
		cfg:  *cfg,
		cons: Quorum,
	}
	defer s.Close()

	s.SetConsistency(All)
	if s.cons != All {
		t.Fatalf("expected consistency 'All', got '%v'", s.cons)
	}

	s.SetPageSize(100)
	if s.pageSize != 100 {
		t.Fatalf("expected pageSize 100, got %v", s.pageSize)
	}

	s.SetPrefetch(0.75)
	if s.prefetch != 0.75 {
		t.Fatalf("expected prefetch 0.75, got %v", s.prefetch)
	}

	trace := &traceWriter{}
	s.SetTrace(trace)
	if s.trace != trace {
		t.Fatalf("expected traceWriter '%v', got '%v'", trace, s.trace)
	}

	qry := s.Query("test", 1)
	if v, ok := qry.values[0].(int); !ok {
		t.Fatalf("expected qry.values[0] to be an int, got %v", qry.values[0])
	} else if v != 1 {
		t.Fatalf("expected qry.values[0] to be 1, got %v", v)
	} else if qry.stmt != "test" {
		t.Fatalf("expected qry.stmt to be 'test', got '%v'", qry.stmt)
	}

	boundQry := s.Bind("test", func(q *QueryInfo) ([]interface{}, error) {
		return nil, nil
	})
	if boundQry.binding == nil {
		t.Fatal("expected qry.binding to be defined, got nil")
	} else if boundQry.stmt != "test" {
		t.Fatalf("expected qry.stmt to be 'test', got '%v'", boundQry.stmt)
	}

	// the pool has no live connections, so execution must fail fast
	itr := s.executeQuery(qry)
	if itr.err != ErrNoConnections {
		t.Fatalf("expected itr.err to be '%v', got '%v'", ErrNoConnections, itr.err)
	}

	testBatch := s.NewBatch(LoggedBatch)
	testBatch.Query("test")
	err = s.ExecuteBatch(testBatch)
	if err != ErrNoConnections {
		t.Fatalf("expected session.ExecuteBatch to return '%v', got '%v'", ErrNoConnections, err)
	}

	s.Close()
	if !s.Closed() {
		t.Fatal("expected s.Closed() to be true, got false")
	}
	//Should just return cleanly
	s.Close()

	err = s.ExecuteBatch(testBatch)
	if err != ErrSessionClosed {
		t.Fatalf("expected session.ExecuteBatch to return '%v', got '%v'", ErrSessionClosed, err)
	}
}
// TestQueryBasicAPI exercises the Query accessors and fluent setters:
// attempts/latency bookkeeping, consistency, tracing, paging, prefetch,
// retry policy, and value binding.
func TestQueryBasicAPI(t *testing.T) {
	qry := &Query{}

	// zero attempts must not divide when computing latency
	if qry.Latency() != 0 {
		t.Fatalf("expected Query.Latency() to return 0, got %v", qry.Latency())
	}

	qry.attempts = 2
	qry.totalLatency = 4
	if qry.Attempts() != 2 {
		t.Fatalf("expected Query.Attempts() to return 2, got %v", qry.Attempts())
	}
	// latency is the per-attempt average: totalLatency / attempts
	if qry.Latency() != 2 {
		t.Fatalf("expected Query.Latency() to return 2, got %v", qry.Latency())
	}

	qry.Consistency(All)
	if qry.GetConsistency() != All {
		t.Fatalf("expected Query.GetConsistency to return 'All', got '%s'", qry.GetConsistency())
	}

	trace := &traceWriter{}
	qry.Trace(trace)
	if qry.trace != trace {
		t.Fatalf("expected Query.Trace to be '%v', got '%v'", trace, qry.trace)
	}

	qry.PageSize(10)
	if qry.pageSize != 10 {
		t.Fatalf("expected Query.PageSize to be 10, got %v", qry.pageSize)
	}

	qry.Prefetch(0.75)
	if qry.prefetch != 0.75 {
		t.Fatalf("expected Query.Prefetch to be 0.75, got %v", qry.prefetch)
	}

	rt := &SimpleRetryPolicy{NumRetries: 3}
	if qry.RetryPolicy(rt); qry.rt != rt {
		t.Fatalf("expected Query.RetryPolicy to be '%v', got '%v'", rt, qry.rt)
	}

	qry.Bind(qry)
	if qry.values[0] != qry {
		t.Fatalf("expected Query.Values[0] to be '%v', got '%v'", qry, qry.values[0])
	}
}
// TestQueryShouldPrepare verifies that shouldPrepare reports true only
// for statement kinds that benefit from server-side preparation.
func TestQueryShouldPrepare(t *testing.T) {
	toPrepare := []string{"select * ", "INSERT INTO", "update table", "delete from", "begin batch"}
	cantPrepare := []string{"create table", "USE table", "LIST keyspaces", "alter table", "drop table", "grant user", "revoke user"}
	q := &Query{}

	for _, stmt := range toPrepare {
		q.stmt = stmt
		if !q.shouldPrepare() {
			t.Fatalf("expected Query.shouldPrepare to return true, got false for statement '%v'", stmt)
		}
	}

	for _, stmt := range cantPrepare {
		q.stmt = stmt
		if q.shouldPrepare() {
			t.Fatalf("expected Query.shouldPrepare to return false, got true for statement '%v'", stmt)
		}
	}
}
// TestBatchBasicAPI exercises batch construction and its accessors:
// retry-policy inheritance from the cluster config, attempts/latency
// bookkeeping, consistency, and the Query/Bind entry builders.
//
// Fixed: three misspelled failure messages ("expceted").
func TestBatchBasicAPI(t *testing.T) {
	cfg := &ClusterConfig{RetryPolicy: &SimpleRetryPolicy{NumRetries: 2}}
	pool, err := NewSimplePool(cfg)
	if err != nil {
		t.Fatal(err)
	}
	s := &Session{
		Pool: pool,
		cfg:  *cfg,
		cons: Quorum,
	}
	defer s.Close()

	// batches created through the session inherit the cluster retry policy
	b := s.NewBatch(UnloggedBatch)
	if b.Type != UnloggedBatch {
		t.Fatalf("expected batch.Type to be '%v', got '%v'", UnloggedBatch, b.Type)
	} else if b.rt != cfg.RetryPolicy {
		t.Fatalf("expected batch.RetryPolicy to be '%v', got '%v'", cfg.RetryPolicy, b.rt)
	}

	b = NewBatch(LoggedBatch)
	if b.Type != LoggedBatch {
		t.Fatalf("expected batch.Type to be '%v', got '%v'", LoggedBatch, b.Type)
	}

	b.attempts = 1
	if b.Attempts() != 1 {
		t.Fatalf("expected batch.Attempts() to return %v, got %v", 1, b.Attempts())
	}

	if b.Latency() != 0 {
		t.Fatalf("expected batch.Latency() to be 0, got %v", b.Latency())
	}
	b.totalLatency = 4
	if b.Latency() != 4 {
		t.Fatalf("expected batch.Latency() to return %v, got %v", 4, b.Latency())
	}

	b.Cons = One
	if b.GetConsistency() != One {
		t.Fatalf("expected batch.GetConsistency() to return 'One', got '%s'", b.GetConsistency())
	}

	b.Query("test", 1)
	if b.Entries[0].Stmt != "test" {
		t.Fatalf("expected batch.Entries[0].Stmt to be 'test', got '%v'", b.Entries[0].Stmt)
	} else if b.Entries[0].Args[0].(int) != 1 {
		t.Fatalf("expected batch.Entries[0].Args[0] to be 1, got %v", b.Entries[0].Args[0])
	}

	b.Bind("test2", func(q *QueryInfo) ([]interface{}, error) {
		return nil, nil
	})
	if b.Entries[1].Stmt != "test2" {
		t.Fatalf("expected batch.Entries[1].Stmt to be 'test2', got '%v'", b.Entries[1].Stmt)
	} else if b.Entries[1].binding == nil {
		t.Fatal("expected batch.Entries[1].binding to be defined, got nil")
	}

	r := &SimpleRetryPolicy{NumRetries: 4}
	b.RetryPolicy(r)
	if b.rt != r {
		t.Fatalf("expected batch.RetryPolicy to be '%v', got '%v'", r, b.rt)
	}

	if b.Size() != 2 {
		t.Fatalf("expected batch.Size() to return 2, got %v", b.Size())
	}
}
// TestConsistencyNames checks the String form of every consistency level.
func TestConsistencyNames(t *testing.T) {
	names := map[fmt.Stringer]string{
		Any:         "ANY",
		One:         "ONE",
		Two:         "TWO",
		Three:       "THREE",
		Quorum:      "QUORUM",
		All:         "ALL",
		LocalQuorum: "LOCAL_QUORUM",
		EachQuorum:  "EACH_QUORUM",
		Serial:      "SERIAL",
		LocalSerial: "LOCAL_SERIAL",
		LocalOne:    "LOCAL_ONE",
	}
	for cons, want := range names {
		if got := cons.String(); got != want {
			t.Fatalf("expected '%v', got '%v'", want, got)
		}
	}
}

BIN
Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/.keystore (Stored with Git LFS) generated vendored Normal file

Binary file not shown.

BIN
Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/.truststore (Stored with Git LFS) generated vendored Normal file

Binary file not shown.

View file

@ -0,0 +1,20 @@
-----BEGIN CERTIFICATE-----
MIIDLzCCAhegAwIBAgIJAIKbAXgemwsjMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV
BAMTCWNhc3NhbmRyYTAeFw0xNDA5MTkyMTE4MTNaFw0yNDA5MTYyMTE4MTNaMBQx
EjAQBgNVBAMTCWNhc3NhbmRyYTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
ggEBAL5fX0l1WDNa+mO1krxw7k8lfUQn+Ec4L3Mqv6IstGoNdCPq4YRA+SXRD5YC
k/UXrFBWh9Hbs849GiuTYMPdj9HDLYz40RaQjM9GbieS23iy3UStQ0tKhxaaG6FN
6XBypXFKCTsanu0TkEoDGhAkSzAMcCAC3gkFBzMrZ5qt4HEzjY9rasZ2gthN+xop
nq3t4dDkE8HGaiFJcFvqTor7xmrnAaPjrPzUpvOF/ObIC09omwg/KXdPRx4DKPon
gCMKEE3ckebKnJvbsRX3WO8H5nTHBYZ6v1JxLZz5pqmV+P0NGxldCARM0gCQUBz5
wjMJkD/3e1ETC+q6uwfnAG0hlD8CAwEAAaOBgzCBgDAdBgNVHQ4EFgQUjHzn0nYF
iXEaI1vUWbRR4lwKXOgwRAYDVR0jBD0wO4AUjHzn0nYFiXEaI1vUWbRR4lwKXOih
GKQWMBQxEjAQBgNVBAMTCWNhc3NhbmRyYYIJAIKbAXgemwsjMAwGA1UdEwQFMAMB
Af8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IBAQBCYDdIhtf/Y12Et947
am1B8TzSX+/iQ1V1J3JtvgD5F4fvNjfArat/I3D277WREUTAc76o16BCp2OBGqzO
zf9MvZPkjkAUoyU0TtPUEHyqxq4gZxbWKugIZGYkmQ1hCvSIgA5UnjRL3dylMmZb
Y33JJA2QY63FZwnhmWsM8FYZwh+8MzVCQx3mgXC/k/jS6OuYyIT/KjxQHHjyr5ZS
zAAQln1IcZycLfh1w5MtCFahCIethFcVDnWUWYPcPGDGgMJW7WBpNZdHbLxYY8cI
eCc3Hcrbdc/CG5CaLJeqUidBayjnlUIO/NNgglkJ1KhQzkM6bd+37e0AX1hLIqx7
gIZR
-----END CERTIFICATE-----

View file

@ -0,0 +1,30 @@
-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,54C8072C0FF3B3A3
27eijmHdgB+s3beNPmU0+iz+muxMD0BVvWkDzyec/uawMv/Cn4c3mYXOcsFxS3BL
+qLT9MEttOmjqhHSaVrDYOPKoJIMpn+bVeKiR08V89icO36shEPy1feGqanagKtw
ecgzFDBTA8ZbqjAhftXlhTwxADebvNms/2aDh5Aw04vIcbo8nQ/8z1Wz8O7Firsn
kaseSTMTC6lxc+pa2V1X6mN0/2UpDi55bZbx1Z/mQ3+1CsdHOx0p7m/KY2m3ysov
XluaC0sqmzHkcwNgDhUs3Jh+apE33vXzLGU+W4BDOwrYJiL6KpspZW/mJj3OEx8B
8xdAZU3a/ei8NUA/lDStGmcYX+dOysExwJ6GMrCBm9iufZiefDQCQ8yRqWnr6Zop
lsFd+CqHNWYxfWDI1pSUBw3bsgIjevI0f0B7PxkFEF0DmIhCgB324/uqToRzGsOF
4MSVg6cSK7Sjo/u3r8r75A3aUAcY8NbR3peiZfAPMsTiUcfp4DoU+MJTqkX5PyQq
FNxHOJoARZqjjQ2IhZiUQWfIINHvZ8F9G2K7VaES8A0EATyUghqaRyeLbyI3IYdW
pGZBzrpGtdFlk9AVetHDDlY+gQiurtYhxOsxvlxJJuTj8FV+A5NWSElfPele0OiR
iprE3xkFSk3whHu5L1vnzamvdSlnBWOAE7pQD7kQA6NmcEw/tqnXK0dVdAw8RIFh
4BKgv0sNrXzBgnzE8+bKLUf1a2Byc/YKuBrI7EpSZ9/VHYvOcgmOxNxMmRS6NYd1
Ly+agQn0AyvsDmSlBZBp8GCzVp6JYBMDKSXyPVN8+wjK9OQM0PZdEdXouMwPCOVN
oNSjhmMtfjOsnG2SZ9tRas3p0qFdfh/N/E6Q7QHG3WD3cUIEweFV9ji1FTSRUrIa
shuKug8MUfNjvDJNMsdGyf6Hi/7Iik++42Rq3ZdTy0ZVkj5snv5yBN77pr2M/J4b
M+dsXjyXPO4SDW3kP/e3RnLRlWmUv1PNdOmNDdjBBUTKgVZ3ur+4HmSY1iDvhlUF
/hz2tz3/XUKQwYuv3KJVlBhLrniXeES36GK+JQadIszrjwb5N4q4p6xrIdIR7XgR
TJCSL1NGPLeQyjK6byWLNPRcCGrvnxWs0k0ev6trMRJL1EjsIFDCJam9szhcXkZP
iYl1d7ZMKPS3cAqCjdaFRSe65cZ+qI/cqxiv122orq/jkDY7ZSA9rWywY4YnYQ7A
BqvcPzC/6K0bteXqmMQkIy/84aSnEts6ecb/4s5e5xXLhHe0dchG0HkasC/Gb+v/
m9NOqACTerWvSD+Ecv9OvnBjP+GTlA1g7xTiRANLXsTJuiJomtxewXcV6kGZEMmZ
QWerGtPJGGUx36WRWrMiPeBfWZoIbjYGPmOO5mYNXMTjABGGWcFnKAqWUKsFihi9
pC0OpZ7A0dtc9uSm0ZmsHUc3XENMHTeeEN+qgWxVKcMzRKEcnapu/0OcHrOUHDZf
qPoG4EkNnG9kPMq3HzvFPx3qbQ017yl87vAkWy/Edo+ojfHoNghRBVGCw1zt/BMN
eJbFFHop+rQ87omz8WIL4K+zVf91rJ0REVAJssQVDo16O5wrMo+f+c8v2GANQks5
-----END RSA PRIVATE KEY-----

View file

@ -0,0 +1,83 @@
Certificate:
Data:
Version: 3 (0x2)
Serial Number: 2 (0x2)
Signature Algorithm: sha256WithRSAEncryption
Issuer: CN=cassandra
Validity
Not Before: Sep 19 21:18:48 2014 GMT
Not After : Sep 16 21:18:48 2024 GMT
Subject: CN=cassandra
Subject Public Key Info:
Public Key Algorithm: rsaEncryption
RSA Public Key: (2048 bit)
Modulus (2048 bit):
00:e5:9c:20:9e:de:98:73:44:41:0d:37:4c:62:c3:
9f:87:5f:9b:4f:aa:cf:f6:90:6e:a5:e0:89:88:7a:
00:c6:bb:d7:80:87:69:2e:fa:f0:35:59:80:6e:82:
25:c8:b3:6c:f6:a4:97:97:93:93:ea:f0:70:70:a4:
e1:b7:aa:da:c1:99:66:9b:93:04:3a:ce:0b:83:07:
06:22:3d:a6:db:7f:68:0f:49:80:bd:86:a8:bb:54:
6d:38:5f:0f:b0:fa:1b:97:24:ae:cc:9d:37:98:7e:
76:cc:e3:1b:45:1b:21:25:17:02:c0:1a:c5:fb:76:
c3:8b:93:d7:c5:85:14:0a:5c:a4:12:e7:18:69:98:
f5:76:cd:78:cd:99:5a:29:65:f1:68:20:97:d3:be:
09:b3:68:1b:f2:a3:a2:9a:73:58:53:7e:ed:86:32:
a3:5a:d5:46:03:f9:b3:b4:ec:63:71:ba:bb:fb:6f:
f9:82:63:e4:55:47:7a:7a:e4:7b:17:6b:d7:e6:cf:
3b:c9:ab:0c:30:15:c9:ed:c7:d6:fc:b6:72:b2:14:
7d:c7:f3:7f:8a:f4:63:70:64:8e:0f:db:e8:3a:45:
47:cd:b9:7b:ae:c8:31:c1:52:d1:3e:34:12:b7:73:
e7:ba:89:86:9a:36:ed:a0:5a:69:d0:d4:e3:b6:16:
85:af
Exponent: 65537 (0x10001)
X509v3 extensions:
X509v3 Basic Constraints:
CA:FALSE
X509v3 Subject Key Identifier:
4A:D3:EC:63:07:E0:8F:1A:4E:F5:09:43:90:9F:7A:C5:31:D1:8F:D8
X509v3 Authority Key Identifier:
keyid:8C:7C:E7:D2:76:05:89:71:1A:23:5B:D4:59:B4:51:E2:5C:0A:5C:E8
DirName:/CN=cassandra
serial:82:9B:01:78:1E:9B:0B:23
X509v3 Extended Key Usage:
TLS Web Server Authentication
X509v3 Key Usage:
Digital Signature, Key Encipherment
Signature Algorithm: sha256WithRSAEncryption
ac:bc:80:82:2d:6d:f1:a0:46:eb:00:05:d2:25:9a:83:66:57:
40:51:6e:ff:db:e3:28:04:7b:16:63:74:ec:55:a0:c0:5b:47:
13:e1:5a:a5:6d:22:d0:e5:fe:c1:51:e8:f6:c6:9c:f9:be:b7:
be:82:14:e4:a0:b2:0b:9f:ee:68:bc:ac:17:0d:13:50:c6:9e:
52:91:8c:a0:98:db:4e:2d:f6:3d:6e:85:0a:bb:b9:dd:01:bf:
ad:52:dd:6e:e4:41:01:a5:93:58:dd:3f:cf:bf:15:e6:25:aa:
a0:4f:98:0d:75:8a:3f:5b:ba:67:37:f6:b1:0b:3f:21:34:97:
50:9a:85:97:2b:b6:05:41:9a:f3:cf:c4:92:23:06:ab:3e:87:
98:30:eb:cb:d3:83:ab:04:7d:5c:b9:f0:12:d1:43:b3:c5:7d:
33:9a:2e:2b:80:3a:66:be:f1:8c:08:37:7a:93:9c:9b:60:60:
53:71:16:70:86:df:ca:5f:a9:0b:e2:8b:3d:af:02:62:3b:61:
30:da:53:89:e3:d8:0b:88:04:9a:93:6a:f6:28:f8:dd:0d:8f:
0c:82:5b:c0:e5:f8:0d:ad:06:76:a7:3b:4b:ae:54:37:25:15:
f5:0c:67:0f:77:c5:c4:97:68:09:c3:02:a7:a0:46:10:1c:d1:
95:3a:4c:94
-----BEGIN CERTIFICATE-----
MIIDOTCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDEwljYXNz
YW5kcmEwHhcNMTQwOTE5MjExODQ4WhcNMjQwOTE2MjExODQ4WjAUMRIwEAYDVQQD
EwljYXNzYW5kcmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDlnCCe
3phzREENN0xiw5+HX5tPqs/2kG6l4ImIegDGu9eAh2ku+vA1WYBugiXIs2z2pJeX
k5Pq8HBwpOG3qtrBmWabkwQ6zguDBwYiPabbf2gPSYC9hqi7VG04Xw+w+huXJK7M
nTeYfnbM4xtFGyElFwLAGsX7dsOLk9fFhRQKXKQS5xhpmPV2zXjNmVopZfFoIJfT
vgmzaBvyo6Kac1hTfu2GMqNa1UYD+bO07GNxurv7b/mCY+RVR3p65HsXa9fmzzvJ
qwwwFcntx9b8tnKyFH3H83+K9GNwZI4P2+g6RUfNuXuuyDHBUtE+NBK3c+e6iYaa
Nu2gWmnQ1OO2FoWvAgMBAAGjgZUwgZIwCQYDVR0TBAIwADAdBgNVHQ4EFgQUStPs
YwfgjxpO9QlDkJ96xTHRj9gwRAYDVR0jBD0wO4AUjHzn0nYFiXEaI1vUWbRR4lwK
XOihGKQWMBQxEjAQBgNVBAMTCWNhc3NhbmRyYYIJAIKbAXgemwsjMBMGA1UdJQQM
MAoGCCsGAQUFBwMBMAsGA1UdDwQEAwIFoDANBgkqhkiG9w0BAQsFAAOCAQEArLyA
gi1t8aBG6wAF0iWag2ZXQFFu/9vjKAR7FmN07FWgwFtHE+FapW0i0OX+wVHo9sac
+b63voIU5KCyC5/uaLysFw0TUMaeUpGMoJjbTi32PW6FCru53QG/rVLdbuRBAaWT
WN0/z78V5iWqoE+YDXWKP1u6Zzf2sQs/ITSXUJqFlyu2BUGa88/EkiMGqz6HmDDr
y9ODqwR9XLnwEtFDs8V9M5ouK4A6Zr7xjAg3epOcm2BgU3EWcIbfyl+pC+KLPa8C
YjthMNpTiePYC4gEmpNq9ij43Q2PDIJbwOX4Da0Gdqc7S65UNyUV9QxnD3fFxJdo
CcMCp6BGEBzRlTpMlA==
-----END CERTIFICATE-----

View file

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEA5Zwgnt6Yc0RBDTdMYsOfh1+bT6rP9pBupeCJiHoAxrvXgIdp
LvrwNVmAboIlyLNs9qSXl5OT6vBwcKTht6rawZlmm5MEOs4LgwcGIj2m239oD0mA
vYaou1RtOF8PsPoblySuzJ03mH52zOMbRRshJRcCwBrF+3bDi5PXxYUUClykEucY
aZj1ds14zZlaKWXxaCCX074Js2gb8qOimnNYU37thjKjWtVGA/mztOxjcbq7+2/5
gmPkVUd6euR7F2vX5s87yasMMBXJ7cfW/LZyshR9x/N/ivRjcGSOD9voOkVHzbl7
rsgxwVLRPjQSt3PnuomGmjbtoFpp0NTjthaFrwIDAQABAoIBAQChjdjl73kUoVGk
GuSEGWCFv59nzqfEtJsl23bpr+4b5s8agCxiAe5Bm1fiaXBsZtKkN+rxm8TX6ZUz
rM+ki3KgBW9Mx4SSW6d96dNHBFoC1wJAv1b2A2l1ZVHz9+7ydwgysHzNO1GC2nh8
cM8fMJeBoU8uG6hx5n5wFvYa5CfVoUQh8+Oq0b+mVxEFKHmRPnWp9/jPzL5eBIdr
ulbDt9S3dKJtouHgHBUNdkq/7Ex3QeHrUOahX6Y4eX1rzLnfLYY+0J4EA2PCKvgQ
bfKCxVnnzL6ywviH8eS3ql6OvTfnbK9kCRw7WxX9CC50qKj3EmwC/51MPhWohWlq
jw3qf38BAoGBAPPNyb3vUiyUqoErZxxIPFc2ob3vCjj06cvi7uKpOgrkdgC3iBhz
aCFQ28r7LrxLAHaKvNvwp71Lc7WYo8WWkLI1DVn0dx+GiQYW3DbNcwZOS40ZQz5L
zsjEcG4+cnZmuqGZBMNvQ+xUjkuucxvxPWKpEKM18GfDjgEkKbmDr+uNAoGBAPEY
kVSfSZGtP0MoXIfRkrxBlhvCj9m+p60P37pyHrJBrlrwvxB7x3Oz8S70D6kV8s2g
vVHgOS3VPj17VaQG8a3jBLKjzp5JLe34G8D1Ny8GqDc2wzOBtZySpJbifXuSUSPk
cqF7yiu1cD/wRPlwyWxBX9ZbaxvxnIUwLLd3ygkrAoGBAKQaw42uVkCdvPr/DQOT
d9I4erxO9zGJYQmU8bjtsZz9VJR89QWIQPIT7C3/zuB9F42zKxZcMXwQGo2EddAc
3b6mSRtgmwJEW10W7BmTRrZa4y3RcFqxSjoHR6pdLEyYL01woy0taqnb7H/yp5aK
VghfxkwllXEyxxXrko5FnpdNAoGBANeJLBunz2BxrnW+doJhZDnytFya4nk6TbKU
12FaNoEL4PCh+12kGtogSwS74eg6m/citT2mI9gKpHrYcOaT4qmeo4uEj+nH6Eyv
Gzi0wCHFZMr/pSC92/teyc+uKZo4Y1ugFq6w+Tt8GB7BERiisR+bji8XSTkRFemn
+MIIUFFDAoGAM8Va2Q5aTUkfg2mYlNLqT2tUAXVEhbmzjPA6laSo25PQEYWmX7vj
hiU0DPCDJQ/PlPI23xYtDDLNk83Zbx+Oj29GO5pawJY9NvFI8n60EFXfLbP1nEdG
j077QZNZOKfcgJirWi3+RrHSAK4tFftCe7rkV8ZmlMRBY3SDxzKOGcc=
-----END RSA PRIVATE KEY-----

View file

@ -0,0 +1,83 @@
Certificate:
Data:
Version: 3 (0x2)
Serial Number: 1 (0x1)
Signature Algorithm: sha256WithRSAEncryption
Issuer: CN=cassandra
Validity
Not Before: Sep 19 21:18:33 2014 GMT
Not After : Sep 16 21:18:33 2024 GMT
Subject: CN=gocql
Subject Public Key Info:
Public Key Algorithm: rsaEncryption
RSA Public Key: (2048 bit)
Modulus (2048 bit):
00:ae:e9:fa:9e:fd:e2:69:85:1d:08:0f:35:68:bc:
63:7b:92:50:7f:73:50:fc:42:43:35:06:b3:5c:9e:
27:1e:16:05:69:ec:88:d5:9c:4f:ef:e8:13:69:7a:
b5:b3:7f:66:6d:14:00:2e:d6:af:5b:ff:2c:90:91:
a6:11:07:72:5e:b0:37:c0:6d:ff:7b:76:2b:fe:de:
4c:d2:8d:ce:43:3b:1a:c4:1d:de:b6:d8:26:08:25:
89:59:a1:4b:94:a3:57:9e:19:46:28:6e:97:11:7c:
e6:b7:41:96:8f:42:dd:66:da:86:d2:53:dd:d8:f5:
20:cd:24:8b:0f:ab:df:c4:10:b2:64:20:1d:e0:0f:
f4:2d:f6:ca:94:be:83:ac:3e:a8:4a:77:b6:08:97:
3a:7e:7b:e0:3e:ab:68:cf:ee:f6:a1:8e:bf:ec:be:
06:d1:ad:6c:ed:4f:35:d1:04:97:08:33:b1:65:5b:
61:32:8d:4b:f0:30:35:4b:8b:6b:06:f2:1a:72:8c:
69:bd:f3:b2:c4:a4:a4:70:45:e3:67:a2:7a:9f:2e:
cb:28:2d:9f:68:03:f1:c7:d9:4f:83:c9:3d:8c:34:
04:0a:3b:13:87:92:e1:f7:e3:79:7e:ab:c0:25:b1:
e5:38:09:44:3e:31:df:12:d4:dc:7b:0e:35:bf:ee:
25:5f
Exponent: 65537 (0x10001)
X509v3 extensions:
X509v3 Basic Constraints:
CA:FALSE
X509v3 Subject Key Identifier:
9F:F1:B2:C4:82:34:D0:2F:FF:E9:7F:19:F1:3B:51:57:BF:E8:95:BB
X509v3 Authority Key Identifier:
keyid:8C:7C:E7:D2:76:05:89:71:1A:23:5B:D4:59:B4:51:E2:5C:0A:5C:E8
DirName:/CN=cassandra
serial:82:9B:01:78:1E:9B:0B:23
X509v3 Extended Key Usage:
TLS Web Client Authentication
X509v3 Key Usage:
Digital Signature
Signature Algorithm: sha256WithRSAEncryption
12:aa:1b:a6:58:27:52:32:c9:46:19:32:d3:69:ae:95:ad:23:
55:ad:12:65:da:2c:4c:72:f3:29:bd:2b:5a:97:3b:b7:68:8b:
68:80:77:55:e6:32:81:f1:f5:20:54:ba:0e:2b:86:90:d8:44:
cf:f2:9f:ec:4d:39:67:4e:36:6c:9b:49:4a:80:e6:c1:ed:a4:
41:39:19:16:d2:88:df:17:0c:46:5a:b9:88:53:f5:67:19:f0:
1f:9a:51:40:1b:40:12:bc:57:db:de:dd:d3:f5:a8:93:68:30:
ac:ba:4e:ee:6b:af:f8:13:3d:11:1a:fa:90:93:d0:68:ce:77:
5f:85:8b:a4:95:2a:4c:25:7b:53:9c:44:43:b1:d9:fe:0c:83:
b8:19:2a:88:cc:d8:d1:d9:b3:04:eb:45:9b:30:5e:cb:61:e0:
e1:88:23:9c:b0:34:79:62:82:0d:f8:10:ed:96:bb:a0:fd:0d:
02:cb:c5:d3:47:1f:35:a7:e3:39:31:56:d5:b3:eb:2f:93:8f:
18:b4:b7:3c:00:03:a7:b4:1c:17:72:91:7e:b6:f6:36:17:3d:
f6:54:3b:87:84:d1:9b:43:d1:88:42:64:20:7a:e3:cc:f7:05:
98:0e:1c:51:da:20:b7:9b:49:88:e8:c6:e1:de:0d:f5:56:4f:
79:41:d0:7f
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIBATANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDEwljYXNz
YW5kcmEwHhcNMTQwOTE5MjExODMzWhcNMjQwOTE2MjExODMzWjAQMQ4wDAYDVQQD
EwVnb2NxbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK7p+p794mmF
HQgPNWi8Y3uSUH9zUPxCQzUGs1yeJx4WBWnsiNWcT+/oE2l6tbN/Zm0UAC7Wr1v/
LJCRphEHcl6wN8Bt/3t2K/7eTNKNzkM7GsQd3rbYJggliVmhS5SjV54ZRihulxF8
5rdBlo9C3WbahtJT3dj1IM0kiw+r38QQsmQgHeAP9C32ypS+g6w+qEp3tgiXOn57
4D6raM/u9qGOv+y+BtGtbO1PNdEElwgzsWVbYTKNS/AwNUuLawbyGnKMab3zssSk
pHBF42eiep8uyygtn2gD8cfZT4PJPYw0BAo7E4eS4ffjeX6rwCWx5TgJRD4x3xLU
3HsONb/uJV8CAwEAAaOBlTCBkjAJBgNVHRMEAjAAMB0GA1UdDgQWBBSf8bLEgjTQ
L//pfxnxO1FXv+iVuzBEBgNVHSMEPTA7gBSMfOfSdgWJcRojW9RZtFHiXApc6KEY
pBYwFDESMBAGA1UEAxMJY2Fzc2FuZHJhggkAgpsBeB6bCyMwEwYDVR0lBAwwCgYI
KwYBBQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBCwUAA4IBAQASqhumWCdS
MslGGTLTaa6VrSNVrRJl2ixMcvMpvStalzu3aItogHdV5jKB8fUgVLoOK4aQ2ETP
8p/sTTlnTjZsm0lKgObB7aRBORkW0ojfFwxGWrmIU/VnGfAfmlFAG0ASvFfb3t3T
9aiTaDCsuk7ua6/4Ez0RGvqQk9BozndfhYuklSpMJXtTnERDsdn+DIO4GSqIzNjR
2bME60WbMF7LYeDhiCOcsDR5YoIN+BDtlrug/Q0Cy8XTRx81p+M5MVbVs+svk48Y
tLc8AAOntBwXcpF+tvY2Fz32VDuHhNGbQ9GIQmQgeuPM9wWYDhxR2iC3m0mI6Mbh
3g31Vk95QdB/
-----END CERTIFICATE-----

View file

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEArun6nv3iaYUdCA81aLxje5JQf3NQ/EJDNQazXJ4nHhYFaeyI
1ZxP7+gTaXq1s39mbRQALtavW/8skJGmEQdyXrA3wG3/e3Yr/t5M0o3OQzsaxB3e
ttgmCCWJWaFLlKNXnhlGKG6XEXzmt0GWj0LdZtqG0lPd2PUgzSSLD6vfxBCyZCAd
4A/0LfbKlL6DrD6oSne2CJc6fnvgPqtoz+72oY6/7L4G0a1s7U810QSXCDOxZVth
Mo1L8DA1S4trBvIacoxpvfOyxKSkcEXjZ6J6ny7LKC2faAPxx9lPg8k9jDQECjsT
h5Lh9+N5fqvAJbHlOAlEPjHfEtTcew41v+4lXwIDAQABAoIBAQCCP9XSwzfwX6Fo
uPqKjY5/HEs5PQPXdPha6ixyEYsLilZptCuI9adI/MZHy4q2qW36V+Ry/IcEuJXU
6cCB+cue2xYJA2A17Z+BYMRQHiy0P7UEyUFpYrefZWRMDCIeAyxhnGxz+zYfXaTo
Xbzh3WbFCoFO6gjPYGoWmNm8x74PXyunNaMa/gWFECX5MMBXoOk5xSFGbHzI2Cds
iT7sdCQJVbBs7yidYwNqPWQuOwrskFinPIFSc7bZ0Sx9wO3XTIrQFCE94v/AN6yR
9Q37ida54g5tgtoeg/5EGsUM++i4wqJVoT3tWUHv1jBozO4Lm65uWR/1HcrusVnr
x0TM9SaBAoGBAOMeaZdUrCJXnIiSoqCGDvZmylTAeOo6n2RAiviOYxVB4GP/SSjh
8VeddFhYT1GCmZ+YjIXnRWK+dSqVukzCuf5xW5mWY7PDNGZe2P6O78lXnY4cb8Nc
Uo9/S2aPnNmNHL2TYVBYUiZj+t2azIQEFvRth4Vu/AHRUG41/USxpwm/AoGBAMUo
GX0xgSFAVpHnTLdzWrHNRrzHgYN8ywPKFgNOASvdgW0BFoqXEvVGc1Ak6uW82m1/
L9ChOzWjCY7CoT+LPmdUVyGT9/UAPtWeLfo8Owl4tG91jQjePmJFvLoXErryCFRt
SOOvCsTTTq2gN3PREHxY3dj2kJqaCBLCEzx3cYxhAoGBAIUxdrc6/t/9BV3KsPj2
5Zt3WL0vSzoCOyut9lIiHtV+lrvOIPeK2eCKBIsy7wFcV/+SlQaKRNTN4SSiPml5
4V3o2NFPsxTfK8HFafiPluw7J7kJ0Dl/0SM6gduZ6WBkMzCyV+WohjTheWOwvrPF
OjkKaunD1qKyQDsCCo/Yp589AoGAdKgnfNZf68bf8nEECcBtt6sY4fbCgYTDszhO
EiKDuurT/CWaquJ9SzgmXxOZEdrO+9838aCVIkWYECrFso23nPhgnfOp0gQVKdzw
o5Ij9JTBXvoVO1wVWZyd8RZZ9Nflad9IM8CNBK1rbnzQkuzvbkQ+8HPkWDYv9Ll1
HGAohcECgYBQeirIumumj1B17WD/KmNe0U0qCHHp+oSW4W2r7pjlEVZzeQmggX4O
anbEngyQaZKeUiUOj9snBDmzLv7S+j5p7Us4d1fbp70sCKuK6tcAnROU8gK8IGiI
I01ypD8Z1Mb556qek56eRWlr71sy6wI1lbQa856cUBvePajUOKsKsw==
-----END RSA PRIVATE KEY-----

348
Godeps/_workspace/src/github.com/gocql/gocql/token.go generated vendored Normal file
View file

@ -0,0 +1,348 @@
// Copyright (c) 2015 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocql
import (
"bytes"
"crypto/md5"
"fmt"
"math/big"
"sort"
"strconv"
"strings"
"unsafe"
)
// a token partitioner maps partition keys to ring tokens; one
// implementation exists per partitioner class matched in newTokenRing
// (Murmur3/Ordered/Random).
type partitioner interface {
	Name() string
	Hash([]byte) token
	ParseString(string) token
}

// a token is an opaque, totally ordered position on the ring
type token interface {
	fmt.Stringer
	Less(token) bool
}

// murmur3 partitioner and token
type murmur3Partitioner struct{}
type murmur3Token int64

func (p murmur3Partitioner) Name() string {
	return "Murmur3Partitioner"
}

// Hash derives the token from the first 64 bits (h1) of the 128-bit
// murmur3 hash of the partition key, reinterpreted as a signed int64.
func (p murmur3Partitioner) Hash(partitionKey []byte) token {
	h1 := murmur3H1(partitionKey)
	return murmur3Token(int64(h1))
}
// murmur3H1 computes the 128-bit little-endian murmur3 hash of data and
// returns only the first half (h1), which is all Cassandra's
// Murmur3Partitioner needs for a token.
//
// NOTE(review): the block loop reads the input through an unsafe cast to
// [2]uint64, which presumes a little-endian CPU and 8-byte-aligned data —
// confirm for any non-x86 target.
func murmur3H1(data []byte) uint64 {
	const (
		c1 = 0x87c37b91114253d5
		c2 = 0x4cf5ad432745937f
	)

	length := len(data)

	var h1, h2 uint64

	// body: consume the input sixteen bytes at a time
	nBlocks := length / 16
	for block := 0; block < nBlocks; block++ {
		pair := (*[2]uint64)(unsafe.Pointer(&data[block*16]))
		k1 := pair[0]
		k2 := pair[1]

		k1 *= c1
		k1 = (k1 << 31) | (k1 >> 33) // ROTL64(k1, 31)
		k1 *= c2
		h1 ^= k1

		h1 = (h1 << 27) | (h1 >> 37) // ROTL64(h1, 27)
		h1 += h2
		h1 = h1*5 + 0x52dce729

		k2 *= c2
		k2 = (k2 << 33) | (k2 >> 31) // ROTL64(k2, 33)
		k2 *= c1
		h2 ^= k2

		h2 = (h2 << 31) | (h2 >> 33) // ROTL64(h2, 31)
		h2 += h1
		h2 = h2*5 + 0x38495ab5
	}

	// tail: fold in the remaining 0-15 bytes
	var k1, k2 uint64
	tail := data[nBlocks*16:]
	switch length & 15 {
	case 15:
		k2 ^= uint64(tail[14]) << 48
		fallthrough
	case 14:
		k2 ^= uint64(tail[13]) << 40
		fallthrough
	case 13:
		k2 ^= uint64(tail[12]) << 32
		fallthrough
	case 12:
		k2 ^= uint64(tail[11]) << 24
		fallthrough
	case 11:
		k2 ^= uint64(tail[10]) << 16
		fallthrough
	case 10:
		k2 ^= uint64(tail[9]) << 8
		fallthrough
	case 9:
		k2 ^= uint64(tail[8])
		k2 *= c2
		k2 = (k2 << 33) | (k2 >> 31) // ROTL64(k2, 33)
		k2 *= c1
		h2 ^= k2
		fallthrough
	case 8:
		k1 ^= uint64(tail[7]) << 56
		fallthrough
	case 7:
		k1 ^= uint64(tail[6]) << 48
		fallthrough
	case 6:
		k1 ^= uint64(tail[5]) << 40
		fallthrough
	case 5:
		k1 ^= uint64(tail[4]) << 32
		fallthrough
	case 4:
		k1 ^= uint64(tail[3]) << 24
		fallthrough
	case 3:
		k1 ^= uint64(tail[2]) << 16
		fallthrough
	case 2:
		k1 ^= uint64(tail[1]) << 8
		fallthrough
	case 1:
		k1 ^= uint64(tail[0])
		k1 *= c1
		k1 = (k1 << 31) | (k1 >> 33) // ROTL64(k1, 31)
		k1 *= c2
		h1 ^= k1
	}

	// finalization
	h1 ^= uint64(length)
	h2 ^= uint64(length)
	h1 += h2
	h2 += h1

	const (
		fmix1 = 0xff51afd7ed558ccd
		fmix2 = 0xc4ceb9fe1a85ec53
	)

	// fmix64(h1)
	h1 ^= h1 >> 33
	h1 *= fmix1
	h1 ^= h1 >> 33
	h1 *= fmix2
	h1 ^= h1 >> 33

	// fmix64(h2)
	h2 ^= h2 >> 33
	h2 *= fmix1
	h2 ^= h2 >> 33
	h2 *= fmix2
	h2 ^= h2 >> 33

	h1 += h2
	// h2 += h1 would complete the reference finalizer, but h2 is discarded

	return h1
}
// ParseString converts a decimal token string to a murmur3Token.
// Parse errors are ignored: a malformed value yields the zero token.
func (p murmur3Partitioner) ParseString(str string) token {
	v, _ := strconv.ParseInt(str, 10, 64)
	return murmur3Token(v)
}

// String renders the token as a base-10 signed integer.
func (m murmur3Token) String() string {
	return strconv.FormatInt(int64(m), 10)
}

// Less orders tokens numerically; the argument must be a murmur3Token.
func (m murmur3Token) Less(token token) bool {
	other := token.(murmur3Token)
	return m < other
}
// order preserving partitioner and token: the raw partition key bytes
// are the token, compared lexicographically.
type orderedPartitioner struct{}
type orderedToken []byte

func (p orderedPartitioner) Name() string {
	return "OrderedPartitioner"
}

// Hash is the identity: under an order-preserving partitioner the
// partition key is the token.
func (p orderedPartitioner) Hash(partitionKey []byte) token {
	return orderedToken(partitionKey)
}

func (p orderedPartitioner) ParseString(str string) token {
	return orderedToken(str)
}

func (o orderedToken) String() string {
	return string(o)
}

// Less orders tokens by byte-wise comparison; the argument must be an
// orderedToken.
func (o orderedToken) Less(token token) bool {
	return bytes.Compare(o, token.(orderedToken)) < 0
}
// random partitioner and token
type randomPartitioner struct{}
type randomToken big.Int
// Name returns the short class name Cassandra reports for this partitioner.
func (r randomPartitioner) Name() string {
	const name = "RandomPartitioner"
	return name
}
// Hash returns the token for partitionKey: the absolute value of the MD5
// digest of the key, as a big integer.
//
// Bug fix: the previous code called md5.New().Sum(partitionKey), which
// APPENDS the digest of an empty stream to the key rather than digesting the
// key — the token was SetBytes(key || md5("")), not md5(key). md5.Sum hashes
// the key itself.
func (p randomPartitioner) Hash(partitionKey []byte) token {
	sum := md5.Sum(partitionKey)
	val := new(big.Int).SetBytes(sum[:])
	// SetBytes already yields a non-negative value; Abs is kept to make the
	// |md5(key)| contract explicit.
	return (*randomToken)(val.Abs(val))
}
// ParseString parses a base-10 big integer into a randomToken.
func (p randomPartitioner) ParseString(str string) token {
	// SetString failures are ignored, as in the rest of the partitioners.
	parsed := new(big.Int)
	parsed.SetString(str, 10)
	return (*randomToken)(parsed)
}
// String renders the token as a base-10 integer.
func (r *randomToken) String() string {
	i := (*big.Int)(r)
	return i.String()
}
// Less orders tokens by big-integer comparison.
func (r *randomToken) Less(token token) bool {
	return (*big.Int)(r).Cmp((*big.Int)(token.(*randomToken))) < 0
}
// a data structure for organizing the relationship between tokens and hosts

// tokenRing maps tokens to the hosts that own them. tokens and hosts are
// parallel slices kept sorted by token (see the sort.Interface methods).
type tokenRing struct {
	partitioner partitioner
	tokens      []token
	hosts       []*HostInfo
}
// newTokenRing builds a sorted token ring for the given partitioner class
// name and host list. It returns an error for unrecognized partitioners.
func newTokenRing(partitioner string, hosts []HostInfo) (*tokenRing, error) {
	ring := &tokenRing{
		tokens: []token{},
		hosts:  []*HostInfo{},
	}

	// Cassandra reports the fully-qualified class name; match on the suffix.
	switch {
	case strings.HasSuffix(partitioner, "Murmur3Partitioner"):
		ring.partitioner = murmur3Partitioner{}
	case strings.HasSuffix(partitioner, "OrderedPartitioner"):
		ring.partitioner = orderedPartitioner{}
	case strings.HasSuffix(partitioner, "RandomPartitioner"):
		ring.partitioner = randomPartitioner{}
	default:
		return nil, fmt.Errorf("Unsupported partitioner '%s'", partitioner)
	}

	// One ring entry per (host, token) pair; a host may own many tokens.
	for i := range hosts {
		host := &hosts[i]
		for _, strToken := range host.Tokens {
			ring.tokens = append(ring.tokens, ring.partitioner.ParseString(strToken))
			ring.hosts = append(ring.hosts, host)
		}
	}

	sort.Sort(ring)
	return ring, nil
}
// Len reports the number of entries in the ring (sort.Interface).
func (t *tokenRing) Len() int {
	return len(t.tokens)
}
// Less orders ring entries by token (sort.Interface).
func (t *tokenRing) Less(i, j int) bool {
	return t.tokens[i].Less(t.tokens[j])
}
// Swap exchanges entries i and j, keeping the tokens and hosts slices
// parallel (sort.Interface).
func (t *tokenRing) Swap(i, j int) {
	t.tokens[i], t.tokens[j] = t.tokens[j], t.tokens[i]
	t.hosts[i], t.hosts[j] = t.hosts[j], t.hosts[i]
}
// String renders the ring as "TokenRing(<partitioner>){<idx>]<token>:<peer>,…}"
// for debugging.
func (t *tokenRing) String() string {
	var buf bytes.Buffer
	buf.WriteString("TokenRing(")
	if t.partitioner != nil {
		buf.WriteString(t.partitioner.Name())
	}
	buf.WriteString("){")
	for i, tok := range t.tokens {
		// Comma before every entry except the first.
		if i > 0 {
			buf.WriteString(",")
		}
		buf.WriteString("\n\t[")
		buf.WriteString(strconv.Itoa(i))
		buf.WriteString("]")
		buf.WriteString(tok.String())
		buf.WriteString(":")
		buf.WriteString(t.hosts[i].Peer)
	}
	buf.WriteString("\n}")
	return buf.String()
}
// GetHostForPartitionKey hashes partitionKey with the ring's partitioner and
// returns the primary replica for the resulting token. A nil ring yields nil.
func (t *tokenRing) GetHostForPartitionKey(partitionKey []byte) *HostInfo {
	if t == nil {
		return nil
	}

	token := t.partitioner.Hash(partitionKey)
	return t.GetHostForToken(token)
}
// GetHostForToken returns the primary replica for token: the owner of the
// first ring token >= the requested token, wrapping to the start of the ring
// when the token is past the last entry. A nil ring yields nil.
func (t *tokenRing) GetHostForToken(token token) *HostInfo {
	if t == nil {
		return nil
	}

	// Binary search for the first token that is not less than the target.
	idx := sort.Search(len(t.tokens), func(i int) bool {
		return !t.tokens[i].Less(token)
	})
	if idx == len(t.tokens) {
		// Past the last token: wrap around to the first entry in the ring.
		idx = 0
	}
	return t.hosts[idx]
}

View file

@ -0,0 +1,474 @@
// Copyright (c) 2015 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocql
import (
"bytes"
"math/big"
"sort"
"strconv"
"testing"
)
// Test the implementation of murmur3
func TestMurmur3H1(t *testing.T) {
	// these examples are based on appending an index digit to a sample string
	// in a loop. The expected values were generated by the java datastax
	// murmur3 implementation. The number of examples here of increasing
	// lengths ensure test coverage of all tail-length branches in the murmur3
	// algorithm.
	seriesExpected := [...]uint64{
		0x0000000000000000, // ""
		0x2ac9debed546a380, // "0"
		0x649e4eaa7fc1708e, // "01"
		0xce68f60d7c353bdb, // "012"
		0x0f95757ce7f38254, // "0123"
		0x0f04e459497f3fc1, // "01234"
		0x88c0a92586be0a27, // "012345"
		0x13eb9fb82606f7a6, // "0123456"
		0x8236039b7387354d, // "01234567"
		0x4c1e87519fe738ba, // "012345678"
		0x3f9652ac3effeb24, // "0123456789"
		0x3f33760ded9006c6, // "01234567890"
		0xaed70a6631854cb1, // "012345678901"
		0x8a299a8f8e0e2da7, // "0123456789012"
		0x624b675c779249a6, // "01234567890123"
		0xa4b203bb1d90b9a3, // "012345678901234"
		0xa3293ad698ecb99a, // "0123456789012345"
		0xbc740023dbd50048, // "01234567890123456"
		0x3fe5ab9837d25cdd, // "012345678901234567"
		0x2d0338c1ca87d132, // "0123456789012345678"
	}
	sample := ""
	for i, expected := range seriesExpected {
		assertMurmur3H1(t, []byte(sample), expected)

		// grow the sample by one digit for the next iteration
		sample = sample + strconv.Itoa(i%10)
	}

	// Here are some test examples from other driver implementations
	assertMurmur3H1(t, []byte("hello"), 0xcbd8a7b341bd9b02)
	assertMurmur3H1(t, []byte("hello, world"), 0x342fac623a5ebc8e)
	assertMurmur3H1(t, []byte("19 Jan 2038 at 3:14:07 AM"), 0xb89e5988b737affc)
	assertMurmur3H1(t, []byte("The quick brown fox jumps over the lazy dog."), 0xcd99481f9ee902c9)
}
// helper function for testing the murmur3 implementation
func assertMurmur3H1(t *testing.T, data []byte, expected uint64) {
	if got := murmur3H1(data); got != expected {
		t.Errorf("Expected h1 = %x for data = %x, but was %x", expected, data, got)
	}
}
// Benchmark of the performance of the murmur3 implementation
func BenchmarkMurmur3H1(b *testing.B) {
	var h1 uint64
	var data [1024]byte
	for i := 0; i < 1024; i++ {
		data[i] = byte(i)
	}

	// Reset the timer ONCE, after setup. The original called b.ResetTimer()
	// on every loop iteration, which zeroes the elapsed time each pass and
	// makes the reported ns/op meaningless.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		h1 = murmur3H1(data[:])
		_ = murmur3Token(int64(h1))
	}
}
// Tests of the murmur3Partitioner
func TestMurmur3Partitioner(t *testing.T) {
	// round-trip a token through ParseString/String
	token := murmur3Partitioner{}.ParseString("-1053604476080545076")

	if "-1053604476080545076" != token.String() {
		t.Errorf("Expected '-1053604476080545076' but was '%s'", token)
	}

	// at least verify that the partitioner
	// doesn't return nil
	pk, _ := marshalInt(nil, 1)
	token = murmur3Partitioner{}.Hash(pk)
	if token == nil {
		t.Fatal("token was nil")
	}
}
// Tests of the murmur3Token
func TestMurmur3Token(t *testing.T) {
	// Less must be a strict ordering: irreflexive and asymmetric.
	if murmur3Token(42).Less(murmur3Token(42)) {
		t.Errorf("Expected Less to return false, but was true")
	}
	if !murmur3Token(-42).Less(murmur3Token(42)) {
		t.Errorf("Expected Less to return true, but was false")
	}
	if murmur3Token(42).Less(murmur3Token(-42)) {
		t.Errorf("Expected Less to return false, but was true")
	}
}
// Tests of the orderedPartitioner
func TestOrderedPartitioner(t *testing.T) {
	// at least verify that the partitioner
	// doesn't return nil
	p := orderedPartitioner{}
	pk, _ := marshalInt(nil, 1)
	token := p.Hash(pk)
	if token == nil {
		t.Fatal("token was nil")
	}

	// round-trip: String then ParseString must reproduce the same bytes
	str := token.String()
	parsedToken := p.ParseString(str)

	if !bytes.Equal([]byte(token.(orderedToken)), []byte(parsedToken.(orderedToken))) {
		t.Errorf("Failed to convert to and from a string %s expected %x but was %x",
			str,
			[]byte(token.(orderedToken)),
			[]byte(parsedToken.(orderedToken)),
		)
	}
}
// Tests of the orderedToken
func TestOrderedToken(t *testing.T) {
	// Less must be a strict lexicographic ordering on the raw bytes.
	if orderedToken([]byte{0, 0, 4, 2}).Less(orderedToken([]byte{0, 0, 4, 2})) {
		t.Errorf("Expected Less to return false, but was true")
	}
	if !orderedToken([]byte{0, 0, 3}).Less(orderedToken([]byte{0, 0, 4, 2})) {
		t.Errorf("Expected Less to return true, but was false")
	}
	if orderedToken([]byte{0, 0, 4, 2}).Less(orderedToken([]byte{0, 0, 3})) {
		t.Errorf("Expected Less to return false, but was true")
	}
}
// Tests of the randomPartitioner
func TestRandomPartitioner(t *testing.T) {
	// at least verify that the partitioner
	// doesn't return nil
	p := randomPartitioner{}
	pk, _ := marshalInt(nil, 1)
	token := p.Hash(pk)
	if token == nil {
		t.Fatal("token was nil")
	}

	// round-trip: String then ParseString must produce an equal big integer
	str := token.String()
	parsedToken := p.ParseString(str)

	if (*big.Int)(token.(*randomToken)).Cmp((*big.Int)(parsedToken.(*randomToken))) != 0 {
		t.Errorf("Failed to convert to and from a string %s expected %v but was %v",
			str,
			token,
			parsedToken,
		)
	}
}
// Tests of the randomToken
func TestRandomToken(t *testing.T) {
	// Less must be a strict ordering on the underlying big integers.
	if ((*randomToken)(big.NewInt(42))).Less((*randomToken)(big.NewInt(42))) {
		t.Errorf("Expected Less to return false, but was true")
	}
	if !((*randomToken)(big.NewInt(41))).Less((*randomToken)(big.NewInt(42))) {
		t.Errorf("Expected Less to return true, but was false")
	}
	if ((*randomToken)(big.NewInt(42))).Less((*randomToken)(big.NewInt(41))) {
		t.Errorf("Expected Less to return false, but was true")
	}
}
// intToken is a trivial token implementation used to exercise the ring logic
// with easily readable integer tokens.
type intToken int

// String renders the token as a base-10 integer.
func (i intToken) String() string {
	return strconv.Itoa(int(i))
}

// Less orders intTokens numerically.
func (i intToken) Less(token token) bool {
	return i < token.(intToken)
}
// Test of the token ring implementation based on example at the start of this
// page of documentation:
// http://www.datastax.com/docs/0.8/cluster_architecture/partitioning
func TestIntTokenRing(t *testing.T) {
	host0 := &HostInfo{}
	host25 := &HostInfo{}
	host50 := &HostInfo{}
	host75 := &HostInfo{}
	ring := &tokenRing{
		partitioner: nil,
		// these tokens and hosts are out of order to test sorting
		tokens: []token{
			intToken(0),
			intToken(50),
			intToken(75),
			intToken(25),
		},
		hosts: []*HostInfo{
			host0,
			host50,
			host75,
			host25,
		},
	}
	sort.Sort(ring)

	// each token should map to the host owning the first ring token >= it,
	// wrapping past 75 back to host0
	if ring.GetHostForToken(intToken(0)) != host0 {
		t.Error("Expected host 0 for token 0")
	}
	if ring.GetHostForToken(intToken(1)) != host25 {
		t.Error("Expected host 25 for token 1")
	}
	if ring.GetHostForToken(intToken(24)) != host25 {
		t.Error("Expected host 25 for token 24")
	}
	if ring.GetHostForToken(intToken(25)) != host25 {
		t.Error("Expected host 25 for token 25")
	}
	if ring.GetHostForToken(intToken(26)) != host50 {
		t.Error("Expected host 50 for token 26")
	}
	if ring.GetHostForToken(intToken(49)) != host50 {
		t.Error("Expected host 50 for token 49")
	}
	if ring.GetHostForToken(intToken(50)) != host50 {
		t.Error("Expected host 50 for token 50")
	}
	if ring.GetHostForToken(intToken(51)) != host75 {
		t.Error("Expected host 75 for token 51")
	}
	if ring.GetHostForToken(intToken(74)) != host75 {
		t.Error("Expected host 75 for token 74")
	}
	if ring.GetHostForToken(intToken(75)) != host75 {
		t.Error("Expected host 75 for token 75")
	}
	if ring.GetHostForToken(intToken(76)) != host0 {
		t.Error("Expected host 0 for token 76")
	}
	if ring.GetHostForToken(intToken(99)) != host0 {
		t.Error("Expected host 0 for token 99")
	}
	if ring.GetHostForToken(intToken(100)) != host0 {
		t.Error("Expected host 0 for token 100")
	}
}
// Test for the behavior of a nil pointer to tokenRing: both lookup methods
// must return nil rather than panic.
func TestNilTokenRing(t *testing.T) {
	var ring *tokenRing = nil

	if ring.GetHostForToken(nil) != nil {
		t.Error("Expected nil for nil token ring")
	}
	if ring.GetHostForPartitionKey(nil) != nil {
		t.Error("Expected nil for nil token ring")
	}
}
// Test of the recognition of the partitioner class: unknown class names must
// produce an error from newTokenRing.
func TestUnknownTokenRing(t *testing.T) {
	_, err := newTokenRing("UnknownPartitioner", nil)
	if err == nil {
		t.Error("Expected error for unknown partitioner value, but was nil")
	}
}
// Test of the tokenRing with the Murmur3Partitioner
func TestMurmur3TokenRing(t *testing.T) {
	// Note, strings are parsed directly to int64, they are not murmur3 hashed
	hosts := []HostInfo{
		{Peer: "0", Tokens: []string{"0"}},
		{Peer: "1", Tokens: []string{"25"}},
		{Peer: "2", Tokens: []string{"50"}},
		{Peer: "3", Tokens: []string{"75"}},
	}
	ring, err := newTokenRing("Murmur3Partitioner", hosts)
	if err != nil {
		t.Fatalf("Failed to create token ring due to error: %v", err)
	}

	p := murmur3Partitioner{}
	var actual *HostInfo

	actual = ring.GetHostForToken(p.ParseString("0"))
	if actual.Peer != "0" {
		t.Errorf("Expected peer 0 for token \"0\", but was %s", actual.Peer)
	}

	actual = ring.GetHostForToken(p.ParseString("25"))
	if actual.Peer != "1" {
		t.Errorf("Expected peer 1 for token \"25\", but was %s", actual.Peer)
	}

	actual = ring.GetHostForToken(p.ParseString("50"))
	if actual.Peer != "2" {
		t.Errorf("Expected peer 2 for token \"50\", but was %s", actual.Peer)
	}

	// fixed failure message: it previously said token "01" (copy/paste error)
	actual = ring.GetHostForToken(p.ParseString("75"))
	if actual.Peer != "3" {
		t.Errorf("Expected peer 3 for token \"75\", but was %s", actual.Peer)
	}

	actual = ring.GetHostForToken(p.ParseString("12"))
	if actual.Peer != "1" {
		t.Errorf("Expected peer 1 for token \"12\", but was %s", actual.Peer)
	}

	// a token larger than any ring token wraps around to the first host
	actual = ring.GetHostForToken(p.ParseString("24324545443332"))
	if actual.Peer != "0" {
		t.Errorf("Expected peer 0 for token \"24324545443332\", but was %s", actual.Peer)
	}
}
// Test of the tokenRing with the OrderedPartitioner
func TestOrderedTokenRing(t *testing.T) {
	// Tokens here more or less are similar layout to the int tokens above due
	// to each numeric character translating to a consistently offset byte.
	hosts := []HostInfo{
		{Peer: "0", Tokens: []string{"00"}},
		{Peer: "1", Tokens: []string{"25"}},
		{Peer: "2", Tokens: []string{"50"}},
		{Peer: "3", Tokens: []string{"75"}},
	}
	ring, err := newTokenRing("OrderedPartitioner", hosts)
	if err != nil {
		t.Fatalf("Failed to create token ring due to error: %v", err)
	}

	p := orderedPartitioner{}
	var actual *HostInfo

	actual = ring.GetHostForToken(p.ParseString("0"))
	if actual.Peer != "0" {
		t.Errorf("Expected peer 0 for token \"0\", but was %s", actual.Peer)
	}

	actual = ring.GetHostForToken(p.ParseString("25"))
	if actual.Peer != "1" {
		t.Errorf("Expected peer 1 for token \"25\", but was %s", actual.Peer)
	}

	actual = ring.GetHostForToken(p.ParseString("50"))
	if actual.Peer != "2" {
		t.Errorf("Expected peer 2 for token \"50\", but was %s", actual.Peer)
	}

	// fixed failure message: it previously said token "01" (copy/paste error)
	actual = ring.GetHostForToken(p.ParseString("75"))
	if actual.Peer != "3" {
		t.Errorf("Expected peer 3 for token \"75\", but was %s", actual.Peer)
	}

	actual = ring.GetHostForToken(p.ParseString("12"))
	if actual.Peer != "1" {
		t.Errorf("Expected peer 1 for token \"12\", but was %s", actual.Peer)
	}

	// lexicographic ordering: "24…" sorts between "00" and "25", owned by "25"
	actual = ring.GetHostForToken(p.ParseString("24324545443332"))
	if actual.Peer != "1" {
		t.Errorf("Expected peer 1 for token \"24324545443332\", but was %s", actual.Peer)
	}
}
// Test of the tokenRing with the RandomPartitioner
func TestRandomTokenRing(t *testing.T) {
	// String tokens are parsed into big.Int in base 10
	hosts := []HostInfo{
		{Peer: "0", Tokens: []string{"00"}},
		{Peer: "1", Tokens: []string{"25"}},
		{Peer: "2", Tokens: []string{"50"}},
		{Peer: "3", Tokens: []string{"75"}},
	}
	ring, err := newTokenRing("RandomPartitioner", hosts)
	if err != nil {
		t.Fatalf("Failed to create token ring due to error: %v", err)
	}

	p := randomPartitioner{}
	var actual *HostInfo

	actual = ring.GetHostForToken(p.ParseString("0"))
	if actual.Peer != "0" {
		t.Errorf("Expected peer 0 for token \"0\", but was %s", actual.Peer)
	}

	actual = ring.GetHostForToken(p.ParseString("25"))
	if actual.Peer != "1" {
		t.Errorf("Expected peer 1 for token \"25\", but was %s", actual.Peer)
	}

	actual = ring.GetHostForToken(p.ParseString("50"))
	if actual.Peer != "2" {
		t.Errorf("Expected peer 2 for token \"50\", but was %s", actual.Peer)
	}

	// fixed failure message: it previously said token "01" (copy/paste error)
	actual = ring.GetHostForToken(p.ParseString("75"))
	if actual.Peer != "3" {
		t.Errorf("Expected peer 3 for token \"75\", but was %s", actual.Peer)
	}

	actual = ring.GetHostForToken(p.ParseString("12"))
	if actual.Peer != "1" {
		t.Errorf("Expected peer 1 for token \"12\", but was %s", actual.Peer)
	}

	// a token larger than any ring token wraps around to the first host
	actual = ring.GetHostForToken(p.ParseString("24324545443332"))
	if actual.Peer != "0" {
		t.Errorf("Expected peer 0 for token \"24324545443332\", but was %s", actual.Peer)
	}
}

View file

@ -0,0 +1,74 @@
// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocql
import (
"sync"
"sync/atomic"
)
// Node is a pool member that can supply a connection for a query and be
// closed when the pool shuts down.
type Node interface {
	Pick(qry *Query) *Conn
	Close()
}
// RoundRobin distributes Pick calls across a mutable set of Nodes.
type RoundRobin struct {
	pool []Node       // guarded by mu
	pos  uint32       // monotonically increasing pick counter (atomic)
	mu   sync.RWMutex // protects pool
}
// NewRoundRobin creates an empty round-robin node pool.
func NewRoundRobin() *RoundRobin {
	return new(RoundRobin)
}
// AddNode appends node to the pool.
func (r *RoundRobin) AddNode(node Node) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.pool = append(r.pool, node)
}
// RemoveNode removes the first occurrence of node from the pool. The last
// element is moved into the vacated slot, so pool order is not preserved.
func (r *RoundRobin) RemoveNode(node Node) {
	r.mu.Lock()
	defer r.mu.Unlock()
	for i, cur := range r.pool {
		if cur == node {
			last := len(r.pool) - 1
			r.pool[i] = r.pool[last]
			r.pool = r.pool[:last]
			break
		}
	}
}
// Size reports the current number of nodes in the pool.
func (r *RoundRobin) Size() int {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return len(r.pool)
}
// Pick selects the next node in round-robin order and delegates the query to
// it. It returns nil when the pool is empty.
func (r *RoundRobin) Pick(qry *Query) *Conn {
	// The counter advances even when the pool is empty, matching the
	// original behavior.
	pos := atomic.AddUint32(&r.pos, 1)

	r.mu.RLock()
	var node Node
	if n := len(r.pool); n > 0 {
		node = r.pool[pos%uint32(n)]
	}
	r.mu.RUnlock()

	if node == nil {
		return nil
	}
	return node.Pick(qry)
}
// Close closes every node in the pool and clears the pool.
func (r *RoundRobin) Close() {
	r.mu.Lock()
	defer r.mu.Unlock()
	for _, node := range r.pool {
		node.Close()
	}
	r.pool = nil
}

View file

@ -0,0 +1,51 @@
// +build all unit
package gocql
import (
"testing"
)
// fakeNode is used as a simple structure to test the RoundRobin API
type fakeNode struct {
	conn   *Conn // lazily created on first Pick
	closed bool  // set when Close is called
}
// Pick is needed to satisfy the Node interface; it lazily creates and then
// always returns the same connection.
func (n *fakeNode) Pick(qry *Query) *Conn {
	if n.conn != nil {
		return n.conn
	}
	n.conn = &Conn{}
	return n.conn
}
// Close is needed to satisfy the Node interface; it records that it was
// called.
func (n *fakeNode) Close() {
	n.closed = true
}
// TestRoundRobinAPI tests the exported methods of the RoundRobin struct
// to make sure the API behaves accordingly.
func TestRoundRobinAPI(t *testing.T) {
	node := &fakeNode{}
	rr := NewRoundRobin()
	rr.AddNode(node)

	if rr.Size() != 1 {
		t.Fatalf("expected size to be 1, got %v", rr.Size())
	}

	// Pick should delegate to the single node's connection
	if c := rr.Pick(nil); c != node.conn {
		t.Fatalf("expected conn %v, got %v", node.conn, c)
	}

	// Close must empty the pool and close every node
	rr.Close()
	if rr.pool != nil {
		t.Fatalf("expected rr.pool to be nil, got %v", rr.pool)
	}

	if !node.closed {
		t.Fatal("expected node.closed to be true, got false")
	}
}

View file

@ -0,0 +1,51 @@
// +build all integration
package gocql
import "testing"
// TestTupleSimple round-trips a frozen<tuple<int,int>> column: each tuple
// element is scanned into its own destination variable.
func TestTupleSimple(t *testing.T) {
	if *flagProto < protoVersion3 {
		t.Skip("tuple types are only available of proto>=3")
	}

	session := createSession(t)
	defer session.Close()

	err := createTable(session, `CREATE TABLE tuple_test(
		id int,
		coord frozen<tuple<int, int>>,

		primary key(id))`)
	if err != nil {
		t.Fatal(err)
	}

	err = session.Query("INSERT INTO tuple_test(id, coord) VALUES(?, (?, ?))", 1, 100, -100).Exec()
	if err != nil {
		t.Fatal(err)
	}

	var (
		id    int
		coord struct {
			x int
			y int
		}
	)

	// one Scan destination per tuple element, in declaration order
	iter := session.Query("SELECT id, coord FROM tuple_test WHERE id=?", 1)
	if err := iter.Scan(&id, &coord.x, &coord.y); err != nil {
		t.Fatal(err)
	}

	if id != 1 {
		t.Errorf("expected to get id=1 got: %v", id)
	}

	if coord.x != 100 {
		t.Errorf("expected to get coord.x=100 got: %v", coord.x)
	}

	if coord.y != -100 {
		t.Errorf("expected to get coord.y=-100 got: %v", coord.y)
	}
}

View file

@ -0,0 +1,254 @@
// +build all integration
package gocql
import (
"fmt"
"strings"
"testing"
)
// position is a UDT fixture mirroring the CREATE TYPE position(...) schema.
type position struct {
	Lat int    `cql:"lat"`
	Lon int    `cql:"lon"`
	Padding string `json:"padding"` // NOTE(review): tag is json, not cql — the custom (Un)MarshalUDT below matches on the column name, so the tag appears unused; confirm
}
// NOTE: due to current implementation details it is not currently possible to use
// a pointer receiver type for the UDTMarshaler interface to handle UDT's

// MarshalUDT marshals a single named field of the UDT, dispatching on the
// column name; unknown columns are an error.
func (p position) MarshalUDT(name string, info TypeInfo) ([]byte, error) {
	switch name {
	case "lat":
		return Marshal(info, p.Lat)
	case "lon":
		return Marshal(info, p.Lon)
	case "padding":
		return Marshal(info, p.Padding)
	default:
		return nil, fmt.Errorf("unknown column for position: %q", name)
	}
}
// UnmarshalUDT unmarshals a single named field of the UDT into the matching
// struct field; unknown columns are an error.
func (p *position) UnmarshalUDT(name string, info TypeInfo, data []byte) error {
	switch name {
	case "lat":
		return Unmarshal(info, data, &p.Lat)
	case "lon":
		return Unmarshal(info, data, &p.Lon)
	case "padding":
		return Unmarshal(info, data, &p.Padding)
	default:
		return fmt.Errorf("unknown column for position: %q", name)
	}
}
// TestUDT_Marshaler round-trips a UDT through the explicit
// UDTMarshaler/UDTUnmarshaler implementation on position.
func TestUDT_Marshaler(t *testing.T) {
	if *flagProto < protoVersion3 {
		t.Skip("UDT are only available on protocol >= 3")
	}

	session := createSession(t)
	defer session.Close()

	err := createTable(session, `CREATE TYPE position(
		lat int,
		lon int,
		padding text);`)
	if err != nil {
		t.Fatal(err)
	}

	err = createTable(session, `CREATE TABLE houses(
		id int,
		name text,
		loc frozen<position>,

		primary key(id)
	);`)
	if err != nil {
		t.Fatal(err)
	}

	const (
		expLat = -1
		expLon = 2
	)
	// large padding forces the UDT value past small-buffer fast paths
	pad := strings.Repeat("X", 1000)

	err = session.Query("INSERT INTO houses(id, name, loc) VALUES(?, ?, ?)", 1, "test", &position{expLat, expLon, pad}).Exec()
	if err != nil {
		t.Fatal(err)
	}

	pos := &position{}

	err = session.Query("SELECT loc FROM houses WHERE id = ?", 1).Scan(pos)
	if err != nil {
		t.Fatal(err)
	}

	// fixed message typos: "expeceted lat to be be" -> "expected lat to be"
	if pos.Lat != expLat {
		t.Errorf("expected lat to be %d got %d", expLat, pos.Lat)
	}
	if pos.Lon != expLon {
		t.Errorf("expected lon to be %d got %d", expLon, pos.Lon)
	}
	if pos.Padding != pad {
		t.Errorf("expected to get padding %q got %q\n", pad, pos.Padding)
	}
}
// TestUDT_Reflect round-trips a UDT using reflection over cql struct tags
// instead of a custom marshaler.
func TestUDT_Reflect(t *testing.T) {
	if *flagProto < protoVersion3 {
		t.Skip("UDT are only available on protocol >= 3")
	}

	// Uses reflection instead of implementing the marshaling type
	session := createSession(t)
	defer session.Close()

	err := createTable(session, `CREATE TYPE horse(
		name text,
		owner text);`)
	if err != nil {
		t.Fatal(err)
	}

	err = createTable(session, `CREATE TABLE horse_race(
		position int,
		horse frozen<horse>,

		primary key(position)
	);`)
	if err != nil {
		t.Fatal(err)
	}

	type horse struct {
		Name  string `cql:"name"`
		Owner string `cql:"owner"`
	}

	insertedHorse := &horse{
		Name:  "pony",
		Owner: "jim",
	}

	err = session.Query("INSERT INTO horse_race(position, horse) VALUES(?, ?)", 1, insertedHorse).Exec()
	if err != nil {
		t.Fatal(err)
	}

	retrievedHorse := &horse{}
	err = session.Query("SELECT horse FROM horse_race WHERE position = ?", 1).Scan(retrievedHorse)
	if err != nil {
		t.Fatal(err)
	}

	if *retrievedHorse != *insertedHorse {
		// bug fix: was t.Fatal with printf directives (caught by go vet) and
		// a misspelled "exepcted"
		t.Fatalf("expected to get %+v got %+v", insertedHorse, retrievedHorse)
	}
}
// TestUDT_Proto2error verifies that binding a UDT value on a protocol 2
// connection fails with ErrorUDTUnavailable.
func TestUDT_Proto2error(t *testing.T) {
	if *flagProto < protoVersion3 {
		t.Skip("UDT are only available on protocol >= 3")
	}

	// force a protocol 2 session even though the cluster supports UDTs
	cluster := createCluster()
	cluster.ProtoVersion = 2
	cluster.Keyspace = "gocql_test"

	// Uses reflection instead of implementing the marshaling type
	session, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	defer session.Close()

	err = createTable(session, `CREATE TYPE fish(
		name text,
		owner text);`)
	if err != nil {
		t.Fatal(err)
	}

	err = createTable(session, `CREATE TABLE fish_race(
		position int,
		fish frozen<fish>,

		primary key(position)
	);`)
	if err != nil {
		t.Fatal(err)
	}

	type fish struct {
		Name  string `cql:"name"`
		Owner string `cql:"owner"`
	}

	insertedFish := &fish{
		Name:  "pony",
		Owner: "jim",
	}

	err = session.Query("INSERT INTO fish_race(position, fish) VALUES(?, ?)", 1, insertedFish).Exec()
	if err != ErrorUDTUnavailable {
		t.Fatalf("expected to get %v got %v", ErrorUDTUnavailable, err)
	}
}
// TestUDT_NullObject verifies that scanning a NULL UDT column zeroes the
// destination struct's fields rather than leaving stale values.
func TestUDT_NullObject(t *testing.T) {
	if *flagProto < protoVersion3 {
		t.Skip("UDT are only available on protocol >= 3")
	}

	session := createSession(t)
	defer session.Close()

	err := createTable(session, `CREATE TYPE udt_null_type(
		name text,
		owner text);`)
	if err != nil {
		t.Fatal(err)
	}

	err = createTable(session, `CREATE TABLE udt_null_table(
		id uuid,
		udt_col frozen<udt_null_type>,

		primary key(id)
	);`)
	if err != nil {
		t.Fatal(err)
	}

	type col struct {
		Name  string `cql:"name"`
		Owner string `cql:"owner"`
	}

	id := TimeUUID()
	// insert the row WITHOUT the udt column so it is NULL
	err = session.Query("INSERT INTO udt_null_table(id) VALUES(?)", id).Exec()
	if err != nil {
		t.Fatal(err)
	}

	// pre-populate the destination to prove the scan overwrites it
	readCol := &col{
		Name:  "temp",
		Owner: "temp",
	}

	err = session.Query("SELECT udt_col FROM udt_null_table WHERE id = ?", id).Scan(readCol)
	if err != nil {
		t.Fatal(err)
	}

	if readCol.Name != "" {
		t.Errorf("expected empty string to be returned for null udt: got %q", readCol.Name)
	}
	if readCol.Owner != "" {
		t.Errorf("expected empty string to be returned for null udt: got %q", readCol.Owner)
	}
}

242
Godeps/_workspace/src/github.com/gocql/gocql/uuid.go generated vendored Normal file
View file

@ -0,0 +1,242 @@
// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The uuid package can be used to generate and parse universally unique
// identifiers, a standardized format in the form of a 128 bit number.
//
// http://tools.ietf.org/html/rfc4122
package gocql
import (
"crypto/rand"
"errors"
"fmt"
"io"
"net"
"strings"
"sync/atomic"
"time"
)
// UUID is a 128 bit (16 byte) universally unique identifier.
type UUID [16]byte

// hardwareAddr is the MAC address (or a random multicast substitute, see
// init) embedded in time based UUIDs.
var hardwareAddr []byte

// clockSeq is the RFC 4122 clock sequence, advanced atomically per UUID.
var clockSeq uint32

// UUID variants as reported by Variant().
const (
	VariantNCSCompat = 0
	VariantIETF      = 2
	VariantMicrosoft = 6
	VariantFuture    = 7
)
// init seeds the node identifier and clock sequence used by time based UUIDs.
func init() {
	// prefer the MAC of the first non-loopback interface that has one
	if interfaces, err := net.Interfaces(); err == nil {
		for _, i := range interfaces {
			if i.Flags&net.FlagLoopback == 0 && len(i.HardwareAddr) > 0 {
				hardwareAddr = i.HardwareAddr
				break
			}
		}
	}
	if hardwareAddr == nil {
		// If we failed to obtain the MAC address of the current computer,
		// we will use a randomly generated 6 byte sequence instead and set
		// the multicast bit as recommended in RFC 4122.
		hardwareAddr = make([]byte, 6)
		_, err := io.ReadFull(rand.Reader, hardwareAddr)
		if err != nil {
			panic(err)
		}
		hardwareAddr[0] = hardwareAddr[0] | 0x01
	}

	// initialize the clock sequence with a random number
	// NOTE(review): the ReadFull error is deliberately ignored here — on
	// failure the clock sequence simply starts at zero.
	var clockSeqRand [2]byte
	io.ReadFull(rand.Reader, clockSeqRand[:])
	clockSeq = uint32(clockSeqRand[1])<<8 | uint32(clockSeqRand[0])
}
// ParseUUID parses a 32 digit hexadecimal number (that might contain hyphens)
// representing an UUID.
func ParseUUID(input string) (UUID, error) {
	var u UUID
	// j counts hex digits consumed; digit j lands in byte j/2.
	j := 0
	for _, r := range input {
		switch {
		case r == '-' && j&1 == 0:
			// hyphens are only allowed on byte boundaries (even j)
			continue
		case r >= '0' && r <= '9' && j < 32:
			// `4-j&1*4` parses as 4-((j&1)*4): even digits shift into the
			// high nibble, odd digits into the low nibble
			u[j/2] |= byte(r-'0') << uint(4-j&1*4)
		case r >= 'a' && r <= 'f' && j < 32:
			u[j/2] |= byte(r-'a'+10) << uint(4-j&1*4)
		case r >= 'A' && r <= 'F' && j < 32:
			u[j/2] |= byte(r-'A'+10) << uint(4-j&1*4)
		default:
			return UUID{}, fmt.Errorf("invalid UUID %q", input)
		}
		j += 1
	}
	// exactly 32 hex digits are required
	if j != 32 {
		return UUID{}, fmt.Errorf("invalid UUID %q", input)
	}
	return u, nil
}
// UUIDFromBytes converts a raw byte slice to an UUID. The slice must be
// exactly 16 bytes long.
func UUIDFromBytes(input []byte) (UUID, error) {
	if len(input) != 16 {
		return UUID{}, errors.New("UUIDs must be exactly 16 bytes long")
	}

	var u UUID
	copy(u[:], input)
	return u, nil
}
// RandomUUID generates a totally random UUID (version 4) as described in
// RFC 4122.
func RandomUUID() (UUID, error) {
	var u UUID
	_, err := io.ReadFull(rand.Reader, u[:])
	if err != nil {
		return u, err
	}
	// stamp the version and variant bits over the random data
	u[6] &= 0x0F // clear version
	u[6] |= 0x40 // set version to 4 (random uuid)
	u[8] &= 0x3F // clear variant
	u[8] |= 0x80 // set to IETF variant
	return u, nil
}
// timeBase is the Unix time of the RFC 4122 epoch (15 October 1582).
var timeBase = time.Date(1582, time.October, 15, 0, 0, 0, 0, time.UTC).Unix()
// TimeUUID generates a new time based UUID (version 1) using the current
// time as the timestamp.
func TimeUUID() UUID {
	now := time.Now()
	return UUIDFromTime(now)
}
// UUIDFromTime generates a new time based UUID (version 1) as described in
// RFC 4122. This UUID contains the MAC address of the node that generated
// the UUID, the given timestamp and a sequence number.
func UUIDFromTime(aTime time.Time) UUID {
	var u UUID

	// 100ns ticks since the RFC 4122 epoch (15 Oct 1582)
	utcTime := aTime.In(time.UTC)
	t := uint64(utcTime.Unix()-timeBase)*10000000 + uint64(utcTime.Nanosecond()/100)
	// time_low, time_mid, time_hi spread across bytes 0-7 per RFC 4122
	u[0], u[1], u[2], u[3] = byte(t>>24), byte(t>>16), byte(t>>8), byte(t)
	u[4], u[5] = byte(t>>40), byte(t>>32)
	u[6], u[7] = byte(t>>56)&0x0F, byte(t>>48)

	// clock sequence advances on every UUID to disambiguate equal timestamps
	clock := atomic.AddUint32(&clockSeq, 1)
	u[8] = byte(clock >> 8)
	u[9] = byte(clock)

	copy(u[10:], hardwareAddr)

	u[6] |= 0x10 // set version to 1 (time based uuid)
	u[8] &= 0x3F // clear variant
	u[8] |= 0x80 // set to IETF variant
	return u
}
// String returns the UUID in its canonical form, a 32 digit hexadecimal
// number in the form of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
func (u UUID) String() string {
	// offsets[i] is where byte i's two hex digits land in the 36-char output,
	// skipping the four hyphen positions (8, 13, 18, 23).
	var offsets = [...]int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34}
	const hexString = "0123456789abcdef"
	r := make([]byte, 36)
	for i, b := range u {
		r[offsets[i]] = hexString[b>>4]
		r[offsets[i]+1] = hexString[b&0xF]
	}
	r[8] = '-'
	r[13] = '-'
	r[18] = '-'
	r[23] = '-'
	return string(r)
}
// Bytes returns the raw byte slice for this UUID. A UUID is always 128 bits
// (16 bytes) long. The slice aliases the receiver's backing array.
func (u UUID) Bytes() []byte {
	return u[:]
}
// Variant returns the variant of this UUID. This package will only generate
// UUIDs in the IETF variant. The variant is encoded in the top bits of
// byte 8.
func (u UUID) Variant() int {
	switch b := u[8]; {
	case b&0x80 == 0:
		return VariantNCSCompat
	case b&0x40 == 0:
		return VariantIETF
	case b&0x20 == 0:
		return VariantMicrosoft
	default:
		return VariantFuture
	}
}
// Version extracts the version of this UUID variant. The RFC 4122 describes
// five kinds of UUIDs. The version is the high nibble of byte 6.
func (u UUID) Version() int {
	return int(u[6] & 0xF0 >> 4)
}
// Node extracts the MAC address of the node who generated this UUID. It will
// return nil if the UUID is not a time based UUID (version 1).
func (u UUID) Node() []byte {
	if u.Version() == 1 {
		return u[10:]
	}
	return nil
}
// Timestamp extracts the timestamp information from a time based UUID
// (version 1). The result counts 100ns intervals since the RFC 4122 epoch;
// it is 0 for non-version-1 UUIDs.
func (u UUID) Timestamp() int64 {
	if u.Version() != 1 {
		return 0
	}
	// reassemble time_low (bytes 0-3), time_mid (4-5) and time_hi (6-7,
	// masking out the version nibble)
	return int64(uint64(u[0])<<24|uint64(u[1])<<16|
		uint64(u[2])<<8|uint64(u[3])) +
		int64(uint64(u[4])<<40|uint64(u[5])<<32) +
		int64(uint64(u[6]&0x0F)<<56|uint64(u[7])<<48)
}
// Time is like Timestamp, except that it returns a time.Time. The zero
// time.Time is returned for non-version-1 UUIDs.
func (u UUID) Time() time.Time {
	if u.Version() != 1 {
		return time.Time{}
	}

	// convert 100ns ticks since the RFC 4122 epoch to Unix seconds + nanos
	ts := u.Timestamp()
	sec, nsec := ts/1e7, (ts%1e7)*100
	return time.Unix(sec+timeBase, nsec).UTC()
}
// Marshaling for JSON

// MarshalJSON encodes the UUID as a quoted canonical string.
func (u UUID) MarshalJSON() ([]byte, error) {
	var buf []byte
	buf = append(buf, '"')
	buf = append(buf, u.String()...)
	buf = append(buf, '"')
	return buf, nil
}
// Unmarshaling for JSON

// UnmarshalJSON decodes a quoted UUID string; the receiver is only modified
// on success.
func (u *UUID) UnmarshalJSON(data []byte) error {
	str := strings.Trim(string(data), `"`)
	if len(str) > 36 {
		return fmt.Errorf("invalid JSON UUID %s", str)
	}

	parsed, err := ParseUUID(str)
	if err != nil {
		return err
	}
	copy(u[:], parsed[:])
	return nil
}

View file

@ -0,0 +1,197 @@
// +build all unit
package gocql
import (
"bytes"
"strings"
"testing"
"time"
)
// TestUUIDNil verifies the zero UUID renders as all zeros.
func TestUUIDNil(t *testing.T) {
	var uuid UUID
	want, got := "00000000-0000-0000-0000-000000000000", uuid.String()
	if want != got {
		t.Fatalf("TestNil: expected %q got %q", want, got)
	}
}
// testsUUID is a table of UUID strings with their expected variant and
// version.
var testsUUID = []struct {
	input   string
	variant int
	version int
}{
	{"b4f00409-cef8-4822-802c-deb20704c365", VariantIETF, 4},
	{"B4F00409-CEF8-4822-802C-DEB20704C365", VariantIETF, 4}, //Use capital letters
	{"f81d4fae-7dec-11d0-a765-00a0c91e6bf6", VariantIETF, 1},
	{"00000000-7dec-11d0-a765-00a0c91e6bf6", VariantIETF, 1},
	{"3051a8d7-aea7-1801-e0bf-bc539dd60cf3", VariantFuture, 1},
	{"3051a8d7-aea7-2801-e0bf-bc539dd60cf3", VariantFuture, 2},
	{"3051a8d7-aea7-3801-e0bf-bc539dd60cf3", VariantFuture, 3},
	{"3051a8d7-aea7-4801-e0bf-bc539dd60cf3", VariantFuture, 4},
	// NOTE(review): this entry repeats the version-3 input but claims version
	// 5; harmless today because version is only asserted for VariantIETF —
	// confirm whether "...-aea7-5801-..." was intended.
	{"3051a8d7-aea7-3801-e0bf-bc539dd60cf3", VariantFuture, 5},
	{"d0e817e1-e4b1-1801-3fe6-b4b60ccecf9d", VariantNCSCompat, 0},
	{"d0e817e1-e4b1-1801-bfe6-b4b60ccecf9d", VariantIETF, 1},
	{"d0e817e1-e4b1-1801-dfe6-b4b60ccecf9d", VariantMicrosoft, 0},
	{"d0e817e1-e4b1-1801-ffe6-b4b60ccecf9d", VariantFuture, 0},
}
// TestPredefinedUUID parses each table entry and checks String round-trip,
// variant, version (IETF only), and JSON round-trip.
func TestPredefinedUUID(t *testing.T) {
	for i := range testsUUID {
		uuid, err := ParseUUID(testsUUID[i].input)
		if err != nil {
			t.Errorf("ParseUUID #%d: %v", i, err)
			continue
		}

		// String output is always lowercase
		if str := uuid.String(); str != strings.ToLower(testsUUID[i].input) {
			t.Errorf("String #%d: expected %q got %q", i, testsUUID[i].input, str)
			continue
		}

		if variant := uuid.Variant(); variant != testsUUID[i].variant {
			t.Errorf("Variant #%d: expected %d got %d", i, testsUUID[i].variant, variant)
		}

		// version is only meaningful for the IETF variant
		if testsUUID[i].variant == VariantIETF {
			if version := uuid.Version(); version != testsUUID[i].version {
				t.Errorf("Version #%d: expected %d got %d", i, testsUUID[i].version, version)
			}
		}

		json, err := uuid.MarshalJSON()
		if err != nil {
			t.Errorf("MarshalJSON #%d: %v", i, err)
		}
		expectedJson := `"` + strings.ToLower(testsUUID[i].input) + `"`
		if string(json) != expectedJson {
			t.Errorf("MarshalJSON #%d: expected %v got %v", i, expectedJson, string(json))
		}

		var unmarshaled UUID
		err = unmarshaled.UnmarshalJSON(json)
		if err != nil {
			t.Errorf("UnmarshalJSON #%d: %v", i, err)
		}
		if unmarshaled != uuid {
			t.Errorf("UnmarshalJSON #%d: expected %v got %v", i, uuid, unmarshaled)
		}
	}
}
// TestInvalidUUIDCharacter verifies non-hex characters are rejected.
func TestInvalidUUIDCharacter(t *testing.T) {
	_, err := ParseUUID("z4f00409-cef8-4822-802c-deb20704c365")
	if err == nil || !strings.Contains(err.Error(), "invalid UUID") {
		t.Fatalf("expected invalid UUID error, got '%v' ", err)
	}
}
// TestInvalidUUIDLength verifies short inputs are rejected by both ParseUUID
// and UUIDFromBytes.
func TestInvalidUUIDLength(t *testing.T) {
	_, err := ParseUUID("4f00")
	if err == nil || !strings.Contains(err.Error(), "invalid UUID") {
		t.Fatalf("expected invalid UUID error, got '%v' ", err)
	}

	_, err = UUIDFromBytes(TimeUUID().Bytes()[:15])
	if err == nil || err.Error() != "UUIDs must be exactly 16 bytes long" {
		t.Fatalf("expected error '%v', got '%v'", "UUIDs must be exactly 16 bytes long", err)
	}
}
// TestRandomUUID generates several random UUIDs and checks that each one
// reports the IETF variant and version 4.
func TestRandomUUID(t *testing.T) {
	for attempt := 0; attempt < 20; attempt++ {
		uuid, err := RandomUUID()
		if err != nil {
			t.Errorf("RandomUUID: %v", err)
		}
		if got := uuid.Variant(); got != VariantIETF {
			t.Errorf("wrong variant. expected %d got %d", VariantIETF, got)
		}
		if got := uuid.Version(); got != 4 {
			t.Errorf("wrong version. expected %d got %d", 4, got)
		}
	}
}
// TestRandomUUIDInvalidAPICalls checks that the time-based accessors are
// inert on a random (version 4) UUID: Node returns nil, Timestamp returns 0,
// and Time returns the zero time.
func TestRandomUUIDInvalidAPICalls(t *testing.T) {
	uuid, err := RandomUUID()
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	if node := uuid.Node(); node != nil {
		t.Fatalf("expected nil, got %v", node)
	}
	if stamp := uuid.Timestamp(); stamp != 0 {
		// Typo fixed in the failure message ("expceted" -> "expected").
		t.Fatalf("expected 0, got %v", stamp)
	}
	zeroT := time.Time{}
	if to := uuid.Time(); to != zeroT {
		t.Fatalf("expected %v, got %v", zeroT, to)
	}
}
// TestUUIDFromTime checks that a timestamp embedded via UUIDFromTime can be
// recovered exactly with Time().
func TestUUIDFromTime(t *testing.T) {
	date := time.Date(1982, 5, 5, 12, 34, 56, 400, time.UTC)
	uuid := UUIDFromTime(date)
	if got := uuid.Time(); got != date {
		t.Errorf("embedded time incorrect. Expected %v got %v", date, got)
	}
}
// TestParseUUID parses a known version 1 UUID and checks the timestamp it
// embeds. The parse error is now checked instead of being discarded, so a
// parse failure reports itself rather than surfacing as a bogus Time() value.
func TestParseUUID(t *testing.T) {
	uuid, err := ParseUUID("486f3a88-775b-11e3-ae07-d231feb1dc81")
	if err != nil {
		t.Fatalf("ParseUUID: %v", err)
	}
	want := time.Date(2014, 1, 7, 5, 19, 29, 222516000, time.UTC)
	if uuid.Time() != want {
		t.Errorf("Expected date of 1/7/2014 at 5:19:29.222516, got %v", uuid.Time())
	}
}
// TestTimeUUID generates a series of time-based UUIDs and checks that each
// has the IETF variant and version 1, that all of them carry the same node
// identifier, and that their timestamps never decrease.
func TestTimeUUID(t *testing.T) {
	var (
		node   []byte
		lastTS int64
	)
	for i := 0; i < 20; i++ {
		uuid := TimeUUID()
		if got := uuid.Variant(); got != VariantIETF {
			t.Errorf("wrong variant. expected %d got %d", VariantIETF, got)
		}
		if got := uuid.Version(); got != 1 {
			t.Errorf("wrong version. expected %d got %d", 1, got)
		}
		n := uuid.Node()
		if i == 0 {
			// First iteration establishes the reference node ID.
			node = n
		} else if !bytes.Equal(n, node) {
			t.Errorf("wrong node. expected %x, got %x", node, n)
		}
		ts := uuid.Timestamp()
		if ts < lastTS {
			t.Errorf("timestamps must grow")
		}
		lastTS = ts
	}
}
// TestUnmarshalJSON checks that UnmarshalJSON accepts both the hyphenated
// and the bare-hex JSON forms of a UUID (verifying the embedded timestamp),
// and rejects an over-long string. The two success-path errors are now
// checked instead of being silently dropped.
func TestUnmarshalJSON(t *testing.T) {
	var withHyphens, withoutHyphens, tooLong UUID

	want := time.Date(2014, 1, 7, 5, 19, 29, 0, time.UTC)

	if err := withHyphens.UnmarshalJSON([]byte(`"486f3a88-775b-11e3-ae07-d231feb1dc81"`)); err != nil {
		t.Fatalf("UnmarshalJSON with hyphens: %v", err)
	}
	if withHyphens.Time().Truncate(time.Second) != want {
		t.Errorf("Expected date of 1/7/2014 at 5:19:29, got %v", withHyphens.Time())
	}

	if err := withoutHyphens.UnmarshalJSON([]byte(`"486f3a88775b11e3ae07d231feb1dc81"`)); err != nil {
		t.Fatalf("UnmarshalJSON without hyphens: %v", err)
	}
	if withoutHyphens.Time().Truncate(time.Second) != want {
		t.Errorf("Expected date of 1/7/2014 at 5:19:29, got %v", withoutHyphens.Time())
	}

	// 44 hex characters: too long to be a UUID, must be rejected.
	if err := tooLong.UnmarshalJSON([]byte(`"486f3a88-775b-11e3-ae07-d231feb1dc81486f3a88"`)); err == nil {
		t.Errorf("no error for invalid JSON UUID")
	}
}

View file

@ -0,0 +1,384 @@
/*
 * NOTE(review): these rules match the compiled Bootstrap 3 "theme"
 * stylesheet (gradient/shadow skins for buttons, navbar, alerts, progress
 * bars, list groups, panels, and wells). The file appears to be vendored,
 * machine-generated output — confirm provenance and do not hand-edit;
 * regenerate from the upstream Less/Sass source instead.
 *
 * Each skin repeats the same pattern: vendor-prefixed gradient fallbacks
 * (-webkit-gradient, -webkit/-moz-linear-gradient), the standard
 * linear-gradient, and an IE<10 DXImageTransform filter fallback.
 */

/* Buttons: raised look via text/box shadows, per-state gradients. */
.btn-default,
.btn-primary,
.btn-success,
.btn-info,
.btn-warning,
.btn-danger {
  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);
  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);
  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);
}
.btn-default:active,
.btn-primary:active,
.btn-success:active,
.btn-info:active,
.btn-warning:active,
.btn-danger:active,
.btn-default.active,
.btn-primary.active,
.btn-success.active,
.btn-info.active,
.btn-warning.active,
.btn-danger.active {
  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
}
.btn:active,
.btn.active {
  background-image: none;
}
.btn-default {
  text-shadow: 0 1px 0 #fff;
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ffffff), to(#e6e6e6));
  background-image: -webkit-linear-gradient(top, #ffffff, 0%, #e6e6e6, 100%);
  background-image: -moz-linear-gradient(top, #ffffff 0%, #e6e6e6 100%);
  background-image: linear-gradient(to bottom, #ffffff 0%, #e6e6e6 100%);
  background-repeat: repeat-x;
  border-color: #e0e0e0;
  border-color: #ccc;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe6e6e6', GradientType=0);
}
.btn-default:active,
.btn-default.active {
  background-color: #e6e6e6;
  border-color: #e0e0e0;
}
.btn-primary {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#3071a9));
  background-image: -webkit-linear-gradient(top, #428bca, 0%, #3071a9, 100%);
  background-image: -moz-linear-gradient(top, #428bca 0%, #3071a9 100%);
  background-image: linear-gradient(to bottom, #428bca 0%, #3071a9 100%);
  background-repeat: repeat-x;
  border-color: #2d6ca2;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0);
}
.btn-primary:active,
.btn-primary.active {
  background-color: #3071a9;
  border-color: #2d6ca2;
}
.btn-success {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5cb85c), to(#449d44));
  background-image: -webkit-linear-gradient(top, #5cb85c, 0%, #449d44, 100%);
  background-image: -moz-linear-gradient(top, #5cb85c 0%, #449d44 100%);
  background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);
  background-repeat: repeat-x;
  border-color: #419641;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);
}
.btn-success:active,
.btn-success.active {
  background-color: #449d44;
  border-color: #419641;
}
.btn-warning {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f0ad4e), to(#ec971f));
  background-image: -webkit-linear-gradient(top, #f0ad4e, 0%, #ec971f, 100%);
  background-image: -moz-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);
  background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);
  background-repeat: repeat-x;
  border-color: #eb9316;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);
}
.btn-warning:active,
.btn-warning.active {
  background-color: #ec971f;
  border-color: #eb9316;
}
.btn-danger {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9534f), to(#c9302c));
  background-image: -webkit-linear-gradient(top, #d9534f, 0%, #c9302c, 100%);
  background-image: -moz-linear-gradient(top, #d9534f 0%, #c9302c 100%);
  background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);
  background-repeat: repeat-x;
  border-color: #c12e2a;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);
}
.btn-danger:active,
.btn-danger.active {
  background-color: #c9302c;
  border-color: #c12e2a;
}
.btn-info {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5bc0de), to(#31b0d5));
  background-image: -webkit-linear-gradient(top, #5bc0de, 0%, #31b0d5, 100%);
  background-image: -moz-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);
  background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);
  background-repeat: repeat-x;
  border-color: #2aabd2;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);
}
.btn-info:active,
.btn-info.active {
  background-color: #31b0d5;
  border-color: #2aabd2;
}
/* Thumbnails and dropdowns. */
.thumbnail,
.img-thumbnail {
  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
}
.dropdown-menu > li > a:hover,
.dropdown-menu > li > a:focus,
.dropdown-menu > .active > a,
.dropdown-menu > .active > a:hover,
.dropdown-menu > .active > a:focus {
  background-color: #357ebd;
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#357ebd));
  background-image: -webkit-linear-gradient(top, #428bca, 0%, #357ebd, 100%);
  background-image: -moz-linear-gradient(top, #428bca 0%, #357ebd 100%);
  background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%);
  background-repeat: repeat-x;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);
}
/* Navbar: default and inverse skins. */
.navbar {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ffffff), to(#f8f8f8));
  background-image: -webkit-linear-gradient(top, #ffffff, 0%, #f8f8f8, 100%);
  background-image: -moz-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);
  background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%);
  background-repeat: repeat-x;
  border-radius: 4px;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);
  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);
  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);
}
.navbar .navbar-nav > .active > a {
  background-color: #f8f8f8;
}
.navbar-brand,
.navbar-nav > li > a {
  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25);
}
.navbar-inverse {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#3c3c3c), to(#222222));
  background-image: -webkit-linear-gradient(top, #3c3c3c, 0%, #222222, 100%);
  background-image: -moz-linear-gradient(top, #3c3c3c 0%, #222222 100%);
  background-image: linear-gradient(to bottom, #3c3c3c 0%, #222222 100%);
  background-repeat: repeat-x;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);
}
.navbar-inverse .navbar-nav > .active > a {
  background-color: #222222;
}
.navbar-inverse .navbar-brand,
.navbar-inverse .navbar-nav > li > a {
  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
}
.navbar-static-top,
.navbar-fixed-top,
.navbar-fixed-bottom {
  border-radius: 0;
}
/* Alerts. */
.alert {
  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2);
  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);
  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);
}
.alert-success {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#dff0d8), to(#c8e5bc));
  background-image: -webkit-linear-gradient(top, #dff0d8, 0%, #c8e5bc, 100%);
  background-image: -moz-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);
  background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);
  background-repeat: repeat-x;
  border-color: #b2dba1;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);
}
.alert-info {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9edf7), to(#b9def0));
  background-image: -webkit-linear-gradient(top, #d9edf7, 0%, #b9def0, 100%);
  background-image: -moz-linear-gradient(top, #d9edf7 0%, #b9def0 100%);
  background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);
  background-repeat: repeat-x;
  border-color: #9acfea;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);
}
.alert-warning {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#fcf8e3), to(#f8efc0));
  background-image: -webkit-linear-gradient(top, #fcf8e3, 0%, #f8efc0, 100%);
  background-image: -moz-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);
  background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);
  background-repeat: repeat-x;
  border-color: #f5e79e;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);
}
.alert-danger {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f2dede), to(#e7c3c3));
  background-image: -webkit-linear-gradient(top, #f2dede, 0%, #e7c3c3, 100%);
  background-image: -moz-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);
  background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);
  background-repeat: repeat-x;
  border-color: #dca7a7;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);
}
/* Progress bars. */
.progress {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ebebeb), to(#f5f5f5));
  background-image: -webkit-linear-gradient(top, #ebebeb, 0%, #f5f5f5, 100%);
  background-image: -moz-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);
  background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);
  background-repeat: repeat-x;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);
}
.progress-bar {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#3071a9));
  background-image: -webkit-linear-gradient(top, #428bca, 0%, #3071a9, 100%);
  background-image: -moz-linear-gradient(top, #428bca 0%, #3071a9 100%);
  background-image: linear-gradient(to bottom, #428bca 0%, #3071a9 100%);
  background-repeat: repeat-x;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0);
}
.progress-bar-success {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5cb85c), to(#449d44));
  background-image: -webkit-linear-gradient(top, #5cb85c, 0%, #449d44, 100%);
  background-image: -moz-linear-gradient(top, #5cb85c 0%, #449d44 100%);
  background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);
  background-repeat: repeat-x;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);
}
.progress-bar-info {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5bc0de), to(#31b0d5));
  background-image: -webkit-linear-gradient(top, #5bc0de, 0%, #31b0d5, 100%);
  background-image: -moz-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);
  background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);
  background-repeat: repeat-x;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);
}
.progress-bar-warning {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f0ad4e), to(#ec971f));
  background-image: -webkit-linear-gradient(top, #f0ad4e, 0%, #ec971f, 100%);
  background-image: -moz-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);
  background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);
  background-repeat: repeat-x;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);
}
.progress-bar-danger {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9534f), to(#c9302c));
  background-image: -webkit-linear-gradient(top, #d9534f, 0%, #c9302c, 100%);
  background-image: -moz-linear-gradient(top, #d9534f 0%, #c9302c 100%);
  background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);
  background-repeat: repeat-x;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);
}
/* List groups. */
.list-group {
  border-radius: 4px;
  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
}
.list-group-item.active,
.list-group-item.active:hover,
.list-group-item.active:focus {
  text-shadow: 0 -1px 0 #3071a9;
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#3278b3));
  background-image: -webkit-linear-gradient(top, #428bca, 0%, #3278b3, 100%);
  background-image: -moz-linear-gradient(top, #428bca 0%, #3278b3 100%);
  background-image: linear-gradient(to bottom, #428bca 0%, #3278b3 100%);
  background-repeat: repeat-x;
  border-color: #3278b3;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3278b3', GradientType=0);
}
/* Panels: heading gradients per contextual color. */
.panel {
  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
}
.panel-default > .panel-heading {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f5f5f5), to(#e8e8e8));
  background-image: -webkit-linear-gradient(top, #f5f5f5, 0%, #e8e8e8, 100%);
  background-image: -moz-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
  background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
  background-repeat: repeat-x;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
}
.panel-primary > .panel-heading {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#357ebd));
  background-image: -webkit-linear-gradient(top, #428bca, 0%, #357ebd, 100%);
  background-image: -moz-linear-gradient(top, #428bca 0%, #357ebd 100%);
  background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%);
  background-repeat: repeat-x;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);
}
.panel-success > .panel-heading {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#dff0d8), to(#d0e9c6));
  background-image: -webkit-linear-gradient(top, #dff0d8, 0%, #d0e9c6, 100%);
  background-image: -moz-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);
  background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);
  background-repeat: repeat-x;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);
}
.panel-info > .panel-heading {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9edf7), to(#c4e3f3));
  background-image: -webkit-linear-gradient(top, #d9edf7, 0%, #c4e3f3, 100%);
  background-image: -moz-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);
  background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);
  background-repeat: repeat-x;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);
}
.panel-warning > .panel-heading {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#fcf8e3), to(#faf2cc));
  background-image: -webkit-linear-gradient(top, #fcf8e3, 0%, #faf2cc, 100%);
  background-image: -moz-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);
  background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);
  background-repeat: repeat-x;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);
}
.panel-danger > .panel-heading {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f2dede), to(#ebcccc));
  background-image: -webkit-linear-gradient(top, #f2dede, 0%, #ebcccc, 100%);
  background-image: -moz-linear-gradient(top, #f2dede 0%, #ebcccc 100%);
  background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);
  background-repeat: repeat-x;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);
}
/* Wells. */
.well {
  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#e8e8e8), to(#f5f5f5));
  background-image: -webkit-linear-gradient(top, #e8e8e8, 0%, #f5f5f5, 100%);
  background-image: -moz-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);
  background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);
  background-repeat: repeat-x;
  border-color: #dcdcdc;
  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);
  -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);
  box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);
}

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load diff

File diff suppressed because one or more lines are too long

BIN
Godeps/_workspace/src/github.com/gocql/gocql/website/favicon.ico (Stored with Git LFS) generated vendored Normal file

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,228 @@
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
<svg xmlns="http://www.w3.org/2000/svg">
<metadata></metadata>
<defs>
<font id="glyphicons_halflingsregular" horiz-adv-x="1200" >
<font-face units-per-em="1200" ascent="960" descent="-240" />
<missing-glyph horiz-adv-x="500" />
<glyph />
<glyph />
<glyph unicode=" " />
<glyph unicode="*" d="M1100 500h-259l183 -183l-141 -141l-183 183v-259h-200v259l-183 -183l-141 141l183 183h-259v200h259l-183 183l141 141l183 -183v259h200v-259l183 183l141 -141l-183 -183h259v-200z" />
<glyph unicode="+" d="M1100 400h-400v-400h-300v400h-400v300h400v400h300v-400h400v-300z" />
<glyph unicode="&#xa0;" />
<glyph unicode="&#x2000;" horiz-adv-x="652" />
<glyph unicode="&#x2001;" horiz-adv-x="1304" />
<glyph unicode="&#x2002;" horiz-adv-x="652" />
<glyph unicode="&#x2003;" horiz-adv-x="1304" />
<glyph unicode="&#x2004;" horiz-adv-x="434" />
<glyph unicode="&#x2005;" horiz-adv-x="326" />
<glyph unicode="&#x2006;" horiz-adv-x="217" />
<glyph unicode="&#x2007;" horiz-adv-x="217" />
<glyph unicode="&#x2008;" horiz-adv-x="163" />
<glyph unicode="&#x2009;" horiz-adv-x="260" />
<glyph unicode="&#x200a;" horiz-adv-x="72" />
<glyph unicode="&#x202f;" horiz-adv-x="260" />
<glyph unicode="&#x205f;" horiz-adv-x="326" />
<glyph unicode="&#x20ac;" d="M800 500h-300q9 -74 33 -132t52.5 -91t62 -54.5t59 -29t46.5 -7.5q29 0 66 13t75 37t63.5 67.5t25.5 96.5h174q-31 -172 -128 -278q-107 -117 -274 -117q-205 0 -324 158q-36 46 -69 131.5t-45 205.5h-217l100 100h113q0 47 5 100h-218l100 100h135q37 167 112 257 q117 141 297 141q242 0 354 -189q60 -103 66 -209h-181q0 55 -25.5 99t-63.5 68t-75 36.5t-67 12.5q-24 0 -52.5 -10t-62.5 -32t-65.5 -67t-50.5 -107h379l-100 -100h-300q-6 -46 -6 -100h406z" />
<glyph unicode="&#x2212;" d="M1100 700h-900v-300h900v300z" />
<glyph unicode="&#x2601;" d="M178 300h750q120 0 205 86t85 208q0 120 -85 206.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5q0 -80 56.5 -137t135.5 -57z" />
<glyph unicode="&#x2709;" d="M1200 1100h-1200l600 -603zM300 600l-300 -300v600zM1200 900v-600l-300 300zM800 500l400 -400h-1200l400 400l200 -200z" />
<glyph unicode="&#x270f;" d="M1101 889l99 92q13 13 13 32.5t-13 33.5l-153 153q-15 13 -33 13t-33 -13l-94 -97zM401 189l614 614l-214 214l-614 -614zM-13 -13l333 112l-223 223z" />
<glyph unicode="&#xe000;" horiz-adv-x="500" d="M0 0z" />
<glyph unicode="&#xe001;" d="M700 100h300v-100h-800v100h300v550l-500 550h1200l-500 -550v-550z" />
<glyph unicode="&#xe002;" d="M1000 934v-521q-64 16 -138 -7q-79 -26 -122.5 -83t-25.5 -111q17 -55 85.5 -75.5t147.5 4.5q70 23 111.5 63.5t41.5 95.5v881q0 10 -7 15.5t-17 2.5l-752 -193q-10 -3 -17 -12.5t-7 -19.5v-689q-64 17 -138 -7q-79 -25 -122.5 -82t-25.5 -112t86 -75.5t147 5.5 q65 21 109 69t44 90v606z" />
<glyph unicode="&#xe003;" d="M913 432l300 -300q7 -8 7 -18t-7 -18l-109 -109q-8 -7 -18 -7t-18 7l-300 300q-119 -78 -261 -78q-200 0 -342 142t-142 342t142 342t342 142t342 -142t142 -342q0 -142 -78 -261zM176 693q0 -136 97 -233t234 -97t233.5 96.5t96.5 233.5t-96.5 233.5t-233.5 96.5 t-234 -97t-97 -233z" />
<glyph unicode="&#xe005;" d="M649 949q48 69 109.5 105t121.5 38t118.5 -20.5t102.5 -64t71 -100.5t27 -123q0 -57 -33.5 -117.5t-94 -124.5t-126.5 -127.5t-150 -152.5t-146 -174q-62 85 -145.5 174t-149.5 152.5t-126.5 127.5t-94 124.5t-33.5 117.5q0 64 28 123t73 100.5t104.5 64t119 20.5 t120 -38.5t104.5 -104.5z" />
<glyph unicode="&#xe006;" d="M791 522l145 -449l-384 275l-382 -275l146 447l-388 280h479l146 400h2l146 -400h472zM168 71l2 1z" />
<glyph unicode="&#xe007;" d="M791 522l145 -449l-384 275l-382 -275l146 447l-388 280h479l146 400h2l146 -400h472zM747 331l-74 229l193 140h-235l-77 211l-78 -211h-239l196 -142l-73 -226l192 140zM168 71l2 1z" />
<glyph unicode="&#xe008;" d="M1200 143v-143h-1200v143l400 257v100q-37 0 -68.5 74.5t-31.5 125.5v200q0 124 88 212t212 88t212 -88t88 -212v-200q0 -51 -31.5 -125.5t-68.5 -74.5v-100z" />
<glyph unicode="&#xe009;" d="M1200 1100v-1100h-1200v1100h1200zM200 1000h-100v-100h100v100zM900 1000h-600v-400h600v400zM1100 1000h-100v-100h100v100zM200 800h-100v-100h100v100zM1100 800h-100v-100h100v100zM200 600h-100v-100h100v100zM1100 600h-100v-100h100v100zM900 500h-600v-400h600 v400zM200 400h-100v-100h100v100zM1100 400h-100v-100h100v100zM200 200h-100v-100h100v100zM1100 200h-100v-100h100v100z" />
<glyph unicode="&#xe010;" d="M500 1050v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5zM1100 1050v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5h400 q21 0 35.5 -14.5t14.5 -35.5zM500 450v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5zM1100 450v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400 q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5z" />
<glyph unicode="&#xe011;" d="M300 1050v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5zM700 1050v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h200 q21 0 35.5 -14.5t14.5 -35.5zM1100 1050v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5zM300 650v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200 q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5zM700 650v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5zM1100 650v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200 q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5zM300 250v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5zM700 250v-200 q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5zM1100 250v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5 t14.5 -35.5z" />
<glyph unicode="&#xe012;" d="M300 1050v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5zM1200 1050v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h700 q21 0 35.5 -14.5t14.5 -35.5zM300 450v200q0 21 -14.5 35.5t-35.5 14.5h-200q-21 0 -35.5 -14.5t-14.5 -35.5v-200q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5zM1200 650v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v200 q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5zM300 250v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5zM1200 250v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700 q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5z" />
<glyph unicode="&#xe013;" d="M448 34l818 820l-212 212l-607 -607l-206 207l-212 -212z" />
<glyph unicode="&#xe014;" d="M882 106l-282 282l-282 -282l-212 212l282 282l-282 282l212 212l282 -282l282 282l212 -212l-282 -282l282 -282z" />
<glyph unicode="&#xe015;" d="M913 432l300 -300q7 -8 7 -18t-7 -18l-109 -109q-8 -7 -18 -7t-18 7l-300 300q-119 -78 -261 -78q-200 0 -342 142t-142 342t142 342t342 142t342 -142t142 -342q0 -142 -78 -261zM507 363q137 0 233.5 96.5t96.5 233.5t-96.5 233.5t-233.5 96.5t-234 -97t-97 -233 t97 -233t234 -97zM600 800h100v-200h-100v-100h-200v100h-100v200h100v100h200v-100z" />
<glyph unicode="&#xe016;" d="M913 432l300 -299q7 -7 7 -18t-7 -18l-109 -109q-8 -8 -18 -8t-18 8l-300 299q-120 -77 -261 -77q-200 0 -342 142t-142 342t142 342t342 142t342 -142t142 -342q0 -141 -78 -262zM176 694q0 -136 97 -233t234 -97t233.5 97t96.5 233t-96.5 233t-233.5 97t-234 -97 t-97 -233zM300 801v-200h400v200h-400z" />
<glyph unicode="&#xe017;" d="M700 750v400q0 21 -14.5 35.5t-35.5 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-400q0 -21 14.5 -35.5t35.5 -14.5h100q21 0 35.5 14.5t14.5 35.5zM800 975v166q167 -62 272 -210t105 -331q0 -118 -45.5 -224.5t-123 -184t-184 -123t-224.5 -45.5t-224.5 45.5t-184 123 t-123 184t-45.5 224.5q0 183 105 331t272 210v-166q-103 -55 -165 -155t-62 -220q0 -177 125 -302t302 -125t302 125t125 302q0 120 -62 220t-165 155z" />
<glyph unicode="&#xe018;" d="M1200 1h-200v1200h200v-1200zM900 1h-200v800h200v-800zM600 1h-200v500h200v-500zM300 301h-200v-300h200v300z" />
<glyph unicode="&#xe019;" d="M488 183l38 -151q40 -5 74 -5q27 0 74 5l38 151l6 2q46 13 93 39l5 3l134 -81q56 44 104 105l-80 134l3 5q24 44 39 93l1 6l152 38q5 40 5 74q0 28 -5 73l-152 38l-1 6q-16 51 -39 93l-3 5l80 134q-44 58 -104 105l-134 -81l-5 3q-45 25 -93 39l-6 1l-38 152q-40 5 -74 5 q-27 0 -74 -5l-38 -152l-5 -1q-50 -14 -94 -39l-5 -3l-133 81q-59 -47 -105 -105l80 -134l-3 -5q-25 -47 -38 -93l-2 -6l-151 -38q-6 -48 -6 -73q0 -33 6 -74l151 -38l2 -6q14 -49 38 -93l3 -5l-80 -134q45 -59 105 -105l133 81l5 -3q45 -26 94 -39zM600 815q89 0 152 -63 t63 -151q0 -89 -63 -152t-152 -63t-152 63t-63 152q0 88 63 151t152 63z" />
<glyph unicode="&#xe020;" d="M900 1100h275q10 0 17.5 -7.5t7.5 -17.5v-50q0 -11 -7 -18t-18 -7h-1050q-11 0 -18 7t-7 18v50q0 10 7.5 17.5t17.5 7.5h275v100q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5v-100zM800 1100v100h-300v-100h300zM200 900h900v-800q0 -41 -29.5 -71 t-70.5 -30h-700q-41 0 -70.5 30t-29.5 71v800zM300 100h100v700h-100v-700zM500 100h100v700h-100v-700zM700 100h100v700h-100v-700zM900 100h100v700h-100v-700z" />
<glyph unicode="&#xe021;" d="M1301 601h-200v-600h-300v400h-300v-400h-300v600h-200l656 644z" />
<glyph unicode="&#xe022;" d="M600 700h400v-675q0 -11 -7 -18t-18 -7h-850q-11 0 -18 7t-7 18v1150q0 11 7 18t18 7h475v-500zM1000 800h-300v300z" />
<glyph unicode="&#xe023;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM600 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM600 600h200 v-100h-300v400h100v-300z" />
<glyph unicode="&#xe024;" d="M721 400h-242l-40 -400h-539l431 1200h209l-21 -300h162l-20 300h208l431 -1200h-538zM712 500l-27 300h-170l-27 -300h224z" />
<glyph unicode="&#xe025;" d="M1100 400v-400h-1100v400h490l-290 300h200v500h300v-500h200l-290 -300h490zM988 300h-175v-100h175v100z" />
<glyph unicode="&#xe026;" d="M600 1199q122 0 233 -47.5t191 -127.5t127.5 -191t47.5 -233t-47.5 -233t-127.5 -191t-191 -127.5t-233 -47.5t-233 47.5t-191 127.5t-127.5 191t-47.5 233t47.5 233t127.5 191t191 127.5t233 47.5zM600 1012q-170 0 -291 -121t-121 -291t121 -291t291 -121t291 121 t121 291t-121 291t-291 121zM700 600h150l-250 -300l-250 300h150v300h200v-300z" />
<glyph unicode="&#xe027;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM600 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM850 600h-150 v-300h-200v300h-150l250 300z" />
<glyph unicode="&#xe028;" d="M0 500l200 700h800q199 -700 200 -700v-475q0 -11 -7 -18t-18 -7h-1150q-11 0 -18 7t-7 18v475zM903 1000h-606l-97 -500h200l50 -200h300l50 200h200z" />
<glyph unicode="&#xe029;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM600 1014q-171 0 -292.5 -121.5t-121.5 -292.5q0 -172 121.5 -293t292.5 -121t292.5 121t121.5 293q0 171 -121.5 292.5t-292.5 121.5zM797 598 l-297 -201v401z" />
<glyph unicode="&#xe030;" d="M1177 600h-150q0 -177 -125 -302t-302 -125t-302 125t-125 302t125 302t302 125q136 0 246 -81l-146 -146h400v400l-145 -145q-157 122 -355 122q-118 0 -224.5 -45.5t-184 -123t-123 -184t-45.5 -224.5t45.5 -224.5t123 -184t184 -123t224.5 -45.5t224.5 45.5t184 123 t123 184t45.5 224.5z" />
<glyph unicode="&#xe031;" d="M700 800l147 147q-112 80 -247 80q-177 0 -302 -125t-125 -302h-150q0 118 45.5 224.5t123 184t184 123t224.5 45.5q198 0 355 -122l145 145v-400h-400zM500 400l-147 -147q112 -80 247 -80q177 0 302 125t125 302h150q0 -118 -45.5 -224.5t-123 -184t-184 -123 t-224.5 -45.5q-198 0 -355 122l-145 -145v400h400z" />
<glyph unicode="&#xe032;" d="M100 1200v-1200h1100v1200h-1100zM1100 100h-900v900h900v-900zM400 800h-100v100h100v-100zM1000 800h-500v100h500v-100zM400 600h-100v100h100v-100zM1000 600h-500v100h500v-100zM400 400h-100v100h100v-100zM1000 400h-500v100h500v-100zM400 200h-100v100h100v-100 zM1000 300h-500v-100h500v100z" />
<glyph unicode="&#xe034;" d="M200 0h-100v1100h100v-1100zM1100 600v500q-40 -81 -101.5 -115.5t-127.5 -29.5t-138 25t-139.5 40t-125.5 25t-103 -29.5t-65 -115.5v-500q60 60 127.5 84t127.5 17.5t122 -23t119 -30t110 -11t103 42t91 120.5z" />
<glyph unicode="&#xe035;" d="M1200 275v300q0 116 -49.5 227t-131 192.5t-192.5 131t-227 49.5t-227 -49.5t-192.5 -131t-131 -192.5t-49.5 -227v-300q0 -11 7 -18t18 -7h50q11 0 18 7t7 18v300q0 127 70.5 231.5t184.5 161.5t245 57t245 -57t184.5 -161.5t70.5 -231.5v-300q0 -11 7 -18t18 -7h50 q11 0 18 7t7 18zM400 480v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14v460q0 8 6 14t14 6h160q8 0 14 -6t6 -14zM1000 480v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14v460q0 8 6 14t14 6h160q8 0 14 -6t6 -14z" />
<glyph unicode="&#xe036;" d="M0 800v-400h300l300 -200v800l-300 -200h-300zM971 600l141 -141l-71 -71l-141 141l-141 -141l-71 71l141 141l-141 141l71 71l141 -141l141 141l71 -71z" />
<glyph unicode="&#xe037;" d="M0 800v-400h300l300 -200v800l-300 -200h-300zM700 857l69 53q111 -135 111 -310q0 -169 -106 -302l-67 54q86 110 86 248q0 146 -93 257z" />
<glyph unicode="&#xe038;" d="M974 186l6 8q142 178 142 405q0 230 -144 408l-6 8l-83 -64l7 -8q123 -151 123 -344q0 -189 -119 -339l-7 -8zM300 801l300 200v-800l-300 200h-300v400h300zM702 858l69 53q111 -135 111 -310q0 -170 -106 -303l-67 55q86 110 86 248q0 145 -93 257z" />
<glyph unicode="&#xe039;" d="M100 700h400v100h100v100h-100v300h-500v-600h100v100zM1200 700v500h-600v-200h100v-300h200v-300h300v200h-200v100h200zM100 1100h300v-300h-300v300zM800 800v300h300v-300h-300zM200 900h100v100h-100v-100zM900 1000h100v-100h-100v100zM300 600h-100v-100h-200 v-500h500v500h-200v100zM900 200v-100h-200v100h-100v100h100v200h-200v100h300v-300h200v-100h-100zM400 400v-300h-300v300h300zM300 200h-100v100h100v-100zM1100 300h100v-100h-100v100zM600 100h100v-100h-100v100zM1200 100v-100h-300v100h300z" />
<glyph unicode="&#xe040;" d="M100 1200h-100v-1000h100v1000zM300 200h-100v1000h100v-1000zM700 200h-200v1000h200v-1000zM900 200h-100v1000h100v-1000zM1200 1200v-1000h-200v1000h200zM400 100v-100h-300v100h300zM500 91h100v-91h-100v91zM700 91h100v-91h-100v91zM1100 91v-91h-200v91h200z " />
<glyph unicode="&#xe041;" d="M1200 500l-500 -500l-699 700v475q0 10 7.5 17.5t17.5 7.5h474zM320 882q29 29 29 71t-29 71q-30 30 -71.5 30t-71.5 -30q-29 -29 -29 -71t29 -71q30 -30 71.5 -30t71.5 30z" />
<glyph unicode="&#xe042;" d="M1201 500l-500 -500l-699 700v475q0 11 7 18t18 7h474zM1501 500l-500 -500l-50 50l450 450l-700 700h100zM320 882q30 29 30 71t-30 71q-29 30 -71 30t-71 -30q-30 -29 -30 -71t30 -71q29 -30 71 -30t71 30z" />
<glyph unicode="&#xe043;" d="M1200 1200v-1000l-100 -100v1000h-750l-100 -100h750v-1000h-900v1025l175 175h925z" />
<glyph unicode="&#xe045;" d="M947 829l-94 346q-2 11 -10 18t-18 7h-450q-10 0 -18 -7t-10 -18l-94 -346l40 -124h592zM1200 800v-700h-200v200h-800v-200h-200v700h200l100 -200h600l100 200h200zM881 176l38 -152q2 -10 -3.5 -17t-15.5 -7h-600q-10 0 -15.5 7t-3.5 17l38 152q2 10 11.5 17t19.5 7 h500q10 0 19.5 -7t11.5 -17z" />
<glyph unicode="&#xe047;" d="M1200 0v66q-34 1 -74 43q-18 19 -33 42t-21 37l-6 13l-385 998h-93l-399 -1006q-24 -48 -52 -75q-12 -12 -33 -25t-36 -20l-15 -7v-66h365v66q-41 0 -72 11t-49 38t1 71l92 234h391l82 -222q16 -45 -5.5 -88.5t-74.5 -43.5v-66h417zM416 521l178 457l46 -140l116 -317 h-340z" />
<glyph unicode="&#xe048;" d="M100 1199h471q120 0 213 -88t93 -228q0 -55 -11.5 -101.5t-28 -74t-33.5 -47.5t-28 -28l-12 -7q8 -3 21.5 -9t48 -31.5t60.5 -58t47.5 -91.5t21.5 -129q0 -84 -59 -156.5t-142 -111t-162 -38.5h-500v89q41 7 70.5 32.5t29.5 65.5v827q0 28 -1 39.5t-5.5 26t-15.5 21 t-29 14t-49 14.5v70zM400 1079v-379h139q76 0 130 61.5t54 138.5q0 82 -84 130.5t-239 48.5zM400 200h161q89 0 153 48.5t64 132.5q0 90 -62.5 154.5t-156.5 64.5h-159v-400z" />
<glyph unicode="&#xe049;" d="M877 1200l2 -57q-33 -8 -62 -25.5t-46 -37t-29.5 -38t-17.5 -30.5l-5 -12l-128 -825q-10 -52 14 -82t95 -36v-57h-500v57q77 7 134.5 40.5t65.5 80.5l173 849q10 56 -10 74t-91 37q-6 1 -10.5 2.5t-9.5 2.5v57h425z" />
<glyph unicode="&#xe050;" d="M1150 1200h150v-300h-50q0 29 -8 48.5t-18.5 30t-33.5 15t-39.5 5.5t-50.5 1h-200v-850l100 -50v-100h-400v100l100 50v850h-200q-34 0 -50.5 -1t-40 -5.5t-33.5 -15t-18.5 -30t-8.5 -48.5h-49v300h150h700zM100 1000v-800h75l-125 -167l-125 167h75v800h-75l125 167 l125 -167h-75z" />
<glyph unicode="&#xe051;" d="M950 1201h150v-300h-50q0 29 -8 48.5t-18 30t-33.5 15t-40 5.5t-50.5 1h-200v-650l100 -50v-100h-400v100l100 50v650h-200q-34 0 -50.5 -1t-39.5 -5.5t-33.5 -15t-18.5 -30t-8 -48.5h-50v300h150h700zM200 101h800v75l167 -125l-167 -125v75h-800v-75l-167 125l167 125 v-75z" />
<glyph unicode="&#xe052;" d="M700 950v100q0 21 -14.5 35.5t-35.5 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h600q21 0 35.5 15t14.5 35zM1100 650v100q0 21 -14.5 35.5t-35.5 14.5h-1000q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h1000 q21 0 35.5 15t14.5 35zM900 350v100q0 21 -14.5 35.5t-35.5 14.5h-800q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h800q21 0 35.5 15t14.5 35zM1200 50v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35 t35.5 -15h1100q21 0 35.5 15t14.5 35z" />
<glyph unicode="&#xe053;" d="M1000 950v100q0 21 -14.5 35.5t-35.5 14.5h-700q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h700q21 0 35.5 15t14.5 35zM1200 650v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h1100 q21 0 35.5 15t14.5 35zM1000 350v100q0 21 -14.5 35.5t-35.5 14.5h-700q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h700q21 0 35.5 15t14.5 35zM1200 50v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35 t35.5 -15h1100q21 0 35.5 15t14.5 35z" />
<glyph unicode="&#xe054;" d="M500 950v100q0 21 14.5 35.5t35.5 14.5h600q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-600q-21 0 -35.5 15t-14.5 35zM100 650v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1000q-21 0 -35.5 15 t-14.5 35zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100 q-21 0 -35.5 15t-14.5 35z" />
<glyph unicode="&#xe055;" d="M0 950v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM0 650v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15 t-14.5 35zM0 350v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100 q-21 0 -35.5 15t-14.5 35z" />
<glyph unicode="&#xe056;" d="M0 950v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM300 950v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15 t-14.5 35zM0 650v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM300 650v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800 q-21 0 -35.5 15t-14.5 35zM0 350v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15 h-800q-21 0 -35.5 15t-14.5 35zM0 50v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM300 50v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15 h-800q-21 0 -35.5 15t-14.5 35z" />
<glyph unicode="&#xe057;" d="M400 1100h-100v-1100h100v1100zM700 950v100q0 21 -15 35.5t-35 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h100q20 0 35 15t15 35zM1100 650v100q0 21 -15 35.5t-35 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15 h500q20 0 35 15t15 35zM100 425v75h-201v100h201v75l166 -125zM900 350v100q0 21 -15 35.5t-35 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h300q20 0 35 15t15 35zM1200 50v100q0 21 -15 35.5t-35 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5 v-100q0 -20 14.5 -35t35.5 -15h600q20 0 35 15t15 35z" />
<glyph unicode="&#xe058;" d="M201 950v100q0 21 -15 35.5t-35 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h100q20 0 35 15t15 35zM801 1100h100v-1100h-100v1100zM601 650v100q0 21 -15 35.5t-35 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15 h500q20 0 35 15t15 35zM1101 425v75h200v100h-200v75l-167 -125zM401 350v100q0 21 -15 35.5t-35 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -20 14.5 -35t35.5 -15h300q20 0 35 15t15 35zM701 50v100q0 21 -15 35.5t-35 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5 v-100q0 -20 14.5 -35t35.5 -15h600q20 0 35 15t15 35z" />
<glyph unicode="&#xe059;" d="M900 925v-650q0 -31 -22 -53t-53 -22h-750q-31 0 -53 22t-22 53v650q0 31 22 53t53 22h750q31 0 53 -22t22 -53zM1200 300l-300 300l300 300v-600z" />
<glyph unicode="&#xe060;" d="M1200 1056v-1012q0 -18 -12.5 -31t-31.5 -13h-1112q-18 0 -31 13t-13 31v1012q0 18 13 31t31 13h1112q19 0 31.5 -13t12.5 -31zM1100 1000h-1000v-737l247 182l298 -131l-74 156l293 318l236 -288v500zM476 750q0 -56 -39 -95t-95 -39t-95 39t-39 95t39 95t95 39t95 -39 t39 -95z" />
<glyph unicode="&#xe062;" d="M600 1213q123 0 227 -63t164.5 -169.5t60.5 -229.5t-73 -272q-73 -114 -166.5 -237t-150.5 -189l-57 -66q-10 9 -27 26t-66.5 70.5t-96 109t-104 135.5t-100.5 155q-63 139 -63 262q0 124 60.5 231.5t165 172t226.5 64.5zM599 514q107 0 182.5 75.5t75.5 182.5t-75.5 182 t-182.5 75t-182 -75.5t-75 -181.5q0 -107 75.5 -182.5t181.5 -75.5z" />
<glyph unicode="&#xe063;" d="M600 1199q122 0 233 -47.5t191 -127.5t127.5 -191t47.5 -233t-47.5 -233t-127.5 -191t-191 -127.5t-233 -47.5t-233 47.5t-191 127.5t-127.5 191t-47.5 233t47.5 233t127.5 191t191 127.5t233 47.5zM600 173v854q-176 0 -301.5 -125t-125.5 -302t125.5 -302t301.5 -125z " />
<glyph unicode="&#xe064;" d="M554 1295q21 -71 57.5 -142.5t76 -130.5t83 -118.5t82 -117t70 -116t50 -125.5t18.5 -136q0 -89 -39 -165.5t-102 -126.5t-140 -79.5t-156 -33.5q-114 6 -211.5 53t-161.5 138.5t-64 210.5q0 94 34 186t88.5 172.5t112 159t115 177t87.5 194.5zM455 296q-7 6 -18 17 t-34 48t-33 77q-15 73 -14 143.5t10 122.5l9 51q-92 -110 -119.5 -185t-12.5 -156q14 -82 59.5 -136t136.5 -80z" />
<glyph unicode="&#xe065;" d="M1108 902l113 113l-21 85l-92 28l-113 -113zM1100 625v-225q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5v300q0 165 117.5 282.5t282.5 117.5q366 -6 397 -14l-186 -186h-311q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5 t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v125zM436 341l161 50l412 412l-114 113l-405 -405z" />
<glyph unicode="&#xe066;" d="M1100 453v-53q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5v300q0 165 117.5 282.5t282.5 117.5h261l2 -80q-133 -32 -218 -120h-145q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5z M813 431l360 324l-359 318v-216q-7 0 -19 -1t-48 -8t-69.5 -18.5t-76.5 -37t-76.5 -59t-62 -88t-39.5 -121.5q30 38 81.5 64t103 35.5t99 14t77.5 3.5l29 -1v-209z" />
<glyph unicode="&#xe067;" d="M1100 569v-169q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5v300q0 165 117.5 282.5t282.5 117.5h300q60 0 127 -23l-178 -177h-349q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v69z M625 348l566 567l-136 137l-430 -431l-147 147l-136 -136z" />
<glyph unicode="&#xe068;" d="M900 303v198h-200v-200h195l-295 -300l-300 300h200v200h-200v-198l-300 300l300 296v-198h200v200h-200l300 300l295 -300h-195v-200h200v198l300 -296z" />
<glyph unicode="&#xe069;" d="M900 0l-500 488v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v1000q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-437l500 487v-1100z" />
<glyph unicode="&#xe070;" d="M1200 0l-500 488v-488l-500 488v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v1000q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-437l500 487v-487l500 487v-1100z" />
<glyph unicode="&#xe071;" d="M1200 0l-500 488v-488l-564 550l564 550v-487l500 487v-1100z" />
<glyph unicode="&#xe072;" d="M1100 550l-900 550v-1100z" />
<glyph unicode="&#xe073;" d="M500 150v800q0 21 -14.5 35.5t-35.5 14.5h-200q-21 0 -35.5 -14.5t-14.5 -35.5v-800q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5zM900 150v800q0 21 -14.5 35.5t-35.5 14.5h-200q-21 0 -35.5 -14.5t-14.5 -35.5v-800q0 -21 14.5 -35.5t35.5 -14.5h200 q21 0 35.5 14.5t14.5 35.5z" />
<glyph unicode="&#xe074;" d="M1100 150v800q0 21 -14.5 35.5t-35.5 14.5h-800q-21 0 -35.5 -14.5t-14.5 -35.5v-800q0 -20 14.5 -35t35.5 -15h800q21 0 35.5 15t14.5 35z" />
<glyph unicode="&#xe075;" d="M500 0v488l-500 -488v1100l500 -487v487l564 -550z" />
<glyph unicode="&#xe076;" d="M1050 1100h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438l-500 -488v488l-500 -488v1100l500 -487v487l500 -487v437q0 21 14.5 35.5t35.5 14.5z" />
<glyph unicode="&#xe077;" d="M850 1100h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438l-500 -488v1100l500 -487v437q0 21 14.5 35.5t35.5 14.5z" />
<glyph unicode="&#xe078;" d="M650 1064l-550 -564h1100zM1200 350v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5z" />
<glyph unicode="&#xe079;" d="M777 7l240 240l-353 353l353 353l-240 240l-592 -594z" />
<glyph unicode="&#xe080;" d="M513 -46l-241 240l353 353l-353 353l241 240l572 -571l21 -22l-1 -1v-1z" />
<glyph unicode="&#xe081;" d="M600 1197q162 0 299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5t80 299.5t217.5 217.5t299.5 80zM500 900v-200h-200v-200h200v-200h200v200h200v200h-200v200h-200z" />
<glyph unicode="&#xe082;" d="M600 1197q162 0 299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5t80 299.5t217.5 217.5t299.5 80zM300 700v-200h600v200h-600z" />
<glyph unicode="&#xe083;" d="M600 1197q162 0 299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5t80 299.5t217.5 217.5t299.5 80zM247 741l141 -141l-142 -141l213 -213l141 142l141 -142l213 213l-142 141l142 141l-213 212l-141 -141 l-141 142z" />
<glyph unicode="&#xe084;" d="M600 1197q162 0 299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5t80 299.5t217.5 217.5t299.5 80zM546 623l-102 102l-174 -174l276 -277l411 411l-175 174z" />
<glyph unicode="&#xe085;" d="M600 1197q162 0 299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5t80 299.5t217.5 217.5t299.5 80zM500 500h200q5 3 14 8t31.5 25.5t39.5 45.5t31 69t14 94q0 51 -17.5 89t-42 58t-58.5 32t-58.5 15t-51.5 3 q-105 0 -172 -56t-67 -183h144q4 0 11.5 -1t11 -1t6.5 3t3 9t1 11t3.5 8.5t3.5 6t5.5 4t6.5 2.5t9 1.5t9 0.5h11.5h12.5q19 0 30 -10t11 -26q0 -22 -4 -28t-27 -22q-5 -1 -12.5 -3t-27 -13.5t-34 -27t-26.5 -46t-11 -68.5zM500 400v-100h200v100h-200z" />
<glyph unicode="&#xe086;" d="M600 1197q162 0 299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5t80 299.5t217.5 217.5t299.5 80zM500 900v-100h200v100h-200zM400 700v-100h100v-200h-100v-100h400v100h-100v300h-300z" />
<glyph unicode="&#xe087;" d="M1200 700v-200h-203q-25 -102 -116.5 -186t-180.5 -117v-197h-200v197q-140 27 -208 102.5t-98 200.5h-194v200h194q15 60 36 104.5t55.5 86t88 69t126.5 40.5v200h200v-200q54 -20 113 -60t112.5 -105.5t71.5 -134.5h203zM700 500v-206q149 48 201 206h-201v200h200 q-25 74 -76 127.5t-124 76.5v-204h-200v203q-75 -24 -130 -77.5t-79 -125.5h209v-200h-210q24 -73 79.5 -127.5t130.5 -78.5v206h200z" />
<glyph unicode="&#xe088;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM600 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM844 735 l-135 -135l135 -135l-109 -109l-135 135l-135 -135l-109 109l135 135l-135 135l109 109l135 -135l135 135z" />
<glyph unicode="&#xe089;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM600 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM896 654 l-346 -345l-228 228l141 141l87 -87l204 205z" />
<glyph unicode="&#xe090;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM248 385l568 567q-100 62 -216 62q-171 0 -292.5 -121.5t-121.5 -292.5q0 -115 62 -215zM955 809l-564 -564q97 -59 209 -59q171 0 292.5 121.5 t121.5 292.5q0 112 -59 209z" />
<glyph unicode="&#xe091;" d="M1200 400h-600v-301l-600 448l600 453v-300h600v-300z" />
<glyph unicode="&#xe092;" d="M600 400h-600v300h600v300l600 -453l-600 -448v301z" />
<glyph unicode="&#xe093;" d="M1098 600h-298v-600h-300v600h-296l450 600z" />
<glyph unicode="&#xe094;" d="M998 600l-449 -600l-445 600h296v600h300v-600h298z" />
<glyph unicode="&#xe095;" d="M600 199v301q-95 -2 -183 -20t-170 -52t-147 -92.5t-100 -135.5q6 132 41 238.5t103.5 193t184 138t271.5 59.5v271l600 -453z" />
<glyph unicode="&#xe096;" d="M1200 1200h-400l129 -129l-294 -294l142 -142l294 294l129 -129v400zM565 423l-294 -294l129 -129h-400v400l129 -129l294 294z" />
<glyph unicode="&#xe097;" d="M871 730l129 -130h-400v400l129 -129l295 295l142 -141zM200 600h400v-400l-129 130l-295 -295l-142 141l295 295z" />
<glyph unicode="&#xe101;" d="M600 1177q118 0 224.5 -45.5t184 -123t123 -184t45.5 -224.5t-45.5 -224.5t-123 -184t-184 -123t-224.5 -45.5t-224.5 45.5t-184 123t-123 184t-45.5 224.5t45.5 224.5t123 184t184 123t224.5 45.5zM686 549l58 302q4 20 -8 34.5t-33 14.5h-207q-20 0 -32 -14.5t-8 -34.5 l58 -302q4 -20 21.5 -34.5t37.5 -14.5h54q20 0 37.5 14.5t21.5 34.5zM700 400h-200v-100h200v100z" />
<glyph unicode="&#xe102;" d="M1200 900h-111v6t-1 15t-3 18l-34 172q-11 39 -41.5 63t-69.5 24q-32 0 -61 -17l-239 -144q-22 -13 -40 -35q-19 24 -40 36l-238 144q-33 18 -62 18q-39 0 -69.5 -23t-40.5 -61l-35 -177q-2 -8 -3 -18t-1 -15v-6h-111v-100h100v-200h400v300h200v-300h400v200h100v100z M731 900l202 197q5 -12 12 -32.5t23 -64t25 -72t7 -28.5h-269zM481 900h-281q-3 0 14 48t35 96l18 47zM100 0h400v400h-400v-400zM700 400h400v-400h-400v400z" />
<glyph unicode="&#xe103;" d="M0 121l216 193q-9 53 -13 83t-5.5 94t9 113t38.5 114t74 124q47 60 99.5 102.5t103 68t127.5 48t145.5 37.5t184.5 43.5t220 58.5q0 -189 -22 -343t-59 -258t-89 -181.5t-108.5 -120t-122 -68t-125.5 -30t-121.5 -1.5t-107.5 12.5t-87.5 17t-56.5 7.5l-99 -55l-201 -202 v143zM692 611q70 38 118.5 69.5t102 79t99 111.5t86.5 148q22 50 24 60t-6 19q-7 5 -17 5t-26.5 -14.5t-33.5 -39.5q-35 -51 -113.5 -108.5t-139.5 -89.5l-61 -32q-369 -197 -458 -401q-48 -111 -28.5 -117.5t86.5 76.5q55 66 367 234z" />
<glyph unicode="&#xe105;" d="M1261 600l-26 -40q-6 -10 -20 -30t-49 -63.5t-74.5 -85.5t-97 -90t-116.5 -83.5t-132.5 -59t-145.5 -23.5t-145.5 23.5t-132.5 59t-116.5 83.5t-97 90t-74.5 85.5t-49 63.5t-20 30l-26 40l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5 t145.5 -23.5t132.5 -59t116.5 -83.5t97 -90t74.5 -85.5t49 -63.5t20 -30zM600 240q64 0 123.5 20t100.5 45.5t85.5 71.5t66.5 75.5t58 81.5t47 66q-1 1 -28.5 37.5t-42 55t-43.5 53t-57.5 63.5t-58.5 54q49 -74 49 -163q0 -124 -88 -212t-212 -88t-212 88t-88 212 q0 85 46 158q-102 -87 -226 -258q7 -10 40.5 -58t56 -78.5t68 -77.5t87.5 -75t103 -49.5t125 -21.5zM484 762l-107 -106q49 -124 154 -191l105 105q-37 24 -75 72t-57 84z" />
<glyph unicode="&#xe106;" d="M906 1200l-314 -1200h-148l37 143q-82 21 -165 71.5t-140 102t-109.5 112t-72 88.5t-29.5 43l-26 40l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5q61 0 121 -17l37 142h148zM1261 600l-26 -40q-7 -12 -25.5 -38t-63.5 -79.5t-95.5 -102.5 t-124 -100t-146.5 -79l38 145q22 15 44.5 34t46 44t40.5 44t41 50.5t33.5 43.5t33 44t24.5 34q-97 127 -140 175l39 146q67 -54 131.5 -125.5t87.5 -103.5t36 -52zM513 264l37 141q-107 18 -178.5 101.5t-71.5 193.5q0 85 46 158q-102 -87 -226 -258q210 -282 393 -336z M484 762l-107 -106q49 -124 154 -191l47 47l23 87q-30 28 -59 69t-44 68z" />
<glyph unicode="&#xe107;" d="M-47 0h1294q37 0 50.5 35.5t-7.5 67.5l-642 1056q-20 33 -48 36t-48 -29l-642 -1066q-21 -32 -7.5 -66t50.5 -34zM700 200v100h-200v-100h-345l445 723l445 -723h-345zM700 700h-200v-100l100 -300l100 300v100z" />
<glyph unicode="&#xe108;" d="M800 711l363 -325q15 -14 26 -38.5t11 -44.5v-41q0 -20 -12 -26.5t-29 5.5l-359 249v-263q100 -91 100 -113v-64q0 -21 -13 -29t-32 1l-94 78h-222l-94 -78q-19 -9 -32 -1t-13 29v64q0 22 100 113v263l-359 -249q-17 -12 -29 -5.5t-12 26.5v41q0 20 11 44.5t26 38.5 l363 325v339q0 62 44 106t106 44t106 -44t44 -106v-339z" />
<glyph unicode="&#xe110;" d="M941 800l-600 -600h-341v200h259l600 600h241v198l300 -295l-300 -300v197h-159zM381 678l141 142l-181 180h-341v-200h259zM1100 598l300 -295l-300 -300v197h-241l-181 181l141 142l122 -123h159v198z" />
<glyph unicode="&#xe111;" d="M100 1100h1000q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-596l-304 -300v300h-100q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5z" />
<glyph unicode="&#xe112;" d="M400 900h-300v300h300v-300zM1100 900h-300v300h300v-300zM1100 800v-200q0 -42 -3 -83t-15 -104t-31.5 -116t-58 -109.5t-89 -96.5t-129 -65.5t-174.5 -25.5t-174.5 25.5t-129 65.5t-89 96.5t-58 109.5t-31.5 116t-15 104t-3 83v200h300v-250q0 -113 6 -145 q17 -92 102 -117q39 -11 92 -11q37 0 66.5 5.5t50 15.5t36 24t24 31.5t14 37.5t7 42t2.5 45t0 47v25v250h300z" />
<glyph unicode="&#xe113;" d="M902 184l226 227l-578 579l-580 -579l227 -227l352 353z" />
<glyph unicode="&#xe114;" d="M650 218l578 579l-226 227l-353 -353l-352 353l-227 -227z" />
<glyph unicode="&#xe115;" d="M1198 400v600h-796l215 -200h381v-400h-198l299 -283l299 283h-200zM-198 700l299 283l300 -283h-203v-400h385l215 -200h-800v600h-196z" />
<glyph unicode="&#xe116;" d="M1050 1200h94q20 0 35 -14.5t15 -35.5t-15 -35.5t-35 -14.5h-54l-201 -961q-2 -4 -6 -10.5t-19 -17.5t-33 -11h-31v-50q0 -20 -14.5 -35t-35.5 -15t-35.5 15t-14.5 35v50h-300v-50q0 -20 -14.5 -35t-35.5 -15t-35.5 15t-14.5 35v50h-50q-21 0 -35.5 15t-14.5 35 q0 21 14.5 35.5t35.5 14.5h535l48 200h-633q-32 0 -54.5 21t-27.5 43l-100 475q-5 24 10 42q14 19 39 19h896l38 162q5 17 18.5 27.5t30.5 10.5z" />
<glyph unicode="&#xe117;" d="M1200 1000v-100h-1200v100h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500zM0 800h1200v-800h-1200v800z" />
<glyph unicode="&#xe118;" d="M201 800l-200 -400v600h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500v-200h-1000zM1501 700l-300 -700h-1200l300 700h1200z" />
<glyph unicode="&#xe119;" d="M302 300h198v600h-198l298 300l298 -300h-198v-600h198l-298 -300z" />
<glyph unicode="&#xe120;" d="M900 303v197h-600v-197l-300 297l300 298v-198h600v198l300 -298z" />
<glyph unicode="&#xe121;" d="M31 400l172 739q5 22 23 41.5t38 19.5h672q19 0 37.5 -22.5t23.5 -45.5l172 -732h-1138zM100 300h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM900 200h-100v-100h100v100z M1100 200h-100v-100h100v100z" />
<glyph unicode="&#xe122;" d="M1100 200v850q0 21 14.5 35.5t35.5 14.5q20 0 35 -14.5t15 -35.5v-850q0 -20 -15 -35t-35 -15q-21 0 -35.5 15t-14.5 35zM325 800l675 250v-850l-675 200h-38l47 -276q2 -12 -3 -17.5t-11 -6t-21 -0.5h-8h-83q-20 0 -34.5 14t-18.5 35q-56 337 -56 351v250v5 q0 13 0.5 18.5t2.5 13t8 10.5t15 3h200zM-101 600v50q0 24 25 49t50 38l25 13v-250l-11 5.5t-24 14t-30 21.5t-24 27.5t-11 31.5z" />
<glyph unicode="&#xe124;" d="M445 1180l-45 -233l-224 78l78 -225l-233 -44l179 -156l-179 -155l233 -45l-78 -224l224 78l45 -233l155 179l155 -179l45 233l224 -78l-78 224l234 45l-180 155l180 156l-234 44l78 225l-224 -78l-45 233l-155 -180z" />
<glyph unicode="&#xe125;" d="M700 1200h-50q-27 0 -51 -20t-38 -48l-96 -198l-145 -196q-20 -26 -20 -63v-400q0 -75 100 -75h61q123 -100 139 -100h250q46 0 83 57l238 344q29 31 29 74v100q0 44 -30.5 84.5t-69.5 40.5h-328q28 118 28 125v150q0 44 -30.5 84.5t-69.5 40.5zM700 925l-50 -225h450 v-125l-250 -375h-214l-136 100h-100v375l150 212l100 213h50v-175zM0 800v-600h200v600h-200z" />
<glyph unicode="&#xe126;" d="M700 0h-50q-27 0 -51 20t-38 48l-96 198l-145 196q-20 26 -20 63v400q0 75 100 75h61q123 100 139 100h250q46 0 83 -57l238 -344q29 -31 29 -74v-100q0 -44 -30.5 -84.5t-69.5 -40.5h-328q28 -118 28 -125v-150q0 -44 -30.5 -84.5t-69.5 -40.5zM200 400h-200v600h200 v-600zM700 275l-50 225h450v125l-250 375h-214l-136 -100h-100v-375l150 -212l100 -213h50v175z" />
<glyph unicode="&#xe127;" d="M364 873l362 230q14 6 25 6q17 0 29 -12l109 -112q14 -14 14 -34q0 -18 -11 -32l-85 -121h302q85 0 138.5 -38t53.5 -110t-54.5 -111t-138.5 -39h-107l-130 -339q-7 -22 -20.5 -41.5t-28.5 -19.5h-341q-7 0 -90 81t-83 94v525q0 17 14 35.5t28 28.5zM408 792v-503 l100 -89h293l131 339q6 21 19.5 41t28.5 20h203q16 0 25 15t9 36q0 20 -9 34.5t-25 14.5h-457h-6.5h-7.5t-6.5 0.5t-6 1t-5 1.5t-5.5 2.5t-4 4t-4 5.5q-5 12 -5 20q0 14 10 27l147 183l-86 83zM208 200h-200v600h200v-600z" />
<glyph unicode="&#xe128;" d="M475 1104l365 -230q7 -4 16.5 -10.5t26 -26t16.5 -36.5v-526q0 -13 -85.5 -93.5t-93.5 -80.5h-342q-15 0 -28.5 20t-19.5 41l-131 339h-106q-84 0 -139 39t-55 111t54 110t139 37h302l-85 121q-11 16 -11 32q0 21 14 34l109 113q13 12 29 12q11 0 25 -6zM370 946 l145 -184q10 -11 10 -26q0 -11 -5 -20q-1 -3 -3.5 -5.5l-4 -4t-5 -2.5t-5.5 -1.5t-6.5 -1t-6.5 -0.5h-7.5h-6.5h-476v-100h222q15 0 28.5 -20.5t19.5 -40.5l131 -339h293l106 89v502l-342 237zM1199 201h-200v600h200v-600z" />
<glyph unicode="&#xe129;" d="M1100 473v342q0 15 -20 28.5t-41 19.5l-339 131v106q0 84 -39 139t-111 55t-110 -53.5t-38 -138.5v-302l-121 84q-15 12 -33.5 11.5t-32.5 -13.5l-112 -110q-22 -22 -6 -53l230 -363q4 -6 10.5 -15.5t26 -25t36.5 -15.5h525q13 0 94 83t81 90zM911 400h-503l-236 339 l83 86l183 -146q22 -18 47 -5q3 1 5.5 3.5l4 4t2.5 5t1.5 5.5t1 6.5t0.5 6v7.5v7v456q0 22 25 31t50 -0.5t25 -30.5v-202q0 -16 20 -29.5t41 -19.5l339 -130v-294zM1000 200v-200h-600v200h600z" />
<glyph unicode="&#xe130;" d="M305 1104v200h600v-200h-600zM605 310l339 131q20 6 40.5 19.5t20.5 28.5v342q0 7 -81 90t-94 83h-525q-17 0 -35.5 -14t-28.5 -28l-10 -15l-230 -362q-15 -31 7 -53l112 -110q13 -13 32 -13.5t34 10.5l121 85l-1 -302q0 -84 38.5 -138t110.5 -54t111 55t39 139v106z M905 804v-294l-340 -130q-20 -6 -40 -20t-20 -29v-202q0 -22 -25 -31t-50 0t-25 31v456v14.5t-1.5 11.5t-5 12t-9.5 7q-24 13 -46 -5l-184 -146l-83 86l237 339h503z" />
<glyph unicode="&#xe131;" d="M603 1195q162 0 299.5 -80t217.5 -218t80 -300t-80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM598 701h-298v-201h300l-2 -194l402 294l-402 298v-197z" />
<glyph unicode="&#xe132;" d="M597 1195q122 0 232.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-218 -217.5t-300 -80t-299.5 80t-217.5 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t231.5 47.5zM200 600l400 -294v194h302v201h-300v197z" />
<glyph unicode="&#xe133;" d="M603 1195q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM300 600h200v-300h200v300h200l-300 400z" />
<glyph unicode="&#xe134;" d="M603 1195q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM500 900v-300h-200l300 -400l300 400h-200v300h-200z" />
<glyph unicode="&#xe135;" d="M603 1195q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM627 1101q-15 -12 -36.5 -21t-34.5 -12t-44 -8t-39 -6 q-15 -3 -45.5 0.5t-45.5 -2.5q-21 -7 -52 -26.5t-34 -34.5q-3 -11 6.5 -22.5t8.5 -18.5q-3 -34 -27.5 -90.5t-29.5 -79.5q-8 -33 5.5 -92.5t7.5 -87.5q0 -9 17 -44t16 -60q12 0 23 -5.5t23 -15t20 -13.5q24 -12 108 -42q22 -8 53 -31.5t59.5 -38.5t57.5 -11q8 -18 -15 -55 t-20 -57q42 -71 87 -80q0 -6 -3 -15.5t-3.5 -14.5t4.5 -17q102 -2 221 112q30 29 47 47t34.5 49t20.5 62q-14 9 -37 9.5t-36 7.5q-14 7 -49 15t-52 19q-9 0 -39.5 -0.5t-46.5 -1.5t-39 -6.5t-39 -16.5q-50 -35 -66 -12q-4 2 -3.5 25.5t0.5 25.5q-6 13 -26.5 17t-24.5 7 q2 22 -2 41t-16.5 28t-38.5 -20q-23 -25 -42 4q-19 28 -8 58q6 16 22 22q6 -1 26 -1.5t33.5 -4t19.5 -13.5q12 -19 32 -37.5t34 -27.5l14 -8q0 3 9.5 39.5t5.5 57.5q-4 23 14.5 44.5t22.5 31.5q5 14 10 35t8.5 31t15.5 22.5t34 21.5q-6 18 10 37q8 0 23.5 -1.5t24.5 -1.5 t20.5 4.5t20.5 15.5q-10 23 -30.5 42.5t-38 30t-49 26.5t-43.5 23q11 41 1 44q31 -13 58.5 -14.5t39.5 3.5l11 4q6 36 -17 53.5t-64 28.5t-56 23q-19 -3 -37 0zM613 994q0 -18 8 -42.5t16.5 -44t9.5 -23.5q-9 2 -31 5t-36 5t-32 8t-30 14q3 12 16 30t16 25q10 -10 18.5 -10 t14 6t14.5 14.5t16 12.5z" />
<glyph unicode="&#xe137;" horiz-adv-x="1220" d="M100 1196h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 1096h-200v-100h200v100zM100 796h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 696h-500v-100h500v100zM100 396h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 296h-300v-100h300v100z " />
<glyph unicode="&#xe138;" d="M1100 1200v-100h-1000v100h1000zM150 1000h900l-350 -500v-300l-200 -200v500z" />
<glyph unicode="&#xe140;" d="M329 729l142 142l-200 200l129 129h-400v-400l129 129zM1200 1200v-400l-129 129l-200 -200l-142 142l200 200l-129 129h400zM271 129l129 -129h-400v400l129 -129l200 200l142 -142zM1071 271l129 129v-400h-400l129 129l-200 200l142 142z" />
<glyph unicode="&#xe141;" d="M596 1192q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM596 1010q-171 0 -292.5 -121.5t-121.5 -292.5q0 -172 121.5 -293t292.5 -121t292.5 121t121.5 293q0 171 -121.5 292.5t-292.5 121.5zM455 905 q22 0 38 -16t16 -39t-16 -39t-38 -16q-23 0 -39 16.5t-16 38.5t16 38.5t39 16.5zM708 821l1 1q-9 14 -9 28q0 22 16 38.5t39 16.5q22 0 38 -16t16 -39t-16 -39t-38 -16q-14 0 -29 10l-55 -145q17 -22 17 -51q0 -36 -25.5 -61.5t-61.5 -25.5t-61.5 25.5t-25.5 61.5 q0 32 20.5 56.5t51.5 29.5zM855 709q23 0 38.5 -15.5t15.5 -38.5t-16 -39t-38 -16q-23 0 -39 16t-16 39q0 22 16 38t39 16zM345 709q23 0 39 -16t16 -38q0 -23 -16 -39t-39 -16q-22 0 -38 16t-16 39t15.5 38.5t38.5 15.5z" />
<glyph unicode="&#xe143;" d="M649 54l-16 22q-90 125 -293 323q-71 70 -104.5 105.5t-77 89.5t-61 99t-17.5 91q0 131 98.5 229.5t230.5 98.5q143 0 241 -129q103 129 246 129q129 0 226 -98.5t97 -229.5q0 -46 -17.5 -91t-61 -99t-77 -89.5t-104.5 -105.5q-203 -198 -293 -323zM844 524l12 12 q64 62 97.5 97t64.5 79t31 72q0 71 -48 119t-105 48q-74 0 -132 -82l-118 -171l-114 174q-51 79 -123 79q-60 0 -109.5 -49t-49.5 -118q0 -27 30.5 -70t61.5 -75.5t95 -94.5l22 -22q93 -90 190 -201q82 92 195 203z" />
<glyph unicode="&#xe144;" d="M476 406l19 -17l105 105l-212 212l389 389l247 -247l-95 -96l18 -18q46 -46 77 -99l29 29q35 35 62.5 88t27.5 96q0 93 -66 159l-141 141q-66 66 -159 66q-95 0 -159 -66l-283 -283q-66 -64 -66 -159q0 -93 66 -159zM123 193l141 -141q66 -66 159 -66q95 0 159 66 l283 283q66 66 66 159t-66 159l-141 141q-12 12 -19 17l-105 -105l212 -212l-389 -389l-247 248l95 95l-18 18q-46 45 -75 101l-55 -55q-66 -66 -66 -159q0 -94 66 -160z" />
<glyph unicode="&#xe145;" d="M200 100v953q0 21 30 46t81 48t129 38t163 15t162 -15t127 -38t79 -48t29 -46v-953q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-41 0 -70.5 29.5t-29.5 70.5zM900 1000h-600v-700h600v700zM600 46q43 0 73.5 30.5t30.5 73.5t-30.5 73.5t-73.5 30.5t-73.5 -30.5t-30.5 -73.5 t30.5 -73.5t73.5 -30.5z" />
<glyph unicode="&#xe148;" d="M700 1029v-307l64 -14q34 -7 64 -16.5t70 -31.5t67.5 -52t47.5 -80.5t20 -112.5q0 -139 -89 -224t-244 -96v-77h-100v78q-152 17 -237 104q-40 40 -52.5 93.5t-15.5 139.5h139q5 -77 48.5 -126.5t117.5 -64.5v335l-27 7q-46 14 -79 26.5t-72 36t-62.5 52t-40 72.5 t-16.5 99q0 92 44 159.5t109 101t144 40.5v78h100v-79q38 -4 72.5 -13.5t75.5 -31.5t71 -53.5t51.5 -84t24.5 -118.5h-159q-8 72 -35 109.5t-101 50.5zM600 755v274q-61 -8 -97.5 -37.5t-36.5 -102.5q0 -29 8 -51t16.5 -34t29.5 -22.5t31 -13.5t38 -10q7 -2 11 -3zM700 548 v-311q170 18 170 151q0 64 -44 99.5t-126 60.5z" />
<glyph unicode="&#xe149;" d="M866 300l50 -147q-41 -25 -80.5 -36.5t-59 -13t-61.5 -1.5q-23 0 -128 33t-155 29q-39 -4 -82 -17t-66 -25l-24 -11l-55 145l16.5 11t15.5 10t13.5 9.5t14.5 12t14.5 14t17.5 18.5q48 55 54 126.5t-30 142.5h-221v100h166q-24 49 -44 104q-10 26 -14.5 55.5t-3 72.5 t25 90t68.5 87q97 88 263 88q129 0 230 -89t101 -208h-153q0 52 -34 89.5t-74 51.5t-76 14q-37 0 -79 -14.5t-62 -35.5q-41 -44 -41 -101q0 -11 2.5 -24.5t5.5 -24t9.5 -26.5t10.5 -25t14 -27.5t14 -25.5t15.5 -27t13.5 -24h242v-100h-197q8 -50 -2.5 -115t-31.5 -94 q-41 -59 -99 -113q35 11 84 18t70 7q32 1 102 -16t104 -17q76 0 136 30z" />
<glyph unicode="&#xe150;" d="M300 0l298 300h-198v900h-200v-900h-198zM900 1200l298 -300h-198v-900h-200v900h-198z" />
<glyph unicode="&#xe151;" d="M400 300h198l-298 -300l-298 300h198v900h200v-900zM1000 1200v-500h-100v100h-100v-100h-100v500h300zM901 1100h-100v-200h100v200zM700 500h300v-200h-99v-100h-100v100h99v100h-200v100zM800 100h200v-100h-300v200h100v-100z" />
<glyph unicode="&#xe152;" d="M400 300h198l-298 -300l-298 300h198v900h200v-900zM1000 1200v-200h-99v-100h-100v100h99v100h-200v100h300zM800 800h200v-100h-300v200h100v-100zM700 500h300v-500h-100v100h-100v-100h-100v500zM801 200h100v200h-100v-200z" />
<glyph unicode="&#xe153;" d="M300 0l298 300h-198v900h-200v-900h-198zM900 1100h-100v100h200v-500h-100v400zM1100 500v-500h-100v100h-200v400h300zM1001 400h-100v-200h100v200z" />
<glyph unicode="&#xe154;" d="M300 0l298 300h-198v900h-200v-900h-198zM1100 1200v-500h-100v100h-200v400h300zM1001 1100h-100v-200h100v200zM900 400h-100v100h200v-500h-100v400z" />
<glyph unicode="&#xe155;" d="M300 0l298 300h-198v900h-200v-900h-198zM900 1000h-200v200h200v-200zM1000 700h-300v200h300v-200zM1100 400h-400v200h400v-200zM1200 100h-500v200h500v-200z" />
<glyph unicode="&#xe156;" d="M300 0l298 300h-198v900h-200v-900h-198zM1200 1000h-500v200h500v-200zM1100 700h-400v200h400v-200zM1000 400h-300v200h300v-200zM900 100h-200v200h200v-200z" />
<glyph unicode="&#xe157;" d="M400 1100h300q162 0 281 -118.5t119 -281.5v-300q0 -165 -118.5 -282.5t-281.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5v300q0 165 117.5 282.5t282.5 117.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5 t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5z" />
<glyph unicode="&#xe158;" d="M700 0h-300q-163 0 -281.5 117.5t-118.5 282.5v300q0 163 119 281.5t281 118.5h300q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5 t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5zM400 800v-500l333 250z" />
<glyph unicode="&#xe159;" d="M0 400v300q0 163 117.5 281.5t282.5 118.5h300q163 0 281.5 -119t118.5 -281v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5zM900 300v500q0 41 -29.5 70.5t-70.5 29.5h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5 t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5zM800 700h-500l250 -333z" />
<glyph unicode="&#xe160;" d="M1100 700v-300q0 -162 -118.5 -281t-281.5 -119h-300q-165 0 -282.5 118.5t-117.5 281.5v300q0 165 117.5 282.5t282.5 117.5h300q165 0 282.5 -117.5t117.5 -282.5zM900 300v500q0 41 -29.5 70.5t-70.5 29.5h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5 t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5zM550 733l-250 -333h500z" />
<glyph unicode="&#xe161;" d="M500 1100h400q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-400v200h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-500v200zM700 550l-400 -350v200h-300v300h300v200z" />
<glyph unicode="&#xe162;" d="M403 2l9 -1q13 0 26 16l538 630q15 19 6 36q-8 18 -32 16h-300q1 4 78 219.5t79 227.5q2 17 -6 27l-8 8h-9q-16 0 -25 -15q-4 -5 -98.5 -111.5t-228 -257t-209.5 -238.5q-17 -19 -7 -40q10 -19 32 -19h302q-155 -438 -160 -458q-5 -21 4 -32z" />
<glyph unicode="&#xe163;" d="M800 200h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h500v185q-14 4 -114 7.5t-193 5.5l-93 2q-165 0 -282.5 -117.5t-117.5 -282.5v-300q0 -165 117.5 -282.5t282.5 -117.5h300q47 0 100 15v185zM900 200v200h-300v300h300v200l400 -350z" />
<glyph unicode="&#xe164;" d="M1200 700l-149 149l-342 -353l-213 213l353 342l-149 149h500v-500zM1022 571l-122 -123v-148q0 -41 -29.5 -70.5t-70.5 -29.5h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h156l118 122l-74 78h-100q-165 0 -282.5 -117.5t-117.5 -282.5v-300 q0 -165 117.5 -282.5t282.5 -117.5h300q163 0 281.5 117.5t118.5 282.5v98z" />
<glyph unicode="&#xe165;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM600 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM600 794 q80 0 137 -57t57 -137t-57 -137t-137 -57t-137 57t-57 137t57 137t137 57z" />
<glyph unicode="&#xe166;" d="M700 800v400h-300v-400h-300l445 -500l450 500h-295zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
<glyph unicode="&#xe167;" d="M400 700v-300h300v300h295l-445 500l-450 -500h300zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
<glyph unicode="&#xe168;" d="M405 400l596 596l-154 155l-442 -442l-150 151l-155 -155zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
<glyph unicode="&#xe169;" d="M409 1103l-97 97l-212 -212l97 -98zM650 861l-149 149l-212 -212l149 -149l-238 -248h700v699zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
<glyph unicode="&#xe170;" d="M539 950l-149 -149l212 -212l149 148l248 -237v700h-699zM297 709l-97 -97l212 -212l98 97zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
<glyph unicode="&#xe171;" d="M1200 1199v-1079l-475 272l-310 -393v416h-392zM1166 1148l-672 -712v-226z" />
<glyph unicode="&#xe172;" d="M1100 1000v-850q0 -21 -15 -35.5t-35 -14.5h-150v400h-700v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1200h-100v-200h100v200z" />
<glyph unicode="&#xe173;" d="M578 500h-378v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-218l-276 -275l-120 120zM700 1200h-100v-200h100v200zM1300 538l-475 -476l-244 244l123 123l120 -120l353 352z" />
<glyph unicode="&#xe174;" d="M529 500h-329v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-269l-103 -103l-170 170zM700 1200h-100v-200h100v200zM1167 6l-170 170l-170 -170l-127 127l170 170l-170 170l127 127l170 -170l170 170l127 -128 l-170 -169l170 -170z" />
<glyph unicode="&#xe175;" d="M700 500h-500v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-300h-400v-200zM700 1000h-100v200h100v-200zM1000 600h-200v-300h-200l300 -300l300 300h-200v300z" />
<glyph unicode="&#xe176;" d="M602 500h-402v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-402l-200 200zM700 1000h-100v200h100v-200zM1000 300h200l-300 300l-300 -300h200v-300h200v300z" />
<glyph unicode="&#xe177;" d="M1200 900v150q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-150h1200zM0 800v-550q0 -21 14.5 -35.5t35.5 -14.5h1100q21 0 35.5 14.5t14.5 35.5v550h-1200zM100 500h400v-200h-400v200z" />
<glyph unicode="&#xe178;" d="M500 1000h400v198l300 -298l-300 -298v198h-400v200zM100 800v200h100v-200h-100zM400 800h-100v200h100v-200zM700 300h-400v-198l-300 298l300 298v-198h400v-200zM800 500h100v-200h-100v200zM1000 500v-200h100v200h-100z" />
<glyph unicode="&#xe179;" d="M1200 50v1106q0 31 -18 40.5t-44 -7.5l-276 -117q-25 -16 -43.5 -50.5t-18.5 -65.5v-359q0 -29 10.5 -55.5t25 -43t29 -28.5t25.5 -18l10 -5v-397q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5zM550 1200l50 -100v-400l-100 -203v-447q0 -21 -14.5 -35.5 t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v447l-100 203v400l50 100l50 -100v-300h100v300l50 100l50 -100v-300h100v300z" />
<glyph unicode="&#xe180;" d="M1100 106v888q0 22 25 34.5t50 13.5l25 2v56h-400v-56q75 0 87.5 -6t12.5 -44v-394h-500v394q0 38 12.5 44t87.5 6v56h-400v-56q4 0 11 -0.5t24 -3t30 -7t24 -15t11 -24.5v-888q0 -22 -25 -34.5t-50 -13.5l-25 -2v-56h400v56q-75 0 -87.5 6t-12.5 44v394h500v-394 q0 -38 -12.5 -44t-87.5 -6v-56h400v56q-4 0 -11 0.5t-24 3t-30 7t-24 15t-11 24.5z" />
<glyph unicode="&#xe181;" d="M675 1000l-100 100h-375l-100 -100h400l200 -200v-98l295 98h105v200h-425zM500 300v500q0 41 -29.5 70.5t-70.5 29.5h-300q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h300q41 0 70.5 29.5t29.5 70.5zM100 800h300v-200h-300v200zM700 565l400 133 v-163l-400 -133v163zM100 500h300v-200h-300v200zM805 300l295 98v-298h-425l-100 -100h-375l-100 100h400l200 200h105z" />
<glyph unicode="&#xe182;" d="M179 1169l-162 -162q-1 -11 -0.5 -32.5t16 -90t46.5 -140t104 -177.5t175 -208q103 -103 207.5 -176t180 -103.5t137 -47t92.5 -16.5l31 1l163 162q16 17 13 40.5t-22 37.5l-192 136q-19 14 -45 12t-42 -19l-119 -118q-143 103 -267 227q-126 126 -227 268l118 118 q17 17 20 41.5t-11 44.5l-139 194q-14 19 -36.5 22t-40.5 -14z" />
<glyph unicode="&#xe183;" d="M1200 712v200q-6 8 -19 20.5t-63 45t-112 57t-171 45t-235 20.5q-92 0 -175 -10.5t-141.5 -27t-108.5 -36.5t-81.5 -40t-53.5 -36.5t-31 -27.5l-9 -10v-200q0 -21 14.5 -33.5t34.5 -8.5l202 33q20 4 34.5 21t14.5 38v146q141 24 300 24t300 -24v-146q0 -21 14.5 -38 t34.5 -21l202 -33q20 -4 34.5 8.5t14.5 33.5zM800 650l365 -303q14 -14 24.5 -39.5t10.5 -45.5v-212q0 -21 -15 -35.5t-35 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v212q0 20 10.5 45.5t24.5 39.5l365 303v50q0 4 1 10.5t12 22.5t30 28.5t60 23t97 10.5t97 -10t60 -23.5 t30 -27.5t12 -24l1 -10v-50z" />
<glyph unicode="&#xe184;" d="M175 200h950l-125 150v250l100 100v400h-100v-200h-100v200h-200v-200h-100v200h-200v-200h-100v200h-100v-400l100 -100v-250zM1200 100v-100h-1100v100h1100z" />
<glyph unicode="&#xe185;" d="M600 1100h100q41 0 70.5 -29.5t29.5 -70.5v-1000h-300v1000q0 41 29.5 70.5t70.5 29.5zM1000 800h100q41 0 70.5 -29.5t29.5 -70.5v-700h-300v700q0 41 29.5 70.5t70.5 29.5zM400 0v400q0 41 -29.5 70.5t-70.5 29.5h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-400h300z" />
<glyph unicode="&#xe186;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM200 800v-300h200v-100h-200v-100h300v300h-200v100h200v100h-300zM800 800h-200v-500h200v100h100v300h-100 v100zM800 700v-300h-100v300h100z" />
<glyph unicode="&#xe187;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM400 600h-100v200h-100v-500h100v200h100v-200h100v500h-100v-200zM800 800h-200v-500h200v100h100v300h-100 v100zM800 700v-300h-100v300h100z" />
<glyph unicode="&#xe188;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM200 800v-500h300v100h-200v300h200v100h-300zM600 800v-500h300v100h-200v300h200v100h-300z" />
<glyph unicode="&#xe189;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM500 700l-300 -150l300 -150v300zM600 400l300 150l-300 150v-300z" />
<glyph unicode="&#xe190;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM900 800v-500h-700v500h700zM300 400h130q41 0 68 42t27 107t-28.5 108t-66.5 43h-130v-300zM800 700h-130 q-38 0 -66.5 -43t-28.5 -108t27 -107t68 -42h130v300z" />
<glyph unicode="&#xe191;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM200 800v-300h200v-100h-200v-100h300v300h-200v100h200v100h-300zM800 300h100v500h-200v-100h100v-400z M601 300h100v100h-100v-100z" />
<glyph unicode="&#xe192;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM300 700v100h-100v-500h300v400h-200zM800 300h100v500h-200v-100h100v-400zM401 400h-100v200h100v-200z M601 300h100v100h-100v-100z" />
<glyph unicode="&#xe193;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM1000 900h-900v-700h900v700zM400 700h-200v100h300v-300h-99v-100h-100v100h99v200zM800 700h-100v100h200v-500h-100v400zM201 400h100v-100 h-100v100zM701 300h-100v100h100v-100z" />
<glyph unicode="&#xe194;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM600 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM800 700h-300 v-200h300v-100h-300l-100 100v200l100 100h300v-100z" />
<glyph unicode="&#xe195;" d="M596 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM596 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM800 700v-100 h-100v100h-200v-100h200v-100h-200v-100h-100v400h300zM800 400h-100v100h100v-100z" />
<glyph unicode="&#xe197;" d="M800 300h128q120 0 205 86t85 208q0 120 -85 206.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5q0 -80 56.5 -137t135.5 -57h222v300h400v-300zM700 200h200l-300 -300 l-300 300h200v300h200v-300z" />
<glyph unicode="&#xe198;" d="M600 714l403 -403q94 26 154.5 104t60.5 178q0 121 -85 207.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5q0 -80 56.5 -137t135.5 -57h8zM700 -100h-200v300h-200l300 300 l300 -300h-200v-300z" />
<glyph unicode="&#xe199;" d="M700 200h400l-270 300h170l-270 300h170l-300 333l-300 -333h170l-270 -300h170l-270 -300h400v-155l-75 -45h350l-75 45v155z" />
<glyph unicode="&#xe200;" d="M700 45v306q46 -30 100 -30q74 0 126.5 52.5t52.5 126.5q0 24 -9 55q50 32 79.5 83t29.5 112q0 90 -61.5 155.5t-150.5 71.5q-26 89 -99.5 145.5t-167.5 56.5q-116 0 -197.5 -81.5t-81.5 -197.5q0 -4 1 -12t1 -11q-14 2 -23 2q-74 0 -126.5 -52.5t-52.5 -126.5 q0 -53 28.5 -97t75.5 -65q-4 -16 -4 -38q0 -74 52.5 -126.5t126.5 -52.5q56 0 100 30v-306l-75 -45h350z" />
<glyph unicode="&#x1f4bc;" d="M800 1000h300q41 0 70.5 -29.5t29.5 -70.5v-400h-500v100h-200v-100h-500v400q0 41 29.5 70.5t70.5 29.5h300v100q0 41 29.5 70.5t70.5 29.5h200q41 0 70.5 -29.5t29.5 -70.5v-100zM500 1000h200v100h-200v-100zM1200 400v-200q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v200h1200z" />
<glyph unicode="&#x1f4c5;" d="M1100 900v150q0 21 -14.5 35.5t-35.5 14.5h-150v100h-100v-100h-500v100h-100v-100h-150q-21 0 -35.5 -14.5t-14.5 -35.5v-150h1100zM0 800v-750q0 -20 14.5 -35t35.5 -15h1000q21 0 35.5 15t14.5 35v750h-1100zM100 600h100v-100h-100v100zM300 600h100v-100h-100v100z M500 600h100v-100h-100v100zM700 600h100v-100h-100v100zM900 600h100v-100h-100v100zM100 400h100v-100h-100v100zM300 400h100v-100h-100v100zM500 400h100v-100h-100v100zM700 400h100v-100h-100v100zM900 400h100v-100h-100v100zM100 200h100v-100h-100v100zM300 200 h100v-100h-100v100zM500 200h100v-100h-100v100zM700 200h100v-100h-100v100zM900 200h100v-100h-100v100z" />
<glyph unicode="&#x1f4cc;" d="M902 1185l283 -282q15 -15 15 -36t-15 -35q-14 -15 -35 -15t-35 15l-36 35l-279 -267v-300l-212 210l-208 -207l-380 -303l303 380l207 208l-210 212h300l267 279l-35 36q-15 14 -15 35t15 35q14 15 35 15t35 -15z" />
<glyph unicode="&#x1f4ce;" d="M518 119l69 -60l517 511q67 67 95 157t11 183q-16 87 -67 154t-130 103q-69 33 -152 33q-107 0 -197 -55q-40 -24 -111 -95l-512 -512q-68 -68 -81 -163t35 -173q35 -57 94 -89t129 -32q63 0 119 28q33 16 65 40.5t52.5 45.5t59.5 64q40 44 57 61l394 394q35 35 47 84 t-3 96q-27 87 -117 104q-20 2 -29 2q-46 0 -79.5 -17t-67.5 -51l-388 -396l-7 -7l69 -67l377 373q20 22 39 38q23 23 50 23q38 0 53 -36q16 -39 -20 -75l-547 -547q-52 -52 -125 -52q-55 0 -100 33t-54 96q-5 35 2.5 66t31.5 63t42 50t56 54q24 21 44 41l348 348 q52 52 82.5 79.5t84 54t107.5 26.5q25 0 48 -4q95 -17 154 -94.5t51 -175.5q-7 -101 -98 -192l-252 -249l-253 -256z" />
<glyph unicode="&#x1f4f7;" d="M1200 200v600q0 41 -29.5 70.5t-70.5 29.5h-150q-4 8 -11.5 21.5t-33 48t-53 61t-69 48t-83.5 21.5h-200q-41 0 -82 -20.5t-70 -50t-52 -59t-34 -50.5l-12 -20h-150q-41 0 -70.5 -29.5t-29.5 -70.5v-600q0 -41 29.5 -70.5t70.5 -29.5h1000q41 0 70.5 29.5t29.5 70.5z M1000 700h-100v100h100v-100zM844 500q0 -100 -72 -172t-172 -72t-172 72t-72 172t72 172t172 72t172 -72t72 -172zM706 500q0 44 -31 75t-75 31t-75 -31t-31 -75t31 -75t75 -31t75 31t31 75z" />
<glyph unicode="&#x1f512;" d="M900 800h100q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-900q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5h100v200q0 82 59 141t141 59h300q82 0 141 -59t59 -141v-200zM400 800h300v150q0 21 -14.5 35.5t-35.5 14.5h-200 q-21 0 -35.5 -14.5t-14.5 -35.5v-150z" />
<glyph unicode="&#x1f514;" d="M1062 400h17q20 0 33.5 -14.5t13.5 -35.5q0 -20 -13 -40t-31 -27q-22 -9 -63 -23t-167.5 -37t-251.5 -23t-245.5 20.5t-178.5 41.5l-58 20q-18 7 -31 27.5t-13 40.5q0 21 13.5 35.5t33.5 14.5h17l118 173l63 327q15 77 76 140t144 83l-18 32q-6 19 3 32t29 13h94 q20 0 29 -10.5t3 -29.5l-18 -37q83 -19 144 -82.5t76 -140.5l63 -327zM600 104q-54 0 -103 6q12 -49 40 -79.5t63 -30.5t63 30.5t39 79.5q-48 -6 -102 -6z" />
<glyph unicode="&#x1f516;" d="M200 0l450 444l450 -443v1150q0 20 -14.5 35t-35.5 15h-800q-21 0 -35.5 -15t-14.5 -35v-1151z" />
<glyph unicode="&#x1f525;" d="M400 755q2 -12 8 -41.5t8 -43t6 -39.5t3.5 -39.5t-1 -33.5t-6 -31.5t-13.5 -24t-21 -20.5t-31 -12q-38 -10 -67 13t-40.5 61.5t-15 81.5t10.5 75q-52 -46 -83.5 -101t-39 -107t-7.5 -85t5 -63q9 -56 44 -119.5t105 -108.5q31 -21 64 -16t62 23.5t57 49.5t48 61.5t35 60.5 q32 66 39 184.5t-13 157.5q79 -80 122 -164t26 -184q-5 -33 -20.5 -69.5t-37.5 -80.5q-10 -19 -14.5 -29t-12 -26t-9 -23.5t-3 -19t2.5 -15.5t11 -9.5t19.5 -5t30.5 2.5t42 8q57 20 91 34t87.5 44.5t87 64t65.5 88.5t47 122q38 172 -44.5 341.5t-246.5 278.5q22 -44 43 -129 q39 -159 -32 -154q-15 2 -33 9q-79 33 -120.5 100t-44 175.5t48.5 257.5q-13 -8 -34 -23.5t-72.5 -66.5t-88.5 -105.5t-60 -138t-8 -166.5z" />
<glyph unicode="&#x1f527;" d="M948 778l251 126q13 -175 -151 -267q-123 -70 -253 -23l-596 -596q-15 -16 -36.5 -16t-36.5 16l-111 110q-15 15 -15 36.5t15 37.5l600 599q-33 101 6 201.5t135 154.5q164 92 306 -9l-259 -138z" />
</font>
</defs></svg>

After

Width:  |  Height:  |  Size: 62 KiB

Binary file not shown.

Binary file not shown.

BIN
Godeps/_workspace/src/github.com/gocql/gocql/website/gocql.png (Stored with Git LFS) generated vendored Normal file

Binary file not shown.

View file

@ -0,0 +1,133 @@
<!DOCTYPE html>
<html>
<head>
<title>GoCQL</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="GoCQL - Modern Cassandra client for the Go">
<meta name="keywords" content="GoCQL, CQL, Cassandra, Go">
<link rel="shortcut icon" type="image/x-icon" href="favicon.ico">
<link rel="stylesheet" href="css/bootstrap.min.css">
<meta property="og:title" content="GoCQL">
<meta property="og:type" content="website">
<meta property="og:url" content="http://tux21b.org/gocql/">
<meta property="og:image" content="http://tux21b.org/gocql/gocql.png">
<meta property="og:description" content="GoCQL - Modern Cassandra client for the Go">
<style>
body { border-top: 4px solid #666; }
h1 { font-weight: bold; }
h2 { border-bottom: 1px solid #ccc; padding-bottom: .2em; }
div.footer { border-top: 1px solid #ccc; padding: 1em 0 .5em; margin-top: 3em; }
img.logo { margin-right: 1em; margin-top: 10px;}
</style>
</head>
<body>
<div class="container">
<div class="row" style="margin-top: 10px; margin-bottom: 20px;">
<div class="col-lg-6">
<img src="gocql.png" alt="GoCQL logo" class="pull-left logo" />
<h1><a href="index.html">GoCQL</a> <small>tux21b.org/v1/gocql</small></h1>
</div>
<div class="col-lg-6">
<ul class="nav nav-pills pull-right" style="margin-top: 20px">
<li><a href="#">Tutorial</a></li>
<li><a href="http://godoc.org/tux21b.org/v1/gocql">Reference</a></li>
<li><a href="https://groups.google.com/forum/#!forum/gocql">Mailing List</a></li>
<li><a href="https://github.com/tux21b/gocql">Source</a></li>
</ul>
</div>
</div>
<div class="alert alert-warning" style="margin: 1em 0 2em;">
<strong>Under Development:</strong> The GoCQL package is currently actively
developed and the API may change in the future.
</div>
<h2>Highlights<span class="glyphicon glyphicon-star pull-right"></span></h2>
<div class="row">
<div class="col-lg-3">
<h3>Cluster Management</h3>
<p>GoCQL automatically discovers all data centers, racks and hosts
in your cluster, manages a pool of connections to them and distributes
queries in a reasonable and efficient way.</p>
</div>
<div class="col-lg-3">
<h3>Type Conversion</h3>
<p>Automatic and safe type conversion between Cassandra and Go without
any loss of precision. Basic types, collections and UUIDs are supported
by default and custom types can implement their own marshaling logic.</p>
</div>
<div class="col-lg-3">
<h3>Synchronous and Concurrent</h3>
<p>Synchronous API with an asynchronous and concurrent back-end. Each
connection can handle up to 128 concurrent queries and may receive
server side push events at any time.</p>
</div>
<div class="col-lg-3">
<h3>Failover Management</h3>
<p>TODO :(</p>
</div>
</div>
<div class="row">
<div class="col-lg-3">
<h3>Result Paging</h3>
<p>Iterate over large results sets and let GoCQL fetch one page after
another. The next page is automatically pre-fetched in the background
once the iterator has passed a certain threshold.</p>
</div>
<div class="col-lg-3">
<h3>Atomic Batches</h3>
<p>Execute a batch of related updates in a single query. GoCQL supports
logged, unlogged and counter batches.</p>
</div>
<div class="col-lg-3">
<h3>Query Tracing</h3>
<p>Trace queries to obtain a detailed output of all events that
happened during the query execution from Cassandra. The output might
help to identify bugs and performance bottlenecks in your
application.</p>
</div>
<div class="col-lg-3">
<h3>Frame Compression</h3>
<p>Speed up and reduce the network traffic by compressing the frames
that are sent to Cassandra.
<a href="https://code.google.com/p/snappy/">Snappy</a>, a
compression algorithm that aims for very high speeds and reasonable
compression, is enabled by default.</p>
</div>
</div>
<div class="row">
<div class="col-lg-3">
<h3>Multiple Cassandra Versions</h3>
<p>GoCQL supports multiple Cassandra versions. Currently Cassandra 1.2
and Cassandra 2.0 are fully supported.</p>
</div>
<div class="col-lg-3">
<h3>Thoroughly Tested</h3>
<p>TODO :(</p>
</div>
<div class="col-lg-3">
<h3>BSD License</h3>
<p>Completely open source. Browse the source on
<a href="https://github.com/tux21b/gocql">GitHub</a> and start
contributing today.</p>
</div>
</div>
<div class="footer">
<p>&copy; 2013 The GoCQL Authors. All rights reserved.</p>
</div>
</body>
</html>

File diff suppressed because it is too large Load diff

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,274 @@
// +build all integration
package gocql
import (
"fmt"
"reflect"
"sort"
"speter.net/go/exp/math/dec/inf"
"testing"
"time"
)
// WikiPage mirrors one row of the wiki_page table. Its fields exercise most
// CQL column types: varchar, timeuuid, bigint, boolean, timestamp, decimal,
// set<varchar> and map<varchar, blob>.
type WikiPage struct {
	Title       string
	RevId       UUID
	Body        string
	Views       int64
	Protected   bool
	Modified    time.Time
	Rating      *inf.Dec
	Tags        []string
	Attachments map[string]WikiAttachment
}

// WikiAttachment is raw attachment data, stored in Cassandra as a blob.
type WikiAttachment []byte

// wikiTestData holds the fixture rows used by the CRUD tests. The second row
// deliberately leaves most optional fields at their zero values.
var wikiTestData = []*WikiPage{
	&WikiPage{
		Title:    "Frontpage",
		RevId:    TimeUUID(),
		Body:     "Welcome to this wiki page!",
		Rating:   inf.NewDec(131, 3),
		Modified: time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC),
		Tags:     []string{"start", "important", "test"},
		Attachments: map[string]WikiAttachment{
			"logo":    WikiAttachment("\x00company logo\x00"),
			"favicon": WikiAttachment("favicon.ico"),
		},
	},
	&WikiPage{
		Title:    "Foobar",
		RevId:    TimeUUID(),
		Body:     "foo::Foo f = new foo::Foo(foo::Foo::INIT);",
		Modified: time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC),
	},
}

// WikiTest bundles a live session with the testing handle so that helper
// methods can report failures directly.
type WikiTest struct {
	session *Session
	tb      testing.TB
}
// CreateSchema drops any existing wiki_page table and recreates it with the
// full column set used by the wiki tests. Any failure is fatal to the test.
func (w *WikiTest) CreateSchema() {
	// A DROP error is only fatal when the table existed but could not be
	// dropped; "unconfigured columnfamily" just means there was nothing to drop.
	if err := w.session.Query(`DROP TABLE wiki_page`).Exec(); err != nil && err.Error() != "unconfigured columnfamily wiki_page" {
		w.tb.Fatal("CreateSchema:", err)
	}
	err := createTable(w.session, `CREATE TABLE wiki_page (
		title varchar,
		revid timeuuid,
		body varchar,
		views bigint,
		protected boolean,
		modified timestamp,
		rating decimal,
		tags set<varchar>,
		attachments map<varchar, blob>,
		PRIMARY KEY (title, revid)
	)`)
	if *clusterSize > 1 {
		// wait for table definition to propagate
		time.Sleep(250 * time.Millisecond)
	}
	if err != nil {
		w.tb.Fatal("CreateSchema:", err)
	}
}
// CreatePages inserts n generated revisions into wiki_page, stepping the
// modification time back by one minute per row so revids stay distinct.
func (w *WikiTest) CreatePages(n int) {
	var page WikiPage
	t0 := time.Now()
	for i := 0; i < n; i++ {
		// NOTE(review): i&16 yields only 0 or 16, so every title is either
		// "generated_1" or "generated_17"; possibly i%16 was intended — confirm
		// against upstream before changing, since row counts depend on it.
		page.Title = fmt.Sprintf("generated_%d", (i&16)+1)
		page.Modified = t0.Add(time.Duration(i-n) * time.Minute)
		page.RevId = UUIDFromTime(page.Modified)
		page.Body = fmt.Sprintf("text %d", i)
		if err := w.InsertPage(&page); err != nil {
			w.tb.Error("CreatePages:", err)
		}
	}
}
// InsertPage writes every column of page into wiki_page.
func (w *WikiTest) InsertPage(page *WikiPage) error {
	return w.session.Query(`INSERT INTO wiki_page
		(title, revid, body, views, protected, modified, rating, tags, attachments)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		page.Title, page.RevId, page.Body, page.Views, page.Protected,
		page.Modified, page.Rating, page.Tags, page.Attachments).Exec()
}

// SelectPage loads the single revision identified by (title, revid) into page.
// The Scan destinations follow the SELECT column order, which lists rating last.
func (w *WikiTest) SelectPage(page *WikiPage, title string, revid UUID) error {
	return w.session.Query(`SELECT title, revid, body, views, protected,
		modified,tags, attachments, rating
		FROM wiki_page WHERE title = ? AND revid = ? LIMIT 1`,
		title, revid).Scan(&page.Title, &page.RevId,
		&page.Body, &page.Views, &page.Protected, &page.Modified, &page.Tags,
		&page.Attachments, &page.Rating)
}

// GetPageCount returns the total number of rows in wiki_page; on query error
// it reports via the test handle and returns 0.
func (w *WikiTest) GetPageCount() int {
	var count int
	if err := w.session.Query(`SELECT COUNT(*) FROM wiki_page`).Scan(&count); err != nil {
		w.tb.Error("GetPageCount", err)
	}
	return count
}
// TestWikiCreateSchema verifies the schema can be (re)created from scratch.
func TestWikiCreateSchema(t *testing.T) {
	session := createSession(t)
	defer session.Close()

	w := WikiTest{session, t}
	w.CreateSchema()
}

// BenchmarkWikiCreateSchema measures repeated drop-and-create cycles.
func BenchmarkWikiCreateSchema(b *testing.B) {
	b.StopTimer()
	session := createSession(b)
	// The timer is stopped again before Close so teardown is not measured.
	defer func() {
		b.StopTimer()
		session.Close()
	}()
	w := WikiTest{session, b}

	b.StartTimer()
	for i := 0; i < b.N; i++ {
		w.CreateSchema()
	}
}

// TestWikiCreatePages inserts a handful of generated pages and checks the
// resulting row count.
func TestWikiCreatePages(t *testing.T) {
	session := createSession(t)
	defer session.Close()

	w := WikiTest{session, t}
	w.CreateSchema()
	numPages := 5
	w.CreatePages(numPages)
	if count := w.GetPageCount(); count != numPages {
		t.Errorf("expected %d pages, got %d pages.", numPages, count)
	}
}

// BenchmarkWikiCreatePages measures insert throughput (b.N rows per run).
func BenchmarkWikiCreatePages(b *testing.B) {
	b.StopTimer()
	session := createSession(b)
	defer func() {
		b.StopTimer()
		session.Close()
	}()
	w := WikiTest{session, b}
	w.CreateSchema()

	b.StartTimer()
	w.CreatePages(b.N)
}
// BenchmarkWikiSelectAllPages streams all 100 fixture rows through a single
// iterator per benchmark step.
func BenchmarkWikiSelectAllPages(b *testing.B) {
	b.StopTimer()
	session := createSession(b)
	defer func() {
		b.StopTimer()
		session.Close()
	}()
	w := WikiTest{session, b}
	w.CreateSchema()
	w.CreatePages(100)

	b.StartTimer()
	var page WikiPage
	for i := 0; i < b.N; i++ {
		iter := session.Query(`SELECT title, revid, body, views, protected,
			modified, tags, attachments, rating
			FROM wiki_page`).Iter()
		for iter.Scan(&page.Title, &page.RevId, &page.Body, &page.Views,
			&page.Protected, &page.Modified, &page.Tags, &page.Attachments,
			&page.Rating) {
			// pass
		}
		if err := iter.Close(); err != nil {
			b.Error(err)
		}
	}
}

// BenchmarkWikiSelectSinglePage measures point reads; the key list is
// collected up front (outside the timed region) and cycled through.
func BenchmarkWikiSelectSinglePage(b *testing.B) {
	b.StopTimer()
	session := createSession(b)
	defer func() {
		b.StopTimer()
		session.Close()
	}()
	w := WikiTest{session, b}
	w.CreateSchema()
	pages := make([]WikiPage, 100)
	w.CreatePages(len(pages))
	iter := session.Query(`SELECT title, revid FROM wiki_page`).Iter()
	for i := 0; i < len(pages); i++ {
		// Fewer rows may come back than were requested; truncate in that case.
		if !iter.Scan(&pages[i].Title, &pages[i].RevId) {
			pages = pages[:i]
			break
		}
	}
	if err := iter.Close(); err != nil {
		b.Error(err)
	}

	b.StartTimer()
	var page WikiPage
	for i := 0; i < b.N; i++ {
		p := &pages[i%len(pages)]
		if err := w.SelectPage(&page, p.Title, p.RevId); err != nil {
			b.Error(err)
		}
	}
}

// BenchmarkWikiSelectPageCount measures COUNT(*) queries against a fixed
// 10-row table, also validating the returned count each iteration.
func BenchmarkWikiSelectPageCount(b *testing.B) {
	b.StopTimer()
	session := createSession(b)
	defer func() {
		b.StopTimer()
		session.Close()
	}()
	w := WikiTest{session, b}
	w.CreateSchema()
	numPages := 10
	w.CreatePages(numPages)

	b.StartTimer()
	for i := 0; i < b.N; i++ {
		if count := w.GetPageCount(); count != numPages {
			b.Errorf("expected %d pages, got %d pages.", numPages, count)
		}
	}
}
// TestWikiTypicalCRUD inserts the fixture rows, verifies the row count, then
// reads each row back and compares it field-by-field with the original.
func TestWikiTypicalCRUD(t *testing.T) {
	session := createSession(t)
	defer session.Close()

	w := WikiTest{session, t}
	w.CreateSchema()
	for _, page := range wikiTestData {
		if err := w.InsertPage(page); err != nil {
			t.Error("InsertPage:", err)
		}
	}
	if count := w.GetPageCount(); count != len(wikiTestData) {
		t.Errorf("count: expected %d, got %d\n", len(wikiTestData), count)
	}
	for _, original := range wikiTestData {
		page := new(WikiPage)
		if err := w.SelectPage(page, original.Title, original.RevId); err != nil {
			t.Error("SelectPage:", err)
			continue
		}
		// CQL set ordering is not defined on the Go side, so sort both tag
		// slices before the deep comparison.
		sort.Sort(sort.StringSlice(page.Tags))
		sort.Sort(sort.StringSlice(original.Tags))
		if !reflect.DeepEqual(page, original) {
			t.Errorf("page: expected %#v, got %#v\n", original, page)
		}
	}
}

View file

@ -0,0 +1,121 @@
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package lru implements an LRU cache.
package lru
import "container/list"
// Cache is an LRU cache. It is not safe for concurrent access.
type Cache struct {
	// MaxEntries is the maximum number of cache entries before
	// an item is evicted. Zero means no limit.
	MaxEntries int

	// OnEvicted, when non-nil, is invoked for every entry that is
	// purged from the cache (evicted or removed).
	OnEvicted func(key Key, value interface{})

	ll    *list.List
	cache map[interface{}]*list.Element
}

// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
type Key interface{}

// entry is the payload stored in each list element: the key is kept so
// removeElement can delete the map entry and invoke OnEvicted.
type entry struct {
	key   Key
	value interface{}
}

// New creates a new Cache.
// If maxEntries is zero, the cache has no limit and it's assumed
// that eviction is done by the caller.
func New(maxEntries int) *Cache {
	c := &Cache{MaxEntries: maxEntries}
	c.ll = list.New()
	c.cache = make(map[interface{}]*list.Element)
	return c
}

// Add adds a value to the cache, evicting the oldest entry if the cache
// is over capacity. Adding an existing key updates its value and marks
// it most recently used.
func (c *Cache) Add(key Key, value interface{}) {
	// Lazily initialize so the zero-value Cache is usable.
	if c.cache == nil {
		c.ll = list.New()
		c.cache = make(map[interface{}]*list.Element)
	}
	if el, found := c.cache[key]; found {
		c.ll.MoveToFront(el)
		el.Value.(*entry).value = value
		return
	}
	c.cache[key] = c.ll.PushFront(&entry{key: key, value: value})
	if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
		c.RemoveOldest()
	}
}

// Get looks up a key's value from the cache, marking it most recently
// used on a hit.
func (c *Cache) Get(key Key) (interface{}, bool) {
	if c.cache == nil {
		return nil, false
	}
	el, found := c.cache[key]
	if !found {
		return nil, false
	}
	c.ll.MoveToFront(el)
	return el.Value.(*entry).value, true
}

// Remove removes the provided key from the cache, if present.
func (c *Cache) Remove(key Key) {
	if c.cache == nil {
		return
	}
	if el, found := c.cache[key]; found {
		c.removeElement(el)
	}
}

// RemoveOldest removes the least recently used item from the cache.
func (c *Cache) RemoveOldest() {
	if c.cache == nil {
		return
	}
	if oldest := c.ll.Back(); oldest != nil {
		c.removeElement(oldest)
	}
}

// removeElement unlinks e from the list and map, then notifies OnEvicted.
func (c *Cache) removeElement(e *list.Element) {
	c.ll.Remove(e)
	ent := e.Value.(*entry)
	delete(c.cache, ent.key)
	if c.OnEvicted != nil {
		c.OnEvicted(ent.key, ent.value)
	}
}

// Len returns the number of items in the cache.
func (c *Cache) Len() int {
	if c.cache == nil {
		return 0
	}
	return c.ll.Len()
}

View file

@ -0,0 +1,73 @@
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lru
import (
"testing"
)
// simpleStruct and complexStruct are comparable composite types used to
// exercise struct-valued cache keys.
type simpleStruct struct {
	int
	string
}

type complexStruct struct {
	int
	simpleStruct
}

// getTests drives TestGet: each case adds keyToAdd and then looks up
// keyToGet, expecting a hit exactly when the two keys compare equal.
var getTests = []struct {
	name       string
	keyToAdd   interface{}
	keyToGet   interface{}
	expectedOk bool
}{
	{"string_hit", "myKey", "myKey", true},
	{"string_miss", "myKey", "nonsense", false},
	{"simple_struct_hit", simpleStruct{1, "two"}, simpleStruct{1, "two"}, true},
	{"simeple_struct_miss", simpleStruct{1, "two"}, simpleStruct{0, "noway"}, false},
	{"complex_struct_hit", complexStruct{1, simpleStruct{2, "three"}},
		complexStruct{1, simpleStruct{2, "three"}}, true},
}
// TestGet runs the table-driven hit/miss cases against an unbounded cache.
func TestGet(t *testing.T) {
	for _, tt := range getTests {
		lru := New(0)
		lru.Add(tt.keyToAdd, 1234)
		val, ok := lru.Get(tt.keyToGet)
		if ok != tt.expectedOk {
			t.Fatalf("%s: cache hit = %v; want %v", tt.name, ok, !ok)
		} else if ok && val != 1234 {
			t.Fatalf("%s expected get to return 1234 but got %v", tt.name, val)
		}
	}
}

// TestRemove checks that a removed key no longer resolves.
func TestRemove(t *testing.T) {
	lru := New(0)
	lru.Add("myKey", 1234)
	if val, ok := lru.Get("myKey"); !ok {
		t.Fatal("TestRemove returned no match")
	} else if val != 1234 {
		t.Fatalf("TestRemove failed. Expected %d, got %v", 1234, val)
	}

	lru.Remove("myKey")
	if _, ok := lru.Get("myKey"); ok {
		t.Fatal("TestRemove returned a removed entry")
	}
}

View file

@ -0,0 +1,292 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
var (
	// ErrCorrupt reports that the input is invalid.
	ErrCorrupt = errors.New("snappy: corrupt input")
	// ErrUnsupported reports that the input isn't supported.
	ErrUnsupported = errors.New("snappy: unsupported input")
)

// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
	blockLen, _, err := decodedLen(src)
	return blockLen, err
}

// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
	length, bytesRead := binary.Uvarint(src)
	if bytesRead == 0 {
		// No varint could be read: the header is missing or truncated.
		return 0, 0, ErrCorrupt
	}
	if uint64(int(length)) != length {
		// The declared length does not fit in an int on this platform.
		return 0, 0, errors.New("snappy: decoded block is too large")
	}
	return int(length), bytesRead, nil
}
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
	dLen, s, err := decodedLen(src)
	if err != nil {
		return nil, err
	}
	if len(dst) < dLen {
		dst = make([]byte, dLen)
	}

	// d is the write offset into dst; s is the read offset into src.
	var d, offset, length int
	for s < len(src) {
		// The low two bits of a chunk's first byte select the tag type.
		switch src[s] & 0x03 {
		case tagLiteral:
			x := uint(src[s] >> 2)
			switch {
			case x < 60:
				// Short literal: length-1 is encoded in the tag byte itself.
				s += 1
			case x == 60:
				// Length-1 follows in 1, 2, 3 or 4 little-endian bytes.
				s += 2
				if s > len(src) {
					return nil, ErrCorrupt
				}
				x = uint(src[s-1])
			case x == 61:
				s += 3
				if s > len(src) {
					return nil, ErrCorrupt
				}
				x = uint(src[s-2]) | uint(src[s-1])<<8
			case x == 62:
				s += 4
				if s > len(src) {
					return nil, ErrCorrupt
				}
				x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
			case x == 63:
				s += 5
				if s > len(src) {
					return nil, ErrCorrupt
				}
				x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
			}
			length = int(x + 1)
			if length <= 0 {
				// int overflow of x+1 on 32-bit platforms.
				return nil, errors.New("snappy: unsupported literal length")
			}
			if length > len(dst)-d || length > len(src)-s {
				return nil, ErrCorrupt
			}
			copy(dst[d:], src[s:s+length])
			d += length
			s += length
			continue
		case tagCopy1:
			// 2-byte copy: 3-bit length (4..11), 11-bit offset.
			s += 2
			if s > len(src) {
				return nil, ErrCorrupt
			}
			length = 4 + int(src[s-2])>>2&0x7
			offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
		case tagCopy2:
			// 3-byte copy: 6-bit length (1..64), 16-bit offset.
			s += 3
			if s > len(src) {
				return nil, ErrCorrupt
			}
			length = 1 + int(src[s-3])>>2
			offset = int(src[s-2]) | int(src[s-1])<<8
		case tagCopy4:
			return nil, errors.New("snappy: unsupported COPY_4 tag")
		}
		end := d + length
		if offset > d || end > len(dst) {
			return nil, ErrCorrupt
		}
		// Byte-at-a-time copy: source and destination ranges may overlap
		// (offset < length encodes a run), so copy() cannot be used here.
		for ; d < end; d++ {
			dst[d] = dst[d-offset]
		}
	}
	if d != dLen {
		return nil, ErrCorrupt
	}
	return dst[:d], nil
}
// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
func NewReader(r io.Reader) *Reader {
	return &Reader{
		r:       r,
		decoded: make([]byte, maxUncompressedChunkLen),
		buf:     make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
	}
}

// Reader is an io.Reader than can read Snappy-compressed bytes.
type Reader struct {
	r   io.Reader
	err error // sticky: once set, every later Read fails with it

	decoded []byte // scratch buffer for decompressed chunk data
	buf     []byte // scratch buffer for raw (compressed) chunk data
	// decoded[i:j] contains decoded bytes that have not yet been passed on.
	i, j int
	// readHeader records whether the stream-identifier chunk has been seen.
	readHeader bool
}

// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func (r *Reader) Reset(reader io.Reader) {
	r.r = reader
	r.err = nil
	r.i = 0
	r.j = 0
	r.readHeader = false
}

// readFull fills p from the underlying reader, mapping an unexpected EOF to
// ErrCorrupt and recording any error in r.err.
func (r *Reader) readFull(p []byte) (ok bool) {
	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
		if r.err == io.ErrUnexpectedEOF {
			r.err = ErrCorrupt
		}
		return false
	}
	return true
}
// Read satisfies the io.Reader interface. It loops reading framed chunks from
// the underlying stream until at least one decoded byte is available, then
// copies from the internal decoded buffer into p.
func (r *Reader) Read(p []byte) (int, error) {
	if r.err != nil {
		return 0, r.err
	}
	for {
		// Drain previously decoded bytes first.
		if r.i < r.j {
			n := copy(p, r.decoded[r.i:r.j])
			r.i += n
			return n, nil
		}
		// Chunk header: 1 byte type, 3 bytes little-endian body length.
		if !r.readFull(r.buf[:4]) {
			return 0, r.err
		}
		chunkType := r.buf[0]
		if !r.readHeader {
			// The stream must begin with a stream-identifier chunk.
			if chunkType != chunkTypeStreamIdentifier {
				r.err = ErrCorrupt
				return 0, r.err
			}
			r.readHeader = true
		}
		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
		if chunkLen > len(r.buf) {
			r.err = ErrUnsupported
			return 0, r.err
		}

		// The chunk types are specified at
		// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
		switch chunkType {
		case chunkTypeCompressedData:
			// Section 4.2. Compressed data (chunk type 0x00).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			buf := r.buf[:chunkLen]
			if !r.readFull(buf) {
				return 0, r.err
			}
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			buf = buf[checksumSize:]

			n, err := DecodedLen(buf)
			if err != nil {
				r.err = err
				return 0, r.err
			}
			if n > len(r.decoded) {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if _, err := Decode(r.decoded, buf); err != nil {
				r.err = err
				return 0, r.err
			}
			// The checksum covers the decoded (uncompressed) bytes.
			if crc(r.decoded[:n]) != checksum {
				r.err = ErrCorrupt
				return 0, r.err
			}
			r.i, r.j = 0, n
			continue

		case chunkTypeUncompressedData:
			// Section 4.3. Uncompressed data (chunk type 0x01).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			buf := r.buf[:checksumSize]
			if !r.readFull(buf) {
				return 0, r.err
			}
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			// Read directly into r.decoded instead of via r.buf.
			n := chunkLen - checksumSize
			if !r.readFull(r.decoded[:n]) {
				return 0, r.err
			}
			if crc(r.decoded[:n]) != checksum {
				r.err = ErrCorrupt
				return 0, r.err
			}
			r.i, r.j = 0, n
			continue

		case chunkTypeStreamIdentifier:
			// Section 4.1. Stream identifier (chunk type 0xff).
			if chunkLen != len(magicBody) {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if !r.readFull(r.buf[:len(magicBody)]) {
				return 0, r.err
			}
			for i := 0; i < len(magicBody); i++ {
				if r.buf[i] != magicBody[i] {
					r.err = ErrCorrupt
					return 0, r.err
				}
			}
			continue
		}

		if chunkType <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			r.err = ErrUnsupported
			return 0, r.err
		} else {
			// Section 4.4 Padding (chunk type 0xfe).
			// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
			if !r.readFull(r.buf[:chunkLen]) {
				return 0, r.err
			}
		}
	}
}

View file

@ -0,0 +1,258 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"io"
)
// We limit how far copy back-references can go, the same as the C++ code.
const maxOffset = 1 << 15

// emitLiteral writes a literal chunk and returns the number of bytes written.
func emitLiteral(dst, lit []byte) int {
	// n is the literal length minus one, as stored in the wire format.
	i, n := 0, uint(len(lit)-1)
	switch {
	case n < 60:
		// Short literal: length fits in the tag byte itself.
		dst[0] = uint8(n)<<2 | tagLiteral
		i = 1
	case n < 1<<8:
		// Values 60..63 in the tag byte mean 1..4 extra length bytes follow.
		dst[0] = 60<<2 | tagLiteral
		dst[1] = uint8(n)
		i = 2
	case n < 1<<16:
		dst[0] = 61<<2 | tagLiteral
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		i = 3
	case n < 1<<24:
		dst[0] = 62<<2 | tagLiteral
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		dst[3] = uint8(n >> 16)
		i = 4
	case int64(n) < 1<<32:
		dst[0] = 63<<2 | tagLiteral
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		dst[3] = uint8(n >> 16)
		dst[4] = uint8(n >> 24)
		i = 5
	default:
		panic("snappy: source buffer is too long")
	}
	if copy(dst[i:], lit) != len(lit) {
		panic("snappy: destination buffer is too short")
	}
	return i + len(lit)
}
// emitCopy writes a copy chunk and returns the number of bytes written.
// Long copies are split across multiple chunks.
func emitCopy(dst []byte, offset, length int) int {
	i := 0
	for length > 0 {
		x := length - 4
		if 0 <= x && x < 1<<3 && offset < 1<<11 {
			// 2-byte COPY_1 chunk: 3-bit length (4..11), 11-bit offset.
			dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
			dst[i+1] = uint8(offset)
			i += 2
			break
		}

		// 3-byte COPY_2 chunk: up to 64 bytes with a 16-bit offset.
		x = length
		if x > 1<<6 {
			x = 1 << 6
		}
		dst[i+0] = uint8(x-1)<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		i += 3
		length -= x
	}
	return i
}
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Encode(dst, src []byte) ([]byte, error) {
	if n := MaxEncodedLen(len(src)); len(dst) < n {
		dst = make([]byte, n)
	}

	// The block starts with the varint-encoded length of the decompressed bytes.
	d := binary.PutUvarint(dst, uint64(len(src)))

	// Return early if src is short.
	if len(src) <= 4 {
		if len(src) != 0 {
			d += emitLiteral(dst[d:], src)
		}
		return dst[:d], nil
	}

	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
	const maxTableSize = 1 << 14
	shift, tableSize := uint(32-8), 1<<8
	for tableSize < maxTableSize && tableSize < len(src) {
		shift--
		tableSize *= 2
	}
	var table [maxTableSize]int

	// Iterate over the source bytes.
	var (
		s   int // The iterator position.
		t   int // The last position with the same hash as s.
		lit int // The start position of any pending literal bytes.
	)
	for s+3 < len(src) {
		// Update the hash table with the 4 bytes at s.
		b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
		h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
		p := &table[(h*0x1e35a7bd)>>shift]
		// We need to store values in [-1, inf) in table. To save
		// some initialization time, (re)use the table's zero value
		// and shift the values against this zero: add 1 on writes,
		// subtract 1 on reads.
		t, *p = *p-1, s+1
		// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
		if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
			s++
			continue
		}
		// Otherwise, we have a match. First, emit any pending literal bytes.
		if lit != s {
			d += emitLiteral(dst[d:], src[lit:s])
		}
		// Extend the match to be as long as possible.
		s0 := s
		s, t = s+4, t+4
		for s < len(src) && src[s] == src[t] {
			s++
			t++
		}
		// Emit the copied bytes.
		d += emitCopy(dst[d:], s-t, s-s0)
		lit = s
	}
	// Emit any final pending literal bytes and return.
	if lit != len(src) {
		d += emitLiteral(dst[d:], src[lit:])
	}
	return dst[:d], nil
}
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
func MaxEncodedLen(srcLen int) int {
	// Worst-case analysis, matching the C++ implementation:
	//
	//	compressed := item* literal*
	//	item       := literal* copy
	//
	// A trailing literal run blows up by at most 62/60, since a literal
	// of length 60 needs one tag byte plus one extra length byte.
	//
	// A copy of 4 bytes takes 3 bytes to encode (the encoder guarantees
	// the offset is < 65536 for that case), so such items also stay
	// within the 62/60 bound.
	//
	// A copy of 5 bytes may need 5 bytes to encode when the offset is
	// large, so the true worst case is a one-byte literal followed by a
	// five-byte copy: 6 input bytes become 7 output bytes. That 7/6
	// factor dominates the blowup, giving the estimate below.
	return 32 + srcLen + srcLen/6
}
// NewWriter returns a new Writer that compresses to w, using the framing
// format described at
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
func NewWriter(w io.Writer) *Writer {
	return &Writer{
		w:   w,
		enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
	}
}

// Writer is an io.Writer than can write Snappy-compressed bytes.
type Writer struct {
	w           io.Writer
	err         error  // sticky: once set, every later Write fails with it
	enc         []byte // scratch buffer for Encode output
	buf         [checksumSize + chunkHeaderSize]byte // chunk header + checksum scratch
	wroteHeader bool   // whether the stream-identifier chunk has been written
}

// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
	w.w = writer
	w.err = nil
	w.wroteHeader = false
}
// Write satisfies the io.Writer interface. Input is split into chunks of at
// most maxUncompressedChunkLen bytes, each emitted as a framed chunk with a
// CRC of the uncompressed data.
func (w *Writer) Write(p []byte) (n int, errRet error) {
	if w.err != nil {
		return 0, w.err
	}
	if !w.wroteHeader {
		// The stream begins with the magic chunk; w.enc is borrowed as
		// scratch space for it before any compression happens.
		copy(w.enc, magicChunk)
		if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
			w.err = err
			return n, err
		}
		w.wroteHeader = true
	}
	for len(p) > 0 {
		var uncompressed []byte
		if len(p) > maxUncompressedChunkLen {
			uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
		} else {
			uncompressed, p = p, nil
		}
		checksum := crc(uncompressed)

		// Compress the buffer, discarding the result if the improvement
		// isn't at least 12.5%.
		chunkType := uint8(chunkTypeCompressedData)
		chunkBody, err := Encode(w.enc, uncompressed)
		if err != nil {
			w.err = err
			return n, err
		}
		if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
			chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
		}

		// Chunk header: 1 byte type, 3 bytes little-endian body length;
		// the body length includes the 4 checksum bytes.
		chunkLen := 4 + len(chunkBody)
		w.buf[0] = chunkType
		w.buf[1] = uint8(chunkLen >> 0)
		w.buf[2] = uint8(chunkLen >> 8)
		w.buf[3] = uint8(chunkLen >> 16)
		w.buf[4] = uint8(checksum >> 0)
		w.buf[5] = uint8(checksum >> 8)
		w.buf[6] = uint8(checksum >> 16)
		w.buf[7] = uint8(checksum >> 24)
		if _, err = w.w.Write(w.buf[:]); err != nil {
			w.err = err
			return n, err
		}
		if _, err = w.w.Write(chunkBody); err != nil {
			w.err = err
			return n, err
		}
		n += len(uncompressed)
	}
	return n, nil
}

View file

@ -0,0 +1,68 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package snappy implements the snappy block-based compression format.
// It aims for very high speeds and reasonable compression.
//
// The C++ snappy implementation is at http://code.google.com/p/snappy/
package snappy
import (
"hash/crc32"
)
/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.

For literal tags:
  - If m < 60, the next 1 + m bytes are literal bytes.
  - Otherwise, let n be the little-endian unsigned integer denoted by the next
    m - 59 bytes. The next 1 + n bytes after that are literal bytes.

For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
    of the offset. The next byte is bits 0-7 of the offset.
  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
    The length is 1 + m. The offset is the little-endian unsigned integer
    denoted by the next 2 bytes.
  - For l == 3, this tag is a legacy format that is no longer supported.
*/
const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
	tagCopy4   = 0x03
)

// Framing-format sizes and magic values.
const (
	checksumSize    = 4
	chunkHeaderSize = 4
	magicChunk      = "\xff\x06\x00\x00" + magicBody
	magicBody       = "sNaPpY"
	// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt says
	// that "the uncompressed data in a chunk must be no longer than 65536 bytes".
	maxUncompressedChunkLen = 65536
)

// Chunk types defined by the framing format.
const (
	chunkTypeCompressedData   = 0x00
	chunkTypeUncompressedData = 0x01
	chunkTypePadding          = 0xfe
	chunkTypeStreamIdentifier = 0xff
)
// crcTable is the Castagnoli (CRC-32C) polynomial table used by the framing
// format's masked checksum.
var crcTable = crc32.MakeTable(crc32.Castagnoli)

// crc implements the checksum specified in section 3 of
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
func crc(b []byte) uint32 {
	// The spec masks the raw CRC-32C by rotating it right 15 bits and
	// adding a constant.
	sum := crc32.Update(0, crcTable, b)
	return uint32(sum>>15|sum<<17) + 0xa282ead8
}

View file

@ -0,0 +1,364 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"path/filepath"
"strings"
"testing"
)
var (
	download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
	testdata = flag.String("testdata", "testdata", "Directory containing the test data")
)

// roundtrip encodes b into ebuf, decodes the result into dbuf, and reports an
// error unless the decoded bytes match the input exactly.
func roundtrip(b, ebuf, dbuf []byte) error {
	e, err := Encode(ebuf, b)
	if err != nil {
		return fmt.Errorf("encoding error: %v", err)
	}
	d, err := Decode(dbuf, e)
	if err != nil {
		return fmt.Errorf("decoding error: %v", err)
	}
	if !bytes.Equal(b, d) {
		return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d)
	}
	return nil
}
// TestEmpty checks the nil/zero-length round trip.
func TestEmpty(t *testing.T) {
	if err := roundtrip(nil, nil, nil); err != nil {
		t.Fatal(err)
	}
}

// TestSmallCopy round-trips short inputs containing overlapping
// back-references, across several encode/decode buffer sizes.
func TestSmallCopy(t *testing.T) {
	for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
		for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
			for i := 0; i < 32; i++ {
				s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb"
				if err := roundtrip([]byte(s), ebuf, dbuf); err != nil {
					t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err)
				}
			}
		}
	}
}

// TestSmallRand round-trips pseudo-random (incompressible) inputs of
// increasing size.
func TestSmallRand(t *testing.T) {
	rng := rand.New(rand.NewSource(27354294))
	for n := 1; n < 20000; n += 23 {
		b := make([]byte, n)
		for i := range b {
			b[i] = uint8(rng.Uint32())
		}
		if err := roundtrip(b, nil, nil); err != nil {
			t.Fatal(err)
		}
	}
}

// TestSmallRegular round-trips cyclic (highly compressible) inputs of
// increasing size.
func TestSmallRegular(t *testing.T) {
	for n := 1; n < 20000; n += 23 {
		b := make([]byte, n)
		for i := range b {
			b[i] = uint8(i%10 + 'a')
		}
		if err := roundtrip(b, nil, nil); err != nil {
			t.Fatal(err)
		}
	}
}
// cmp returns nil when a and b hold identical bytes, or a descriptive error
// naming the length mismatch or the first differing byte otherwise.
func cmp(a, b []byte) error {
	if len(a) != len(b) {
		return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
	}
	for i, av := range a {
		if av != b[i] {
			return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, av, b[i])
		}
	}
	return nil
}
// TestFramingFormat round-trips a 1e6-byte buffer of alternating
// incompressible and compressible runs through the framing-format Writer and
// Reader, so that both compressed and uncompressed chunk types are exercised.
func TestFramingFormat(t *testing.T) {
	// src is comprised of alternating 1e5-sized sequences of random
	// (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
	// because it is larger than maxUncompressedChunkLen (64k).
	src := make([]byte, 1e6)
	rng := rand.New(rand.NewSource(1))
	for i := 0; i < 10; i++ {
		if i%2 == 0 {
			for j := 0; j < 1e5; j++ {
				src[1e5*i+j] = uint8(rng.Intn(256))
			}
		} else {
			for j := 0; j < 1e5; j++ {
				src[1e5*i+j] = uint8(i)
			}
		}
	}

	buf := new(bytes.Buffer)
	if _, err := NewWriter(buf).Write(src); err != nil {
		t.Fatalf("Write: encoding: %v", err)
	}
	dst, err := ioutil.ReadAll(NewReader(buf))
	if err != nil {
		t.Fatalf("ReadAll: decoding: %v", err)
	}
	if err := cmp(dst, src); err != nil {
		t.Fatal(err)
	}
}

// TestReaderReset checks that Reset reliably clears a Reader's state,
// including after it has seen invalid input or been abandoned mid-stream
// ("partial" reads only 101 bytes and then resets).
func TestReaderReset(t *testing.T) {
	gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
	buf := new(bytes.Buffer)
	if _, err := NewWriter(buf).Write(gold); err != nil {
		t.Fatalf("Write: %v", err)
	}
	encoded, invalid, partial := buf.String(), "invalid", "partial"
	r := NewReader(nil)
	for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
		if s == partial {
			// Read only a prefix, leaving the reader mid-stream; the next
			// Reset must discard the leftover state.
			r.Reset(strings.NewReader(encoded))
			if _, err := r.Read(make([]byte, 101)); err != nil {
				t.Errorf("#%d: %v", i, err)
				continue
			}
			continue
		}
		r.Reset(strings.NewReader(s))
		got, err := ioutil.ReadAll(r)
		switch s {
		case encoded:
			if err != nil {
				t.Errorf("#%d: %v", i, err)
				continue
			}
			if err := cmp(got, gold); err != nil {
				t.Errorf("#%d: %v", i, err)
				continue
			}
		case invalid:
			// Input is not a valid snappy stream; decoding must fail.
			if err == nil {
				t.Errorf("#%d: got nil error, want non-nil", i)
				continue
			}
		}
	}
}

// TestWriterReset checks that Reset reliably clears a Writer's state by
// reusing one Writer for progressively longer prefixes of gold and verifying
// each output decodes back to its own input.
func TestWriterReset(t *testing.T) {
	gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
	var gots, wants [][]byte
	const n = 20
	w, failed := NewWriter(nil), false
	for i := 0; i <= n; i++ {
		buf := new(bytes.Buffer)
		w.Reset(buf)
		want := gold[:len(gold)*i/n]
		if _, err := w.Write(want); err != nil {
			t.Errorf("#%d: Write: %v", i, err)
			failed = true
			continue
		}
		got, err := ioutil.ReadAll(NewReader(buf))
		if err != nil {
			t.Errorf("#%d: ReadAll: %v", i, err)
			failed = true
			continue
		}
		gots = append(gots, got)
		wants = append(wants, want)
	}
	if failed {
		return
	}
	for i := range gots {
		if err := cmp(gots[i], wants[i]); err != nil {
			t.Errorf("#%d: %v", i, err)
		}
	}
}
// benchDecode measures Decode throughput over src. Throughput is reported
// in terms of the uncompressed byte count.
func benchDecode(b *testing.B, src []byte) {
	encoded, err := Encode(nil, src)
	if err != nil {
		b.Fatal(err)
	}
	// Bandwidth is in amount of uncompressed data.
	b.SetBytes(int64(len(src)))
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		Decode(src, encoded)
	}
}

// benchEncode measures Encode throughput over src, reusing one
// maximally-sized destination buffer across iterations.
func benchEncode(b *testing.B, src []byte) {
	// Bandwidth is in amount of uncompressed data.
	b.SetBytes(int64(len(src)))
	dst := make([]byte, MaxEncodedLen(len(src)))
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		Encode(dst, src)
	}
}
// readFile loads filename, failing the benchmark/test on read errors or an
// empty file (an empty input would make the throughput numbers meaningless).
func readFile(b testing.TB, filename string) []byte {
	src, err := ioutil.ReadFile(filename)
	if err != nil {
		b.Fatalf("failed reading %s: %s", filename, err)
	}
	if len(src) == 0 {
		b.Fatalf("%s has zero length", filename)
	}
	return src
}
// expand returns a slice of length n containing repeated copies of src
// (the final copy is truncated if n is not a multiple of len(src)).
func expand(src []byte, n int) []byte {
	dst := make([]byte, n)
	for rest := dst; len(rest) > 0; {
		rest = rest[copy(rest, src):]
	}
	return dst
}
// benchWords benchmarks Encode or Decode over the first n bytes (repeated as
// needed) of the system word list.
func benchWords(b *testing.B, n int, decode bool) {
	// Note: the file is OS-language dependent so the resulting values are not
	// directly comparable for non-US-English OS installations.
	data := expand(readFile(b, "/usr/share/dict/words"), n)
	if !decode {
		benchEncode(b, data)
		return
	}
	benchDecode(b, data)
}
// Word-list benchmarks at 1e3..1e6 bytes; Decode variants measure
// decompression throughput, Encode variants compression throughput.
func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) }
func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) }
func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) }
func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) }
func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) }
func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) }
func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
// testFiles' values are copied directly from
// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
// The label field is unused in snappy-go.
var testFiles = []struct {
	label    string
	filename string
}{
	{"html", "html"},
	{"urls", "urls.10K"},
	{"jpg", "fireworks.jpeg"},
	{"jpg_200", "fireworks.jpeg"},
	{"pdf", "paper-100k.pdf"},
	{"html4", "html_x_4"},
	{"txt1", "alice29.txt"},
	{"txt2", "asyoulik.txt"},
	{"txt3", "lcet10.txt"},
	{"txt4", "plrabn12.txt"},
	{"pb", "geo.protodata"},
	{"gaviota", "kppkn.gtb"},
}

// The test data files are present at this canonical URL.
const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"

// downloadTestdata ensures the named benchmark file exists (and is non-empty)
// under *testdata, fetching it from baseURL when the -download flag is set.
// A partially written file is removed on any error via the errRet cleanup.
func downloadTestdata(basename string) (errRet error) {
	filename := filepath.Join(*testdata, basename)
	if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
		return nil
	}
	if !*download {
		return fmt.Errorf("test data not found; skipping benchmark without the -download flag")
	}
	// Download the official snappy C++ implementation reference test data
	// files for benchmarking.
	if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) {
		return fmt.Errorf("failed to create testdata: %s", err)
	}
	f, err := os.Create(filename)
	if err != nil {
		return fmt.Errorf("failed to create %s: %s", filename, err)
	}
	defer f.Close()
	// Remove the file on any failure below so a truncated download is not
	// mistaken for valid test data on the next run.
	defer func() {
		if errRet != nil {
			os.Remove(filename)
		}
	}()
	url := baseURL + basename
	resp, err := http.Get(url)
	if err != nil {
		return fmt.Errorf("failed to download %s: %s", url, err)
	}
	defer resp.Body.Close()
	if s := resp.StatusCode; s != http.StatusOK {
		return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
	}
	_, err = io.Copy(f, resp.Body)
	if err != nil {
		return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
	}
	return nil
}
// benchFile benchmarks encoding or decoding of the n'th testFiles entry,
// downloading it first if needed (and permitted by the -download flag).
func benchFile(b *testing.B, n int, decode bool) {
	if err := downloadTestdata(testFiles[n].filename); err != nil {
		b.Fatalf("failed to download testdata: %s", err)
	}
	data := readFile(b, filepath.Join(*testdata, testFiles[n].filename))
	if decode {
		benchDecode(b, data)
	} else {
		benchEncode(b, data)
	}
}

// Naming convention is kept similar to what snappy's C++ implementation uses.
// UFlat = uncompress (decode); ZFlat = compress (encode).
func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) }
func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) }
func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) }
func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) }
func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) }
func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) }
func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) }
func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) }
func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) }
func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) }
func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) }
func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) }
func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) }
func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }

View file

@ -0,0 +1,57 @@
Copyright (c) 2012 Péter Surányi. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------------
Portions of inf.Dec's source code have been derived from Go and are
covered by the following license:
----------------------------------------------------------------------
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -0,0 +1,210 @@
package inf
import (
"fmt"
"math/big"
"math/rand"
"sync"
"testing"
)
// Benchmark input sizing: maxcap operand pairs of `bits`-bit unscaled values
// with scales drawn from (-maxscale, maxscale).
const maxcap = 1024 * 1024
const bits = 256
const maxscale = 32

// once guards the lazy, one-time generation of the benchmark inputs below.
var once sync.Once

// decInput and intInput hold pairs of pseudo-random operands shared by all
// benchmarks in this file; they are filled once by initBench.
var decInput [][2]Dec
var intInput [][2]big.Int

// initBench populates decInput and intInput deterministically (fixed seed 0)
// so benchmark runs are comparable across invocations.
var initBench = func() {
	decInput = make([][2]Dec, maxcap)
	intInput = make([][2]big.Int, maxcap)
	max := new(big.Int).Lsh(big.NewInt(1), bits)
	r := rand.New(rand.NewSource(0))
	for i := 0; i < cap(decInput); i++ {
		decInput[i][0].SetUnscaledBig(new(big.Int).Rand(r, max)).
			SetScale(Scale(r.Int31n(int32(2*maxscale-1)) - int32(maxscale)))
		decInput[i][1].SetUnscaledBig(new(big.Int).Rand(r, max)).
			SetScale(Scale(r.Int31n(int32(2*maxscale-1)) - int32(maxscale)))
	}
	for i := 0; i < cap(intInput); i++ {
		intInput[i][0].Rand(r, max)
		intInput[i][1].Rand(r, max)
	}
}
// doBenchmarkDec1 runs a unary Dec operation over the shared inputs.
func doBenchmarkDec1(b *testing.B, f func(z *Dec)) {
	once.Do(initBench)
	b.ResetTimer()
	b.StartTimer()
	for n := 0; n < b.N; n++ {
		f(&decInput[n%maxcap][0])
	}
}

// doBenchmarkDec2 runs a binary Dec operation over the shared input pairs.
func doBenchmarkDec2(b *testing.B, f func(x, y *Dec)) {
	once.Do(initBench)
	b.ResetTimer()
	b.StartTimer()
	for n := 0; n < b.N; n++ {
		pair := &decInput[n%maxcap]
		f(&pair[0], &pair[1])
	}
}

// doBenchmarkInt1 runs a unary big.Int operation over the shared inputs,
// as a baseline for the Dec benchmarks.
func doBenchmarkInt1(b *testing.B, f func(z *big.Int)) {
	once.Do(initBench)
	b.ResetTimer()
	b.StartTimer()
	for n := 0; n < b.N; n++ {
		f(&intInput[n%maxcap][0])
	}
}

// doBenchmarkInt2 runs a binary big.Int operation over the shared input pairs.
func doBenchmarkInt2(b *testing.B, f func(x, y *big.Int)) {
	once.Do(initBench)
	b.ResetTimer()
	b.StartTimer()
	for n := 0; n < b.N; n++ {
		pair := &intInput[n%maxcap]
		f(&pair[0], &pair[1])
	}
}
// Dec micro-benchmarks: formatting, scanning, gob round trips, and the
// arithmetic operations. "Mixed" variants operate on operands with differing
// scales; the non-mixed Add/Sub temporarily equalize the scales first.
func Benchmark_Dec_String(b *testing.B) {
	doBenchmarkDec1(b, func(x *Dec) {
		x.String()
	})
}

func Benchmark_Dec_StringScan(b *testing.B) {
	doBenchmarkDec1(b, func(x *Dec) {
		s := x.String()
		d := new(Dec)
		fmt.Sscan(s, d)
	})
}

func Benchmark_Dec_GobEncode(b *testing.B) {
	doBenchmarkDec1(b, func(x *Dec) {
		x.GobEncode()
	})
}

func Benchmark_Dec_GobEnDecode(b *testing.B) {
	doBenchmarkDec1(b, func(x *Dec) {
		g, _ := x.GobEncode()
		new(Dec).GobDecode(g)
	})
}

func Benchmark_Dec_Add(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		// Equalize scales so the addition itself (not rescaling) is measured;
		// y's scale is restored afterwards.
		ys := y.Scale()
		y.SetScale(x.Scale())
		_ = new(Dec).Add(x, y)
		y.SetScale(ys)
	})
}

func Benchmark_Dec_AddMixed(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		_ = new(Dec).Add(x, y)
	})
}

func Benchmark_Dec_Sub(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		ys := y.Scale()
		y.SetScale(x.Scale())
		_ = new(Dec).Sub(x, y)
		y.SetScale(ys)
	})
}

func Benchmark_Dec_SubMixed(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		_ = new(Dec).Sub(x, y)
	})
}

func Benchmark_Dec_Mul(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		_ = new(Dec).Mul(x, y)
	})
}

func Benchmark_Dec_Mul_QuoExact(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		// Multiply then divide back out, so the quotient is known finite.
		v := new(Dec).Mul(x, y)
		_ = new(Dec).QuoExact(v, y)
	})
}

func Benchmark_Dec_QuoRound_Fixed_Down(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		_ = new(Dec).QuoRound(x, y, 0, RoundDown)
	})
}

func Benchmark_Dec_QuoRound_Fixed_HalfUp(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		_ = new(Dec).QuoRound(x, y, 0, RoundHalfUp)
	})
}
// big.Int baseline benchmarks mirroring the Dec benchmarks above, for
// comparing Dec's overhead against the underlying integer operations.
func Benchmark_Int_String(b *testing.B) {
	doBenchmarkInt1(b, func(x *big.Int) {
		x.String()
	})
}

func Benchmark_Int_StringScan(b *testing.B) {
	doBenchmarkInt1(b, func(x *big.Int) {
		s := x.String()
		d := new(big.Int)
		fmt.Sscan(s, d)
	})
}

func Benchmark_Int_GobEncode(b *testing.B) {
	doBenchmarkInt1(b, func(x *big.Int) {
		x.GobEncode()
	})
}

func Benchmark_Int_GobEnDecode(b *testing.B) {
	doBenchmarkInt1(b, func(x *big.Int) {
		g, _ := x.GobEncode()
		new(big.Int).GobDecode(g)
	})
}

func Benchmark_Int_Add(b *testing.B) {
	doBenchmarkInt2(b, func(x, y *big.Int) {
		_ = new(big.Int).Add(x, y)
	})
}

func Benchmark_Int_Sub(b *testing.B) {
	doBenchmarkInt2(b, func(x, y *big.Int) {
		_ = new(big.Int).Sub(x, y)
	})
}

func Benchmark_Int_Mul(b *testing.B) {
	doBenchmarkInt2(b, func(x, y *big.Int) {
		_ = new(big.Int).Mul(x, y)
	})
}

func Benchmark_Int_Quo(b *testing.B) {
	doBenchmarkInt2(b, func(x, y *big.Int) {
		_ = new(big.Int).Quo(x, y)
	})
}

func Benchmark_Int_QuoRem(b *testing.B) {
	doBenchmarkInt2(b, func(x, y *big.Int) {
		_, _ = new(big.Int).QuoRem(x, y, new(big.Int))
	})
}

View file

@ -0,0 +1,615 @@
// Package inf (type inf.Dec) implements "infinite-precision" decimal
// arithmetic.
// "Infinite precision" describes two characteristics: practically unlimited
// precision for decimal number representation and no support for calculating
// with any specific fixed precision.
// (Although there is no practical limit on precision, inf.Dec can only
// represent finite decimals.)
//
// This package is currently in experimental stage and the API may change.
//
// This package does NOT support:
// - rounding to specific precisions (as opposed to specific decimal positions)
// - the notion of context (each rounding must be explicit)
// - NaN and Inf values, and distinguishing between positive and negative zero
// - conversions to and from float32/64 types
//
// Features considered for possible addition:
// + formatting options
// + Exp method
// + combined operations such as AddRound/MulAdd etc
// + exchanging data in decimal32/64/128 formats
//
package inf
// TODO:
// - avoid excessive deep copying (quo and rounders)
import (
"fmt"
"io"
"math/big"
"strings"
)
// A Dec represents a signed arbitrary-precision decimal.
// It is a combination of a sign, an arbitrary-precision integer coefficient
// value, and a signed fixed-precision exponent value.
// The sign and the coefficient value are handled together as a signed value
// and referred to as the unscaled value.
// (Positive and negative zero values are not distinguished.)
// Since the exponent is most commonly non-positive, it is handled in negated
// form and referred to as scale.
//
// The mathematical value of a Dec equals:
//
// unscaled * 10**(-scale)
//
// Note that different Dec representations may have equal mathematical values.
//
// unscaled scale String()
// -------------------------
// 0 0 "0"
// 0 2 "0.00"
// 0 -2 "0"
// 1 0 "1"
// 100 2 "1.00"
// 10 0 "10"
// 1 -1 "10"
//
// The zero value for a Dec represents the value 0 with scale 0.
//
// Operations are typically performed through the *Dec type.
// The semantics of the assignment operation "=" for "bare" Dec values is
// undefined and should not be relied on.
//
// Methods are typically of the form:
//
// func (z *Dec) Op(x, y *Dec) *Dec
//
// and implement operations z = x Op y with the result as receiver; if it
// is one of the operands it may be overwritten (and its memory reused).
// To enable chaining of operations, the result is also returned. Methods
// returning a result other than *Dec take one of the operands as the receiver.
//
// A "bare" Quo method (quotient / division operation) is not provided, as the
// result is not always a finite decimal and thus in general cannot be
// represented as a Dec.
// Instead, in the common case when rounding is (potentially) necessary,
// QuoRound should be used with a Scale and a Rounder.
// QuoExact or QuoRound with RoundExact can be used in the special cases when it
// is known that the result is always a finite decimal.
//
type Dec struct {
	unscaled big.Int // signed coefficient; the Dec's sign lives here
	scale    Scale   // negated exponent: value = unscaled * 10**(-scale)
}

// Scale represents the type used for the scale of a Dec.
type Scale int32

const scaleSize = 4 // bytes in a Scale value

// Scaler represents a method for obtaining the scale to use for the result of
// an operation on x and y.
type scaler interface {
	Scale(x *Dec, y *Dec) Scale
}
var bigInt = [...]*big.Int{
big.NewInt(0), big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4),
big.NewInt(5), big.NewInt(6), big.NewInt(7), big.NewInt(8), big.NewInt(9),
big.NewInt(10),
}
var exp10cache [64]big.Int = func() [64]big.Int {
e10, e10i := [64]big.Int{}, bigInt[1]
for i, _ := range e10 {
e10[i].Set(e10i)
e10i = new(big.Int).Mul(e10i, bigInt[10])
}
return e10
}()
// NewDec allocates and returns a new Dec set to the given int64 unscaled value
// and scale.
func NewDec(unscaled int64, scale Scale) *Dec {
	return new(Dec).SetUnscaled(unscaled).SetScale(scale)
}

// NewDecBig allocates and returns a new Dec set to the given *big.Int unscaled
// value and scale.
func NewDecBig(unscaled *big.Int, scale Scale) *Dec {
	return new(Dec).SetUnscaledBig(unscaled).SetScale(scale)
}

// Scale returns the scale of x.
func (x *Dec) Scale() Scale {
	return x.scale
}

// Unscaled returns the unscaled value of x for u and true for ok when the
// unscaled value can be represented as int64; otherwise it returns an undefined
// int64 value for u and false for ok. Use x.UnscaledBig().Int64() to avoid
// checking the validity of the value when the check is known to be redundant.
func (x *Dec) Unscaled() (u int64, ok bool) {
	u = x.unscaled.Int64()
	// Round-trip through big.Int to detect truncation by Int64().
	var i big.Int
	ok = i.SetInt64(u).Cmp(&x.unscaled) == 0
	return
}

// UnscaledBig returns the unscaled value of x as *big.Int.
func (x *Dec) UnscaledBig() *big.Int {
	return &x.unscaled
}

// SetScale sets the scale of z, with the unscaled value unchanged, and returns
// z.
// The mathematical value of the Dec changes as if it was multiplied by
// 10**(oldscale-scale).
func (z *Dec) SetScale(scale Scale) *Dec {
	z.scale = scale
	return z
}

// SetUnscaled sets the unscaled value of z, with the scale unchanged, and
// returns z.
func (z *Dec) SetUnscaled(unscaled int64) *Dec {
	z.unscaled.SetInt64(unscaled)
	return z
}

// SetUnscaledBig sets the unscaled value of z, with the scale unchanged, and
// returns z.
func (z *Dec) SetUnscaledBig(unscaled *big.Int) *Dec {
	z.unscaled.Set(unscaled)
	return z
}

// Set sets z to the value of x and returns z.
// It does nothing if z == x.
func (z *Dec) Set(x *Dec) *Dec {
	if z != x {
		z.SetUnscaledBig(x.UnscaledBig())
		z.SetScale(x.Scale())
	}
	return z
}

// Sign returns:
//
//	-1 if x <  0
//	 0 if x == 0
//	+1 if x >  0
//
func (x *Dec) Sign() int {
	return x.UnscaledBig().Sign()
}

// Neg sets z to -x and returns z.
func (z *Dec) Neg(x *Dec) *Dec {
	z.SetScale(x.Scale())
	z.UnscaledBig().Neg(x.UnscaledBig())
	return z
}

// Cmp compares x and y and returns:
//
//	-1 if x <  y
//	 0 if x == y
//	+1 if x >  y
//
func (x *Dec) Cmp(y *Dec) int {
	// Bring both operands to a common scale, then compare coefficients.
	xx, yy := upscale(x, y)
	return xx.UnscaledBig().Cmp(yy.UnscaledBig())
}

// Abs sets z to |x| (the absolute value of x) and returns z.
func (z *Dec) Abs(x *Dec) *Dec {
	z.SetScale(x.Scale())
	z.UnscaledBig().Abs(x.UnscaledBig())
	return z
}
// Add sets z to the sum x+y and returns z.
// The scale of z is the greater of the scales of x and y.
func (z *Dec) Add(x, y *Dec) *Dec {
	xx, yy := upscale(x, y)
	z.SetScale(xx.Scale())
	z.UnscaledBig().Add(xx.UnscaledBig(), yy.UnscaledBig())
	return z
}

// Sub sets z to the difference x-y and returns z.
// The scale of z is the greater of the scales of x and y.
func (z *Dec) Sub(x, y *Dec) *Dec {
	xx, yy := upscale(x, y)
	z.SetScale(xx.Scale())
	z.UnscaledBig().Sub(xx.UnscaledBig(), yy.UnscaledBig())
	return z
}

// Mul sets z to the product x*y and returns z.
// The scale of z is the sum of the scales of x and y.
func (z *Dec) Mul(x, y *Dec) *Dec {
	z.SetScale(x.Scale() + y.Scale())
	z.UnscaledBig().Mul(x.UnscaledBig(), y.UnscaledBig())
	return z
}

// Round sets z to the value of x rounded to Scale s using Rounder r, and
// returns z.
// Implemented as a division by one at the requested scale.
func (z *Dec) Round(x *Dec, s Scale, r Rounder) *Dec {
	return z.QuoRound(x, NewDec(1, 0), s, r)
}

// QuoRound sets z to the quotient x/y, rounded using the given Rounder to the
// specified scale.
//
// If the rounder is RoundExact but the result can not be expressed exactly at
// the specified scale, QuoRound returns nil, and the value of z is undefined.
//
// There is no corresponding Div method; the equivalent can be achieved through
// the choice of Rounder used.
//
func (z *Dec) QuoRound(x, y *Dec, s Scale, r Rounder) *Dec {
	return z.quo(x, y, sclr{s}, r)
}
// quo implements division with the result scale chosen by s and rounding
// performed by r. The remainder is only computed when the Rounder needs it.
func (z *Dec) quo(x, y *Dec, s scaler, r Rounder) *Dec {
	scl := s.Scale(x, y)
	var zzz *Dec
	if r.UseRemainder() {
		zz, rA, rB := new(Dec).quoRem(x, y, scl, true, new(big.Int), new(big.Int))
		zzz = r.Round(new(Dec), zz, rA, rB)
	} else {
		zz, _, _ := new(Dec).quoRem(x, y, scl, false, nil, nil)
		zzz = r.Round(new(Dec), zz, nil, nil)
	}
	if zzz == nil {
		// The Rounder rejected the result (e.g. RoundExact with a non-zero
		// remainder).
		return nil
	}
	return z.Set(zzz)
}

// QuoExact sets z to the quotient x/y and returns z when x/y is a finite
// decimal. Otherwise it returns nil and the value of z is undefined.
//
// The scale of a non-nil result is "x.Scale() - y.Scale()" or greater; it is
// calculated so that the remainder will be zero whenever x/y is a finite
// decimal.
func (z *Dec) QuoExact(x, y *Dec) *Dec {
	return z.quo(x, y, scaleQuoExact{}, RoundExact)
}

// quoRem sets z to the quotient x/y with the scale s, and if useRem is true,
// it sets remNum and remDen to the numerator and denominator of the remainder.
// It returns z, remNum and remDen.
//
// The remainder is normalized to the range -1 < r < 1 to simplify rounding;
// that is, the results satisfy the following equation:
//
//	x / y = z + (remNum/remDen) * 10**(-z.Scale())
//
// See Rounder for more details about rounding.
//
func (z *Dec) quoRem(x, y *Dec, s Scale, useRem bool,
	remNum, remDen *big.Int) (*Dec, *big.Int, *big.Int) {
	// difference (required adjustment) compared to "canonical" result scale
	shift := s - (x.Scale() - y.Scale())
	// pointers to adjusted unscaled dividend and divisor
	var ix, iy *big.Int
	switch {
	case shift > 0:
		// increased scale: decimal-shift dividend left
		ix = new(big.Int).Mul(x.UnscaledBig(), exp10(shift))
		iy = y.UnscaledBig()
	case shift < 0:
		// decreased scale: decimal-shift divisor left
		ix = x.UnscaledBig()
		iy = new(big.Int).Mul(y.UnscaledBig(), exp10(-shift))
	default:
		ix = x.UnscaledBig()
		iy = y.UnscaledBig()
	}
	// save a copy of iy in case it to be overwritten with the result
	// (possible when z aliases y)
	iy2 := iy
	if iy == z.UnscaledBig() {
		iy2 = new(big.Int).Set(iy)
	}
	// set scale
	z.SetScale(s)
	// set unscaled
	if useRem {
		// Int division
		_, intr := z.UnscaledBig().QuoRem(ix, iy, new(big.Int))
		// set remainder
		remNum.Set(intr)
		remDen.Set(iy2)
	} else {
		z.UnscaledBig().Quo(ix, iy)
	}
	return z, remNum, remDen
}
// sclr is a scaler that always yields a fixed, caller-chosen scale.
type sclr struct{ s Scale }

func (s sclr) Scale(x, y *Dec) Scale {
	return s.s
}

// scaleQuoExact picks a scale for x/y large enough that the quotient has a
// zero remainder whenever x/y is a finite decimal: the canonical scale
// difference plus the multiplicity of 10 (max of the multiplicities of the
// factors 2 and 5) in the reduced denominator.
type scaleQuoExact struct{}

func (sqe scaleQuoExact) Scale(x, y *Dec) Scale {
	rem := new(big.Rat).SetFrac(x.UnscaledBig(), y.UnscaledBig())
	f2, f5 := factor2(rem.Denom()), factor(rem.Denom(), bigInt[5])
	var f10 Scale
	if f2 > f5 {
		f10 = Scale(f2)
	} else {
		f10 = Scale(f5)
	}
	return x.Scale() - y.Scale() + f10
}
func factor(n *big.Int, p *big.Int) int {
// could be improved for large factors
d, f := n, 0
for {
dd, dm := new(big.Int).DivMod(d, p, new(big.Int))
if dm.Sign() == 0 {
f++
d = dd
} else {
break
}
}
return f
}
func factor2(n *big.Int) int {
// could be improved for large factors
f := 0
for ; n.Bit(f) == 0; f++ {
}
return f
}
// upscale returns a and b brought to their common (greater) scale; the
// operand that already has the greater scale is returned unchanged, the
// other is rescaled.
func upscale(a, b *Dec) (*Dec, *Dec) {
	switch {
	case a.Scale() == b.Scale():
		return a, b
	case a.Scale() > b.Scale():
		return a, b.rescale(a.Scale())
	default:
		return a.rescale(b.Scale()), b
	}
}
// exp10 returns 10**x as a *big.Int, serving small exponents from the
// precomputed exp10cache.
// NOTE(review): x is assumed non-negative — a negative x would index the
// cache out of range; visible callers (quoRem, rescale) only pass positive
// values. Confirm before widening usage.
func exp10(x Scale) *big.Int {
	if int(x) < len(exp10cache) {
		return &exp10cache[int(x)]
	}
	return new(big.Int).Exp(bigInt[10], big.NewInt(int64(x)), nil)
}
// rescale returns a Dec equal in scale to newScale. Increasing the scale
// multiplies the coefficient (exact); decreasing it divides with truncation,
// so value may be lost. When the scale is unchanged, x itself is returned.
func (x *Dec) rescale(newScale Scale) *Dec {
	shift := newScale - x.Scale()
	switch {
	case shift < 0:
		e := exp10(-shift)
		return NewDecBig(new(big.Int).Quo(x.UnscaledBig(), e), newScale)
	case shift > 0:
		e := exp10(shift)
		return NewDecBig(new(big.Int).Mul(x.UnscaledBig(), e), newScale)
	}
	return x
}
// zeros is a reusable template of 64 ASCII '0' bytes; lzeros is its length.
var zeros = []byte("00000000000000000000000000000000" +
	"00000000000000000000000000000000")
var lzeros = Scale(len(zeros))

// appendZeros appends n '0' bytes to s, copying from the zeros template in
// chunks of at most lzeros bytes, and returns the extended slice.
func appendZeros(s []byte, n Scale) []byte {
	for remaining := n; remaining > 0; remaining -= lzeros {
		if remaining >= lzeros {
			s = append(s, zeros...)
		} else {
			s = append(s, zeros[:remaining]...)
		}
	}
	return s
}
// String renders x in plain decimal notation: the coefficient with a decimal
// point inserted scale digits from the right, left-padded with "0." and
// zeros when the coefficient is shorter than the scale. A non-positive scale
// appends trailing zeros instead (no exponent notation is used).
func (x *Dec) String() string {
	if x == nil {
		return "<nil>"
	}
	scale := x.Scale()
	s := []byte(x.UnscaledBig().String())
	if scale <= 0 {
		// Negative scale: pad with 10**(-scale) trailing zeros, except when
		// the value is zero.
		if scale != 0 && x.unscaled.Sign() != 0 {
			s = appendZeros(s, -scale)
		}
		return string(s)
	}
	// negbit is 1 when x is negative (its string starts with '-'), else 0.
	negbit := Scale(-((x.Sign() - 1) / 2))
	// scale > 0
	lens := Scale(len(s))
	if lens-negbit <= scale {
		// Fewer digits than the scale: result is of the form "0.000ddd".
		ss := make([]byte, 0, scale+2)
		if negbit == 1 {
			ss = append(ss, '-')
		}
		ss = append(ss, '0', '.')
		ss = appendZeros(ss, scale-lens+negbit)
		ss = append(ss, s[negbit:]...)
		return string(ss)
	}
	// lens > scale
	ss := make([]byte, 0, lens+1)
	ss = append(ss, s[:lens-scale]...)
	ss = append(ss, '.')
	ss = append(ss, s[lens-scale:]...)
	return string(ss)
}
// Format is a support routine for fmt.Formatter. It accepts the decimal
// formats 'd' and 'f', and handles both equivalently.
// Width, precision, flags and bases 2, 8, 16 are not supported.
func (x *Dec) Format(s fmt.State, ch rune) {
	if ch != 'd' && ch != 'f' && ch != 'v' && ch != 's' {
		fmt.Fprintf(s, "%%!%c(dec.Dec=%s)", ch, x.String())
		return
	}
	// Fprint, not Fprintf: the rendered decimal must be written verbatim,
	// not interpreted as a format string (go vet's printf check flags the
	// original Fprintf(s, x.String()) call).
	fmt.Fprint(s, x.String())
}
// scan reads one decimal number (optional sign, digits, optional fractional
// part) from r, stopping at — and unreading — the first rune that cannot be
// part of the number. The scale is set to the number of fractional digits.
func (z *Dec) scan(r io.RuneScanner) (*Dec, error) {
	unscaled := make([]byte, 0, 256) // collects chars of unscaled as bytes
	dp, dg := -1, -1                 // indexes of decimal point, first digit
loop:
	for {
		ch, _, err := r.ReadRune()
		if err == io.EOF {
			break loop
		}
		if err != nil {
			return nil, err
		}
		switch {
		case ch == '+' || ch == '-':
			if len(unscaled) > 0 || dp >= 0 { // must be first character
				r.UnreadRune()
				break loop
			}
		case ch == '.':
			if dp >= 0 { // only one decimal point allowed
				r.UnreadRune()
				break loop
			}
			dp = len(unscaled)
			continue // don't add to unscaled
		case ch >= '0' && ch <= '9':
			if dg == -1 {
				dg = len(unscaled)
			}
		default:
			r.UnreadRune()
			break loop
		}
		unscaled = append(unscaled, byte(ch))
	}
	if dg == -1 {
		return nil, fmt.Errorf("no digits read")
	}
	if dp >= 0 {
		z.SetScale(Scale(len(unscaled) - dp))
	} else {
		z.SetScale(0)
	}
	_, ok := z.UnscaledBig().SetString(string(unscaled), 10)
	if !ok {
		return nil, fmt.Errorf("invalid decimal: %s", string(unscaled))
	}
	return z, nil
}

// SetString sets z to the value of s, interpreted as a decimal (base 10),
// and returns z and a boolean indicating success. The scale of z is the
// number of digits after the decimal point (including any trailing 0s),
// or 0 if there is no decimal point. If SetString fails, the value of z
// is undefined but the returned value is nil.
func (z *Dec) SetString(s string) (*Dec, bool) {
	r := strings.NewReader(s)
	_, err := z.scan(r)
	if err != nil {
		return nil, false
	}
	// Reject trailing garbage: scan must have consumed all of s.
	_, _, err = r.ReadRune()
	if err != io.EOF {
		return nil, false
	}
	// err == io.EOF => scan consumed all of s
	return z, true
}

// Scan is a support routine for fmt.Scanner; it sets z to the value of
// the scanned number. It accepts the decimal formats 'd' and 'f', and
// handles both equivalently. Bases 2, 8, 16 are not supported.
// The scale of z is the number of digits after the decimal point
// (including any trailing 0s), or 0 if there is no decimal point.
func (z *Dec) Scan(s fmt.ScanState, ch rune) error {
	if ch != 'd' && ch != 'f' && ch != 's' && ch != 'v' {
		return fmt.Errorf("Dec.Scan: invalid verb '%c'", ch)
	}
	s.SkipSpace()
	_, err := z.scan(s)
	return err
}
// Gob encoding version
const decGobVersion byte = 1

// scaleBytes encodes s as exactly scaleSize big-endian bytes.
func scaleBytes(s Scale) []byte {
	buf := make([]byte, scaleSize)
	v := s
	for i := scaleSize - 1; i >= 0; i-- {
		buf[i] = byte(v)
		v >>= 8
	}
	return buf
}

// scale decodes a big-endian Scale from the first scaleSize bytes of b;
// the inverse of scaleBytes.
func scale(b []byte) (s Scale) {
	for _, by := range b[:scaleSize] {
		s = s<<8 | Scale(by)
	}
	return
}
// GobEncode implements the gob.GobEncoder interface.
// The encoding is the unscaled value's gob encoding, followed by the
// big-endian scale (scaleSize bytes) and a trailing version byte.
func (x *Dec) GobEncode() ([]byte, error) {
	buf, err := x.UnscaledBig().GobEncode()
	if err != nil {
		return nil, err
	}
	buf = append(append(buf, scaleBytes(x.Scale())...), decGobVersion)
	return buf, nil
}
// GobDecode implements the gob.GobDecoder interface.
// It expects the layout produced by GobEncode: the unscaled value's gob
// encoding, then scaleSize big-endian scale bytes, then a version byte.
func (z *Dec) GobDecode(buf []byte) error {
	if len(buf) == 0 {
		return fmt.Errorf("Dec.GobDecode: no data")
	}
	b := buf[len(buf)-1]
	if b != decGobVersion {
		return fmt.Errorf("Dec.GobDecode: encoding version %d not supported", b)
	}
	l := len(buf) - scaleSize - 1
	if l < 0 {
		// Robustness fix: a buffer too short to hold the scale and version
		// trailer previously caused a slice-bounds panic below; report it as
		// a decoding error instead.
		return fmt.Errorf("Dec.GobDecode: buffer too short")
	}
	err := z.UnscaledBig().GobDecode(buf[:l])
	if err != nil {
		return err
	}
	z.SetScale(scale(buf[l : l+scaleSize]))
	return nil
}
// MarshalText implements the encoding.TextMarshaler interface.
// The decimal is rendered exactly as by String.
func (x *Dec) MarshalText() ([]byte, error) {
	text := x.String()
	return []byte(text), nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// It accepts the same decimal syntax as SetString.
func (z *Dec) UnmarshalText(data []byte) error {
	if _, ok := z.SetString(string(data)); !ok {
		return fmt.Errorf("invalid inf.Dec")
	}
	return nil
}

View file

@ -0,0 +1,33 @@
// +build go1.2
package inf
import (
"encoding"
"encoding/json"
"testing"
)
// Compile-time checks that *Dec satisfies the text (un)marshaler interfaces.
var _ encoding.TextMarshaler = new(Dec)
var _ encoding.TextUnmarshaler = new(Dec)

// Obj wraps a *Dec so the JSON round trip goes through struct field
// (un)marshaling.
type Obj struct {
	Val *Dec
}

// TestDecJsonMarshalUnmarshal round-trips a Dec through encoding/json and
// checks that scale and unscaled value survive unchanged.
func TestDecJsonMarshalUnmarshal(t *testing.T) {
	o := Obj{Val: NewDec(123, 2)}
	js, err := json.Marshal(o)
	if err != nil {
		t.Fatalf("json.Marshal(%v): got %v, want ok", o, err)
	}
	o2 := &Obj{}
	err = json.Unmarshal(js, o2)
	if err != nil {
		t.Fatalf("json.Unmarshal(%#q): got %v, want ok", js, err)
	}
	if o.Val.Scale() != o2.Val.Scale() ||
		o.Val.UnscaledBig().Cmp(o2.Val.UnscaledBig()) != 0 {
		t.Fatalf("json.Unmarshal(json.Marshal(%v)): want %v, got %v", o, o, o2)
	}
}

View file

@ -0,0 +1,40 @@
package inf
import (
"math/big"
"testing"
)
// decQuoRemZZZ lists quotient/remainder cases for Dec.quoRem: dividing
// x by y is expected to yield quotient z with a remainder equal to the
// rational r = remNum/remDen; srA and srB are the expected signs of the
// remainder's numerator and denominator, respectively.
var decQuoRemZZZ = []struct {
	z, x, y *Dec
	r *big.Rat
	srA, srB int
}{
	// basic examples
	{NewDec(1, 0), NewDec(2, 0), NewDec(2, 0), big.NewRat(0, 1), 0, 1},
	{NewDec(15, 1), NewDec(3, 0), NewDec(2, 0), big.NewRat(0, 1), 0, 1},
	{NewDec(1, 1), NewDec(1, 0), NewDec(10, 0), big.NewRat(0, 1), 0, 1},
	{NewDec(0, 0), NewDec(2, 0), NewDec(3, 0), big.NewRat(2, 3), 1, 1},
	{NewDec(0, 0), NewDec(2, 0), NewDec(6, 0), big.NewRat(1, 3), 1, 1},
	{NewDec(1, 1), NewDec(2, 0), NewDec(12, 0), big.NewRat(2, 3), 1, 1},
	// examples from the Go Language Specification
	{NewDec(1, 0), NewDec(5, 0), NewDec(3, 0), big.NewRat(2, 3), 1, 1},
	{NewDec(-1, 0), NewDec(-5, 0), NewDec(3, 0), big.NewRat(-2, 3), -1, 1},
	{NewDec(-1, 0), NewDec(5, 0), NewDec(-3, 0), big.NewRat(-2, 3), 1, -1},
	{NewDec(1, 0), NewDec(-5, 0), NewDec(-3, 0), big.NewRat(2, 3), -1, -1},
}
// TestDecQuoRem checks quoRem's quotient, remainder fraction, and the
// signs of the remainder's numerator/denominator against the table.
func TestDecQuoRem(t *testing.T) {
	for i, tc := range decQuoRemZZZ {
		got, remNum, remDen := new(Dec), new(big.Int), new(big.Int)
		sc := scaleQuoExact{}.Scale(tc.x, tc.y)
		got.quoRem(tc.x, tc.y, sc, true, remNum, remDen)
		rem := new(big.Rat).SetFrac(remNum, remDen)
		if tc.z.Cmp(got) != 0 || tc.r.Cmp(rem) != 0 {
			t.Errorf("#%d QuoRemZZZ got %v, %v, %v; expected %v, %v", i, got, remNum, remDen, tc.z, tc.r)
		}
		if tc.srA != remNum.Sign() || tc.srB != remDen.Sign() {
			t.Errorf("#%d QuoRemZZZ wrong signs, got %v, %v; expected %v, %v", i, remNum.Sign(), remDen.Sign(), tc.srA, tc.srB)
		}
	}
}

View file

@ -0,0 +1,379 @@
package inf_test
import (
"bytes"
"encoding/gob"
"fmt"
"math/big"
"strings"
"testing"
"speter.net/go/exp/math/dec/inf"
)
// decFunZZ is the common shape of the binary Dec operations under test
// (Add, Sub, Mul): z is set to f(x, y) and also returned.
type decFunZZ func(z, x, y *inf.Dec) *inf.Dec
// decArgZZ holds one test case: applying the operation to x and y is
// expected to produce z.
type decArgZZ struct {
	z, x, y *inf.Dec
}
// decSumZZ lists addition cases: z = x + y.
var decSumZZ = []decArgZZ{
	{inf.NewDec(0, 0), inf.NewDec(0, 0), inf.NewDec(0, 0)},
	{inf.NewDec(1, 0), inf.NewDec(1, 0), inf.NewDec(0, 0)},
	{inf.NewDec(1111111110, 0), inf.NewDec(123456789, 0), inf.NewDec(987654321, 0)},
	{inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(0, 0)},
	{inf.NewDec(864197532, 0), inf.NewDec(-123456789, 0), inf.NewDec(987654321, 0)},
	{inf.NewDec(-1111111110, 0), inf.NewDec(-123456789, 0), inf.NewDec(-987654321, 0)},
	{inf.NewDec(12, 2), inf.NewDec(1, 1), inf.NewDec(2, 2)},
}
// decProdZZ lists multiplication cases: z = x * y.
// NOTE(review): the {2,3} = {1,1} * {2,2} entry appears twice below —
// presumably an accidental duplicate; harmless, but could be removed.
var decProdZZ = []decArgZZ{
	{inf.NewDec(0, 0), inf.NewDec(0, 0), inf.NewDec(0, 0)},
	{inf.NewDec(0, 0), inf.NewDec(1, 0), inf.NewDec(0, 0)},
	{inf.NewDec(1, 0), inf.NewDec(1, 0), inf.NewDec(1, 0)},
	{inf.NewDec(-991*991, 0), inf.NewDec(991, 0), inf.NewDec(-991, 0)},
	{inf.NewDec(2, 3), inf.NewDec(1, 1), inf.NewDec(2, 2)},
	{inf.NewDec(2, -3), inf.NewDec(1, -1), inf.NewDec(2, -2)},
	{inf.NewDec(2, 3), inf.NewDec(1, 1), inf.NewDec(2, 2)},
}
// TestDecSignZ verifies that Sign agrees with comparing against zero.
func TestDecSignZ(t *testing.T) {
	var zero inf.Dec
	for _, a := range decSumZZ {
		got, want := a.z.Sign(), a.z.Cmp(&zero)
		if got != want {
			t.Errorf("got %d; want %d for z = %v", got, want, a.z)
		}
	}
}
// TestDecAbsZ verifies Abs by comparing against a reference computed as
// 0 - z when z is negative, and z itself otherwise.
func TestDecAbsZ(t *testing.T) {
	var zero inf.Dec
	for _, a := range decSumZZ {
		var abs, want inf.Dec
		abs.Abs(a.z)
		want.Set(a.z)
		if want.Cmp(&zero) < 0 {
			want.Sub(&zero, &want)
		}
		if abs.Cmp(&want) != 0 {
			t.Errorf("got z = %v; want %v", abs, want)
		}
	}
}
// testDecFunZZ applies f to a.x and a.y and checks the result against a.z.
func testDecFunZZ(t *testing.T, msg string, f decFunZZ, a decArgZZ) {
	got := new(inf.Dec)
	f(got, a.x, a.y)
	if got.Cmp(a.z) != 0 {
		t.Errorf("%s%+v\n\tgot z = %v; want %v", msg, a, got, a.z)
	}
}
// TestDecSumZZ checks Add (and its commuted form) plus the derived
// Sub identities z-y = x and z-x = y for each addition table entry.
func TestDecSumZZ(t *testing.T) {
	add := func(z, x, y *inf.Dec) *inf.Dec { return z.Add(x, y) }
	sub := func(z, x, y *inf.Dec) *inf.Dec { return z.Sub(x, y) }
	for _, a := range decSumZZ {
		testDecFunZZ(t, "AddZZ", add, a)
		testDecFunZZ(t, "AddZZ symmetric", add, decArgZZ{a.z, a.y, a.x})
		testDecFunZZ(t, "SubZZ", sub, decArgZZ{a.x, a.z, a.y})
		testDecFunZZ(t, "SubZZ symmetric", sub, decArgZZ{a.y, a.z, a.x})
	}
}
// TestDecProdZZ checks Mul in both argument orders for each entry.
func TestDecProdZZ(t *testing.T) {
	mul := func(z, x, y *inf.Dec) *inf.Dec { return z.Mul(x, y) }
	for _, a := range decProdZZ {
		testDecFunZZ(t, "MulZZ", mul, a)
		testDecFunZZ(t, "MulZZ symmetric", mul, decArgZZ{a.z, a.y, a.x})
	}
}
// decUnscaledTests exercises Dec.Unscaled: ok reports whether the
// unscaled value fits in an int64; the last three entries overflow
// (|value| > max int64 or 2^64) and must fail.
var decUnscaledTests = []struct {
	d *inf.Dec
	u int64 // ignored when ok == false
	ok bool
}{
	{new(inf.Dec), 0, true},
	{inf.NewDec(-1<<63, 0), -1 << 63, true},
	{inf.NewDec(-(-1<<63 + 1), 0), -(-1<<63 + 1), true},
	{new(inf.Dec).Neg(inf.NewDec(-1<<63, 0)), 0, false},
	{new(inf.Dec).Sub(inf.NewDec(-1<<63, 0), inf.NewDec(1, 0)), 0, false},
	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), 0, false},
}
// TestDecUnscaled checks both the ok flag and (when ok) the int64 value
// returned by Unscaled for each table entry.
func TestDecUnscaled(t *testing.T) {
	for i, tt := range decUnscaledTests {
		u, ok := tt.d.Unscaled()
		switch {
		case ok != tt.ok:
			t.Errorf("#%d Unscaled: got %v, expected %v", i, ok, tt.ok)
		case ok && u != tt.u:
			t.Errorf("#%d Unscaled: got %v, expected %v", i, u, tt.u)
		}
	}
}
// decRoundTests: rounding in to scale s with rounder r must produce exp.
// The 2^64 cases check rounding of values larger than an int64 at
// negative scales (i.e. rounding to tens, hundreds, ...).
var decRoundTests = [...]struct {
	in *inf.Dec
	s inf.Scale
	r inf.Rounder
	exp *inf.Dec
}{
	{inf.NewDec(123424999999999993, 15), 2, inf.RoundHalfUp, inf.NewDec(12342, 2)},
	{inf.NewDec(123425000000000001, 15), 2, inf.RoundHalfUp, inf.NewDec(12343, 2)},
	{inf.NewDec(123424999999999993, 15), 15, inf.RoundHalfUp, inf.NewDec(123424999999999993, 15)},
	{inf.NewDec(123424999999999993, 15), 16, inf.RoundHalfUp, inf.NewDec(1234249999999999930, 16)},
	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -1, inf.RoundHalfUp, inf.NewDec(1844674407370955162, -1)},
	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -2, inf.RoundHalfUp, inf.NewDec(184467440737095516, -2)},
	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -3, inf.RoundHalfUp, inf.NewDec(18446744073709552, -3)},
	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -4, inf.RoundHalfUp, inf.NewDec(1844674407370955, -4)},
	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -5, inf.RoundHalfUp, inf.NewDec(184467440737096, -5)},
	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -6, inf.RoundHalfUp, inf.NewDec(18446744073710, -6)},
}
// TestDecRound runs each table case through Dec.Round.
func TestDecRound(t *testing.T) {
	for i, tt := range decRoundTests {
		z := new(inf.Dec).Round(tt.in, tt.s, tt.r)
		if tt.exp.Cmp(z) != 0 {
			t.Errorf("#%d Round got %v; expected %v", i, z, tt.exp)
		}
	}
}
// decStringTests drives the String/SetString/Scan tests below.
// For ok entries, (val, scale) is the Dec whose canonical string form is
// out; in is the input fed to SetString and fmt scanning. ok and scanOk
// report whether SetString and Scan, respectively, accept the input
// (Scan is more lenient: it stops at the first unusable rune).
var decStringTests = []struct {
	in string
	out string
	val int64
	scale inf.Scale // skip SetString if negative
	ok bool
	scanOk bool
}{
	{in: "", ok: false, scanOk: false},
	{in: "a", ok: false, scanOk: false},
	{in: "z", ok: false, scanOk: false},
	{in: "+", ok: false, scanOk: false},
	{in: "-", ok: false, scanOk: false},
	{in: "g", ok: false, scanOk: false},
	{in: ".", ok: false, scanOk: false},
	{in: ".-0", ok: false, scanOk: false},
	{in: ".+0", ok: false, scanOk: false},
	// Scannable but not SetStringable
	{"0b", "ignored", 0, 0, false, true},
	{"0x", "ignored", 0, 0, false, true},
	{"0xg", "ignored", 0, 0, false, true},
	{"0.0g", "ignored", 0, 1, false, true},
	// examples from godoc for Dec
	{"0", "0", 0, 0, true, true},
	{"0.00", "0.00", 0, 2, true, true},
	{"ignored", "0", 0, -2, true, false},
	{"1", "1", 1, 0, true, true},
	{"1.00", "1.00", 100, 2, true, true},
	{"10", "10", 10, 0, true, true},
	{"ignored", "10", 1, -1, true, false},
	// other tests
	{"+0", "0", 0, 0, true, true},
	{"-0", "0", 0, 0, true, true},
	{"0.0", "0.0", 0, 1, true, true},
	{"0.1", "0.1", 1, 1, true, true},
	{"0.", "0", 0, 0, true, true},
	{"-10", "-10", -1, -1, true, true},
	{"-1", "-1", -1, 0, true, true},
	{"-0.1", "-0.1", -1, 1, true, true},
	{"-0.01", "-0.01", -1, 2, true, true},
	{"+0.", "0", 0, 0, true, true},
	{"-0.", "0", 0, 0, true, true},
	{".0", "0.0", 0, 1, true, true},
	{"+.0", "0.0", 0, 1, true, true},
	{"-.0", "0.0", 0, 1, true, true},
	{"0.0000000000", "0.0000000000", 0, 10, true, true},
	{"0.0000000001", "0.0000000001", 1, 10, true, true},
	{"-0.0000000000", "0.0000000000", 0, 10, true, true},
	{"-0.0000000001", "-0.0000000001", -1, 10, true, true},
	{"-10", "-10", -10, 0, true, true},
	{"+10", "10", 10, 0, true, true},
	{"00", "0", 0, 0, true, true},
	{"023", "23", 23, 0, true, true}, // decimal, not octal
	{"-02.3", "-2.3", -23, 1, true, true}, // decimal, not octal
}
// TestDecGetString checks both Dec.String and fmt "%d" formatting
// against the expected output for each ok table entry.
func TestDecGetString(t *testing.T) {
	z := new(inf.Dec)
	for i, test := range decStringTests {
		if !test.ok {
			continue
		}
		z.SetUnscaled(test.val)
		z.SetScale(test.scale)
		if got := z.String(); got != test.out {
			t.Errorf("#%da got %s; want %s", i, got, test.out)
		}
		if got := fmt.Sprintf("%d", z); got != test.out {
			t.Errorf("#%db got %s; want %s", i, got, test.out)
		}
	}
}
// TestDecSetString exercises SetString on both a fresh Dec and a reused
// ("dirty") one, checking the ok flag, the nil-on-failure contract, and
// the parsed value against the table.
func TestDecSetString(t *testing.T) {
	tmp := new(inf.Dec)
	for i, test := range decStringTests {
		if test.scale < 0 {
			// SetString only supports scale >= 0
			continue
		}
		// initialize to a non-zero value so that issues with parsing
		// 0 are detected
		tmp.Set(inf.NewDec(1234567890, 123))
		n1, ok1 := new(inf.Dec).SetString(test.in)
		n2, ok2 := tmp.SetString(test.in)
		expected := inf.NewDec(test.val, test.scale)
		if ok1 != test.ok || ok2 != test.ok {
			t.Errorf("#%d (input '%s') ok incorrect (should be %t)", i, test.in, test.ok)
			continue
		}
		// on failure, SetString must return a nil *Dec
		if !ok1 {
			if n1 != nil {
				t.Errorf("#%d (input '%s') n1 != nil", i, test.in)
			}
			continue
		}
		if !ok2 {
			if n2 != nil {
				t.Errorf("#%d (input '%s') n2 != nil", i, test.in)
			}
			continue
		}
		if n1.Cmp(expected) != 0 {
			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val)
		}
		if n2.Cmp(expected) != 0 {
			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val)
		}
	}
}
// TestDecScan feeds each table input through fmt.Sscan (which exercises
// Dec's fmt.Scanner implementation), on both a fresh and a reused Dec.
func TestDecScan(t *testing.T) {
	tmp := new(inf.Dec)
	for i, test := range decStringTests {
		if test.scale < 0 {
			// SetString only supports scale >= 0
			continue
		}
		// initialize to a non-zero value so that issues with parsing
		// 0 are detected
		tmp.Set(inf.NewDec(1234567890, 123))
		n1, n2 := new(inf.Dec), tmp
		nn1, err1 := fmt.Sscan(test.in, n1)
		nn2, err2 := fmt.Sscan(test.in, n2)
		if !test.scanOk {
			// both scans must report an error for unscannable input
			if err1 == nil || err2 == nil {
				t.Errorf("#%d (input '%s') ok incorrect, should be %t", i, test.in, test.scanOk)
			}
			continue
		}
		expected := inf.NewDec(test.val, test.scale)
		if nn1 != 1 || err1 != nil || nn2 != 1 || err2 != nil {
			t.Errorf("#%d (input '%s') error %d %v, %d %v", i, test.in, nn1, err1, nn2, err2)
			continue
		}
		if n1.Cmp(expected) != 0 {
			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val)
		}
		if n2.Cmp(expected) != 0 {
			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val)
		}
	}
}
// decScanNextTests lists scanner-positioning cases: ok reports whether a
// Dec is successfully scanned from in, and next is the first rune left
// unread afterwards (0 when the whole input was consumed).
var decScanNextTests = []struct {
	in string
	ok bool
	next rune
}{
	{"", false, 0},
	{"a", false, 'a'},
	{"z", false, 'z'},
	{"+", false, 0},
	{"-", false, 0},
	{"g", false, 'g'},
	{".", false, 0},
	{".-0", false, '-'},
	{".+0", false, '+'},
	{"0b", true, 'b'},
	{"0x", true, 'x'},
	{"0xg", true, 'x'},
	{"0.0g", true, 'g'},
}
// TestDecScanNext checks both the scan result and that the scanner stops
// reading at exactly the right position in the stream.
func TestDecScanNext(t *testing.T) {
	for i, test := range decScanNextTests {
		rdr := strings.NewReader(test.in)
		n1 := new(inf.Dec)
		nn1, _ := fmt.Fscan(rdr, n1)
		if (test.ok && nn1 == 0) || (!test.ok && nn1 > 0) {
			t.Errorf("#%d (input '%s') ok incorrect should be %t", i, test.in, test.ok)
			continue
		}
		// read the next rune from the same reader to verify position
		r := rune(0)
		nn2, err := fmt.Fscanf(rdr, "%c", &r)
		if test.next != r {
			t.Errorf("#%d (input '%s') next incorrect, got %c should be %c, %d, %v", i, test.in, r, test.next, nn2, err)
		}
	}
}
// decGobEncodingTests lists unscaled magnitudes to round-trip through
// gob; each is also tested negated and across scales -5..5.
var decGobEncodingTests = []string{
	"0",
	"1",
	"2",
	"10",
	"42",
	"1234567890",
	"298472983472983471903246121093472394872319615612417471234712061",
}
// TestDecGobEncoding round-trips Dec values through a gob
// encoder/decoder pair sharing one buffer.
func TestDecGobEncoding(t *testing.T) {
	var medium bytes.Buffer
	enc := gob.NewEncoder(&medium)
	dec := gob.NewDecoder(&medium)
	for i, test := range decGobEncodingTests {
		// j selects positive (0) vs negative (1) sign
		for j := 0; j < 2; j++ {
			for k := inf.Scale(-5); k <= 5; k++ {
				medium.Reset() // empty buffer for each test case (in case of failures)
				stest := test
				if j != 0 {
					// negative numbers
					stest = "-" + test
				}
				var tx inf.Dec
				tx.SetString(stest)
				tx.SetScale(k) // test with positive, negative, and zero scale
				if err := enc.Encode(&tx); err != nil {
					t.Errorf("#%d%c: encoding failed: %s", i, 'a'+j, err)
				}
				var rx inf.Dec
				if err := dec.Decode(&rx); err != nil {
					t.Errorf("#%d%c: decoding failed: %s", i, 'a'+j, err)
				}
				if rx.Cmp(&tx) != 0 {
					t.Errorf("#%d%c: transmission failed: got %s want %s", i, 'a'+j, &rx, &tx)
				}
			}
		}
	}
}

View file

@ -0,0 +1,62 @@
package inf_test
import (
"fmt"
"log"
)
import "speter.net/go/exp/math/dec/inf"
// ExampleDec_SetString shows basic parsing: the leading zero is dropped
// from the output while the trailing fractional zero is kept (it is part
// of the scale).
func ExampleDec_SetString() {
	d := new(inf.Dec)
	d.SetString("012345.67890") // decimal; leading 0 ignored; trailing 0 kept
	fmt.Println(d)
	// Output: 12345.67890
}
// ExampleDec_Scan demonstrates scanning a Dec via the fmt package.
func ExampleDec_Scan() {
	// The Scan function is rarely used directly;
	// the fmt package recognizes it as an implementation of fmt.Scanner.
	d := new(inf.Dec)
	_, err := fmt.Sscan("184467440.73709551617", d)
	if err != nil {
		log.Println("error scanning value:", err)
	} else {
		fmt.Println(d)
	}
	// Output: 184467440.73709551617
}
// ExampleDec_QuoRound_scale2RoundDown divides with truncation to 2 digits.
func ExampleDec_QuoRound_scale2RoundDown() {
	// 10 / 3 is an infinite decimal; it has no exact Dec representation
	x, y := inf.NewDec(10, 0), inf.NewDec(3, 0)
	// use 2 digits beyond the decimal point, round towards 0
	z := new(inf.Dec).QuoRound(x, y, 2, inf.RoundDown)
	fmt.Println(z)
	// Output: 3.33
}
// ExampleDec_QuoRound_scale2RoundCeil divides rounding towards +infinity.
func ExampleDec_QuoRound_scale2RoundCeil() {
	// -42 / 400 is an finite decimal with 3 digits beyond the decimal point
	x, y := inf.NewDec(-42, 0), inf.NewDec(400, 0)
	// use 2 digits beyond decimal point, round towards positive infinity
	z := new(inf.Dec).QuoRound(x, y, 2, inf.RoundCeil)
	fmt.Println(z)
	// Output: -0.10
}
// ExampleDec_QuoExact_ok shows QuoExact succeeding on a finite decimal.
func ExampleDec_QuoExact_ok() {
	// 1 / 25 is a finite decimal; it has exact Dec representation
	x, y := inf.NewDec(1, 0), inf.NewDec(25, 0)
	z := new(inf.Dec).QuoExact(x, y)
	fmt.Println(z)
	// Output: 0.04
}
// ExampleDec_QuoExact_fail shows QuoExact returning nil when the
// quotient has no exact decimal representation.
func ExampleDec_QuoExact_fail() {
	// 1 / 3 is an infinite decimal; it has no exact Dec representation
	x, y := inf.NewDec(1, 0), inf.NewDec(3, 0)
	z := new(inf.Dec).QuoExact(x, y)
	fmt.Println(z)
	// Output: <nil>
}

View file

@ -0,0 +1,145 @@
package inf
import (
"math/big"
)
// Rounder represents a method for rounding the (possibly infinite decimal)
// result of a division to a finite Dec. It is used by Dec.Round() and
// Dec.Quo().
//
// See the Example for results of using each Rounder with some sample values.
//
type Rounder rounder
// See http://speleotrove.com/decimal/damodel.html#refround for more detailed
// definitions of these rounding modes.
var (
	RoundDown Rounder // towards 0
	RoundUp Rounder // away from 0
	RoundFloor Rounder // towards -infinity
	RoundCeil Rounder // towards +infinity
	RoundHalfDown Rounder // to nearest; towards 0 if same distance
	RoundHalfUp Rounder // to nearest; away from 0 if same distance
	RoundHalfEven Rounder // to nearest; even last digit if same distance
)
// RoundExact is to be used in the case when rounding is not necessary.
// When used with Quo or Round, it returns the result verbatim when it can be
// expressed exactly with the given precision, and it returns nil otherwise.
// QuoExact is a shorthand for using Quo with RoundExact.
var RoundExact Rounder
// rounder is the interface the exported Rounder type wraps; the concrete
// implementations are installed in init below.
type rounder interface {
	// When UseRemainder() returns true, the Round() method is passed the
	// remainder of the division, expressed as the numerator and denominator of
	// a rational.
	UseRemainder() bool
	// Round sets the rounded value of a quotient to z, and returns z.
	// quo is rounded down (truncated towards zero) to the scale obtained from
	// the Scaler in Quo().
	//
	// When the remainder is not used, remNum and remDen are nil.
	// When used, the remainder is normalized between -1 and 1; that is:
	//
	//  -|remDen| < remNum < |remDen|
	//
	// remDen has the same sign as y, and remNum is zero or has the same sign
	// as x.
	Round(z, quo *Dec, remNum, remDen *big.Int) *Dec
}
// rndr is the concrete rounder implementation: a flag saying whether the
// division remainder is needed plus the rounding function itself.
type rndr struct {
	useRem bool
	round func(z, quo *Dec, remNum, remDen *big.Int) *Dec
}
// UseRemainder reports whether Round needs the remainder of the division.
func (r rndr) UseRemainder() bool {
	return r.useRem
}
// Round delegates to the wrapped rounding function.
func (r rndr) Round(z, quo *Dec, remNum, remDen *big.Int) *Dec {
	return r.round(z, quo, remNum, remDen)
}
// intSign maps a sign s in {-1, 0, 1} (via index s+1) to a shared *big.Int
// of that value; used to adjust a quotient by one unit in either direction.
var intSign = []*big.Int{big.NewInt(-1), big.NewInt(0), big.NewInt(1)}
// roundHalf builds a round-to-nearest rounding function. The tie/near-tie
// decision is delegated to f, which receives c = sign(2*|rA| - |rB|)
// (i.e. whether the remainder is below, at, or above half the divisor)
// and the parity of the truncated quotient's last bit.
func roundHalf(f func(c int, odd uint) (roundUp bool)) func(z, q *Dec, rA, rB *big.Int) *Dec {
	return func(z, q *Dec, rA, rB *big.Int) *Dec {
		z.Set(q)
		// Compare |rA| with |rB|/2 cheaply via bit lengths first.
		brA, brB := rA.BitLen(), rB.BitLen()
		if brA < brB-1 {
			// brA < brB-1 => |rA| < |rB/2|
			return z
		}
		roundUp := false
		srA, srB := rA.Sign(), rB.Sign()
		s := srA * srB // sign of the exact quotient's fractional part
		if brA == brB-1 {
			// bit lengths are inconclusive: compare 2*|rA| with |rB| exactly
			rA2 := new(big.Int).Lsh(rA, 1)
			if s < 0 {
				rA2.Neg(rA2)
			}
			roundUp = f(rA2.Cmp(rB)*srB, z.UnscaledBig().Bit(0))
		} else {
			// brA > brB-1 => |rA| > |rB/2|
			roundUp = true
		}
		if roundUp {
			// move the quotient one unit in the direction of the exact result
			z.UnscaledBig().Add(z.UnscaledBig(), intSign[s+1])
		}
		return z
	}
}
// init installs the concrete implementations of the exported Rounder
// package variables declared above.
func init() {
	// RoundExact: succeed only when there is no remainder at all.
	RoundExact = rndr{true,
		func(z, q *Dec, rA, rB *big.Int) *Dec {
			if rA.Sign() != 0 {
				return nil
			}
			return z.Set(q)
		}}
	// RoundDown: the truncated quotient is already rounded towards zero,
	// so the remainder is not needed.
	RoundDown = rndr{false,
		func(z, q *Dec, rA, rB *big.Int) *Dec {
			return z.Set(q)
		}}
	// RoundUp: any nonzero remainder moves one unit away from zero, in
	// the direction given by the sign of the exact result.
	RoundUp = rndr{true,
		func(z, q *Dec, rA, rB *big.Int) *Dec {
			z.Set(q)
			if rA.Sign() != 0 {
				z.UnscaledBig().Add(z.UnscaledBig(), intSign[rA.Sign()*rB.Sign()+1])
			}
			return z
		}}
	// RoundFloor: adjust downwards only when the exact result is negative.
	RoundFloor = rndr{true,
		func(z, q *Dec, rA, rB *big.Int) *Dec {
			z.Set(q)
			if rA.Sign()*rB.Sign() < 0 {
				z.UnscaledBig().Add(z.UnscaledBig(), intSign[0])
			}
			return z
		}}
	// RoundCeil: adjust upwards only when the exact result is positive.
	RoundCeil = rndr{true,
		func(z, q *Dec, rA, rB *big.Int) *Dec {
			z.Set(q)
			if rA.Sign()*rB.Sign() > 0 {
				z.UnscaledBig().Add(z.UnscaledBig(), intSign[2])
			}
			return z
		}}
	// The half-rounders share roundHalf and differ only in how they break
	// (near-)ties, via the c/odd predicate.
	RoundHalfDown = rndr{true, roundHalf(
		func(c int, odd uint) bool {
			return c > 0
		})}
	RoundHalfUp = rndr{true, roundHalf(
		func(c int, odd uint) bool {
			return c >= 0
		})}
	RoundHalfEven = rndr{true, roundHalf(
		func(c int, odd uint) bool {
			return c > 0 || c == 0 && odd == 1
		})}
}

View file

@ -0,0 +1,72 @@
package inf_test
import (
"fmt"
"os"
"text/tabwriter"
"speter.net/go/exp/math/dec/inf"
)
// This example displays the results of Dec.Round with each of the Rounders.
//
// ExampleRounder rounds a range of values near ties with every Rounder
// and prints the results as a table; RoundExact yields <nil> wherever the
// value cannot be represented exactly at the target scale.
func ExampleRounder() {
	var vals = []struct {
		x string
		s inf.Scale
	}{
		{"-0.18", 1}, {"-0.15", 1}, {"-0.12", 1}, {"-0.10", 1},
		{"-0.08", 1}, {"-0.05", 1}, {"-0.02", 1}, {"0.00", 1},
		{"0.02", 1}, {"0.05", 1}, {"0.08", 1}, {"0.10", 1},
		{"0.12", 1}, {"0.15", 1}, {"0.18", 1},
	}
	var rounders = []struct {
		name string
		rounder inf.Rounder
	}{
		{"RoundDown", inf.RoundDown}, {"RoundUp", inf.RoundUp},
		{"RoundCeil", inf.RoundCeil}, {"RoundFloor", inf.RoundFloor},
		{"RoundHalfDown", inf.RoundHalfDown}, {"RoundHalfUp", inf.RoundHalfUp},
		{"RoundHalfEven", inf.RoundHalfEven}, {"RoundExact", inf.RoundExact},
	}
	fmt.Println("The results of new(inf.Dec).Round(x, s, inf.RoundXXX):\n")
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', tabwriter.AlignRight)
	// header row: strip the "Round" prefix from each rounder name
	fmt.Fprint(w, "x\ts\t|\t")
	for _, r := range rounders {
		fmt.Fprintf(w, "%s\t", r.name[5:])
	}
	fmt.Fprintln(w)
	for _, v := range vals {
		fmt.Fprintf(w, "%s\t%d\t|\t", v.x, v.s)
		for _, r := range rounders {
			x, _ := new(inf.Dec).SetString(v.x)
			z := new(inf.Dec).Round(x, v.s, r.rounder)
			fmt.Fprintf(w, "%d\t", z)
		}
		fmt.Fprintln(w)
	}
	w.Flush()
	// Output:
	// The results of new(inf.Dec).Round(x, s, inf.RoundXXX):
	//
	// x s | Down Up Ceil Floor HalfDown HalfUp HalfEven Exact
	// -0.18 1 | -0.1 -0.2 -0.1 -0.2 -0.2 -0.2 -0.2 <nil>
	// -0.15 1 | -0.1 -0.2 -0.1 -0.2 -0.1 -0.2 -0.2 <nil>
	// -0.12 1 | -0.1 -0.2 -0.1 -0.2 -0.1 -0.1 -0.1 <nil>
	// -0.10 1 | -0.1 -0.1 -0.1 -0.1 -0.1 -0.1 -0.1 -0.1
	// -0.08 1 | 0.0 -0.1 0.0 -0.1 -0.1 -0.1 -0.1 <nil>
	// -0.05 1 | 0.0 -0.1 0.0 -0.1 0.0 -0.1 0.0 <nil>
	// -0.02 1 | 0.0 -0.1 0.0 -0.1 0.0 0.0 0.0 <nil>
	// 0.00 1 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
	// 0.02 1 | 0.0 0.1 0.1 0.0 0.0 0.0 0.0 <nil>
	// 0.05 1 | 0.0 0.1 0.1 0.0 0.0 0.1 0.0 <nil>
	// 0.08 1 | 0.0 0.1 0.1 0.0 0.1 0.1 0.1 <nil>
	// 0.10 1 | 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1
	// 0.12 1 | 0.1 0.2 0.2 0.1 0.1 0.1 0.1 <nil>
	// 0.15 1 | 0.1 0.2 0.2 0.1 0.1 0.2 0.2 <nil>
	// 0.18 1 | 0.1 0.2 0.2 0.1 0.2 0.2 0.2 <nil>
}

View file

@ -0,0 +1,109 @@
package inf_test
import (
"math/big"
"testing"
"speter.net/go/exp/math/dec/inf"
)
// decRounderInputs lists (truncated quotient, remainder numerator,
// remainder denominator) triples to feed to each Rounder's Round method.
var decRounderInputs = [...]struct {
	quo *inf.Dec
	rA, rB *big.Int
}{
	// examples from go language spec
	{inf.NewDec(1, 0), big.NewInt(2), big.NewInt(3)}, // 5 / 3
	{inf.NewDec(-1, 0), big.NewInt(-2), big.NewInt(3)}, // -5 / 3
	{inf.NewDec(-1, 0), big.NewInt(2), big.NewInt(-3)}, // 5 / -3
	{inf.NewDec(1, 0), big.NewInt(-2), big.NewInt(-3)}, // -5 / -3
	// examples from godoc
	{inf.NewDec(-1, 1), big.NewInt(-8), big.NewInt(10)},
	{inf.NewDec(-1, 1), big.NewInt(-5), big.NewInt(10)},
	{inf.NewDec(-1, 1), big.NewInt(-2), big.NewInt(10)},
	{inf.NewDec(0, 1), big.NewInt(-8), big.NewInt(10)},
	{inf.NewDec(0, 1), big.NewInt(-5), big.NewInt(10)},
	{inf.NewDec(0, 1), big.NewInt(-2), big.NewInt(10)},
	{inf.NewDec(0, 1), big.NewInt(0), big.NewInt(1)},
	{inf.NewDec(0, 1), big.NewInt(2), big.NewInt(10)},
	{inf.NewDec(0, 1), big.NewInt(5), big.NewInt(10)},
	{inf.NewDec(0, 1), big.NewInt(8), big.NewInt(10)},
	{inf.NewDec(1, 1), big.NewInt(2), big.NewInt(10)},
	{inf.NewDec(1, 1), big.NewInt(5), big.NewInt(10)},
	{inf.NewDec(1, 1), big.NewInt(8), big.NewInt(10)},
}
// decRounderResults gives, per Rounder, the expected Round output for
// each entry of decRounderInputs; nil means the Rounder rejects the
// input (only RoundExact does this, when the remainder is nonzero).
var decRounderResults = [...]struct {
	rounder inf.Rounder
	results [len(decRounderInputs)]*inf.Dec
}{
	{inf.RoundExact, [...]*inf.Dec{nil, nil, nil, nil,
		nil, nil, nil, nil, nil, nil,
		inf.NewDec(0, 1), nil, nil, nil, nil, nil, nil}},
	{inf.RoundDown, [...]*inf.Dec{
		inf.NewDec(1, 0), inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(1, 0),
		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
		inf.NewDec(0, 1),
		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1)}},
	{inf.RoundUp, [...]*inf.Dec{
		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-2, 1),
		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
		inf.NewDec(0, 1),
		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1),
		inf.NewDec(2, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
	{inf.RoundHalfDown, [...]*inf.Dec{
		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
		inf.NewDec(-2, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
		inf.NewDec(-1, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
		inf.NewDec(0, 1),
		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(1, 1),
		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(2, 1)}},
	{inf.RoundHalfUp, [...]*inf.Dec{
		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-1, 1),
		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(0, 1),
		inf.NewDec(0, 1),
		inf.NewDec(0, 1), inf.NewDec(1, 1), inf.NewDec(1, 1),
		inf.NewDec(1, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
	{inf.RoundHalfEven, [...]*inf.Dec{
		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-1, 1),
		inf.NewDec(-1, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
		inf.NewDec(0, 1),
		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(1, 1),
		inf.NewDec(1, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
	{inf.RoundFloor, [...]*inf.Dec{
		inf.NewDec(1, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(1, 0),
		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-2, 1),
		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
		inf.NewDec(0, 1),
		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1)}},
	{inf.RoundCeil, [...]*inf.Dec{
		inf.NewDec(2, 0), inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(2, 0),
		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
		inf.NewDec(0, 1),
		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1),
		inf.NewDec(2, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
}
// TestDecRounders applies every Rounder to every input triple and
// compares against the expected-results table (nil means rejected).
func TestDecRounders(t *testing.T) {
	for i, a := range decRounderResults {
		for j, input := range decRounderInputs {
			// copy the inputs so Round cannot corrupt the shared tables
			quo := new(inf.Dec).Set(input.quo)
			num := new(big.Int).Set(input.rA)
			den := new(big.Int).Set(input.rB)
			got := a.rounder.Round(new(inf.Dec), quo, num, den)
			want := a.results[j]
			if want == nil && got == nil {
				continue
			}
			if (want == nil) != (got == nil) || want.Cmp(got) != 0 {
				t.Errorf("#%d,%d Rounder got %v; expected %v", i, j, got, want)
			}
		}
	}
}