Add config file, handle it with Viper, add a command to open it in an editor

Ken-Håvard Lieng 2015-05-25 04:00:21 +02:00
parent b8a8ba2e08
commit 61aa5880d3
234 changed files with 44636 additions and 383 deletions
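The application code described by the commit message is not part of the vendored-dependency diff shown below, so purely as orientation, here is a minimal sketch of the kind of wiring it describes: Viper reading a config file, plus a Cobra subcommand that opens that file in `$EDITOR`. The package name, command name, and config path are assumptions, not the actual implementation.

```go
// Sketch only: illustrative names, not the files added by this commit.
package cmd

import (
  "os"
  "os/exec"
  "path/filepath"

  "github.com/spf13/cobra"
  "github.com/spf13/viper"
)

// configPath is an assumed location for the config file.
var configPath = filepath.Join(os.Getenv("HOME"), ".name_pending", "config.toml")

// initConfig points Viper at the config file and reads it.
func initConfig() error {
  viper.SetConfigFile(configPath)
  return viper.ReadInConfig()
}

// configCmd opens the config file in the user's editor.
var configCmd = &cobra.Command{
  Use:   "config",
  Short: "Open the config file in an editor",
  Run: func(cmd *cobra.Command, args []string) {
    editor := os.Getenv("EDITOR")
    if editor == "" {
      editor = "vi"
    }
    c := exec.Command(editor, configPath)
    c.Stdin, c.Stdout, c.Stderr = os.Stdin, os.Stdout, os.Stderr
    if err := c.Run(); err != nil {
      os.Exit(1)
    }
  },
}
```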

Godeps/Godeps.json generated

@@ -1,10 +1,19 @@
{
"ImportPath": "github.com/khlieng/name_pending",
"GoVersion": "go1.4",
"GoVersion": "go1.4.2",
"Packages": [
"./..."
],
"Deps": [
{
"ImportPath": "github.com/BurntSushi/toml",
"Comment": "v0.1.0-21-g056c9bc",
"Rev": "056c9bc7be7190eaa7715723883caffa5f8fa3e4"
},
{
"ImportPath": "github.com/armon/consul-api",
"Rev": "dcfedd50ed5334f96adee43fc88518a4f095e15c"
},
{
"ImportPath": "github.com/blevesearch/bleve",
"Rev": "16f538d7b76dd85c935a3104c390307cae5cbf79"
@@ -23,6 +32,11 @@
"Comment": "v1.0-43-gcf33c9e",
"Rev": "cf33c9e0ca0a23509b8bb8edfc63e4776bb1a330"
},
{
"ImportPath": "github.com/coreos/go-etcd/etcd",
"Comment": "v2.0.0-7-g73a8ef7",
"Rev": "73a8ef737e8ea002281a28b4cb92a1de121ad4c6"
},
{
"ImportPath": "github.com/cznic/b",
"Rev": "c4adf3a58579a2d57cd3097f455dcdf75edcdfd8"
@@ -43,18 +57,48 @@
"ImportPath": "github.com/julienschmidt/httprouter",
"Rev": "b428fda53bb0a764fea9c76c9413512eda291dec"
},
{
"ImportPath": "github.com/kr/pretty",
"Comment": "go.weekly.2011-12-22-27-ge6ac2fc",
"Rev": "e6ac2fc51e89a3249e82157fa0bb7a18ef9dd5bb"
},
{
"ImportPath": "github.com/kr/text",
"Rev": "e373e137fafd8abd480af49182dea0513914adb4"
},
{
"ImportPath": "github.com/magiconair/properties",
"Comment": "v1.5.2",
"Rev": "d5929c67198951106f49f7ea425198d0f1a08f7f"
},
{
"ImportPath": "github.com/mitchellh/mapstructure",
"Rev": "f7d28d5aeab42b9b95d2e6d6b956f73a290077fc"
},
{
"ImportPath": "github.com/ryszard/goskiplist/skiplist",
"Rev": "2dfbae5fcf46374f166f8969cb07e167f1be6273"
},
{
"ImportPath": "github.com/spf13/cast",
"Rev": "4d07383ffe94b5e5a6fa3af9211374a4507a0184"
},
{
"ImportPath": "github.com/spf13/cobra",
"Rev": "3ee9552eebbb5db27cb81abcae66c6f1430cad29"
},
{
"ImportPath": "github.com/spf13/jwalterweatherman",
"Rev": "3d60171a64319ef63c78bd45bd60e6eab1e75f8b"
},
{
"ImportPath": "github.com/spf13/pflag",
"Rev": "32bfad653e29e893e4ed3812fdc0294a05126c08"
},
{
"ImportPath": "github.com/spf13/viper",
"Rev": "d62d4bb4c68a773c3b5f4e72844913a2d5de0de0"
},
{
"ImportPath": "github.com/steveyen/gtreap",
"Rev": "72cd76f34c91f8d64a031af97b499e4a0b1a6e0c"
@@ -72,6 +116,29 @@
"Comment": "v1.0.0-17-g4b22041",
"Rev": "4b220417a489359f934045d0509d941a7a2a1038"
},
{
"ImportPath": "github.com/xordataexchange/crypt/backend",
"Comment": "v0.0.2-17-g749e360",
"Rev": "749e360c8f236773f28fc6d3ddfce4a470795227"
},
{
"ImportPath": "github.com/xordataexchange/crypt/config",
"Comment": "v0.0.2-17-g749e360",
"Rev": "749e360c8f236773f28fc6d3ddfce4a470795227"
},
{
"ImportPath": "github.com/xordataexchange/crypt/encoding/secconf",
"Comment": "v0.0.2-17-g749e360",
"Rev": "749e360c8f236773f28fc6d3ddfce4a470795227"
},
{
"ImportPath": "golang.org/x/crypto/cast5",
"Rev": "644910e6da851dcd66a424c71d068d971cfacba5"
},
{
"ImportPath": "golang.org/x/crypto/openpgp",
"Rev": "644910e6da851dcd66a424c71d068d971cfacba5"
},
{
"ImportPath": "golang.org/x/text/transform",
"Rev": "c92eb3cd6e70951a111680995e651ea4b2c35539"
@@ -79,6 +146,10 @@
{
"ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "c92eb3cd6e70951a111680995e651ea4b2c35539"
},
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "49c95bdc21843256fb6c4e0d370a05f24a0bf213"
}
]
}


@@ -0,0 +1,5 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test


@@ -0,0 +1,12 @@
language: go
go:
- 1.1
- 1.2
- tip
install:
- go install ./...
- go get github.com/BurntSushi/toml-test
script:
- export PATH="$PATH:$HOME/gopath/bin"
- make test


@@ -0,0 +1,3 @@
Compatible with TOML version
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)


@@ -0,0 +1,14 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.


@@ -0,0 +1,19 @@
install:
go install ./...
test: install
go test -v
toml-test toml-test-decoder
toml-test -encoder toml-test-encoder
fmt:
gofmt -w *.go */*.go
colcheck *.go */*.go
tags:
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
push:
git push origin master
git push github master


@@ -0,0 +1,220 @@
## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
Spec: https://github.com/mojombo/toml
Compatible with TOML version
[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md)
Documentation: http://godoc.org/github.com/BurntSushi/toml
Installation:
```bash
go get github.com/BurntSushi/toml
```
Try the toml validator:
```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```
[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml)
### Testing
This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.
### Examples
This package works similarly to how the Go standard library handles `XML`
and `JSON`. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys
and values:
```toml
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```
Which could be defined in Go as:
```go
type Config struct {
  Age        int
  Cats       []string
  Pi         float64
  Perfection []int
  DOB        time.Time // requires `import "time"`
}
```
And then decoded with:
```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
  // handle error
}
```
You can also use struct tags if your struct field name doesn't map directly to a
TOML key:
```toml
some_key_NAME = "wat"
```
```go
type TOML struct {
  ObscureKey string `toml:"some_key_NAME"`
}
```
### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into
`time.Duration` values:
```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"

[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```
Which can be decoded with:
```go
type song struct {
  Name     string
  Duration duration
}

type songs struct {
  Song []song
}

var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
  log.Fatal(err)
}

for _, s := range favorites.Song {
  fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```
And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:
```go
type duration struct {
  time.Duration
}

func (d *duration) UnmarshalText(text []byte) error {
  var err error
  d.Duration, err = time.ParseDuration(string(text))
  return err
}
```
### More complex usage
Here's an example of how to load the example from the official spec page:
```toml
# This is a TOML document. Boom.

title = "TOML Example"

[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?

[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true

[servers]

  # You can indent as you please. Tabs or spaces. TOML don't care.
  [servers.alpha]
  ip = "10.0.0.1"
  dc = "eqdc10"

  [servers.beta]
  ip = "10.0.0.2"
  dc = "eqdc10"

[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it

# Line breaks are OK when inside arrays
hosts = [
  "alpha",
  "omega"
]
```
And the corresponding Go types are:
```go
type tomlConfig struct {
  Title   string
  Owner   ownerInfo
  DB      database `toml:"database"`
  Servers map[string]server
  Clients clients
}

type ownerInfo struct {
  Name string
  Org  string `toml:"organization"`
  Bio  string
  DOB  time.Time
}

type database struct {
  Server  string
  Ports   []int
  ConnMax int `toml:"connection_max"`
  Enabled bool
}

type server struct {
  IP string
  DC string
}

type clients struct {
  Data  [][]interface{}
  Hosts []string
}
```
Note that a case insensitive match will be tried if an exact match can't be
found.
A working example of the above can be found in `_examples/example.{go,toml}`.
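As a small illustration of the case-insensitive fallback noted above (not part of the original README; the field and key names here are made up):

```go
type site struct {
  Title string // no exact match for the TOML key "TITLE", so a
  // case-insensitive match finds this field
}

var s site
if _, err := toml.Decode(`TITLE = "TOML Example"`, &s); err != nil {
  // handle error
}
fmt.Println(s.Title) // prints "TOML Example"
```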


@@ -0,0 +1,14 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.


@@ -0,0 +1,14 @@
# Implements the TOML test suite interface
This is an implementation of the interface expected by
[toml-test](https://github.com/BurntSushi/toml-test) for my
[toml parser written in Go](https://github.com/BurntSushi/toml).
In particular, it maps TOML data on `stdin` to a JSON format on `stdout`.
Compatible with TOML version
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
Compatible with `toml-test` version
[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)
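To make the mapping concrete, here is a rough sketch (not part of the original README) of the tagged JSON this command emits for a tiny document, using the `translate` helper defined in the source below:

```go
// Decode a one-key document, then wrap each primitive as {"type", "value"}.
var tmp interface{}
if _, err := toml.Decode(`answer = 42`, &tmp); err != nil {
  log.Fatal(err)
}
json.NewEncoder(os.Stdout).Encode(translate(tmp))
// Output: {"answer":{"type":"integer","value":"42"}}
```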


@@ -0,0 +1,90 @@
// Command toml-test-decoder satisfies the toml-test interface for testing
// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout.
package main
import (
"encoding/json"
"flag"
"fmt"
"log"
"os"
"path"
"time"
"github.com/khlieng/name_pending/Godeps/_workspace/src/github.com/BurntSushi/toml"
)
func init() {
log.SetFlags(0)
flag.Usage = usage
flag.Parse()
}
func usage() {
log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0]))
flag.PrintDefaults()
os.Exit(1)
}
func main() {
if flag.NArg() != 0 {
flag.Usage()
}
var tmp interface{}
if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil {
log.Fatalf("Error decoding TOML: %s", err)
}
typedTmp := translate(tmp)
if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil {
log.Fatalf("Error encoding JSON: %s", err)
}
}
func translate(tomlData interface{}) interface{} {
switch orig := tomlData.(type) {
case map[string]interface{}:
typed := make(map[string]interface{}, len(orig))
for k, v := range orig {
typed[k] = translate(v)
}
return typed
case []map[string]interface{}:
typed := make([]map[string]interface{}, len(orig))
for i, v := range orig {
typed[i] = translate(v).(map[string]interface{})
}
return typed
case []interface{}:
typed := make([]interface{}, len(orig))
for i, v := range orig {
typed[i] = translate(v)
}
// We don't really need to tag arrays, but let's be future proof.
// (If TOML ever supports tuples, we'll need this.)
return tag("array", typed)
case time.Time:
return tag("datetime", orig.Format("2006-01-02T15:04:05Z"))
case bool:
return tag("bool", fmt.Sprintf("%v", orig))
case int64:
return tag("integer", fmt.Sprintf("%d", orig))
case float64:
return tag("float", fmt.Sprintf("%v", orig))
case string:
return tag("string", orig)
}
panic(fmt.Sprintf("Unknown type: %T", tomlData))
}
func tag(typeName string, data interface{}) map[string]interface{} {
return map[string]interface{}{
"type": typeName,
"value": data,
}
}


@@ -0,0 +1,14 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.


@@ -0,0 +1,14 @@
# Implements the TOML test suite interface for TOML encoders
This is an implementation of the interface expected by
[toml-test](https://github.com/BurntSushi/toml-test) for the
[TOML encoder](https://github.com/BurntSushi/toml).
In particular, it maps JSON data on `stdin` to a TOML format on `stdout`.
Compatible with TOML version
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
Compatible with `toml-test` version
[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)


@@ -0,0 +1,131 @@
// Command toml-test-encoder satisfies the toml-test interface for testing
// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout.
package main
import (
"encoding/json"
"flag"
"log"
"os"
"path"
"strconv"
"time"
"github.com/khlieng/name_pending/Godeps/_workspace/src/github.com/BurntSushi/toml"
)
func init() {
log.SetFlags(0)
flag.Usage = usage
flag.Parse()
}
func usage() {
log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0]))
flag.PrintDefaults()
os.Exit(1)
}
func main() {
if flag.NArg() != 0 {
flag.Usage()
}
var tmp interface{}
if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil {
log.Fatalf("Error decoding JSON: %s", err)
}
tomlData := translate(tmp)
if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil {
log.Fatalf("Error encoding TOML: %s", err)
}
}
func translate(typedJson interface{}) interface{} {
switch v := typedJson.(type) {
case map[string]interface{}:
if len(v) == 2 && in("type", v) && in("value", v) {
return untag(v)
}
m := make(map[string]interface{}, len(v))
for k, v2 := range v {
m[k] = translate(v2)
}
return m
case []interface{}:
tabArray := make([]map[string]interface{}, len(v))
for i := range v {
if m, ok := translate(v[i]).(map[string]interface{}); ok {
tabArray[i] = m
} else {
log.Fatalf("JSON arrays may only contain objects. This " +
"corresponds to only tables being allowed in " +
"TOML table arrays.")
}
}
return tabArray
}
log.Fatalf("Unrecognized JSON format '%T'.", typedJson)
panic("unreachable")
}
func untag(typed map[string]interface{}) interface{} {
t := typed["type"].(string)
v := typed["value"]
switch t {
case "string":
return v.(string)
case "integer":
v := v.(string)
n, err := strconv.Atoi(v)
if err != nil {
log.Fatalf("Could not parse '%s' as integer: %s", v, err)
}
return n
case "float":
v := v.(string)
f, err := strconv.ParseFloat(v, 64)
if err != nil {
log.Fatalf("Could not parse '%s' as float64: %s", v, err)
}
return f
case "datetime":
v := v.(string)
t, err := time.Parse("2006-01-02T15:04:05Z", v)
if err != nil {
log.Fatalf("Could not parse '%s' as a datetime: %s", v, err)
}
return t
case "bool":
v := v.(string)
switch v {
case "true":
return true
case "false":
return false
}
log.Fatalf("Could not parse '%s' as a boolean.", v)
case "array":
v := v.([]interface{})
array := make([]interface{}, len(v))
for i := range v {
if m, ok := v[i].(map[string]interface{}); ok {
array[i] = untag(m)
} else {
log.Fatalf("Arrays may only contain other arrays or "+
"primitive values, but found a '%T'.", m)
}
}
return array
}
log.Fatalf("Unrecognized tag type '%s'.", t)
panic("unreachable")
}
func in(key string, m map[string]interface{}) bool {
_, ok := m[key]
return ok
}


@@ -0,0 +1,14 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.


@@ -0,0 +1,22 @@
# TOML Validator
If Go is installed, it's simple to try it out:
```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```
You can see the types of every key in a TOML file with:
```bash
tomlv -types some-toml-file.toml
```
At the moment, only one error message is reported at a time. Error messages
include line numbers. No output means that the files given are valid TOML, or
there is a bug in `tomlv`.
Compatible with TOML version
[v0.1.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.1.0.md)


@@ -0,0 +1,61 @@
// Command tomlv validates TOML documents and prints each key's type.
package main
import (
"flag"
"fmt"
"log"
"os"
"path"
"strings"
"text/tabwriter"
"github.com/khlieng/name_pending/Godeps/_workspace/src/github.com/BurntSushi/toml"
)
var (
flagTypes = false
)
func init() {
log.SetFlags(0)
flag.BoolVar(&flagTypes, "types", flagTypes,
"When set, the types of every defined key will be shown.")
flag.Usage = usage
flag.Parse()
}
func usage() {
log.Printf("Usage: %s toml-file [ toml-file ... ]\n",
path.Base(os.Args[0]))
flag.PrintDefaults()
os.Exit(1)
}
func main() {
if flag.NArg() < 1 {
flag.Usage()
}
for _, f := range flag.Args() {
var tmp interface{}
md, err := toml.DecodeFile(f, &tmp)
if err != nil {
log.Fatalf("Error in '%s': %s", f, err)
}
if flagTypes {
printTypes(md)
}
}
}
func printTypes(md toml.MetaData) {
tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
for _, key := range md.Keys() {
fmt.Fprintf(tabw, "%s%s\t%s\n",
strings.Repeat(" ", len(key)-1), key, md.Type(key...))
}
tabw.Flush()
}


@@ -0,0 +1,492 @@
package toml
import (
"fmt"
"io"
"io/ioutil"
"math"
"reflect"
"strings"
"time"
)
var e = fmt.Errorf
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
UnmarshalTOML(interface{}) error
}
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
func Unmarshal(p []byte, v interface{}) error {
_, err := Decode(string(p), v)
return err
}
// Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
//
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
//
// The underlying representation of a `Primitive` value is subject to change.
// Do not rely on it.
//
// N.B. Primitive values are still parsed, so using them will only avoid
// the overhead of reflection. They can be useful when you don't know the
// exact type of TOML data until run time.
type Primitive struct {
undecoded interface{}
context Key
}
// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]bool)}
return md.unify(primValue.undecoded, rvalue(v))
}
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}
// Decode will decode the contents of `data` in TOML format into a pointer
// `v`.
//
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
// used interchangeably.)
//
// TOML arrays of tables correspond to either a slice of structs or a slice
// of maps.
//
// TOML datetimes correspond to Go `time.Time` values.
//
// All other TOML types (float, string, int, bool and array) correspond
// to the obvious Go types.
//
// An exception to the above rules is if a type implements the
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
// (floats, strings, integers, booleans and datetimes) will be converted to
// a byte string and given to the value's UnmarshalText method. See the
// Unmarshaler example for a demonstration with time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go
// struct. The special `toml` struct tag may be used to map TOML keys to
// struct fields that don't match the key name exactly. (See the example.)
// A case insensitive match to struct names will be tried if an exact match
// can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there
// may exist TOML values that cannot be placed into your representation, and
// there may be parts of your representation that do not correspond to
// TOML values. This loose mapping can be made stricter by using the IsDefined
// and/or Undecoded methods on the MetaData returned.
//
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
p, err := parse(data)
if err != nil {
return MetaData{}, err
}
md := MetaData{
p.mapping, p.types, p.ordered,
make(map[string]bool, len(p.ordered)), nil,
}
return md, md.unify(p.mapping, rvalue(v))
}
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at `fpath` and decode it for you.
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadFile(fpath)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// DecodeReader is just like Decode, except it will consume all bytes
// from the reader and decode it for you.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadAll(r)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// Save the undecoded data and the key context into the primitive
// value.
context := make(Key, len(md.context))
copy(context, md.context)
rv.Set(reflect.ValueOf(Primitive{
undecoded: data,
context: context,
}))
return nil
}
// Special case. Unmarshaler Interface support.
if rv.CanAddr() {
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
return v.UnmarshalTOML(data)
}
}
// Special case. Handle time.Time values specifically.
// TODO: Remove this code when we decide to drop support for Go 1.1.
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
// interfaces.
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
return md.unifyDatetime(data, rv)
}
// Special case. Look for a value satisfying the TextUnmarshaler interface.
if v, ok := rv.Interface().(TextUnmarshaler); ok {
return md.unifyText(data, v)
}
// BUG(burntsushi)
// The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML
// hash or array. In particular, the unmarshaler should only be applied
// to primitive TOML values. But at this point, it will be applied to
// all kinds of values and produce an incorrect error whenever those values
// are hashes or arrays (including arrays of tables).
k := rv.Kind()
// laziness
if k >= reflect.Int && k <= reflect.Uint64 {
return md.unifyInt(data, rv)
}
switch k {
case reflect.Ptr:
elem := reflect.New(rv.Type().Elem())
err := md.unify(data, reflect.Indirect(elem))
if err != nil {
return err
}
rv.Set(elem)
return nil
case reflect.Struct:
return md.unifyStruct(data, rv)
case reflect.Map:
return md.unifyMap(data, rv)
case reflect.Array:
return md.unifyArray(data, rv)
case reflect.Slice:
return md.unifySlice(data, rv)
case reflect.String:
return md.unifyString(data, rv)
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
// we only support empty interfaces.
if rv.NumMethod() > 0 {
return e("Unsupported type '%s'.", rv.Kind())
}
return md.unifyAnything(data, rv)
case reflect.Float32:
fallthrough
case reflect.Float64:
return md.unifyFloat64(data, rv)
}
return e("Unsupported type '%s'.", rv.Kind())
}
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
return mismatch(rv, "map", mapping)
}
for key, datum := range tmap {
var f *field
fields := cachedTypeFields(rv.Type())
for i := range fields {
ff := &fields[i]
if ff.name == key {
f = ff
break
}
if f == nil && strings.EqualFold(ff.name, key) {
f = ff
}
}
if f != nil {
subv := rv
for _, i := range f.index {
subv = indirect(subv.Field(i))
}
if isUnifiable(subv) {
md.decoded[md.context.add(key).String()] = true
md.context = append(md.context, key)
if err := md.unify(datum, subv); err != nil {
return e("Type mismatch for '%s.%s': %s",
rv.Type().String(), f.name, err)
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
// Bad user! No soup for you!
return e("Field '%s.%s' is unexported, and therefore cannot "+
"be loaded with reflection.", rv.Type().String(), f.name)
}
}
}
return nil
}
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
return badtype("map", mapping)
}
if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type()))
}
for k, v := range tmap {
md.decoded[md.context.add(k).String()] = true
md.context = append(md.context, k)
rvkey := indirect(reflect.New(rv.Type().Key()))
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
if err := md.unify(v, rvval); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
rvkey.SetString(k)
rv.SetMapIndex(rvkey, rvval)
}
return nil
}
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
return badtype("slice", data)
}
sliceLen := datav.Len()
if sliceLen != rv.Len() {
return e("expected array length %d; got TOML array of length %d",
rv.Len(), sliceLen)
}
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
return badtype("slice", data)
}
sliceLen := datav.Len()
if rv.IsNil() {
rv.Set(reflect.MakeSlice(rv.Type(), sliceLen, sliceLen))
}
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
sliceLen := data.Len()
for i := 0; i < sliceLen; i++ {
v := data.Index(i).Interface()
sliceval := indirect(rv.Index(i))
if err := md.unify(v, sliceval); err != nil {
return err
}
}
return nil
}
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
if _, ok := data.(time.Time); ok {
rv.Set(reflect.ValueOf(data))
return nil
}
return badtype("time.Time", data)
}
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
if s, ok := data.(string); ok {
rv.SetString(s)
return nil
}
return badtype("string", data)
}
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
if num, ok := data.(float64); ok {
switch rv.Kind() {
case reflect.Float32:
fallthrough
case reflect.Float64:
rv.SetFloat(num)
default:
panic("bug")
}
return nil
}
return badtype("float", data)
}
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
switch rv.Kind() {
case reflect.Int, reflect.Int64:
// No bounds checking necessary.
case reflect.Int8:
if num < math.MinInt8 || num > math.MaxInt8 {
return e("Value '%d' is out of range for int8.", num)
}
case reflect.Int16:
if num < math.MinInt16 || num > math.MaxInt16 {
return e("Value '%d' is out of range for int16.", num)
}
case reflect.Int32:
if num < math.MinInt32 || num > math.MaxInt32 {
return e("Value '%d' is out of range for int32.", num)
}
}
rv.SetInt(num)
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
unum := uint64(num)
switch rv.Kind() {
case reflect.Uint, reflect.Uint64:
// No bounds checking necessary.
case reflect.Uint8:
if num < 0 || unum > math.MaxUint8 {
return e("Value '%d' is out of range for uint8.", num)
}
case reflect.Uint16:
if num < 0 || unum > math.MaxUint16 {
return e("Value '%d' is out of range for uint16.", num)
}
case reflect.Uint32:
if num < 0 || unum > math.MaxUint32 {
return e("Value '%d' is out of range for uint32.", num)
}
}
rv.SetUint(unum)
} else {
panic("unreachable")
}
return nil
}
return badtype("integer", data)
}
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
if b, ok := data.(bool); ok {
rv.SetBool(b)
return nil
}
return badtype("boolean", data)
}
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
rv.Set(reflect.ValueOf(data))
return nil
}
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
return err
}
s = string(text)
case fmt.Stringer:
s = sdata.String()
case string:
s = sdata
case bool:
s = fmt.Sprintf("%v", sdata)
case int64:
s = fmt.Sprintf("%d", sdata)
case float64:
s = fmt.Sprintf("%f", sdata)
default:
return badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
return err
}
return nil
}
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value {
return indirect(reflect.ValueOf(v))
}
// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanAddr() {
pv := v.Addr()
if _, ok := pv.Interface().(TextUnmarshaler); ok {
return pv
}
}
return v
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
return indirect(reflect.Indirect(v))
}
func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
if _, ok := rv.Interface().(TextUnmarshaler); ok {
return true
}
return false
}
func badtype(expected string, data interface{}) error {
return e("Expected %s but found '%T'.", expected, data)
}
func mismatch(user reflect.Value, expected string, data interface{}) error {
return e("Type mismatch for %s. Expected %s but found '%T'.",
user.Type().String(), expected, data)
}


@@ -0,0 +1,122 @@
package toml
import "strings"
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
mapping map[string]interface{}
types map[string]tomlType
keys []Key
decoded map[string]bool
context Key // Used only during decoding.
}
// IsDefined returns true if the key given exists in the TOML data. The key
// should be specified hierarchically, e.g.,
//
// // access the TOML key 'a.b.c'
// IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key is given. Keys are case sensitive.
func (md *MetaData) IsDefined(key ...string) bool {
if len(key) == 0 {
return false
}
var hash map[string]interface{}
var ok bool
var hashOrVal interface{} = md.mapping
for _, k := range key {
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
return false
}
if hashOrVal, ok = hash[k]; !ok {
return false
}
}
return true
}
// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that
// does not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok {
return typ.typeString()
}
return ""
}
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string
func (k Key) String() string {
return strings.Join(k, ".")
}
func (k Key) maybeQuotedAll() string {
var ss []string
for i := range k {
ss = append(ss, k.maybeQuoted(i))
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
quote := false
for _, c := range k[i] {
if !isBareKeyChar(c) {
quote = true
break
}
}
if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
} else {
return k[i]
}
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
return md.keys
}
// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a Primitive value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
// and so no keys will be considered decoded.
//
// In this sense, the Undecoded keys correspond to keys in the TOML document
// that do not have a concrete type in your representation.
func (md *MetaData) Undecoded() []Key {
undecoded := make([]Key, 0, len(md.keys))
for _, key := range md.keys {
if !md.decoded[key.String()] {
undecoded = append(undecoded, key)
}
}
return undecoded
}


@@ -0,0 +1,950 @@
package toml
import (
"fmt"
"log"
"reflect"
"testing"
"time"
)
func init() {
log.SetFlags(0)
}
func TestDecodeSimple(t *testing.T) {
var testSimple = `
age = 250
andrew = "gallant"
kait = "brady"
now = 1987-07-05T05:45:00Z
yesOrNo = true
pi = 3.14
colors = [
["red", "green", "blue"],
["cyan", "magenta", "yellow", "black"],
]
[My.Cats]
plato = "cat 1"
cauchy = "cat 2"
`
type cats struct {
Plato string
Cauchy string
}
type simple struct {
Age int
Colors [][]string
Pi float64
YesOrNo bool
Now time.Time
Andrew string
Kait string
My map[string]cats
}
var val simple
_, err := Decode(testSimple, &val)
if err != nil {
t.Fatal(err)
}
now, err := time.Parse("2006-01-02T15:04:05", "1987-07-05T05:45:00")
if err != nil {
panic(err)
}
var answer = simple{
Age: 250,
Andrew: "gallant",
Kait: "brady",
Now: now,
YesOrNo: true,
Pi: 3.14,
Colors: [][]string{
{"red", "green", "blue"},
{"cyan", "magenta", "yellow", "black"},
},
My: map[string]cats{
"Cats": cats{Plato: "cat 1", Cauchy: "cat 2"},
},
}
if !reflect.DeepEqual(val, answer) {
t.Fatalf("Expected\n-----\n%#v\n-----\nbut got\n-----\n%#v\n",
answer, val)
}
}
func TestDecodeEmbedded(t *testing.T) {
type Dog struct{ Name string }
type Age int
tests := map[string]struct {
input string
decodeInto interface{}
wantDecoded interface{}
}{
"embedded struct": {
input: `Name = "milton"`,
decodeInto: &struct{ Dog }{},
wantDecoded: &struct{ Dog }{Dog{"milton"}},
},
"embedded non-nil pointer to struct": {
input: `Name = "milton"`,
decodeInto: &struct{ *Dog }{},
wantDecoded: &struct{ *Dog }{&Dog{"milton"}},
},
"embedded nil pointer to struct": {
input: ``,
decodeInto: &struct{ *Dog }{},
wantDecoded: &struct{ *Dog }{nil},
},
"embedded int": {
input: `Age = -5`,
decodeInto: &struct{ Age }{},
wantDecoded: &struct{ Age }{-5},
},
}
for label, test := range tests {
_, err := Decode(test.input, test.decodeInto)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) {
t.Errorf("%s: want decoded == %+v, got %+v",
label, test.wantDecoded, test.decodeInto)
}
}
}
func TestTableArrays(t *testing.T) {
var tomlTableArrays = `
[[albums]]
name = "Born to Run"
[[albums.songs]]
name = "Jungleland"
[[albums.songs]]
name = "Meeting Across the River"
[[albums]]
name = "Born in the USA"
[[albums.songs]]
name = "Glory Days"
[[albums.songs]]
name = "Dancing in the Dark"
`
type Song struct {
Name string
}
type Album struct {
Name string
Songs []Song
}
type Music struct {
Albums []Album
}
expected := Music{[]Album{
{"Born to Run", []Song{{"Jungleland"}, {"Meeting Across the River"}}},
{"Born in the USA", []Song{{"Glory Days"}, {"Dancing in the Dark"}}},
}}
var got Music
if _, err := Decode(tomlTableArrays, &got); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(expected, got) {
t.Fatalf("\n%#v\n!=\n%#v\n", expected, got)
}
}
// Case insensitive matching tests.
// A bit more comprehensive than needed given the current implementation,
// but implementations change.
// Probably still missing demonstrations of some ugly corner cases regarding
// case insensitive matching and multiple fields.
func TestCase(t *testing.T) {
var caseToml = `
tOpString = "string"
tOpInt = 1
tOpFloat = 1.1
tOpBool = true
tOpdate = 2006-01-02T15:04:05Z
tOparray = [ "array" ]
Match = "i should be in Match only"
MatcH = "i should be in MatcH only"
once = "just once"
[nEst.eD]
nEstedString = "another string"
`
type InsensitiveEd struct {
NestedString string
}
type InsensitiveNest struct {
Ed InsensitiveEd
}
type Insensitive struct {
TopString string
TopInt int
TopFloat float64
TopBool bool
TopDate time.Time
TopArray []string
Match string
MatcH string
Once string
OncE string
Nest InsensitiveNest
}
tme, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5])
if err != nil {
panic(err)
}
expected := Insensitive{
TopString: "string",
TopInt: 1,
TopFloat: 1.1,
TopBool: true,
TopDate: tme,
TopArray: []string{"array"},
MatcH: "i should be in MatcH only",
Match: "i should be in Match only",
Once: "just once",
OncE: "",
Nest: InsensitiveNest{
Ed: InsensitiveEd{NestedString: "another string"},
},
}
var got Insensitive
if _, err := Decode(caseToml, &got); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(expected, got) {
t.Fatalf("\n%#v\n!=\n%#v\n", expected, got)
}
}
func TestPointers(t *testing.T) {
type Object struct {
Type string
Description string
}
type Dict struct {
NamedObject map[string]*Object
BaseObject *Object
Strptr *string
Strptrs []*string
}
s1, s2, s3 := "blah", "abc", "def"
expected := &Dict{
Strptr: &s1,
Strptrs: []*string{&s2, &s3},
NamedObject: map[string]*Object{
"foo": {"FOO", "fooooo!!!"},
"bar": {"BAR", "ba-ba-ba-ba-barrrr!!!"},
},
BaseObject: &Object{"BASE", "da base"},
}
ex1 := `
Strptr = "blah"
Strptrs = ["abc", "def"]
[NamedObject.foo]
Type = "FOO"
Description = "fooooo!!!"
[NamedObject.bar]
Type = "BAR"
Description = "ba-ba-ba-ba-barrrr!!!"
[BaseObject]
Type = "BASE"
Description = "da base"
`
dict := new(Dict)
_, err := Decode(ex1, dict)
if err != nil {
t.Errorf("Decode error: %v", err)
}
if !reflect.DeepEqual(expected, dict) {
t.Fatalf("\n%#v\n!=\n%#v\n", expected, dict)
}
}
type sphere struct {
Center [3]float64
Radius float64
}
func TestDecodeSimpleArray(t *testing.T) {
var s1 sphere
if _, err := Decode(`center = [0.0, 1.5, 0.0]`, &s1); err != nil {
t.Fatal(err)
}
}
func TestDecodeArrayWrongSize(t *testing.T) {
var s1 sphere
if _, err := Decode(`center = [0.1, 2.3]`, &s1); err == nil {
t.Fatal("Expected array type mismatch error")
}
}
func TestDecodeLargeIntoSmallInt(t *testing.T) {
type table struct {
Value int8
}
var tab table
if _, err := Decode(`value = 500`, &tab); err == nil {
t.Fatal("Expected integer out-of-bounds error.")
}
}
func TestDecodeSizedInts(t *testing.T) {
type table struct {
U8 uint8
U16 uint16
U32 uint32
U64 uint64
U uint
I8 int8
I16 int16
I32 int32
I64 int64
I int
}
answer := table{1, 1, 1, 1, 1, -1, -1, -1, -1, -1}
toml := `
u8 = 1
u16 = 1
u32 = 1
u64 = 1
u = 1
i8 = -1
i16 = -1
i32 = -1
i64 = -1
i = -1
`
var tab table
if _, err := Decode(toml, &tab); err != nil {
t.Fatal(err.Error())
}
if answer != tab {
t.Fatalf("Expected %#v but got %#v", answer, tab)
}
}
func TestUnmarshaler(t *testing.T) {
var tomlBlob = `
[dishes.hamboogie]
name = "Hamboogie with fries"
price = 10.99
[[dishes.hamboogie.ingredients]]
name = "Bread Bun"
[[dishes.hamboogie.ingredients]]
name = "Lettuce"
[[dishes.hamboogie.ingredients]]
name = "Real Beef Patty"
[[dishes.hamboogie.ingredients]]
name = "Tomato"
[dishes.eggsalad]
name = "Egg Salad with rice"
price = 3.99
[[dishes.eggsalad.ingredients]]
name = "Egg"
[[dishes.eggsalad.ingredients]]
name = "Mayo"
[[dishes.eggsalad.ingredients]]
name = "Rice"
`
m := &menu{}
if _, err := Decode(tomlBlob, m); err != nil {
log.Fatal(err)
}
if len(m.Dishes) != 2 {
t.Log("two dishes should be loaded with UnmarshalTOML()")
t.Errorf("expected %d but got %d", 2, len(m.Dishes))
}
eggSalad := m.Dishes["eggsalad"]
if _, ok := interface{}(eggSalad).(dish); !ok {
t.Errorf("expected a dish")
}
if eggSalad.Name != "Egg Salad with rice" {
t.Errorf("expected the dish to be named 'Egg Salad with rice'")
}
if len(eggSalad.Ingredients) != 3 {
t.Log("dish should be loaded with UnmarshalTOML()")
t.Errorf("expected %d but got %d", 3, len(eggSalad.Ingredients))
}
found := false
for _, i := range eggSalad.Ingredients {
if i.Name == "Rice" {
found = true
break
}
}
if !found {
t.Error("Rice was not loaded in UnmarshalTOML()")
}
// test on a value - must be passed as *
o := menu{}
if _, err := Decode(tomlBlob, &o); err != nil {
log.Fatal(err)
}
}
type menu struct {
Dishes map[string]dish
}
func (m *menu) UnmarshalTOML(p interface{}) error {
m.Dishes = make(map[string]dish)
data, _ := p.(map[string]interface{})
dishes := data["dishes"].(map[string]interface{})
for n, v := range dishes {
if d, ok := v.(map[string]interface{}); ok {
nd := dish{}
nd.UnmarshalTOML(d)
m.Dishes[n] = nd
} else {
return fmt.Errorf("not a dish")
}
}
return nil
}
type dish struct {
Name string
Price float32
Ingredients []ingredient
}
func (d *dish) UnmarshalTOML(p interface{}) error {
data, _ := p.(map[string]interface{})
d.Name, _ = data["name"].(string)
d.Price, _ = data["price"].(float32)
ingredients, _ := data["ingredients"].([]map[string]interface{})
for _, e := range ingredients {
n, _ := interface{}(e).(map[string]interface{})
name, _ := n["name"].(string)
i := ingredient{name}
d.Ingredients = append(d.Ingredients, i)
}
return nil
}
type ingredient struct {
Name string
}
func ExampleMetaData_PrimitiveDecode() {
var md MetaData
var err error
var tomlBlob = `
ranking = ["Springsteen", "J Geils"]
[bands.Springsteen]
started = 1973
albums = ["Greetings", "WIESS", "Born to Run", "Darkness"]
[bands."J Geils"]
started = 1970
albums = ["The J. Geils Band", "Full House", "Blow Your Face Out"]
`
type band struct {
Started int
Albums []string
}
type classics struct {
Ranking []string
Bands map[string]Primitive
}
// Do the initial decode. Reflection is delayed on Primitive values.
var music classics
if md, err = Decode(tomlBlob, &music); err != nil {
log.Fatal(err)
}
// MetaData still includes information on Primitive values.
fmt.Printf("Is `bands.Springsteen` defined? %v\n",
md.IsDefined("bands", "Springsteen"))
// Decode primitive data into Go values.
for _, artist := range music.Ranking {
// A band is a primitive value, so we need to decode it to get a
// real `band` value.
primValue := music.Bands[artist]
var aBand band
if err = md.PrimitiveDecode(primValue, &aBand); err != nil {
log.Fatal(err)
}
fmt.Printf("%s started in %d.\n", artist, aBand.Started)
}
// Check to see if there were any fields left undecoded.
// Note that this won't be empty before decoding the Primitive value!
fmt.Printf("Undecoded: %q\n", md.Undecoded())
// Output:
// Is `bands.Springsteen` defined? true
// Springsteen started in 1973.
// J Geils started in 1970.
// Undecoded: []
}
func ExampleDecode() {
var tomlBlob = `
# Some comments.
[alpha]
ip = "10.0.0.1"
[alpha.config]
Ports = [ 8001, 8002 ]
Location = "Toronto"
Created = 1987-07-05T05:45:00Z
[beta]
ip = "10.0.0.2"
[beta.config]
Ports = [ 9001, 9002 ]
Location = "New Jersey"
Created = 1887-01-05T05:55:00Z
`
type serverConfig struct {
Ports []int
Location string
Created time.Time
}
type server struct {
IP string `toml:"ip"`
Config serverConfig `toml:"config"`
}
type servers map[string]server
var config servers
if _, err := Decode(tomlBlob, &config); err != nil {
log.Fatal(err)
}
for _, name := range []string{"alpha", "beta"} {
s := config[name]
fmt.Printf("Server: %s (ip: %s) in %s created on %s\n",
name, s.IP, s.Config.Location,
s.Config.Created.Format("2006-01-02"))
fmt.Printf("Ports: %v\n", s.Config.Ports)
}
// Output:
// Server: alpha (ip: 10.0.0.1) in Toronto created on 1987-07-05
// Ports: [8001 8002]
// Server: beta (ip: 10.0.0.2) in New Jersey created on 1887-01-05
// Ports: [9001 9002]
}
type duration struct {
time.Duration
}
func (d *duration) UnmarshalText(text []byte) error {
var err error
d.Duration, err = time.ParseDuration(string(text))
return err
}
// Example Unmarshaler shows how to decode TOML strings into your own
// custom data type.
func Example_unmarshaler() {
blob := `
[[song]]
name = "Thunder Road"
duration = "4m49s"
[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
`
type song struct {
Name string
Duration duration
}
type songs struct {
Song []song
}
var favorites songs
if _, err := Decode(blob, &favorites); err != nil {
log.Fatal(err)
}
// Code to implement the TextUnmarshaler interface for `duration`:
//
// type duration struct {
// time.Duration
// }
//
// func (d *duration) UnmarshalText(text []byte) error {
// var err error
// d.Duration, err = time.ParseDuration(string(text))
// return err
// }
for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
// Output:
// Thunder Road (4m49s)
// Stairway to Heaven (8m3s)
}
// Example StrictDecoding shows how to detect whether there are keys in the
// TOML document that weren't decoded into the value given. This is useful
// for returning an error to the user if they've included extraneous fields
// in their configuration.
func Example_strictDecoding() {
var blob = `
key1 = "value1"
key2 = "value2"
key3 = "value3"
`
type config struct {
Key1 string
Key3 string
}
var conf config
md, err := Decode(blob, &conf)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Undecoded keys: %q\n", md.Undecoded())
// Output:
// Undecoded keys: ["key2"]
}
// Example UnmarshalTOML shows how to implement a struct type that knows how to
// unmarshal itself. The struct must take full responsibility for mapping the
// values passed into the struct. The method may be used with interfaces in a
// struct in cases where the actual type is not known until the data is
// examined.
func Example_unmarshalTOML() {
var blob = `
[[parts]]
type = "valve"
id = "valve-1"
size = 1.2
rating = 4
[[parts]]
type = "valve"
id = "valve-2"
size = 2.1
rating = 5
[[parts]]
type = "pipe"
id = "pipe-1"
length = 2.1
diameter = 12
[[parts]]
type = "cable"
id = "cable-1"
length = 12
rating = 3.1
`
o := &order{}
err := Unmarshal([]byte(blob), o)
if err != nil {
log.Fatal(err)
}
fmt.Println(len(o.parts))
for _, part := range o.parts {
fmt.Println(part.Name())
}
// Code to implement UnmarshalTOML.
// type order struct {
// // NOTE `order.parts` is a private slice of type `part` which is an
// // interface and may only be loaded from toml using the
// // UnmarshalTOML() method of the Unmarshaler interface.
// parts parts
// }
// func (o *order) UnmarshalTOML(data interface{}) error {
// // NOTE the example below contains detailed type casting to show how
// // the 'data' is retrieved. In operational use, a type cast wrapper
// // may be preferred, e.g.
// //
// // func AsMap(v interface{}) (map[string]interface{}, error) {
// // return v.(map[string]interface{})
// // }
// //
// // resulting in:
// // d, _ := AsMap(data)
// //
// d, _ := data.(map[string]interface{})
// parts, _ := d["parts"].([]map[string]interface{})
// for _, p := range parts {
// typ, _ := p["type"].(string)
// id, _ := p["id"].(string)
// // detect the type of part and handle each case
// switch p["type"] {
// case "valve":
// size := float32(p["size"].(float64))
// rating := int(p["rating"].(int64))
// valve := &valve{
// Type: typ,
// ID: id,
// Size: size,
// Rating: rating,
// }
// o.parts = append(o.parts, valve)
// case "pipe":
// length := float32(p["length"].(float64))
// diameter := int(p["diameter"].(int64))
// pipe := &pipe{
// Type: typ,
// ID: id,
// Length: length,
// Diameter: diameter,
// }
// o.parts = append(o.parts, pipe)
// case "cable":
// length := int(p["length"].(int64))
// rating := float32(p["rating"].(float64))
// cable := &cable{
// Type: typ,
// ID: id,
// Length: length,
// Rating: rating,
// }
// o.parts = append(o.parts, cable)
// }
// }
// return nil
// }
// type parts []part
// type part interface {
// Name() string
// }
// type valve struct {
// Type string
// ID string
// Size float32
// Rating int
// }
// func (v *valve) Name() string {
// return fmt.Sprintf("VALVE: %s", v.ID)
// }
// type pipe struct {
// Type string
// ID string
// Length float32
// Diameter int
// }
// func (p *pipe) Name() string {
// return fmt.Sprintf("PIPE: %s", p.ID)
// }
// type cable struct {
// Type string
// ID string
// Length int
// Rating float32
// }
// func (c *cable) Name() string {
// return fmt.Sprintf("CABLE: %s", c.ID)
// }
// Output:
// 4
// VALVE: valve-1
// VALVE: valve-2
// PIPE: pipe-1
// CABLE: cable-1
}
type order struct {
// NOTE `order.parts` is a private slice of type `part` which is an
// interface and may only be loaded from toml using the UnmarshalTOML()
// method of the Unmarshaler interface.
parts parts
}
func (o *order) UnmarshalTOML(data interface{}) error {
// NOTE the example below contains detailed type casting to show how
// the 'data' is retrieved. In operational use, a type cast wrapper
// may be preferred, e.g.
//
// func AsMap(v interface{}) (map[string]interface{}, error) {
// return v.(map[string]interface{})
// }
//
// resulting in:
// d, _ := AsMap(data)
//
d, _ := data.(map[string]interface{})
parts, _ := d["parts"].([]map[string]interface{})
for _, p := range parts {
typ, _ := p["type"].(string)
id, _ := p["id"].(string)
// detect the type of part and handle each case
switch p["type"] {
case "valve":
size := float32(p["size"].(float64))
rating := int(p["rating"].(int64))
valve := &valve{
Type: typ,
ID: id,
Size: size,
Rating: rating,
}
o.parts = append(o.parts, valve)
case "pipe":
length := float32(p["length"].(float64))
diameter := int(p["diameter"].(int64))
pipe := &pipe{
Type: typ,
ID: id,
Length: length,
Diameter: diameter,
}
o.parts = append(o.parts, pipe)
case "cable":
length := int(p["length"].(int64))
rating := float32(p["rating"].(float64))
cable := &cable{
Type: typ,
ID: id,
Length: length,
Rating: rating,
}
o.parts = append(o.parts, cable)
}
}
return nil
}
type parts []part
type part interface {
Name() string
}
type valve struct {
Type string
ID string
Size float32
Rating int
}
func (v *valve) Name() string {
return fmt.Sprintf("VALVE: %s", v.ID)
}
type pipe struct {
Type string
ID string
Length float32
Diameter int
}
func (p *pipe) Name() string {
return fmt.Sprintf("PIPE: %s", p.ID)
}
type cable struct {
Type string
ID string
Length int
Rating float32
}
func (c *cable) Name() string {
return fmt.Sprintf("CABLE: %s", c.ID)
}


@@ -0,0 +1,27 @@
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
The specification implemented: https://github.com/mojombo/toml
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.
Testing
There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.
The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test
The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/
package toml


@@ -0,0 +1,551 @@
package toml
import (
"bufio"
"errors"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
type tomlEncodeError struct{ error }
var (
errArrayMixedElementTypes = errors.New(
"can't encode array with mixed element types")
errArrayNilElement = errors.New(
"can't encode array with nil element")
errNonString = errors.New(
"can't encode a map with non-string key type")
errAnonNonStruct = errors.New(
"can't encode an anonymous field that is not a struct")
errArrayNoTable = errors.New(
"TOML array element can't contain a table")
errNoKey = errors.New(
"top-level values must be a Go map or struct")
errAnything = errors.New("") // used in testing
)
var quotedReplacer = strings.NewReplacer(
"\t", "\\t",
"\n", "\\n",
"\r", "\\r",
"\"", "\\\"",
"\\", "\\\\",
)
// Encoder controls the encoding of Go values to a TOML document to some
// io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
// A single indentation level. By default it is two spaces.
Indent string
// hasWritten is whether we have written any output to w yet.
hasWritten bool
w *bufio.Writer
}
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
// given. By default, a single indentation level is 2 spaces.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
w: bufio.NewWriter(w),
Indent: "  ",
}
}
// Encode writes a TOML representation of the Go value to the underlying
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
//
// The mapping between Go values and TOML values should be precisely the same
// as for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
func (enc *Encoder) Encode(v interface{}) error {
rv := eindirect(reflect.ValueOf(v))
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
return err
}
return enc.w.Flush()
}
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
defer func() {
if r := recover(); r != nil {
if terr, ok := r.(tomlEncodeError); ok {
err = terr.error
return
}
panic(r)
}
}()
enc.encode(key, rv)
return nil
}
func (enc *Encoder) encode(key Key, rv reflect.Value) {
// Special case. Time needs to be in ISO8601 format.
// Special case. If we can marshal the type to text, then we use that.
// Basically, this prevents the encoder from handling these types as
// generic structs (or whatever the underlying type of a TextMarshaler is).
switch rv.Interface().(type) {
case time.Time, TextMarshaler:
enc.keyEqElement(key, rv)
return
}
k := rv.Kind()
switch k {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
enc.keyEqElement(key, rv)
case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv)
} else {
enc.keyEqElement(key, rv)
}
case reflect.Interface:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Map:
if rv.IsNil() {
return
}
enc.eTable(key, rv)
case reflect.Ptr:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Struct:
enc.eTable(key, rv)
default:
panic(e("Unsupported type for key '%s': %s", key, k))
}
}
// eElement encodes any value that can be an array element (primitives and
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) {
case time.Time:
// Special case time.Time as a primitive. Has to come before
// TextMarshaler below because time.Time implements
// encoding.TextMarshaler, but we need to always use UTC.
enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
return
case TextMarshaler:
// Special case. Use text marshaler if it's available for this value.
if s, err := v.MarshalText(); err != nil {
encPanic(err)
} else {
enc.writeQuoted(string(s))
}
return
}
switch rv.Kind() {
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
case reflect.Float64:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Interface:
enc.eElement(rv.Elem())
case reflect.String:
enc.writeQuoted(rv.String())
default:
panic(e("Unexpected primitive type: %s", rv.Kind()))
}
}
// By the TOML spec, all floats must have a decimal point with at least one
// digit on either side of it.
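// For example, floatAddDecimal("3") returns "3.0", while "3.14" is returned
// unchanged.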
func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") {
return fstr + ".0"
}
return fstr
}
func (enc *Encoder) writeQuoted(s string) {
enc.wf("\"%s\"", quotedReplacer.Replace(s))
}
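// eArrayOrSliceElement writes an inline TOML array; for example, a Go
// []int{1, 2, 3} is rendered as [1, 2, 3].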
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
length := rv.Len()
enc.wf("[")
for i := 0; i < length; i++ {
elem := rv.Index(i)
enc.eElement(elem)
if i != length-1 {
enc.wf(", ")
}
}
enc.wf("]")
}
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
for i := 0; i < rv.Len(); i++ {
trv := rv.Index(i)
if isNil(trv) {
continue
}
panicIfInvalidKey(key)
enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
enc.eMapOrStruct(key, trv)
}
}
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra new line between top-level tables.
// (The newline isn't written if nothing else has been written though.)
enc.newline()
}
if len(key) > 0 {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
}
enc.eMapOrStruct(key, rv)
}
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
switch rv := eindirect(rv); rv.Kind() {
case reflect.Map:
enc.eMap(key, rv)
case reflect.Struct:
enc.eStruct(key, rv)
default:
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
}
}
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
rt := rv.Type()
if rt.Key().Kind() != reflect.String {
encPanic(errNonString)
}
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
mapKeysSub = append(mapKeysSub, k)
} else {
mapKeysDirect = append(mapKeysDirect, k)
}
}
var writeMapKeys = func(mapKeys []string) {
sort.Strings(mapKeys)
for _, mapKey := range mapKeys {
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
if isNil(mrv) {
// Don't write anything for nil fields.
continue
}
enc.encode(key.add(mapKey), mrv)
}
}
writeMapKeys(mapKeysDirect)
writeMapKeys(mapKeysSub)
}
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
// Write keys for fields directly under this key first, because if we write
// a field that creates a new table, then all keys under it will be in that
// table (not the one we're writing here).
rt := rv.Type()
var fieldsDirect, fieldsSub [][]int
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
// skip unexported fields
if f.PkgPath != "" {
continue
}
frv := rv.Field(i)
if f.Anonymous {
frv := eindirect(frv)
t := frv.Type()
if t.Kind() != reflect.Struct {
encPanic(errAnonNonStruct)
}
addFields(t, frv, f.Index)
} else if typeIsHash(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
}
}
}
addFields(rt, rv, nil)
var writeFields = func(fields [][]int) {
for _, fieldIndex := range fields {
sft := rt.FieldByIndex(fieldIndex)
sf := rv.FieldByIndex(fieldIndex)
if isNil(sf) {
// Don't write anything for nil fields.
continue
}
keyName := sft.Tag.Get("toml")
if keyName == "-" {
continue
}
if keyName == "" {
keyName = sft.Name
}
keyName, opts := getOptions(keyName)
if _, ok := opts["omitempty"]; ok && isEmpty(sf) {
continue
} else if _, ok := opts["omitzero"]; ok && isZero(sf) {
continue
}
enc.encode(key.add(keyName), sf)
}
}
writeFields(fieldsDirect)
writeFields(fieldsSub)
}
// tomlTypeOfGo returns the TOML type of a Go value. It is used to determine
// whether the types of array elements are mixed (which is forbidden). The
// returned type may be nil, meaning that no concrete TOML type could be
// found (for example, when the Go value is nil).
func tomlTypeOfGo(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() {
return nil
}
switch rv.Kind() {
case reflect.Bool:
return tomlBool
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64:
return tomlInteger
case reflect.Float32, reflect.Float64:
return tomlFloat
case reflect.Array, reflect.Slice:
if typeEqual(tomlHash, tomlArrayType(rv)) {
return tomlArrayHash
} else {
return tomlArray
}
case reflect.Ptr, reflect.Interface:
return tomlTypeOfGo(rv.Elem())
case reflect.String:
return tomlString
case reflect.Map:
return tomlHash
case reflect.Struct:
switch rv.Interface().(type) {
case time.Time:
return tomlDatetime
case TextMarshaler:
return tomlString
default:
return tomlHash
}
default:
panic("unexpected reflect.Kind: " + rv.Kind().String())
}
}
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil
}
firstType := tomlTypeOfGo(rv.Index(0))
if firstType == nil {
encPanic(errArrayNilElement)
}
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
elem := rv.Index(i)
switch elemType := tomlTypeOfGo(elem); {
case elemType == nil:
encPanic(errArrayNilElement)
case !typeEqual(firstType, elemType):
encPanic(errArrayMixedElementTypes)
}
}
// If we have a nested array, then we must make sure that the nested
// array contains ONLY primitives.
// This checks arbitrarily nested arrays.
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
nest := tomlArrayType(eindirect(rv.Index(0)))
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
encPanic(errArrayNoTable)
}
}
return firstType
}
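// getOptions splits a struct tag value of the form "name,opt1,opt2" into the
// key name and a set of options. For example, the tag `toml:"password,omitempty"`
// yields the name "password" and the option set {"omitempty"}.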
func getOptions(keyName string) (string, map[string]struct{}) {
opts := make(map[string]struct{})
ss := strings.Split(keyName, ",")
name := ss[0]
if len(ss) > 1 {
for _, opt := range ss[1:] {
opts[opt] = struct{}{}
}
}
return name, opts
}
func isZero(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if rv.Int() == 0 {
return true
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
if rv.Uint() == 0 {
return true
}
case reflect.Float32, reflect.Float64:
if rv.Float() == 0.0 {
return true
}
}
return false
}
func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.String:
if len(strings.TrimSpace(rv.String())) == 0 {
return true
}
case reflect.Array, reflect.Slice, reflect.Map:
if rv.Len() == 0 {
return true
}
}
return false
}
func (enc *Encoder) newline() {
if enc.hasWritten {
enc.wf("\n")
}
}
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
enc.newline()
}
func (enc *Encoder) wf(format string, v ...interface{}) {
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
encPanic(err)
}
enc.hasWritten = true
}
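// indentStr returns the indentation prefix for a key of the given depth; with
// the default two-space Indent, a key three levels deep is prefixed by four
// spaces.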
func (enc *Encoder) indentStr(key Key) string {
return strings.Repeat(enc.Indent, len(key)-1)
}
func encPanic(err error) {
panic(tomlEncodeError{err})
}
func eindirect(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
return eindirect(v.Elem())
default:
return v
}
}
func isNil(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return rv.IsNil()
default:
return false
}
}
func panicIfInvalidKey(key Key) {
for _, k := range key {
if len(k) == 0 {
encPanic(e("Key '%s' is not a valid table name. Key names "+
"cannot be empty.", key.maybeQuotedAll()))
}
}
}
func isValidKeyName(s string) bool {
return len(s) != 0
}


@ -0,0 +1,542 @@
package toml
import (
"bytes"
"fmt"
"log"
"net"
"testing"
"time"
)
func TestEncodeRoundTrip(t *testing.T) {
type Config struct {
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time
Ipaddress net.IP
}
var inputs = Config{
13,
[]string{"one", "two", "three"},
3.145,
[]int{11, 2, 3, 4},
time.Now(),
net.ParseIP("192.168.59.254"),
}
var firstBuffer bytes.Buffer
e := NewEncoder(&firstBuffer)
err := e.Encode(inputs)
if err != nil {
t.Fatal(err)
}
var outputs Config
if _, err := Decode(firstBuffer.String(), &outputs); err != nil {
log.Printf("Could not decode:\n-----\n%s\n-----\n",
firstBuffer.String())
t.Fatal(err)
}
// could test each value individually, but I'm lazy
var secondBuffer bytes.Buffer
e2 := NewEncoder(&secondBuffer)
err = e2.Encode(outputs)
if err != nil {
t.Fatal(err)
}
if firstBuffer.String() != secondBuffer.String() {
t.Error(
firstBuffer.String(),
"\n\n is not identical to\n\n",
secondBuffer.String())
}
}
// XXX(burntsushi)
// I think these tests probably should be removed. They are good, but they
// ought to be obsolete by toml-test.
func TestEncode(t *testing.T) {
type Embedded struct {
Int int `toml:"_int"`
}
type NonStruct int
date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600))
dateStr := "2014-05-11T19:30:40Z"
tests := map[string]struct {
input interface{}
wantOutput string
wantError error
}{
"bool field": {
input: struct {
BoolTrue bool
BoolFalse bool
}{true, false},
wantOutput: "BoolTrue = true\nBoolFalse = false\n",
},
"int fields": {
input: struct {
Int int
Int8 int8
Int16 int16
Int32 int32
Int64 int64
}{1, 2, 3, 4, 5},
wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n",
},
"uint fields": {
input: struct {
Uint uint
Uint8 uint8
Uint16 uint16
Uint32 uint32
Uint64 uint64
}{1, 2, 3, 4, 5},
wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" +
"\nUint64 = 5\n",
},
"float fields": {
input: struct {
Float32 float32
Float64 float64
}{1.5, 2.5},
wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n",
},
"string field": {
input: struct{ String string }{"foo"},
wantOutput: "String = \"foo\"\n",
},
"string field and unexported field": {
input: struct {
String string
unexported int
}{"foo", 0},
wantOutput: "String = \"foo\"\n",
},
"datetime field in UTC": {
input: struct{ Date time.Time }{date},
wantOutput: fmt.Sprintf("Date = %s\n", dateStr),
},
"datetime field as primitive": {
// Using a map here to fail if isStructOrMap() returns true for
// time.Time.
input: map[string]interface{}{
"Date": date,
"Int": 1,
},
wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr),
},
"array fields": {
input: struct {
IntArray0 [0]int
IntArray3 [3]int
}{[0]int{}, [3]int{1, 2, 3}},
wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n",
},
"slice fields": {
input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{
nil, []int{}, []int{1, 2, 3},
},
wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n",
},
"datetime slices": {
input: struct{ DatetimeSlice []time.Time }{
[]time.Time{date, date},
},
wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n",
dateStr, dateStr),
},
"nested arrays and slices": {
input: struct {
SliceOfArrays [][2]int
ArrayOfSlices [2][]int
SliceOfArraysOfSlices [][2][]int
ArrayOfSlicesOfArrays [2][][2]int
SliceOfMixedArrays [][2]interface{}
ArrayOfMixedSlices [2][]interface{}
}{
[][2]int{{1, 2}, {3, 4}},
[2][]int{{1, 2}, {3, 4}},
[][2][]int{
{
{1, 2}, {3, 4},
},
{
{5, 6}, {7, 8},
},
},
[2][][2]int{
{
{1, 2}, {3, 4},
},
{
{5, 6}, {7, 8},
},
},
[][2]interface{}{
{1, 2}, {"a", "b"},
},
[2][]interface{}{
{1, 2}, {"a", "b"},
},
},
wantOutput: `SliceOfArrays = [[1, 2], [3, 4]]
ArrayOfSlices = [[1, 2], [3, 4]]
SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
SliceOfMixedArrays = [[1, 2], ["a", "b"]]
ArrayOfMixedSlices = [[1, 2], ["a", "b"]]
`,
},
"empty slice": {
input: struct{ Empty []interface{} }{[]interface{}{}},
wantOutput: "Empty = []\n",
},
"(error) slice with element type mismatch (string and integer)": {
input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}},
wantError: errArrayMixedElementTypes,
},
"(error) slice with element type mismatch (integer and float)": {
input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}},
wantError: errArrayMixedElementTypes,
},
"slice with elems of differing Go types, same TOML types": {
input: struct {
MixedInts []interface{}
MixedFloats []interface{}
}{
[]interface{}{
int(1), int8(2), int16(3), int32(4), int64(5),
uint(1), uint8(2), uint16(3), uint32(4), uint64(5),
},
[]interface{}{float32(1.5), float64(2.5)},
},
wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" +
"MixedFloats = [1.5, 2.5]\n",
},
"(error) slice w/ element type mismatch (one is nested array)": {
input: struct{ Mixed []interface{} }{
[]interface{}{1, []interface{}{2}},
},
wantError: errArrayMixedElementTypes,
},
"(error) slice with 1 nil element": {
input: struct{ NilElement1 []interface{} }{[]interface{}{nil}},
wantError: errArrayNilElement,
},
"(error) slice with 1 nil element (and other non-nil elements)": {
input: struct{ NilElement []interface{} }{
[]interface{}{1, nil},
},
wantError: errArrayNilElement,
},
"simple map": {
input: map[string]int{"a": 1, "b": 2},
wantOutput: "a = 1\nb = 2\n",
},
"map with interface{} value type": {
input: map[string]interface{}{"a": 1, "b": "c"},
wantOutput: "a = 1\nb = \"c\"\n",
},
"map with interface{} value type, some of which are structs": {
input: map[string]interface{}{
"a": struct{ Int int }{2},
"b": 1,
},
wantOutput: "b = 1\n\n[a]\n Int = 2\n",
},
"nested map": {
input: map[string]map[string]int{
"a": {"b": 1},
"c": {"d": 2},
},
wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n",
},
"nested struct": {
input: struct{ Struct struct{ Int int } }{
struct{ Int int }{1},
},
wantOutput: "[Struct]\n Int = 1\n",
},
"nested struct and non-struct field": {
input: struct {
Struct struct{ Int int }
Bool bool
}{struct{ Int int }{1}, true},
wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n",
},
"2 nested structs": {
input: struct{ Struct1, Struct2 struct{ Int int } }{
struct{ Int int }{1}, struct{ Int int }{2},
},
wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n",
},
"deeply nested structs": {
input: struct {
Struct1, Struct2 struct{ Struct3 *struct{ Int int } }
}{
struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}},
struct{ Struct3 *struct{ Int int } }{nil},
},
wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" +
"\n\n[Struct2]\n",
},
"nested struct with nil struct elem": {
input: struct {
Struct struct{ Inner *struct{ Int int } }
}{
struct{ Inner *struct{ Int int } }{nil},
},
wantOutput: "[Struct]\n",
},
"nested struct with no fields": {
input: struct {
Struct struct{ Inner struct{} }
}{
struct{ Inner struct{} }{struct{}{}},
},
wantOutput: "[Struct]\n [Struct.Inner]\n",
},
"struct with tags": {
input: struct {
Struct struct {
Int int `toml:"_int"`
} `toml:"_struct"`
Bool bool `toml:"_bool"`
}{
struct {
Int int `toml:"_int"`
}{1}, true,
},
wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n",
},
"embedded struct": {
input: struct{ Embedded }{Embedded{1}},
wantOutput: "_int = 1\n",
},
"embedded *struct": {
input: struct{ *Embedded }{&Embedded{1}},
wantOutput: "_int = 1\n",
},
"nested embedded struct": {
input: struct {
Struct struct{ Embedded } `toml:"_struct"`
}{struct{ Embedded }{Embedded{1}}},
wantOutput: "[_struct]\n _int = 1\n",
},
"nested embedded *struct": {
input: struct {
Struct struct{ *Embedded } `toml:"_struct"`
}{struct{ *Embedded }{&Embedded{1}}},
wantOutput: "[_struct]\n _int = 1\n",
},
"array of tables": {
input: struct {
Structs []*struct{ Int int } `toml:"struct"`
}{
[]*struct{ Int int }{{1}, {3}},
},
wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n",
},
"array of tables order": {
input: map[string]interface{}{
"map": map[string]interface{}{
"zero": 5,
"arr": []map[string]int{
map[string]int{
"friend": 5,
},
},
},
},
wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n",
},
"(error) top-level slice": {
input: []struct{ Int int }{{1}, {2}, {3}},
wantError: errNoKey,
},
"(error) slice of slice": {
input: struct {
Slices [][]struct{ Int int }
}{
[][]struct{ Int int }{{{1}}, {{2}}, {{3}}},
},
wantError: errArrayNoTable,
},
"(error) map no string key": {
input: map[int]string{1: ""},
wantError: errNonString,
},
"(error) anonymous non-struct": {
input: struct{ NonStruct }{5},
wantError: errAnonNonStruct,
},
"(error) empty key name": {
input: map[string]int{"": 1},
wantError: errAnything,
},
"(error) empty map name": {
input: map[string]interface{}{
"": map[string]int{"v": 1},
},
wantError: errAnything,
},
}
for label, test := range tests {
encodeExpected(t, label, test.input, test.wantOutput, test.wantError)
}
}
func TestEncodeNestedTableArrays(t *testing.T) {
type song struct {
Name string `toml:"name"`
}
type album struct {
Name string `toml:"name"`
Songs []song `toml:"songs"`
}
type springsteen struct {
Albums []album `toml:"albums"`
}
value := springsteen{
[]album{
{"Born to Run",
[]song{{"Jungleland"}, {"Meeting Across the River"}}},
{"Born in the USA",
[]song{{"Glory Days"}, {"Dancing in the Dark"}}},
},
}
expected := `[[albums]]
name = "Born to Run"
[[albums.songs]]
name = "Jungleland"
[[albums.songs]]
name = "Meeting Across the River"
[[albums]]
name = "Born in the USA"
[[albums.songs]]
name = "Glory Days"
[[albums.songs]]
name = "Dancing in the Dark"
`
encodeExpected(t, "nested table arrays", value, expected, nil)
}
func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) {
type Alpha struct {
V int
}
type Beta struct {
V int
}
type Conf struct {
V int
A Alpha
B []Beta
}
val := Conf{
V: 1,
A: Alpha{2},
B: []Beta{{3}},
}
expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n"
encodeExpected(t, "array hash with normal hash order", val, expected, nil)
}
func TestEncodeWithOmitEmpty(t *testing.T) {
type simple struct {
User string `toml:"user"`
Pass string `toml:"password,omitempty"`
}
value := simple{"Testing", ""}
expected := fmt.Sprintf("user = %q\n", value.User)
encodeExpected(t, "simple with omitempty, is empty", value, expected, nil)
value.Pass = "some password"
expected = fmt.Sprintf("user = %q\npassword = %q\n", value.User, value.Pass)
encodeExpected(t, "simple with omitempty, not empty", value, expected, nil)
}
func TestEncodeWithOmitZero(t *testing.T) {
type simple struct {
Number int `toml:"number,omitzero"`
Real float64 `toml:"real,omitzero"`
Unsigned uint `toml:"unsigned,omitzero"`
}
value := simple{0, 0.0, uint(0)}
expected := ""
encodeExpected(t, "simple with omitzero, all zero", value, expected, nil)
value.Number = 10
value.Real = 20
value.Unsigned = 5
expected = `number = 10
real = 20.0
unsigned = 5
`
encodeExpected(t, "simple with omitzero, non-zero", value, expected, nil)
}
func encodeExpected(
t *testing.T, label string, val interface{}, wantStr string, wantErr error,
) {
var buf bytes.Buffer
enc := NewEncoder(&buf)
err := enc.Encode(val)
if err != wantErr {
if wantErr != nil {
if wantErr == errAnything && err != nil {
return
}
t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err)
} else {
t.Errorf("%s: Encode failed: %s", label, err)
}
}
if err != nil {
return
}
if got := buf.String(); wantStr != got {
t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n",
label, wantStr, got)
}
}
func ExampleEncoder_Encode() {
date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC")
var config = map[string]interface{}{
"date": date,
"counts": []int{1, 1, 2, 3, 5, 8},
"hash": map[string]string{
"key1": "val1",
"key2": "val2",
},
}
buf := new(bytes.Buffer)
if err := NewEncoder(buf).Encode(config); err != nil {
log.Fatal(err)
}
fmt.Println(buf.String())
// Output:
// counts = [1, 1, 2, 3, 5, 8]
// date = 2010-03-14T18:00:00Z
//
// [hash]
// key1 = "val1"
// key2 = "val2"
}


@ -0,0 +1,19 @@
// +build go1.2
package toml
// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.
import (
"encoding"
)
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler


@ -0,0 +1,18 @@
// +build !go1.2
package toml
// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
MarshalText() (text []byte, err error)
}
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
UnmarshalText(text []byte) error
}

874
Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go generated vendored Normal file

@ -0,0 +1,874 @@
package toml
import (
"fmt"
"strings"
"unicode/utf8"
)
type itemType int
const (
itemError itemType = iota
itemNIL // used in the parser to indicate no type
itemEOF
itemText
itemString
itemRawString
itemMultilineString
itemRawMultilineString
itemBool
itemInteger
itemFloat
itemDatetime
itemArray // the start of an array
itemArrayEnd
itemTableStart
itemTableEnd
itemArrayTableStart
itemArrayTableEnd
itemKeyStart
itemCommentStart
)
const (
eof = 0
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
arrayValTerm = ','
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
)
type stateFn func(lx *lexer) stateFn
type lexer struct {
input string
start int
pos int
width int
line int
state stateFn
items chan item
// A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places.
// For example, values can appear at the top level or within arbitrarily
// nested arrays. The last state on the stack is used after a value has
// been lexed. Similarly for comments.
stack []stateFn
}
type item struct {
typ itemType
val string
line int
}
func (lx *lexer) nextItem() item {
for {
select {
case item := <-lx.items:
return item
default:
lx.state = lx.state(lx)
}
}
}
func lex(input string) *lexer {
lx := &lexer{
input: input + "\n",
state: lexTop,
line: 1,
items: make(chan item, 10),
stack: make([]stateFn, 0, 10),
}
return lx
}
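// A sketch of how a caller can drive the lexer: items are pulled on demand,
// so the input is only lexed as far as the consumer needs. For a valid input
// such as
//
//	lx := lex("answer = 42")
//	for it := lx.nextItem(); it.typ != itemEOF; it = lx.nextItem() {
//		fmt.Println(it)
//	}
//
// this prints something like:
//
//	(KeyStart, )
//	(Text, answer)
//	(Integer, 42)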
func (lx *lexer) push(state stateFn) {
lx.stack = append(lx.stack, state)
}
func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 {
return lx.errorf("BUG in lexer: no states to pop.")
}
last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1]
return last
}
func (lx *lexer) current() string {
return lx.input[lx.start:lx.pos]
}
func (lx *lexer) emit(typ itemType) {
lx.items <- item{typ, lx.current(), lx.line}
lx.start = lx.pos
}
func (lx *lexer) emitTrim(typ itemType) {
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
lx.start = lx.pos
}
func (lx *lexer) next() (r rune) {
if lx.pos >= len(lx.input) {
lx.width = 0
return eof
}
if lx.input[lx.pos] == '\n' {
lx.line++
}
r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.pos += lx.width
return r
}
// ignore skips over the pending input before this point.
func (lx *lexer) ignore() {
lx.start = lx.pos
}
// backup steps back one rune. Can be called only once per call of next.
func (lx *lexer) backup() {
lx.pos -= lx.width
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
}
}
// accept consumes the next rune if it's equal to `valid`.
func (lx *lexer) accept(valid rune) bool {
if lx.next() == valid {
return true
}
lx.backup()
return false
}
// peek returns but does not consume the next rune in the input.
func (lx *lexer) peek() rune {
r := lx.next()
lx.backup()
return r
}
// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any interpolated value that is a special character (new lines,
// tabs, etc.) is escaped in the resulting message.
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
lx.items <- item{
itemError,
fmt.Sprintf(format, values...),
lx.line,
}
return nil
}
// lexTop consumes elements at the top level of TOML data.
func lexTop(lx *lexer) stateFn {
r := lx.next()
if isWhitespace(r) || isNL(r) {
return lexSkip(lx, lexTop)
}
switch r {
case commentStart:
lx.push(lexTop)
return lexCommentStart
case tableStart:
return lexTableStart
case eof:
if lx.pos > lx.start {
return lx.errorf("Unexpected EOF.")
}
lx.emit(itemEOF)
return nil
}
// At this point, the only valid item can be a key, so we back up
// and let the key lexer do the rest.
lx.backup()
lx.push(lexTopEnd)
return lexKeyStart
}
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a new line. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case r == commentStart:
// a comment will read to a new line for us.
lx.push(lexTop)
return lexCommentStart
case isWhitespace(r):
return lexTopEnd
case isNL(r):
lx.ignore()
return lexTop
case r == eof:
lx.ignore()
return lexTop
}
return lx.errorf("Expected a top-level item to end with a new line, "+
"comment or EOF, but got %q instead.", r)
}
// lexTableStart lexes the beginning of a table. Namely, it makes sure that
// it starts with a character other than '.' and ']'.
// It assumes that '[' has already been consumed.
// It also handles the case that this is an item in an array of tables.
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
if lx.peek() == arrayTableStart {
lx.next()
lx.emit(itemArrayTableStart)
lx.push(lexArrayTableEnd)
} else {
lx.emit(itemTableStart)
lx.push(lexTableEnd)
}
return lexTableNameStart
}
func lexTableEnd(lx *lexer) stateFn {
lx.emit(itemTableEnd)
return lexTopEnd
}
func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
return lx.errorf("Expected end of table array name delimiter %q, "+
"but got %q instead.", arrayTableEnd, r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
}
func lexTableNameStart(lx *lexer) stateFn {
switch r := lx.peek(); {
case r == tableEnd || r == eof:
return lx.errorf("Unexpected end of table name. (Table names cannot " +
"be empty.)")
case r == tableSep:
return lx.errorf("Unexpected table separator. (Table names cannot " +
"be empty.)")
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.push(lexTableNameEnd)
return lexValue // reuse string lexing
case isWhitespace(r):
return lexTableNameStart
default:
return lexBareTableName
}
}
// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table name has already been seen.
func lexBareTableName(lx *lexer) stateFn {
switch r := lx.next(); {
case isBareKeyChar(r):
return lexBareTableName
case r == tableSep || r == tableEnd:
lx.backup()
lx.emitTrim(itemText)
return lexTableNameEnd
default:
return lx.errorf("Bare keys cannot contain %q.", r)
}
}
// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case isWhitespace(r):
return lexTableNameEnd
case r == tableSep:
lx.ignore()
return lexTableNameStart
case r == tableEnd:
return lx.pop()
default:
return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
"instead.", r)
}
}
// lexKeyStart consumes whitespace up to the first character of a key, emits
// itemKeyStart, and hands off to the bare or quoted key lexer.
func lexKeyStart(lx *lexer) stateFn {
r := lx.peek()
switch {
case r == keySep:
return lx.errorf("Unexpected key separator %q.", keySep)
case isWhitespace(r) || isNL(r):
lx.next()
return lexSkip(lx, lexKeyStart)
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.emit(itemKeyStart)
lx.push(lexKeyEnd)
return lexValue // reuse string lexing
default:
lx.ignore()
lx.emit(itemKeyStart)
return lexBareKey
}
}
// lexBareKey consumes the text of a bare key. Assumes that the first character
// (which is not whitespace) has not yet been consumed.
func lexBareKey(lx *lexer) stateFn {
switch r := lx.next(); {
case isBareKeyChar(r):
return lexBareKey
case isWhitespace(r):
lx.emitTrim(itemText)
return lexKeyEnd
case r == keySep:
lx.backup()
lx.emitTrim(itemText)
return lexKeyEnd
default:
return lx.errorf("Bare keys cannot contain %q.", r)
}
}
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case r == keySep:
return lexSkip(lx, lexValue)
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
default:
return lx.errorf("Expected key separator %q, but got %q instead.",
keySep, r)
}
}
// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the stack is popped and returned.
func lexValue(lx *lexer) stateFn {
// We allow whitespace to precede a value, but NOT new lines.
// In array syntax, the array states are responsible for ignoring new
// lines.
r := lx.next()
if isWhitespace(r) {
return lexSkip(lx, lexValue)
}
switch {
case r == arrayStart:
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
case r == stringStart:
if lx.accept(stringStart) {
if lx.accept(stringStart) {
lx.ignore() // Ignore """
return lexMultilineString
}
lx.backup()
}
lx.ignore() // ignore the '"'
return lexString
case r == rawStringStart:
if lx.accept(rawStringStart) {
if lx.accept(rawStringStart) {
lx.ignore() // Ignore '''
return lexMultilineRawString
}
lx.backup()
}
lx.ignore() // ignore the "'"
return lexRawString
case r == 't':
return lexTrue
case r == 'f':
return lexFalse
case r == '-':
return lexNumberStart
case isDigit(r):
lx.backup() // avoid an extra state and use the same as above
return lexNumberOrDateStart
case r == '.': // special error case, be kind to users
return lx.errorf("Floats must start with a digit, not '.'.")
}
return lx.errorf("Expected value but found %q instead.", r)
}
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and new lines are ignored.
func lexArrayValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValue)
case r == commentStart:
lx.push(lexArrayValue)
return lexCommentStart
case r == arrayValTerm:
return lx.errorf("Unexpected array value terminator %q.",
arrayValTerm)
case r == arrayEnd:
return lexArrayEnd
}
lx.backup()
lx.push(lexArrayValueEnd)
return lexValue
}
// lexArrayValueEnd consumes the cruft between values of an array. Namely,
// it ignores whitespace and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValueEnd)
case r == commentStart:
lx.push(lexArrayValueEnd)
return lexCommentStart
case r == arrayValTerm:
lx.ignore()
return lexArrayValue // move on to the next value
case r == arrayEnd:
return lexArrayEnd
}
return lx.errorf("Expected an array value terminator %q or an array "+
"terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
}
// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
// just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArrayEnd)
return lx.pop()
}
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
case isNL(r):
return lx.errorf("Strings cannot contain new lines.")
case r == '\\':
lx.push(lexString)
return lexStringEscape
case r == stringEnd:
lx.backup()
lx.emit(itemString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexString
}
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == '\\':
return lexMultilineStringEscape
case r == stringEnd:
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineString
}
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
// It assumes that the beginning "'" has already been consumed and ignored.
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case isNL(r):
return lx.errorf("Strings cannot contain new lines.")
case r == rawStringEnd:
lx.backup()
lx.emit(itemRawString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexRawString
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == rawStringEnd:
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemRawMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineRawString
}
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
// Handle the special case first:
if isNL(lx.next()) {
lx.next()
return lexMultilineString
} else {
lx.backup()
lx.push(lexMultilineString)
return lexStringEscape(lx)
}
}
func lexStringEscape(lx *lexer) stateFn {
r := lx.next()
switch r {
case 'b', 't', 'n', 'f', 'r', '"', '\\':
return lx.pop()
case 'u':
return lexShortUnicodeEscape
case 'U':
return lexLongUnicodeEscape
}
return lx.errorf("Invalid escape character %q. Only the following "+
"escape characters are allowed: "+
"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+
"\\uXXXX and \\UXXXXXXXX.", r)
}
func lexShortUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 4; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf("Expected four hexadecimal digits after '\\u', "+
"but got '%s' instead.", lx.current())
}
}
return lx.pop()
}
func lexLongUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 8; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
"but got '%s' instead.", lx.current())
}
}
return lx.pop()
}
// lexNumberOrDateStart consumes either a (positive) integer, float or
// datetime. It assumes that NO negative sign has been consumed.
func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next()
if !isDigit(r) {
if r == '.' {
return lx.errorf("Floats must start with a digit, not '.'.")
} else {
return lx.errorf("Expected a digit but got %q.", r)
}
}
return lexNumberOrDate
}
// lexNumberOrDate consumes either a (positive) integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn {
r := lx.next()
switch {
case r == '-':
if lx.pos-lx.start != 5 {
return lx.errorf("All ISO8601 dates must be in full Zulu form.")
}
return lexDateAfterYear
case isDigit(r):
return lexNumberOrDate
case r == '.':
return lexFloatStart
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
// It assumes that "YYYY-" has already been consumed.
func lexDateAfterYear(lx *lexer) stateFn {
formats := []rune{
// '0' stands for any digit; everything else must match exactly.
'0', '0', '-', '0', '0',
'T',
'0', '0', ':', '0', '0', ':', '0', '0',
'Z',
}
for _, f := range formats {
r := lx.next()
if f == '0' {
if !isDigit(r) {
return lx.errorf("Expected digit in ISO8601 datetime, "+
"but found %q instead.", r)
}
} else if f != r {
return lx.errorf("Expected %q in ISO8601 datetime, "+
"but found %q instead.", f, r)
}
}
lx.emit(itemDatetime)
return lx.pop()
}
// lexNumberStart consumes either an integer or a float. It assumes that
// a negative sign has already been read, but that *no* digits have been
// consumed. lexNumberStart will move to the appropriate integer or float
// states.
func lexNumberStart(lx *lexer) stateFn {
// we MUST see a digit. Even floats have to start with a digit.
r := lx.next()
if !isDigit(r) {
if r == '.' {
return lx.errorf("Floats must start with a digit, not '.'.")
} else {
return lx.errorf("Expected a digit but got %q.", r)
}
}
return lexNumber
}
// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
r := lx.next()
switch {
case isDigit(r):
return lexNumber
case r == '.':
return lexFloatStart
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexFloatStart starts the consumption of digits of a float after a '.'.
// Namely, at least one digit is required.
func lexFloatStart(lx *lexer) stateFn {
r := lx.next()
if !isDigit(r) {
return lx.errorf("Floats must have a digit after the '.', but got "+
"%q instead.", r)
}
return lexFloat
}
// lexFloat consumes the digits of a float after a '.'.
// Assumes that one digit has been consumed after a '.' already.
func lexFloat(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexFloat
}
lx.backup()
lx.emit(itemFloat)
return lx.pop()
}
// lexConst consumes the remainder of s (s[1:]), assuming that s[0] has
// already been consumed.
func lexConst(lx *lexer, s string) stateFn {
for i := range s[1:] {
if r := lx.next(); r != rune(s[i+1]) {
return lx.errorf("Expected %q, but found %q instead.", s[:i+1],
s[:i]+string(r))
}
}
return nil
}
// lexTrue consumes the "rue" in "true". It assumes that 't' has already
// been consumed.
func lexTrue(lx *lexer) stateFn {
if fn := lexConst(lx, "true"); fn != nil {
return fn
}
lx.emit(itemBool)
return lx.pop()
}
// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
// been consumed.
func lexFalse(lx *lexer) stateFn {
if fn := lexConst(lx, "false"); fn != nil {
return fn
}
lx.emit(itemBool)
return lx.pop()
}
// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemCommentStart)
return lexComment
}
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first new line character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
r := lx.peek()
if isNL(r) || r == eof {
lx.emit(itemText)
return lx.pop()
}
lx.next()
return lexComment
}
// lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
return func(lx *lexer) stateFn {
lx.ignore()
return nextState
}
}
// isWhitespace returns true if `r` is a whitespace character according
// to the spec.
func isWhitespace(r rune) bool {
return r == '\t' || r == ' '
}
func isNL(r rune) bool {
return r == '\n' || r == '\r'
}
func isDigit(r rune) bool {
return r >= '0' && r <= '9'
}
func isHexadecimal(r rune) bool {
return (r >= '0' && r <= '9') ||
(r >= 'a' && r <= 'f') ||
(r >= 'A' && r <= 'F')
}
func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' ||
r == '-'
}
func (itype itemType) String() string {
switch itype {
case itemError:
return "Error"
case itemNIL:
return "NIL"
case itemEOF:
return "EOF"
case itemText:
return "Text"
case itemString:
return "String"
case itemRawString:
return "String"
case itemMultilineString:
return "String"
case itemRawMultilineString:
return "String"
case itemBool:
return "Bool"
case itemInteger:
return "Integer"
case itemFloat:
return "Float"
case itemDatetime:
return "DateTime"
case itemTableStart:
return "TableStart"
case itemTableEnd:
return "TableEnd"
case itemArrayTableStart:
return "ArrayTableStart"
case itemArrayTableEnd:
return "ArrayTableEnd"
case itemKeyStart:
return "KeyStart"
case itemArray:
return "Array"
case itemArrayEnd:
return "ArrayEnd"
case itemCommentStart:
return "CommentStart"
}
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}
func (item item) String() string {
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}


@ -0,0 +1,498 @@
package toml
import (
"fmt"
"log"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
)
type parser struct {
mapping map[string]interface{}
types map[string]tomlType
lx *lexer
// A list of keys in the order that they appear in the TOML data.
ordered []Key
// the full key for the current hash in scope
context Key
// the base key name for everything except hashes
currentKey string
// rough approximation of line number
approxLine int
// A map of 'key.group.names' to whether they were created implicitly.
implicits map[string]bool
}
type parseError string
func (pe parseError) Error() string {
return string(pe)
}
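// parse lexes and parses the TOML document in data. The returned parser holds
// the decoded mapping, the TOML type of every key and the order in which keys
// appeared; lexing and parsing failures are recovered and returned as an error.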
func parse(data string) (p *parser, err error) {
defer func() {
if r := recover(); r != nil {
var ok bool
if err, ok = r.(parseError); ok {
return
}
panic(r)
}
}()
p = &parser{
mapping: make(map[string]interface{}),
types: make(map[string]tomlType),
lx: lex(data),
ordered: make([]Key, 0),
implicits: make(map[string]bool),
}
for {
item := p.next()
if item.typ == itemEOF {
break
}
p.topLevel(item)
}
return p, nil
}
func (p *parser) panicf(format string, v ...interface{}) {
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
p.approxLine, p.current(), fmt.Sprintf(format, v...))
panic(parseError(msg))
}
func (p *parser) next() item {
it := p.lx.nextItem()
if it.typ == itemError {
p.panicf("%s", it.val)
}
return it
}
func (p *parser) bug(format string, v ...interface{}) {
log.Fatalf("BUG: %s\n\n", fmt.Sprintf(format, v...))
}
func (p *parser) expect(typ itemType) item {
it := p.next()
p.assertEqual(typ, it.typ)
return it
}
func (p *parser) assertEqual(expected, got itemType) {
if expected != got {
p.bug("Expected '%s' but got '%s'.", expected, got)
}
}
func (p *parser) topLevel(item item) {
switch item.typ {
case itemCommentStart:
p.approxLine = item.line
p.expect(itemText)
case itemTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemTableEnd, kg.typ)
p.establishContext(key, false)
p.setType("", tomlHash)
p.ordered = append(p.ordered, key)
case itemArrayTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemArrayTableEnd, kg.typ)
p.establishContext(key, true)
p.setType("", tomlArrayHash)
p.ordered = append(p.ordered, key)
case itemKeyStart:
kname := p.next()
p.approxLine = kname.line
p.currentKey = p.keyString(kname)
val, typ := p.value(p.next())
p.setValue(p.currentKey, val)
p.setType(p.currentKey, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
p.currentKey = ""
default:
p.bug("Unexpected type at top level: %s", item.typ)
}
}
// Gets a string for a key (or part of a key in a table name).
func (p *parser) keyString(it item) string {
switch it.typ {
case itemText:
return it.val
case itemString, itemMultilineString,
itemRawString, itemRawMultilineString:
s, _ := p.value(it)
return s.(string)
default:
p.bug("Unexpected key type: %s", it.typ)
panic("unreachable")
}
}
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) {
switch it.typ {
case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
case itemMultilineString:
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
case itemBool:
switch it.val {
case "true":
return true, p.typeOfPrimitive(it)
case "false":
return false, p.typeOfPrimitive(it)
}
p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger:
num, err := strconv.ParseInt(it.val, 10, 64)
if err != nil {
// See comment below for floats describing why we make a
// distinction between a bug and a user error.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Integer '%s' is out of the range of 64-bit "+
"signed integers.", it.val)
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemFloat:
num, err := strconv.ParseFloat(it.val, 64)
if err != nil {
// Distinguish float values. Normally, it'd be a bug if the lexer
// provides an invalid float, but it's possible that the float is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
//
// This is also true for integers.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val)
} else {
p.bug("Expected float value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemDatetime:
t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
if err != nil {
p.bug("Expected Zulu formatted DateTime, but got '%s'.", it.val)
}
return t, p.typeOfPrimitive(it)
case itemArray:
array := make([]interface{}, 0)
types := make([]tomlType, 0)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
val, typ := p.value(it)
array = append(array, val)
types = append(types, typ)
}
return array, p.typeOfArray(types)
}
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable")
}
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
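// For example, establishing the context for the key 'a.b.c' implicitly
// creates the 'a' and 'a.b' hashes if they do not already exist.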
func (p *parser) establishContext(key Key, array bool) {
var ok bool
// Always start at the top level and drill down for our context.
hashContext := p.mapping
keyContext := make(Key, 0)
// We only need implicit hashes for the parent keys, i.e. key[0:len(key)-1].
for _, k := range key[0 : len(key)-1] {
_, ok = hashContext[k]
keyContext = append(keyContext, k)
// No key? Make an implicit hash and move on.
if !ok {
p.addImplicit(keyContext)
hashContext[k] = make(map[string]interface{})
}
// If the hash context is actually an array of tables, then set
// the hash context to the last element in that array.
//
// Otherwise, it better be a table, since this MUST be a key group (by
// virtue of it not being the last element in a key).
switch t := hashContext[k].(type) {
case []map[string]interface{}:
hashContext = t[len(t)-1]
case map[string]interface{}:
hashContext = t
default:
p.panicf("Key '%s' was already created as a hash.", keyContext)
}
}
p.context = keyContext
if array {
// If this is the first element for this array, then allocate a new
// list of tables for it.
k := key[len(key)-1]
if _, ok := hashContext[k]; !ok {
hashContext[k] = make([]map[string]interface{}, 0, 5)
}
// Add a new table. But make sure the key hasn't already been used
// for something else.
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{}))
} else {
p.panicf("Key '%s' was already created and cannot be used as "+
"an array.", keyContext)
}
} else {
p.setValue(key[len(key)-1], make(map[string]interface{}))
}
p.context = append(p.context, key[len(key)-1])
}
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, accounting for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
var tmpHash interface{}
var ok bool
hash := p.mapping
keyContext := make(Key, 0)
for _, k := range p.context {
keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok {
p.bug("Context for key '%s' has not been established.", keyContext)
}
switch t := tmpHash.(type) {
case []map[string]interface{}:
// The context is a table of hashes. Pick the most recent table
// defined as the current hash.
hash = t[len(t)-1]
case map[string]interface{}:
hash = t
default:
p.bug("Expected hash to have type 'map[string]interface{}', but "+
"it has '%T' instead.", tmpHash)
}
}
keyContext = append(keyContext, key)
if _, ok := hash[key]; ok {
// Typically, if the given key has already been set, then we have
// to raise an error since duplicate keys are disallowed. However,
// it's possible that a key was previously defined implicitly. In this
// case, it is allowed to be redefined concretely. (See the
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
//
// But we have to make sure to stop marking it as an implicit. (So that
// another redefinition provokes an error.)
//
// Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done.
if p.isImplicit(keyContext) {
p.removeImplicit(keyContext)
return
}
// Otherwise, we have a concrete key trying to override a previous
// key, which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
hash[key] = value
}
// setType sets the type of a particular value at a given key.
// It should be called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
keyContext := make(Key, 0, len(p.context)+1)
for _, k := range p.context {
keyContext = append(keyContext, k)
}
if len(key) > 0 { // allow type setting for hashes
keyContext = append(keyContext, key)
}
p.types[keyContext.String()] = typ
}
// addImplicit sets the given Key as having been created implicitly.
func (p *parser) addImplicit(key Key) {
p.implicits[key.String()] = true
}
// removeImplicit stops tagging the given key as having been implicitly
// created.
func (p *parser) removeImplicit(key Key) {
p.implicits[key.String()] = false
}
// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
return p.implicits[key.String()]
}
// current returns the full key name of the current context.
func (p *parser) current() string {
if len(p.currentKey) == 0 {
return p.context.String()
}
if len(p.context) == 0 {
return p.currentKey
}
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
}
func stripFirstNewline(s string) string {
if len(s) == 0 || s[0] != '\n' {
return s
}
return s[1:len(s)]
}
func stripEscapedWhitespace(s string) string {
esc := strings.Split(s, "\\\n")
if len(esc) > 1 {
for i := 1; i < len(esc); i++ {
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
}
}
return strings.Join(esc, "")
}
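// replaceEscapes expands the escape sequences of a basic string; for example,
// `\t` becomes a tab and `\u00E9` becomes 'é'.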
func (p *parser) replaceEscapes(str string) string {
var replaced []rune
s := []byte(str)
r := 0
for r < len(s) {
if s[r] != '\\' {
c, size := utf8.DecodeRune(s[r:])
r += size
replaced = append(replaced, c)
continue
}
r += 1
if r >= len(s) {
p.bug("Escape sequence at end of string.")
return ""
}
switch s[r] {
default:
p.bug("Expected valid escape code after \\, but got %q.", s[r])
return ""
case 'b':
replaced = append(replaced, rune(0x0008))
r += 1
case 't':
replaced = append(replaced, rune(0x0009))
r += 1
case 'n':
replaced = append(replaced, rune(0x000A))
r += 1
case 'f':
replaced = append(replaced, rune(0x000C))
r += 1
case 'r':
replaced = append(replaced, rune(0x000D))
r += 1
case '"':
replaced = append(replaced, rune(0x0022))
r += 1
case '\\':
replaced = append(replaced, rune(0x005C))
r += 1
case 'u':
// At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
replaced = append(replaced, escaped)
r += 5
case 'U':
// At this point, we know we have a Unicode escape of the form
// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
replaced = append(replaced, escaped)
r += 9
}
}
return string(replaced)
}
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
s := string(bs)
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
"lexer claims it's OK: %s", s, err)
}
// BUG(burntsushi)
// I honestly don't understand how this works. I can't seem
// to find a way to make this fail. I figured this would fail on invalid
// UTF-8 characters like U+DCFF, but it doesn't.
if !utf8.ValidString(string(rune(hex))) {
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
}
return rune(hex)
}
func isStringType(ty itemType) bool {
return ty == itemString || ty == itemMultilineString ||
ty == itemRawString || ty == itemRawMultilineString
}


@ -0,0 +1 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1


@ -0,0 +1,91 @@
package toml
// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be moving
// toward adding real composite types.
type tomlType interface {
typeString() string
}
// typeEqual accepts any two types and returns true if they are equal.
func typeEqual(t1, t2 tomlType) bool {
if t1 == nil || t2 == nil {
return false
}
return t1.typeString() == t2.typeString()
}
func typeIsHash(t tomlType) bool {
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}
type tomlBaseType string
func (btype tomlBaseType) typeString() string {
return string(btype)
}
func (btype tomlBaseType) String() string {
return btype.typeString()
}
var (
tomlInteger tomlBaseType = "Integer"
tomlFloat tomlBaseType = "Float"
tomlDatetime tomlBaseType = "Datetime"
tomlString tomlBaseType = "String"
tomlBool tomlBaseType = "Bool"
tomlArray tomlBaseType = "Array"
tomlHash tomlBaseType = "Hash"
tomlArrayHash tomlBaseType = "ArrayHash"
)
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
// Primitive values are: Integer, Float, Datetime, String and Bool.
//
// Passing a lexer item other than the following will cause a BUG message
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
switch lexItem.typ {
case itemInteger:
return tomlInteger
case itemFloat:
return tomlFloat
case itemDatetime:
return tomlDatetime
case itemString:
return tomlString
case itemMultilineString:
return tomlString
case itemRawString:
return tomlString
case itemRawMultilineString:
return tomlString
case itemBool:
return tomlBool
}
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable")
}
// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
// Empty arrays are cool.
if len(types) == 0 {
return tomlArray
}
theType := types[0]
for _, t := range types[1:] {
if !typeEqual(theType, t) {
p.panicf("Array contains values of type '%s' and '%s', but "+
"arrays must be homogeneous.", theType, t)
}
}
return tomlArray
}

View File

@ -0,0 +1,241 @@
package toml
// Struct field handling is adapted from code in encoding/json:
//
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the Go distribution.
import (
"reflect"
"sort"
"sync"
)
// A field represents a single field found in a struct.
type field struct {
name string // the name of the field (`toml` tag included)
tag bool // whether field has a `toml` tag
index []int // represents the depth of an anonymous field
typ reflect.Type // the type of the field
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" { // unexported
continue
}
name := sf.Tag.Get("toml")
if name == "-" {
continue
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := name != ""
if name == "" {
name = sf.Name
}
fields = append(fields, field{name, tagged, index, ft})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
f := field{name: ft.Name(), index: index, typ: ft}
next = append(next, f)
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with TOML tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
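// For example (hypothetical types, for illustration only):
//
//	type A struct {
//		Name string `toml:"Name"`
//	}
//	type B struct {
//		Name string
//	}
//	type Outer struct {
//		A
//		B
//	}
//
// Both promoted Name fields sit at the same depth, but only A's carries a
// toml tag, so it is the dominant field for "Name".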
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}

View File

@ -0,0 +1,23 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test

View File

@ -0,0 +1,362 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

View File

@ -0,0 +1,42 @@
consul-api
==========
*DEPRECATED* Please use [consul api package](https://github.com/hashicorp/consul/tree/master/api) instead.
Godocs for that package [are here](http://godoc.org/github.com/hashicorp/consul/api).
This repository provides the `consulapi` package, which aims to give
programmatic access to the full Consul API.
Currently, all of the Consul APIs included in version 0.4 are supported.
Documentation
=============
The full documentation is available on [Godoc](http://godoc.org/github.com/armon/consul-api)
Usage
=====
Below is an example of using the Consul client:
```go
// Get a new client, with KV endpoints
client, _ := consulapi.NewClient(consulapi.DefaultConfig())
kv := client.KV()
// PUT a new KV pair
p := &consulapi.KVPair{Key: "foo", Value: []byte("test")}
_, err := kv.Put(p, nil)
if err != nil {
panic(err)
}
// Lookup the pair
pair, _, err := kv.Get("foo", nil)
if err != nil {
panic(err)
}
fmt.Printf("KV: %v", pair)
```
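The Agent and Health endpoints can be driven from the same client. The sketch below registers a service with a TTL check, marks the check as passing, and queries healthy instances; the service name, port, and TTL values are placeholders:
```go
// Register a service with a TTL check on the local agent
agent := client.Agent()
reg := &consulapi.AgentServiceRegistration{
    Name: "web",
    Port: 8000,
    Check: &consulapi.AgentServiceCheck{
        TTL: "15s",
    },
}
if err := agent.ServiceRegister(reg); err != nil {
    panic(err)
}

// Mark the TTL check as passing (check IDs follow the "service:<id>" convention)
if err := agent.PassTTL("service:web", "all good"); err != nil {
    panic(err)
}

// Look up healthy instances of the service
entries, meta, err := client.Health().Service("web", "", true, nil)
if err != nil {
    panic(err)
}
fmt.Printf("healthy instances: %d\n", len(entries))

// A follow-up blocking query using the index from the previous response
_, _, err = client.Health().Service("web", "", true,
    &consulapi.QueryOptions{WaitIndex: meta.LastIndex})
if err != nil {
    panic(err)
}
```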

View File

@ -0,0 +1,140 @@
package consulapi
const (
// ACLClientType is the client type token
ACLClientType = "client"
// ACLManagementType is the management type token
ACLManagementType = "management"
)
// ACLEntry is used to represent an ACL entry
type ACLEntry struct {
CreateIndex uint64
ModifyIndex uint64
ID string
Name string
Type string
Rules string
}
// ACL can be used to query the ACL endpoints
type ACL struct {
c *Client
}
// ACL returns a handle to the ACL endpoints
func (c *Client) ACL() *ACL {
return &ACL{c}
}
// Create is used to generate a new token with the given parameters
func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
r := a.c.newRequest("PUT", "/v1/acl/create")
r.setWriteOptions(q)
r.obj = acl
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// Update is used to update the rules of an existing token
func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
r := a.c.newRequest("PUT", "/v1/acl/update")
r.setWriteOptions(q)
r.obj = acl
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}
// Destroy is used to destroy a given ACL token ID
func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id)
r.setWriteOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}
// Clone is used to return a new token cloned from an existing one
func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
r := a.c.newRequest("PUT", "/v1/acl/clone/"+id)
r.setWriteOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// Info is used to query for information about an ACL token
func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/acl/info/"+id)
r.setQueryOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*ACLEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
if len(entries) > 0 {
return entries[0], qm, nil
}
return nil, qm, nil
}
// List is used to get all the ACL tokens
func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/acl/list")
r.setQueryOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*ACLEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}

View File

@ -0,0 +1,140 @@
package consulapi
import (
"os"
"testing"
)
// CONSUL_ROOT is a management token used by the tests
var CONSUL_ROOT string
func init() {
CONSUL_ROOT = os.Getenv("CONSUL_ROOT")
}
func TestACL_CreateDestroy(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
ae := ACLEntry{
Name: "API test",
Type: ACLClientType,
Rules: `key "" { policy = "deny" }`,
}
id, wm, err := acl.Create(&ae, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
ae2, _, err := acl.Info(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if ae2.Name != ae.Name || ae2.Type != ae.Type || ae2.Rules != ae.Rules {
t.Fatalf("Bad: %#v", ae2)
}
wm, err = acl.Destroy(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
}
func TestACL_CloneDestroy(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
id, wm, err := acl.Clone(CONSUL_ROOT, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
wm, err = acl.Destroy(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
}
func TestACL_Info(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
ae, qm, err := acl.Info(CONSUL_ROOT, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
if ae == nil || ae.ID != CONSUL_ROOT || ae.Type != ACLManagementType {
t.Fatalf("bad: %#v", ae)
}
}
func TestACL_List(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
acls, qm, err := acl.List(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(acls) < 2 {
t.Fatalf("bad: %v", acls)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
}

View File

@ -0,0 +1,272 @@
package consulapi
import (
"fmt"
)
// AgentCheck represents a check known to the agent
type AgentCheck struct {
Node string
CheckID string
Name string
Status string
Notes string
Output string
ServiceID string
ServiceName string
}
// AgentService represents a service known to the agent
type AgentService struct {
ID string
Service string
Tags []string
Port int
}
// AgentMember represents a cluster member known to the agent
type AgentMember struct {
Name string
Addr string
Port uint16
Tags map[string]string
Status int
ProtocolMin uint8
ProtocolMax uint8
ProtocolCur uint8
DelegateMin uint8
DelegateMax uint8
DelegateCur uint8
}
// AgentServiceRegistration is used to register a new service
type AgentServiceRegistration struct {
ID string `json:",omitempty"`
Name string `json:",omitempty"`
Tags []string `json:",omitempty"`
Port int `json:",omitempty"`
Check *AgentServiceCheck
}
// AgentCheckRegistration is used to register a new check
type AgentCheckRegistration struct {
ID string `json:",omitempty"`
Name string `json:",omitempty"`
Notes string `json:",omitempty"`
AgentServiceCheck
}
// AgentServiceCheck is used to create an associated
// check for a service
type AgentServiceCheck struct {
Script string `json:",omitempty"`
Interval string `json:",omitempty"`
TTL string `json:",omitempty"`
}
// Agent can be used to query the Agent endpoints
type Agent struct {
c *Client
// cache the node name
nodeName string
}
// Agent returns a handle to the agent endpoints
func (c *Client) Agent() *Agent {
return &Agent{c: c}
}
// Self is used to query the agent we are speaking to for
// information about itself
func (a *Agent) Self() (map[string]map[string]interface{}, error) {
r := a.c.newRequest("GET", "/v1/agent/self")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out map[string]map[string]interface{}
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// NodeName is used to get the node name of the agent
func (a *Agent) NodeName() (string, error) {
if a.nodeName != "" {
return a.nodeName, nil
}
info, err := a.Self()
if err != nil {
return "", err
}
name := info["Config"]["NodeName"].(string)
a.nodeName = name
return name, nil
}
// Checks returns the locally registered checks
func (a *Agent) Checks() (map[string]*AgentCheck, error) {
r := a.c.newRequest("GET", "/v1/agent/checks")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out map[string]*AgentCheck
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Services returns the locally registered services
func (a *Agent) Services() (map[string]*AgentService, error) {
r := a.c.newRequest("GET", "/v1/agent/services")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out map[string]*AgentService
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Members returns the known gossip members. The WAN
// flag can be used to query a server for WAN members.
func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
r := a.c.newRequest("GET", "/v1/agent/members")
if wan {
r.params.Set("wan", "1")
}
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []*AgentMember
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// ServiceRegister is used to register a new service with
// the local agent
func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
r := a.c.newRequest("PUT", "/v1/agent/service/register")
r.obj = service
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// ServiceDeregister is used to deregister a service with
// the local agent
func (a *Agent) ServiceDeregister(serviceID string) error {
r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// PassTTL is used to set a TTL check to the passing state
func (a *Agent) PassTTL(checkID, note string) error {
return a.UpdateTTL(checkID, note, "pass")
}
// WarnTTL is used to set a TTL check to the warning state
func (a *Agent) WarnTTL(checkID, note string) error {
return a.UpdateTTL(checkID, note, "warn")
}
// FailTTL is used to set a TTL check to the failing state
func (a *Agent) FailTTL(checkID, note string) error {
return a.UpdateTTL(checkID, note, "fail")
}
// UpdateTTL is used to update the TTL of a check
func (a *Agent) UpdateTTL(checkID, note, status string) error {
switch status {
case "pass":
case "warn":
case "fail":
default:
return fmt.Errorf("Invalid status: %s", status)
}
endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID)
r := a.c.newRequest("PUT", endpoint)
r.params.Set("note", note)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// CheckRegister is used to register a new check with
// the local agent
func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
r := a.c.newRequest("PUT", "/v1/agent/check/register")
r.obj = check
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// CheckDeregister is used to deregister a check with
// the local agent
func (a *Agent) CheckDeregister(checkID string) error {
r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// Join is used to instruct the agent to attempt a join to
// another cluster member
func (a *Agent) Join(addr string, wan bool) error {
r := a.c.newRequest("PUT", "/v1/agent/join/"+addr)
if wan {
r.params.Set("wan", "1")
}
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// ForceLeave is used to have the agent eject a failed node
func (a *Agent) ForceLeave(node string) error {
r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}

View File

@ -0,0 +1,162 @@
package consulapi
import (
"testing"
)
func TestAgent_Self(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
info, err := agent.Self()
if err != nil {
t.Fatalf("err: %v", err)
}
name := info["Config"]["NodeName"]
if name == "" {
t.Fatalf("bad: %v", info)
}
}
func TestAgent_Members(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
members, err := agent.Members(false)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(members) != 1 {
t.Fatalf("bad: %v", members)
}
}
func TestAgent_Services(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
reg := &AgentServiceRegistration{
Name: "foo",
Tags: []string{"bar", "baz"},
Port: 8000,
Check: &AgentServiceCheck{
TTL: "15s",
},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
services, err := agent.Services()
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := services["foo"]; !ok {
t.Fatalf("missing service: %v", services)
}
checks, err := agent.Checks()
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := checks["service:foo"]; !ok {
t.Fatalf("missing check: %v", checks)
}
if err := agent.ServiceDeregister("foo"); err != nil {
t.Fatalf("err: %v", err)
}
}
func TestAgent_SetTTLStatus(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
reg := &AgentServiceRegistration{
Name: "foo",
Check: &AgentServiceCheck{
TTL: "15s",
},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
if err := agent.WarnTTL("service:foo", "test"); err != nil {
t.Fatalf("err: %v", err)
}
checks, err := agent.Checks()
if err != nil {
t.Fatalf("err: %v", err)
}
chk, ok := checks["service:foo"]
if !ok {
t.Fatalf("missing check: %v", checks)
}
if chk.Status != "warning" {
t.Fatalf("Bad: %#v", chk)
}
if chk.Output != "test" {
t.Fatalf("Bad: %#v", chk)
}
if err := agent.ServiceDeregister("foo"); err != nil {
t.Fatalf("err: %v", err)
}
}
func TestAgent_Checks(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
reg := &AgentCheckRegistration{
Name: "foo",
}
reg.TTL = "15s"
if err := agent.CheckRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
checks, err := agent.Checks()
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := checks["foo"]; !ok {
t.Fatalf("missing check: %v", checks)
}
if err := agent.CheckDeregister("foo"); err != nil {
t.Fatalf("err: %v", err)
}
}
func TestAgent_Join(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
info, err := agent.Self()
if err != nil {
t.Fatalf("err: %v", err)
}
// Join ourself
addr := info["Config"]["AdvertiseAddr"].(string)
err = agent.Join(addr, false)
if err != nil {
t.Fatalf("err: %v", err)
}
}
func TestAgent_ForceLeave(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
// Eject somebody
err := agent.ForceLeave("foo")
if err != nil {
t.Fatalf("err: %v", err)
}
}

View File

@ -0,0 +1,323 @@
package consulapi
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"time"
)
// QueryOptions are used to parameterize a query
type QueryOptions struct {
// Providing a datacenter overrides the DC provided
// by the Config
Datacenter string
// AllowStale allows any Consul server (non-leader) to service
// a read. This allows for lower latency and higher throughput
AllowStale bool
// RequireConsistent forces the read to be fully consistent.
// This is more expensive but prevents ever performing a stale
// read.
RequireConsistent bool
// WaitIndex is used to enable a blocking query. Waits
// until the timeout or the next index is reached
WaitIndex uint64
// WaitTime is used to bound the duration of a wait.
// Defaults to that of the Config, but can be overridden.
WaitTime time.Duration
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
}
// WriteOptions are used to parameterize a write
type WriteOptions struct {
// Providing a datacenter overrides the DC provided
// by the Config
Datacenter string
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
}
// QueryMeta is used to return meta data about a query
type QueryMeta struct {
// LastIndex is the index returned with the response. It can be used
// as a WaitIndex to perform a blocking query
LastIndex uint64
// Time of last contact from the leader for the
// server servicing the request
LastContact time.Duration
// Is there a known leader
KnownLeader bool
// How long did the request take
RequestTime time.Duration
}
// WriteMeta is used to return meta data about a write
type WriteMeta struct {
// How long did the request take
RequestTime time.Duration
}
// HttpBasicAuth is used to authenticate the HTTP client with HTTP Basic Authentication
type HttpBasicAuth struct {
// Username to use for HTTP Basic Authentication
Username string
// Password to use for HTTP Basic Authentication
Password string
}
// Config is used to configure the creation of a client
type Config struct {
// Address is the address of the Consul server
Address string
// Scheme is the URI scheme for the Consul server
Scheme string
// Datacenter to use. If not provided, the default agent datacenter is used.
Datacenter string
// HttpClient is the client to use. Default will be
// used if not provided.
HttpClient *http.Client
// HttpAuth is the auth info to use for http access.
HttpAuth *HttpBasicAuth
// WaitTime limits how long a Watch will block. If not provided,
// the agent default values will be used.
WaitTime time.Duration
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
}
// DefaultConfig returns a default configuration for the client
func DefaultConfig() *Config {
return &Config{
Address: "127.0.0.1:8500",
Scheme: "http",
HttpClient: http.DefaultClient,
}
}
// Client provides a client to the Consul API
type Client struct {
config Config
}
// NewClient returns a new client
func NewClient(config *Config) (*Client, error) {
// bootstrap the config
defConfig := DefaultConfig()
if len(config.Address) == 0 {
config.Address = defConfig.Address
}
if len(config.Scheme) == 0 {
config.Scheme = defConfig.Scheme
}
if config.HttpClient == nil {
config.HttpClient = defConfig.HttpClient
}
client := &Client{
config: *config,
}
return client, nil
}
// request is used to help build up a request
type request struct {
config *Config
method string
url *url.URL
params url.Values
body io.Reader
obj interface{}
}
// setQueryOptions is used to annotate the request with
// additional query options
func (r *request) setQueryOptions(q *QueryOptions) {
if q == nil {
return
}
if q.Datacenter != "" {
r.params.Set("dc", q.Datacenter)
}
if q.AllowStale {
r.params.Set("stale", "")
}
if q.RequireConsistent {
r.params.Set("consistent", "")
}
if q.WaitIndex != 0 {
r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
}
if q.WaitTime != 0 {
r.params.Set("wait", durToMsec(q.WaitTime))
}
if q.Token != "" {
r.params.Set("token", q.Token)
}
}
// durToMsec converts a duration to a millisecond specified string
func durToMsec(dur time.Duration) string {
return fmt.Sprintf("%dms", dur/time.Millisecond)
}
// setWriteOptions is used to annotate the request with
// additional write options
func (r *request) setWriteOptions(q *WriteOptions) {
if q == nil {
return
}
if q.Datacenter != "" {
r.params.Set("dc", q.Datacenter)
}
if q.Token != "" {
r.params.Set("token", q.Token)
}
}
// toHTTP converts the request to an HTTP request
func (r *request) toHTTP() (*http.Request, error) {
// Encode the query parameters
r.url.RawQuery = r.params.Encode()
// Get the URL string
urlRaw := r.url.String()
// Check if we should encode the body
if r.body == nil && r.obj != nil {
if b, err := encodeBody(r.obj); err != nil {
return nil, err
} else {
r.body = b
}
}
// Create the HTTP request
req, err := http.NewRequest(r.method, urlRaw, r.body)
// Setup auth
if err == nil && r.config.HttpAuth != nil {
req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
}
return req, err
}
// newRequest is used to create a new request
func (c *Client) newRequest(method, path string) *request {
r := &request{
config: &c.config,
method: method,
url: &url.URL{
Scheme: c.config.Scheme,
Host: c.config.Address,
Path: path,
},
params: make(map[string][]string),
}
if c.config.Datacenter != "" {
r.params.Set("dc", c.config.Datacenter)
}
if c.config.WaitTime != 0 {
r.params.Set("wait", durToMsec(r.config.WaitTime))
}
if c.config.Token != "" {
r.params.Set("token", r.config.Token)
}
return r
}
// doRequest runs a request with our client
func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
req, err := r.toHTTP()
if err != nil {
return 0, nil, err
}
start := time.Now()
resp, err := c.config.HttpClient.Do(req)
diff := time.Now().Sub(start)
return diff, resp, err
}
// parseQueryMeta is used to help parse query meta-data
func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
header := resp.Header
// Parse the X-Consul-Index
index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64)
if err != nil {
return fmt.Errorf("Failed to parse X-Consul-Index: %v", err)
}
q.LastIndex = index
// Parse the X-Consul-LastContact
last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64)
if err != nil {
return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err)
}
q.LastContact = time.Duration(last) * time.Millisecond
// Parse the X-Consul-KnownLeader
switch header.Get("X-Consul-KnownLeader") {
case "true":
q.KnownLeader = true
default:
q.KnownLeader = false
}
return nil
}
// decodeBody is used to JSON decode a body
func decodeBody(resp *http.Response, out interface{}) error {
dec := json.NewDecoder(resp.Body)
return dec.Decode(out)
}
// encodeBody is used to encode a request body
func encodeBody(obj interface{}) (io.Reader, error) {
buf := bytes.NewBuffer(nil)
enc := json.NewEncoder(buf)
if err := enc.Encode(obj); err != nil {
return nil, err
}
return buf, nil
}
// requireOK is used to wrap doRequest and check for a 200
func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
if e != nil {
return d, resp, e
}
if resp.StatusCode != 200 {
var buf bytes.Buffer
io.Copy(&buf, resp.Body)
return d, resp, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
}
return d, resp, e
}

View File

@ -0,0 +1,126 @@
package consulapi
import (
crand "crypto/rand"
"fmt"
"net/http"
"testing"
"time"
)
func makeClient(t *testing.T) *Client {
conf := DefaultConfig()
client, err := NewClient(conf)
if err != nil {
t.Fatalf("err: %v", err)
}
return client
}
func testKey() string {
buf := make([]byte, 16)
if _, err := crand.Read(buf); err != nil {
panic(fmt.Errorf("Failed to read random bytes: %v", err))
}
return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
buf[0:4],
buf[4:6],
buf[6:8],
buf[8:10],
buf[10:16])
}
func TestSetQueryOptions(t *testing.T) {
c := makeClient(t)
r := c.newRequest("GET", "/v1/kv/foo")
q := &QueryOptions{
Datacenter: "foo",
AllowStale: true,
RequireConsistent: true,
WaitIndex: 1000,
WaitTime: 100 * time.Second,
Token: "12345",
}
r.setQueryOptions(q)
if r.params.Get("dc") != "foo" {
t.Fatalf("bad: %v", r.params)
}
if _, ok := r.params["stale"]; !ok {
t.Fatalf("bad: %v", r.params)
}
if _, ok := r.params["consistent"]; !ok {
t.Fatalf("bad: %v", r.params)
}
if r.params.Get("index") != "1000" {
t.Fatalf("bad: %v", r.params)
}
if r.params.Get("wait") != "100000ms" {
t.Fatalf("bad: %v", r.params)
}
if r.params.Get("token") != "12345" {
t.Fatalf("bad: %v", r.params)
}
}
func TestSetWriteOptions(t *testing.T) {
c := makeClient(t)
r := c.newRequest("GET", "/v1/kv/foo")
q := &WriteOptions{
Datacenter: "foo",
Token: "23456",
}
r.setWriteOptions(q)
if r.params.Get("dc") != "foo" {
t.Fatalf("bad: %v", r.params)
}
if r.params.Get("token") != "23456" {
t.Fatalf("bad: %v", r.params)
}
}
func TestRequestToHTTP(t *testing.T) {
c := makeClient(t)
r := c.newRequest("DELETE", "/v1/kv/foo")
q := &QueryOptions{
Datacenter: "foo",
}
r.setQueryOptions(q)
req, err := r.toHTTP()
if err != nil {
t.Fatalf("err: %v", err)
}
if req.Method != "DELETE" {
t.Fatalf("bad: %v", req)
}
if req.URL.String() != "http://127.0.0.1:8500/v1/kv/foo?dc=foo" {
t.Fatalf("bad: %v", req)
}
}
func TestParseQueryMeta(t *testing.T) {
resp := &http.Response{
Header: make(map[string][]string),
}
resp.Header.Set("X-Consul-Index", "12345")
resp.Header.Set("X-Consul-LastContact", "80")
resp.Header.Set("X-Consul-KnownLeader", "true")
qm := &QueryMeta{}
if err := parseQueryMeta(resp, qm); err != nil {
t.Fatalf("err: %v", err)
}
if qm.LastIndex != 12345 {
t.Fatalf("Bad: %v", qm)
}
if qm.LastContact != 80*time.Millisecond {
t.Fatalf("Bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("Bad: %v", qm)
}
}

View File

@ -0,0 +1,181 @@
package consulapi
type Node struct {
Node string
Address string
}
type CatalogService struct {
Node string
Address string
ServiceID string
ServiceName string
ServiceTags []string
ServicePort int
}
type CatalogNode struct {
Node *Node
Services map[string]*AgentService
}
type CatalogRegistration struct {
Node string
Address string
Datacenter string
Service *AgentService
Check *AgentCheck
}
type CatalogDeregistration struct {
Node string
Address string
Datacenter string
ServiceID string
CheckID string
}
// Catalog can be used to query the Catalog endpoints
type Catalog struct {
c *Client
}
// Catalog returns a handle to the catalog endpoints
func (c *Client) Catalog() *Catalog {
return &Catalog{c}
}
func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("PUT", "/v1/catalog/register")
r.setWriteOptions(q)
r.obj = reg
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("PUT", "/v1/catalog/deregister")
r.setWriteOptions(q)
r.obj = dereg
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
// Datacenters is used to query for all the known datacenters
func (c *Catalog) Datacenters() ([]string, error) {
r := c.c.newRequest("GET", "/v1/catalog/datacenters")
_, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []string
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Nodes is used to query all the known nodes
func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/nodes")
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*Node
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Services is used to query for all known services
func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/services")
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out map[string][]string
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Service is used to query catalog entries for a given service
func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/service/"+service)
r.setQueryOptions(q)
if tag != "" {
r.params.Set("tag", tag)
}
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*CatalogService
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Node is used to query for service information about a single node
func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out *CatalogNode
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}

View File

@ -0,0 +1,219 @@
package consulapi
import (
"testing"
)
func TestCatalog_Datacenters(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
datacenters, err := catalog.Datacenters()
if err != nil {
t.Fatalf("err: %v", err)
}
if len(datacenters) == 0 {
t.Fatalf("Bad: %v", datacenters)
}
}
func TestCatalog_Nodes(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
nodes, meta, err := catalog.Nodes(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("Bad: %v", meta)
}
if len(nodes) == 0 {
t.Fatalf("Bad: %v", nodes)
}
}
func TestCatalog_Services(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
services, meta, err := catalog.Services(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("Bad: %v", meta)
}
if len(services) == 0 {
t.Fatalf("Bad: %v", services)
}
}
func TestCatalog_Service(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
services, meta, err := catalog.Service("consul", "", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("Bad: %v", meta)
}
if len(services) == 0 {
t.Fatalf("Bad: %v", services)
}
}
func TestCatalog_Node(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
name, _ := c.Agent().NodeName()
info, meta, err := catalog.Node(name, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("Bad: %v", meta)
}
if len(info.Services) == 0 {
t.Fatalf("Bad: %v", info)
}
}
func TestCatalog_Registration(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
service := &AgentService{
ID: "redis1",
Service: "redis",
Tags: []string{"master", "v1"},
Port: 8000,
}
check := &AgentCheck{
Node: "foobar",
CheckID: "service:redis1",
Name: "Redis health check",
Notes: "Script based health check",
Status: "passing",
ServiceID: "redis1",
}
reg := &CatalogRegistration{
Datacenter: "dc1",
Node: "foobar",
Address: "192.168.10.10",
Service: service,
Check: check,
}
_, err := catalog.Register(reg, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
node, _, err := catalog.Node("foobar", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := node.Services["redis1"]; !ok {
t.Fatalf("missing service: redis1")
}
health, _, err := c.Health().Node("foobar", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if health[0].CheckID != "service:redis1" {
t.Fatalf("missing checkid service:redis1")
}
}
func TestCatalog_Deregistration(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
dereg := &CatalogDeregistration{
Datacenter: "dc1",
Node: "foobar",
Address: "192.168.10.10",
ServiceID: "redis1",
}
_, err := catalog.Deregister(dereg, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
node, _, err := catalog.Node("foobar", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := node.Services["redis1"]; ok {
t.Fatalf("ServiceID:redis1 is not deregistered")
}
dereg = &CatalogDeregistration{
Datacenter: "dc1",
Node: "foobar",
Address: "192.168.10.10",
CheckID: "service:redis1",
}
_, err = catalog.Deregister(dereg, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
health, _, err := c.Health().Node("foobar", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(health) != 0 {
t.Fatalf("CheckID:service:redis1 is not deregistered")
}
dereg = &CatalogDeregistration{
Datacenter: "dc1",
Node: "foobar",
Address: "192.168.10.10",
}
_, err = catalog.Deregister(dereg, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
node, _, err = catalog.Node("foobar", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if node != nil {
t.Fatalf("node is not deregistered: %v", node)
}
}

View File

@ -0,0 +1,104 @@
package consulapi
import (
"bytes"
"strconv"
)
// Event can be used to query the Event endpoints
type Event struct {
c *Client
}
// UserEvent represents an event that was fired by the user
type UserEvent struct {
ID string
Name string
Payload []byte
NodeFilter string
ServiceFilter string
TagFilter string
Version int
LTime uint64
}
// Event returns a handle to the event endpoints
func (c *Client) Event() *Event {
return &Event{c}
}
// Fire is used to fire a new user event. Only the Name, Payload and Filters
// are respected. This returns the ID or an associated error. Cross DC requests
// are supported.
func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
r.setWriteOptions(q)
if params.NodeFilter != "" {
r.params.Set("node", params.NodeFilter)
}
if params.ServiceFilter != "" {
r.params.Set("service", params.ServiceFilter)
}
if params.TagFilter != "" {
r.params.Set("tag", params.TagFilter)
}
if params.Payload != nil {
r.body = bytes.NewReader(params.Payload)
}
rtt, resp, err := requireOK(e.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out UserEvent
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// List is used to get the most recent events an agent has received.
// This list can be optionally filtered by the name. This endpoint supports
// quasi-blocking queries. The index is not monotonic, nor does it provide
// LastContact or KnownLeader.
func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
r := e.c.newRequest("GET", "/v1/event/list")
r.setQueryOptions(q)
if name != "" {
r.params.Set("name", name)
}
rtt, resp, err := requireOK(e.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*UserEvent
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// IDToIndex is a bit of a hack. This simulates the index generation to
// convert an event ID into a WaitIndex.
func (e *Event) IDToIndex(uuid string) uint64 {
lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
upper := uuid[19:23] + uuid[24:36]
lowVal, err := strconv.ParseUint(lower, 16, 64)
if err != nil {
panic("Failed to convert " + lower)
}
highVal, err := strconv.ParseUint(upper, 16, 64)
if err != nil {
panic("Failed to convert " + upper)
}
return lowVal ^ highVal
}

View File

@ -0,0 +1,37 @@
package consulapi
import (
"testing"
)
func TestEvent_FireList(t *testing.T) {
c := makeClient(t)
event := c.Event()
params := &UserEvent{Name: "foo"}
id, meta, err := event.Fire(params, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.RequestTime == 0 {
t.Fatalf("bad: %v", meta)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
events, qm, err := event.List("", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if qm.LastIndex != event.IDToIndex(id) {
t.Fatalf("Bad: %#v", qm)
}
if events[len(events)-1].ID != id {
t.Fatalf("bad: %#v", events)
}
}

View File

@ -0,0 +1,136 @@
package consulapi
import (
"fmt"
)
// HealthCheck is used to represent a single check
type HealthCheck struct {
Node string
CheckID string
Name string
Status string
Notes string
Output string
ServiceID string
ServiceName string
}
// ServiceEntry is used for the health service endpoint
type ServiceEntry struct {
Node *Node
Service *AgentService
Checks []*HealthCheck
}
// Health can be used to query the Health endpoints
type Health struct {
c *Client
}
// Health returns a handle to the health endpoints
func (c *Client) Health() *Health {
return &Health{c}
}
// Node is used to query for checks belonging to a given node
func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/health/node/"+node)
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*HealthCheck
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Checks is used to return the checks associated with a service
func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/health/checks/"+service)
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*HealthCheck
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Service is used to query health information along with service info
// for a given service. It can optionally do server-side filtering on a tag
// or nodes with passing health checks only.
func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/health/service/"+service)
r.setQueryOptions(q)
if tag != "" {
r.params.Set("tag", tag)
}
if passingOnly {
r.params.Set("passing", "1")
}
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*ServiceEntry
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// State is used to retrieve all the checks in a given state.
// The wildcard "any" state can also be used for all checks.
func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
switch state {
case "any":
case "warning":
case "critical":
case "passing":
case "unknown":
default:
return nil, nil, fmt.Errorf("Unsupported state: %v", state)
}
r := h.c.newRequest("GET", "/v1/health/state/"+state)
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*HealthCheck
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
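// Illustrative usage sketch (not part of the upstream consul-api package):
// query only the passing instances of a service, optionally filtered by a
// tag, using Health.Service as documented above. The function name and the
// "redis"/"primary" literals are assumptions.
func examplePassingInstances(c *Client) ([]*ServiceEntry, error) {
	health := c.Health()
	entries, _, err := health.Service("redis", "primary", true, nil)
	return entries, err
}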

View File

@ -0,0 +1,98 @@
package consulapi
import (
"testing"
"time"
)
func TestHealth_Node(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
health := c.Health()
info, err := agent.Self()
if err != nil {
t.Fatalf("err: %v", err)
}
name := info["Config"]["NodeName"].(string)
checks, meta, err := health.Node(name, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
t.Fatalf("Bad: %v", checks)
}
}
func TestHealth_Checks(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
health := c.Health()
// Make a service with a check
reg := &AgentServiceRegistration{
Name: "foo",
Check: &AgentServiceCheck{
TTL: "15s",
},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
defer agent.ServiceDeregister("foo")
// Wait for the register...
time.Sleep(20 * time.Millisecond)
checks, meta, err := health.Checks("foo", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
t.Fatalf("Bad: %v", checks)
}
}
func TestHealth_Service(t *testing.T) {
c := makeClient(t)
health := c.Health()
// consul service should always exist...
checks, meta, err := health.Service("consul", "", true, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
t.Fatalf("Bad: %v", checks)
}
}
func TestHealth_State(t *testing.T) {
c := makeClient(t)
health := c.Health()
checks, meta, err := health.State("any", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
t.Fatalf("Bad: %v", checks)
}
}

219
Godeps/_workspace/src/github.com/armon/consul-api/kv.go generated vendored Normal file
View File

@ -0,0 +1,219 @@
package consulapi
import (
"bytes"
"fmt"
"io"
"net/http"
"strconv"
"strings"
)
// KVPair is used to represent a single K/V entry
type KVPair struct {
Key string
CreateIndex uint64
ModifyIndex uint64
LockIndex uint64
Flags uint64
Value []byte
Session string
}
// KVPairs is a list of KVPair objects
type KVPairs []*KVPair
// KV is used to manipulate the K/V API
type KV struct {
c *Client
}
// KV is used to return a handle to the K/V apis
func (c *Client) KV() *KV {
return &KV{c}
}
// Get is used to lookup a single key
func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
resp, qm, err := k.getInternal(key, nil, q)
if err != nil {
return nil, nil, err
}
if resp == nil {
return nil, qm, nil
}
defer resp.Body.Close()
var entries []*KVPair
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
if len(entries) > 0 {
return entries[0], qm, nil
}
return nil, qm, nil
}
// List is used to lookup all keys under a prefix
func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
if err != nil {
return nil, nil, err
}
if resp == nil {
return nil, qm, nil
}
defer resp.Body.Close()
var entries []*KVPair
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// Keys is used to list all the keys under a prefix. Optionally,
// a separator can be used to limit the responses.
func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
params := map[string]string{"keys": ""}
if separator != "" {
params["separator"] = separator
}
resp, qm, err := k.getInternal(prefix, params, q)
if err != nil {
return nil, nil, err
}
if resp == nil {
return nil, qm, nil
}
defer resp.Body.Close()
var entries []string
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
r := k.c.newRequest("GET", "/v1/kv/"+key)
r.setQueryOptions(q)
for param, val := range params {
r.params.Set(param, val)
}
rtt, resp, err := k.c.doRequest(r)
if err != nil {
return nil, nil, err
}
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if resp.StatusCode == 404 {
resp.Body.Close()
return nil, qm, nil
} else if resp.StatusCode != 200 {
resp.Body.Close()
return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
}
return resp, qm, nil
}
// Put is used to write a new value. Only the
// Key, Flags and Value are respected.
func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
params := make(map[string]string, 1)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
_, wm, err := k.put(p.Key, params, p.Value, q)
return wm, err
}
// CAS is used for a Check-And-Set operation. The Key,
// ModifyIndex, Flags and Value are respected. Returns true
// on success or false on failures.
func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := make(map[string]string, 2)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
return k.put(p.Key, params, p.Value, q)
}
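// Illustrative usage sketch (not part of the upstream consul-api package):
// read-modify-write a key with CAS, retrying when the ModifyIndex has moved
// underneath us. The function name is an assumption; a ModifyIndex of zero
// means "only create if the key does not exist yet".
func exampleCASUpdate(c *Client, key string, value []byte) error {
	kv := c.KV()
	for {
		pair, _, err := kv.Get(key, nil)
		if err != nil {
			return err
		}
		p := &KVPair{Key: key, Value: value}
		if pair != nil {
			p.ModifyIndex = pair.ModifyIndex
		}
		ok, _, err := kv.CAS(p, nil)
		if err != nil {
			return err
		}
		if ok {
			return nil // the check-and-set succeeded
		}
		// Another writer modified the key in between; read again and retry.
	}
}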
// Acquire is used for a lock acquisition operation. The Key,
// Flags, Value and Session are respected. Returns true
// on success or false on failures.
func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := make(map[string]string, 2)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
params["acquire"] = p.Session
return k.put(p.Key, params, p.Value, q)
}
// Release is used for a lock release operation. The Key,
// Flags, Value and Session are respected. Returns true
// on success or false on failures.
func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := make(map[string]string, 2)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
params["release"] = p.Session
return k.put(p.Key, params, p.Value, q)
}
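// Illustrative usage sketch (not part of the upstream consul-api package):
// hold a lock on a key with an existing session while running some work,
// then release it, using Acquire/Release as documented above. The function
// name and the "locks/leader" key are assumptions.
func exampleWithLock(c *Client, sessionID string, work func() error) error {
	kv := c.KV()
	p := &KVPair{Key: "locks/leader", Value: []byte("held"), Session: sessionID}
	locked, _, err := kv.Acquire(p, nil)
	if err != nil {
		return err
	}
	if !locked {
		return fmt.Errorf("lock already held by another session")
	}
	defer kv.Release(p, nil)
	return work()
}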
func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
r := k.c.newRequest("PUT", "/v1/kv/"+key)
r.setWriteOptions(q)
for param, val := range params {
r.params.Set(param, val)
}
r.body = bytes.NewReader(body)
rtt, resp, err := requireOK(k.c.doRequest(r))
if err != nil {
return false, nil, err
}
defer resp.Body.Close()
qm := &WriteMeta{}
qm.RequestTime = rtt
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, nil, fmt.Errorf("Failed to read response: %v", err)
}
res := strings.Contains(string(buf.Bytes()), "true")
return res, qm, nil
}
// Delete is used to delete a single key
func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
return k.deleteInternal(key, nil, w)
}
// DeleteTree is used to delete all keys under a prefix
func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
return k.deleteInternal(prefix, []string{"recurse"}, w)
}
func (k *KV) deleteInternal(key string, params []string, q *WriteOptions) (*WriteMeta, error) {
r := k.c.newRequest("DELETE", "/v1/kv/"+key)
r.setWriteOptions(q)
for _, param := range params {
r.params.Set(param, "")
}
rtt, resp, err := requireOK(k.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
qm := &WriteMeta{}
qm.RequestTime = rtt
return qm, nil
}

View File

@ -0,0 +1,374 @@
package consulapi
import (
"bytes"
"path"
"testing"
"time"
)
func TestClientPutGetDelete(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// Get a key that does not exist yet
key := testKey()
pair, _, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair != nil {
t.Fatalf("unexpected value: %#v", pair)
}
// Put the key
value := []byte("test")
p := &KVPair{Key: key, Flags: 42, Value: value}
if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err)
}
// Get should work
pair, meta, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if !bytes.Equal(pair.Value, value) {
t.Fatalf("unexpected value: %#v", pair)
}
if pair.Flags != 42 {
t.Fatalf("unexpected value: %#v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Delete
if _, err := kv.Delete(key, nil); err != nil {
t.Fatalf("err: %v", err)
}
// Get should fail
pair, _, err = kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair != nil {
t.Fatalf("unexpected value: %#v", pair)
}
}
func TestClient_List_DeleteRecurse(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// Generate some test keys
prefix := testKey()
var keys []string
for i := 0; i < 100; i++ {
keys = append(keys, path.Join(prefix, testKey()))
}
// Set values
value := []byte("test")
for _, key := range keys {
p := &KVPair{Key: key, Value: value}
if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err)
}
}
// List the values
pairs, meta, err := kv.List(prefix, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(pairs) != len(keys) {
t.Fatalf("got %d keys", len(pairs))
}
for _, pair := range pairs {
if !bytes.Equal(pair.Value, value) {
t.Fatalf("unexpected value: %#v", pair)
}
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Delete all
if _, err := kv.DeleteTree(prefix, nil); err != nil {
t.Fatalf("err: %v", err)
}
// List the values
pairs, _, err = kv.List(prefix, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(pairs) != 0 {
t.Fatalf("got %d keys", len(pairs))
}
}
func TestClient_CAS(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// Put the key
key := testKey()
value := []byte("test")
p := &KVPair{Key: key, Value: value}
if work, _, err := kv.CAS(p, nil); err != nil {
t.Fatalf("err: %v", err)
} else if !work {
t.Fatalf("CAS failure")
}
// Get should work
pair, meta, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// CAS update with bad index
newVal := []byte("foo")
p.Value = newVal
p.ModifyIndex = 1
if work, _, err := kv.CAS(p, nil); err != nil {
t.Fatalf("err: %v", err)
} else if work {
t.Fatalf("unexpected CAS")
}
// CAS update with valid index
p.ModifyIndex = meta.LastIndex
if work, _, err := kv.CAS(p, nil); err != nil {
t.Fatalf("err: %v", err)
} else if !work {
t.Fatalf("unexpected CAS failure")
}
}
func TestClient_WatchGet(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// Get a key that does not exist yet
key := testKey()
pair, meta, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair != nil {
t.Fatalf("unexpected value: %#v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Put the key
value := []byte("test")
go func() {
c := makeClient(t)
kv := c.KV()
time.Sleep(100 * time.Millisecond)
p := &KVPair{Key: key, Flags: 42, Value: value}
if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err)
}
}()
// Get should work
options := &QueryOptions{WaitIndex: meta.LastIndex}
pair, meta2, err := kv.Get(key, options)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if !bytes.Equal(pair.Value, value) {
t.Fatalf("unexpected value: %#v", pair)
}
if pair.Flags != 42 {
t.Fatalf("unexpected value: %#v", pair)
}
if meta2.LastIndex <= meta.LastIndex {
t.Fatalf("unexpected value: %#v", meta2)
}
}
func TestClient_WatchList(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// List a prefix that has no keys yet
prefix := testKey()
key := path.Join(prefix, testKey())
pairs, meta, err := kv.List(prefix, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(pairs) != 0 {
t.Fatalf("unexpected value: %#v", pairs)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Put the key
value := []byte("test")
go func() {
c := makeClient(t)
kv := c.KV()
time.Sleep(100 * time.Millisecond)
p := &KVPair{Key: key, Flags: 42, Value: value}
if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err)
}
}()
// Get should work
options := &QueryOptions{WaitIndex: meta.LastIndex}
pairs, meta2, err := kv.List(prefix, options)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(pairs) != 1 {
t.Fatalf("expected value: %#v", pairs)
}
if !bytes.Equal(pairs[0].Value, value) {
t.Fatalf("unexpected value: %#v", pairs)
}
if pairs[0].Flags != 42 {
t.Fatalf("unexpected value: %#v", pairs)
}
if meta2.LastIndex <= meta.LastIndex {
t.Fatalf("unexpected value: %#v", meta2)
}
}
func TestClient_Keys_DeleteRecurse(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// Generate some test keys
prefix := testKey()
var keys []string
for i := 0; i < 100; i++ {
keys = append(keys, path.Join(prefix, testKey()))
}
// Set values
value := []byte("test")
for _, key := range keys {
p := &KVPair{Key: key, Value: value}
if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err)
}
}
// List the values
out, meta, err := kv.Keys(prefix, "", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(out) != len(keys) {
t.Fatalf("got %d keys", len(out))
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Delete all
if _, err := kv.DeleteTree(prefix, nil); err != nil {
t.Fatalf("err: %v", err)
}
// List the values
out, _, err = kv.Keys(prefix, "", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(out) != 0 {
t.Fatalf("got %d keys", len(out))
}
}
func TestClient_AcquireRelease(t *testing.T) {
c := makeClient(t)
session := c.Session()
kv := c.KV()
// Make a session
id, _, err := session.CreateNoChecks(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
defer session.Destroy(id, nil)
// Acquire the key
key := testKey()
value := []byte("test")
p := &KVPair{Key: key, Value: value, Session: id}
if work, _, err := kv.Acquire(p, nil); err != nil {
t.Fatalf("err: %v", err)
} else if !work {
t.Fatalf("Lock failure")
}
// Get should work
pair, meta, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if pair.LockIndex != 1 {
t.Fatalf("Expected lock: %v", pair)
}
if pair.Session != id {
t.Fatalf("Expected lock: %v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Release
if work, _, err := kv.Release(p, nil); err != nil {
t.Fatalf("err: %v", err)
} else if !work {
t.Fatalf("Release fail")
}
// Get should work
pair, meta, err = kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if pair.LockIndex != 1 {
t.Fatalf("Expected lock: %v", pair)
}
if pair.Session != "" {
t.Fatalf("Expected unlock: %v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
}

View File

@ -0,0 +1,204 @@
package consulapi
import (
"time"
)
// SessionEntry represents a session in consul
type SessionEntry struct {
CreateIndex uint64
ID string
Name string
Node string
Checks []string
LockDelay time.Duration
Behavior string
TTL string
}
// Session can be used to query the Session endpoints
type Session struct {
c *Client
}
// Session returns a handle to the session endpoints
func (c *Client) Session() *Session {
return &Session{c}
}
// CreateNoChecks is like Create but is used specifically to create
// a session with no associated health checks.
func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
body := make(map[string]interface{})
body["Checks"] = []string{}
if se != nil {
if se.Name != "" {
body["Name"] = se.Name
}
if se.Node != "" {
body["Node"] = se.Node
}
if se.LockDelay != 0 {
body["LockDelay"] = durToMsec(se.LockDelay)
}
if se.Behavior != "" {
body["Behavior"] = se.Behavior
}
if se.TTL != "" {
body["TTL"] = se.TTL
}
}
return s.create(body, q)
}
// Create makes a new session. Providing a session entry can
// customize the session. It can also be nil to use defaults.
func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
var obj interface{}
if se != nil {
body := make(map[string]interface{})
obj = body
if se.Name != "" {
body["Name"] = se.Name
}
if se.Node != "" {
body["Node"] = se.Node
}
if se.LockDelay != 0 {
body["LockDelay"] = durToMsec(se.LockDelay)
}
if len(se.Checks) > 0 {
body["Checks"] = se.Checks
}
if se.Behavior != "" {
body["Behavior"] = se.Behavior
}
if se.TTL != "" {
body["TTL"] = se.TTL
}
}
return s.create(obj, q)
}
func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
r := s.c.newRequest("PUT", "/v1/session/create")
r.setWriteOptions(q)
r.obj = obj
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// Destroy invalidates a given session
func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
r := s.c.newRequest("PUT", "/v1/session/destroy/"+id)
r.setWriteOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}
// Renew renews the TTL on a given session
func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
r := s.c.newRequest("PUT", "/v1/session/renew/"+id)
r.setWriteOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var entries []*SessionEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, wm, err
}
if len(entries) > 0 {
return entries[0], wm, nil
}
return nil, wm, nil
}
// Info looks up a single session
func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
r := s.c.newRequest("GET", "/v1/session/info/"+id)
r.setQueryOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*SessionEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
if len(entries) > 0 {
return entries[0], qm, nil
}
return nil, qm, nil
}
// Node gets the sessions for a given node
func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
r := s.c.newRequest("GET", "/v1/session/node/"+node)
r.setQueryOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*SessionEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// List gets all active sessions
func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
r := s.c.newRequest("GET", "/v1/session/list")
r.setQueryOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*SessionEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
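// Illustrative usage sketch (not part of the upstream consul-api package):
// create a TTL session and keep it alive by renewing it periodically. The
// function name, the 15s TTL, and the 5s renewal interval are assumptions.
func exampleKeepSessionAlive(c *Client, done <-chan struct{}) (string, error) {
	s := c.Session()
	id, _, err := s.Create(&SessionEntry{TTL: "15s"}, nil)
	if err != nil {
		return "", err
	}
	go func() {
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// Stop if renewal fails or the session is no longer returned.
				if entry, _, err := s.Renew(id, nil); err != nil || entry == nil {
					return
				}
			case <-done:
				s.Destroy(id, nil)
				return
			}
		}
	}()
	return id, nil
}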

View File

@ -0,0 +1,190 @@
package consulapi
import (
"testing"
)
func TestSession_CreateDestroy(t *testing.T) {
c := makeClient(t)
session := c.Session()
id, meta, err := session.Create(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.RequestTime == 0 {
t.Fatalf("bad: %v", meta)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
meta, err = session.Destroy(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.RequestTime == 0 {
t.Fatalf("bad: %v", meta)
}
}
func TestSession_CreateRenewDestroy(t *testing.T) {
c := makeClient(t)
session := c.Session()
se := &SessionEntry{
TTL: "10s",
}
id, meta, err := session.Create(se, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
defer session.Destroy(id, nil)
if meta.RequestTime == 0 {
t.Fatalf("bad: %v", meta)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
if meta.RequestTime == 0 {
t.Fatalf("bad: %v", meta)
}
renew, meta, err := session.Renew(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.RequestTime == 0 {
t.Fatalf("bad: %v", meta)
}
if renew == nil {
t.Fatalf("should get session")
}
if renew.ID != id {
t.Fatalf("should have matching id")
}
if renew.TTL != "10s" {
t.Fatalf("should get session with TTL")
}
}
func TestSession_Info(t *testing.T) {
c := makeClient(t)
session := c.Session()
id, _, err := session.Create(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
defer session.Destroy(id, nil)
info, qm, err := session.Info(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
if info == nil {
t.Fatalf("should get session")
}
if info.CreateIndex == 0 {
t.Fatalf("bad: %v", info)
}
if info.ID != id {
t.Fatalf("bad: %v", info)
}
if info.Name != "" {
t.Fatalf("bad: %v", info)
}
if info.Node == "" {
t.Fatalf("bad: %v", info)
}
if len(info.Checks) == 0 {
t.Fatalf("bad: %v", info)
}
if info.LockDelay == 0 {
t.Fatalf("bad: %v", info)
}
if info.Behavior != "release" {
t.Fatalf("bad: %v", info)
}
if info.TTL != "" {
t.Fatalf("bad: %v", info)
}
}
func TestSession_Node(t *testing.T) {
c := makeClient(t)
session := c.Session()
id, _, err := session.Create(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
defer session.Destroy(id, nil)
info, qm, err := session.Info(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
sessions, qm, err := session.Node(info.Node, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(sessions) != 1 {
t.Fatalf("bad: %v", sessions)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
}
func TestSession_List(t *testing.T) {
c := makeClient(t)
session := c.Session()
id, _, err := session.Create(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
defer session.Destroy(id, nil)
sessions, qm, err := session.List(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(sessions) != 1 {
t.Fatalf("bad: %v", sessions)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
}

View File

@ -0,0 +1,43 @@
package consulapi
// Status can be used to query the Status endpoints
type Status struct {
c *Client
}
// Status returns a handle to the status endpoints
func (c *Client) Status() *Status {
return &Status{c}
}
// Leader is used to query for a known leader
func (s *Status) Leader() (string, error) {
r := s.c.newRequest("GET", "/v1/status/leader")
_, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return "", err
}
defer resp.Body.Close()
var leader string
if err := decodeBody(resp, &leader); err != nil {
return "", err
}
return leader, nil
}
// Peers is used to query for the known raft peers
func (s *Status) Peers() ([]string, error) {
r := s.c.newRequest("GET", "/v1/status/peers")
_, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var peers []string
if err := decodeBody(resp, &peers); err != nil {
return nil, err
}
return peers, nil
}

View File

@ -0,0 +1,31 @@
package consulapi
import (
"testing"
)
func TestStatusLeader(t *testing.T) {
c := makeClient(t)
status := c.Status()
leader, err := status.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
if leader == "" {
t.Fatalf("Expected leader")
}
}
func TestStatusPeers(t *testing.T) {
c := makeClient(t)
status := c.Status()
peers, err := status.Peers()
if err != nil {
t.Fatalf("err: %v", err)
}
if len(peers) == 0 {
t.Fatalf("Expected peers ")
}
}

View File

@ -0,0 +1,23 @@
package etcd
// Add a new directory with a random etcd-generated key under the given path.
func (c *Client) AddChildDir(key string, ttl uint64) (*Response, error) {
raw, err := c.post(key, "", ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// Add a new file with a random etcd-generated key under the given path.
func (c *Client) AddChild(key string, value string, ttl uint64) (*Response, error) {
raw, err := c.post(key, value, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
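// Illustrative usage sketch (not part of the upstream go-etcd package):
// append ordered entries under a directory; etcd assigns increasing keys,
// so the children come back in insertion order. The function name, the
// "jobs" directory, and the entry values are assumptions; CreateDir's
// signature is taken from the tests in this package.
func exampleAppendJobs(c *Client) error {
	if _, err := c.CreateDir("jobs", 0); err != nil {
		return err
	}
	for _, job := range []string{"build", "test", "deploy"} {
		if _, err := c.AddChild("jobs", job, 0); err != nil {
			return err
		}
	}
	return nil
}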

View File

@ -0,0 +1,73 @@
package etcd
import "testing"
func TestAddChild(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("fooDir", true)
c.Delete("nonexistentDir", true)
}()
c.CreateDir("fooDir", 5)
_, err := c.AddChild("fooDir", "v0", 5)
if err != nil {
t.Fatal(err)
}
_, err = c.AddChild("fooDir", "v1", 5)
if err != nil {
t.Fatal(err)
}
resp, err := c.Get("fooDir", true, false)
// The child with v0 should precede the child with v1 because it's added
// earlier, so it should have a lower key.
if !(len(resp.Node.Nodes) == 2 && (resp.Node.Nodes[0].Value == "v0" && resp.Node.Nodes[1].Value == "v1")) {
t.Fatalf("AddChild 1 failed. There should be two chlidren whose values are v0 and v1, respectively."+
" The response was: %#v", resp)
}
// Creating a child under a nonexistent directory should succeed.
// The directory should be created.
resp, err = c.AddChild("nonexistentDir", "foo", 5)
if err != nil {
t.Fatal(err)
}
}
func TestAddChildDir(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("fooDir", true)
c.Delete("nonexistentDir", true)
}()
c.CreateDir("fooDir", 5)
_, err := c.AddChildDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
_, err = c.AddChildDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
resp, err := c.Get("fooDir", true, false)
// The first child dir should precede the second because it's added
// earlier, so it should have a lower key.
if !(len(resp.Node.Nodes) == 2 && (len(resp.Node.Nodes[0].Nodes) == 0 && len(resp.Node.Nodes[1].Nodes) == 0)) {
t.Fatalf("AddChildDir 1 failed. There should be two chlidren whose values are v0 and v1, respectively."+
" The response was: %#v", resp)
}
// Creating a child under a nonexistent directory should succeed.
// The directory should be created.
resp, err = c.AddChildDir("nonexistentDir", 5)
if err != nil {
t.Fatal(err)
}
}

View File

@ -0,0 +1,481 @@
package etcd
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"path"
"strings"
"time"
)
// See SetConsistency for how to use these constants.
const (
// Using strings rather than iota because the consistency level
// could be persisted to disk, so it'd be better to use
// human-readable values.
STRONG_CONSISTENCY = "STRONG"
WEAK_CONSISTENCY = "WEAK"
)
const (
defaultBufferSize = 10
)
func init() {
rand.Seed(int64(time.Now().Nanosecond()))
}
type Config struct {
CertFile string `json:"certFile"`
KeyFile string `json:"keyFile"`
CaCertFile []string `json:"caCertFiles"`
DialTimeout time.Duration `json:"timeout"`
Consistency string `json:"consistency"`
}
type credentials struct {
username string
password string
}
type Client struct {
config Config `json:"config"`
cluster *Cluster `json:"cluster"`
httpClient *http.Client
credentials *credentials
transport *http.Transport
persistence io.Writer
cURLch chan string
// CheckRetry can be used to control the policy for failed requests
// and modify the cluster if needed.
// The client calls it before sending requests again, and
// stops retrying if CheckRetry returns some error. The cases that
// this function needs to handle include no response and unexpected
// http status code of response.
// If CheckRetry is nil, client will call the default one
// `DefaultCheckRetry`.
// Argument cluster is the etcd.Cluster object that these requests have been made on.
// Argument numReqs is the number of http.Requests that have been made so far.
// Argument lastResp is the http.Responses from the last request.
// Argument err is the reason of the failure.
CheckRetry func(cluster *Cluster, numReqs int,
lastResp http.Response, err error) error
}
// NewClient creates a basic client that is configured to be used
// with the given machine list.
func NewClient(machines []string) *Client {
config := Config{
// default timeout is one second
DialTimeout: time.Second,
Consistency: WEAK_CONSISTENCY,
}
client := &Client{
cluster: NewCluster(machines),
config: config,
}
client.initHTTPClient()
client.saveConfig()
return client
}
// NewTLSClient creates a basic client with TLS configuration
func NewTLSClient(machines []string, cert, key, caCert string) (*Client, error) {
// overwrite the default machine to use https
if len(machines) == 0 {
machines = []string{"https://127.0.0.1:4001"}
}
config := Config{
// default timeout is one second
DialTimeout: time.Second,
Consistency: WEAK_CONSISTENCY,
CertFile: cert,
KeyFile: key,
CaCertFile: make([]string, 0),
}
client := &Client{
cluster: NewCluster(machines),
config: config,
}
err := client.initHTTPSClient(cert, key)
if err != nil {
return nil, err
}
err = client.AddRootCA(caCert)
client.saveConfig()
return client, nil
}
// NewClientFromFile creates a client from a given file path.
// The given file is expected to use the JSON format.
func NewClientFromFile(fpath string) (*Client, error) {
fi, err := os.Open(fpath)
if err != nil {
return nil, err
}
defer func() {
if err := fi.Close(); err != nil {
panic(err)
}
}()
return NewClientFromReader(fi)
}
// NewClientFromReader creates a Client configured from a given reader.
// The configuration is expected to use the JSON format.
func NewClientFromReader(reader io.Reader) (*Client, error) {
c := new(Client)
b, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
}
err = json.Unmarshal(b, c)
if err != nil {
return nil, err
}
if c.config.CertFile == "" {
c.initHTTPClient()
} else {
err = c.initHTTPSClient(c.config.CertFile, c.config.KeyFile)
}
if err != nil {
return nil, err
}
for _, caCert := range c.config.CaCertFile {
if err := c.AddRootCA(caCert); err != nil {
return nil, err
}
}
return c, nil
}
// Override the Client's HTTP Transport object
func (c *Client) SetTransport(tr *http.Transport) {
c.httpClient.Transport = tr
c.transport = tr
}
func (c *Client) SetCredentials(username, password string) {
c.credentials = &credentials{username, password}
}
func (c *Client) Close() {
c.transport.DisableKeepAlives = true
c.transport.CloseIdleConnections()
}
// initHTTPClient initializes an HTTP client for the etcd client
func (c *Client) initHTTPClient() {
c.transport = &http.Transport{
Dial: c.dial,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
c.httpClient = &http.Client{Transport: c.transport}
}
// initHTTPSClient initializes an HTTPS client for the etcd client
func (c *Client) initHTTPSClient(cert, key string) error {
if cert == "" || key == "" {
return errors.New("Require both cert and key path")
}
tlsCert, err := tls.LoadX509KeyPair(cert, key)
if err != nil {
return err
}
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{tlsCert},
InsecureSkipVerify: true,
}
tr := &http.Transport{
TLSClientConfig: tlsConfig,
Dial: c.dial,
}
c.httpClient = &http.Client{Transport: tr}
return nil
}
// SetPersistence sets a writer to which the config will be
// written every time it's changed.
func (c *Client) SetPersistence(writer io.Writer) {
c.persistence = writer
}
// SetConsistency changes the consistency level of the client.
//
// When consistency is set to STRONG_CONSISTENCY, all requests,
// including GET, are sent to the leader. This means that, assuming
// the absence of leader failures, GET requests are guaranteed to see
// the changes made by previous requests.
//
// When consistency is set to WEAK_CONSISTENCY, other requests
// are still sent to the leader, but GET requests are sent to a
// random server from the server pool. This reduces the read
// load on the leader, but it's not guaranteed that the GET requests
// will see changes made by previous requests (they might have not
// yet been committed on non-leader servers).
func (c *Client) SetConsistency(consistency string) error {
if !(consistency == STRONG_CONSISTENCY || consistency == WEAK_CONSISTENCY) {
return errors.New("The argument must be either STRONG_CONSISTENCY or WEAK_CONSISTENCY.")
}
c.config.Consistency = consistency
return nil
}
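// Illustrative usage sketch (not part of the upstream go-etcd package):
// switch to strong consistency so a Get is served by the leader and is
// guaranteed to reflect all prior writes, per the SetConsistency comment
// above. The function name and the key literal are assumptions.
func exampleStrongRead(c *Client) (*Response, error) {
	if err := c.SetConsistency(STRONG_CONSISTENCY); err != nil {
		return nil, err
	}
	// With STRONG_CONSISTENCY the GET below carries quorum=true (see RawGet).
	return c.Get("config/feature-flag", false, false)
}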
// Sets the DialTimeout value
func (c *Client) SetDialTimeout(d time.Duration) {
c.config.DialTimeout = d
}
// AddRootCA adds a root CA cert for the etcd client
func (c *Client) AddRootCA(caCert string) error {
if c.httpClient == nil {
return errors.New("Client has not been initialized yet!")
}
certBytes, err := ioutil.ReadFile(caCert)
if err != nil {
return err
}
tr, ok := c.httpClient.Transport.(*http.Transport)
if !ok {
panic("AddRootCA(): Transport type assert should not fail")
}
if tr.TLSClientConfig.RootCAs == nil {
caCertPool := x509.NewCertPool()
ok = caCertPool.AppendCertsFromPEM(certBytes)
if ok {
tr.TLSClientConfig.RootCAs = caCertPool
}
tr.TLSClientConfig.InsecureSkipVerify = false
} else {
ok = tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(certBytes)
}
if !ok {
err = errors.New("Unable to load caCert")
}
c.config.CaCertFile = append(c.config.CaCertFile, caCert)
c.saveConfig()
return err
}
// SetCluster updates cluster information using the given machine list.
func (c *Client) SetCluster(machines []string) bool {
success := c.internalSyncCluster(machines)
return success
}
func (c *Client) GetCluster() []string {
return c.cluster.Machines
}
// SyncCluster updates the cluster information using the internal machine list.
func (c *Client) SyncCluster() bool {
return c.internalSyncCluster(c.cluster.Machines)
}
// internalSyncCluster syncs cluster information using the given machine list.
func (c *Client) internalSyncCluster(machines []string) bool {
for _, machine := range machines {
httpPath := c.createHttpPath(machine, path.Join(version, "members"))
resp, err := c.httpClient.Get(httpPath)
if err != nil {
// try another machine in the cluster
continue
}
if resp.StatusCode != http.StatusOK { // fall-back to old endpoint
httpPath := c.createHttpPath(machine, path.Join(version, "machines"))
resp, err := c.httpClient.Get(httpPath)
if err != nil {
// try another machine in the cluster
continue
}
b, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
// try another machine in the cluster
continue
}
// update Machines List
c.cluster.updateFromStr(string(b))
} else {
b, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
// try another machine in the cluster
continue
}
var mCollection memberCollection
if err := json.Unmarshal(b, &mCollection); err != nil {
// try another machine
continue
}
urls := make([]string, 0)
for _, m := range mCollection {
urls = append(urls, m.ClientURLs...)
}
// update Machines List
c.cluster.updateFromStr(strings.Join(urls, ","))
}
logger.Debug("sync.machines ", c.cluster.Machines)
c.saveConfig()
return true
}
return false
}
// createHttpPath creates a complete HTTP URL.
// serverName should contain both the host name and a port number, if any.
func (c *Client) createHttpPath(serverName string, _path string) string {
u, err := url.Parse(serverName)
if err != nil {
panic(err)
}
u.Path = path.Join(u.Path, _path)
if u.Scheme == "" {
u.Scheme = "http"
}
return u.String()
}
// dial attempts to open a TCP connection to the provided address, explicitly
// enabling keep-alives with a one-second interval.
func (c *Client) dial(network, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(network, addr, c.config.DialTimeout)
if err != nil {
return nil, err
}
tcpConn, ok := conn.(*net.TCPConn)
if !ok {
return nil, errors.New("Failed type-assertion of net.Conn as *net.TCPConn")
}
// Keep TCP alive to check whether or not the remote machine is down
if err = tcpConn.SetKeepAlive(true); err != nil {
return nil, err
}
if err = tcpConn.SetKeepAlivePeriod(time.Second); err != nil {
return nil, err
}
return tcpConn, nil
}
func (c *Client) OpenCURL() {
c.cURLch = make(chan string, defaultBufferSize)
}
func (c *Client) CloseCURL() {
c.cURLch = nil
}
func (c *Client) sendCURL(command string) {
go func() {
select {
case c.cURLch <- command:
default:
}
}()
}
func (c *Client) RecvCURL() string {
return <-c.cURLch
}
// saveConfig saves the current config using c.persistence.
func (c *Client) saveConfig() error {
if c.persistence != nil {
b, err := json.Marshal(c)
if err != nil {
return err
}
_, err = c.persistence.Write(b)
if err != nil {
return err
}
}
return nil
}
// MarshalJSON implements the Marshaller interface
// as defined by the standard JSON package.
func (c *Client) MarshalJSON() ([]byte, error) {
b, err := json.Marshal(struct {
Config Config `json:"config"`
Cluster *Cluster `json:"cluster"`
}{
Config: c.config,
Cluster: c.cluster,
})
if err != nil {
return nil, err
}
return b, nil
}
// UnmarshalJSON implements the Unmarshaller interface
// as defined by the standard JSON package.
func (c *Client) UnmarshalJSON(b []byte) error {
temp := struct {
Config Config `json:"config"`
Cluster *Cluster `json:"cluster"`
}{}
err := json.Unmarshal(b, &temp)
if err != nil {
return err
}
c.cluster = temp.Cluster
c.config = temp.Config
return nil
}

View File

@ -0,0 +1,108 @@
package etcd
import (
"encoding/json"
"fmt"
"net"
"net/url"
"os"
"testing"
)
// To pass this test, we need to create a cluster of 3 machines
// The servers should be listening on localhost:4001, 4002, 4003
func TestSync(t *testing.T) {
fmt.Println("Make sure there are three nodes at 0.0.0.0:4001-4003")
// Explicit trailing slash to ensure this doesn't reproduce:
// https://github.com/coreos/go-etcd/issues/82
c := NewClient([]string{"http://127.0.0.1:4001/"})
success := c.SyncCluster()
if !success {
t.Fatal("cannot sync machines")
}
for _, m := range c.GetCluster() {
u, err := url.Parse(m)
if err != nil {
t.Fatal(err)
}
if u.Scheme != "http" {
t.Fatal("scheme must be http")
}
host, _, err := net.SplitHostPort(u.Host)
if err != nil {
t.Fatal(err)
}
if host != "localhost" {
t.Fatal("Host must be localhost")
}
}
badMachines := []string{"abc", "edef"}
success = c.SetCluster(badMachines)
if success {
t.Fatal("should not sync on bad machines")
}
goodMachines := []string{"127.0.0.1:4002"}
success = c.SetCluster(goodMachines)
if !success {
t.Fatal("cannot sync machines")
} else {
fmt.Println(c.cluster.Machines)
}
}
func TestPersistence(t *testing.T) {
c := NewClient(nil)
c.SyncCluster()
fo, err := os.Create("config.json")
if err != nil {
t.Fatal(err)
}
defer func() {
if err := fo.Close(); err != nil {
panic(err)
}
}()
c.SetPersistence(fo)
err = c.saveConfig()
if err != nil {
t.Fatal(err)
}
c2, err := NewClientFromFile("config.json")
if err != nil {
t.Fatal(err)
}
// Verify that the two clients have the same config
b1, _ := json.Marshal(c)
b2, _ := json.Marshal(c2)
if string(b1) != string(b2) {
t.Fatalf("The two configs should be equal!")
}
}
func TestClientRetry(t *testing.T) {
c := NewClient([]string{"http://strange", "http://127.0.0.1:4001"})
// use first endpoint as the picked url
c.cluster.picked = 0
if _, err := c.Set("foo", "bar", 5); err != nil {
t.Fatal(err)
}
if _, err := c.Delete("foo", true); err != nil {
t.Fatal(err)
}
}

View File

@ -0,0 +1,37 @@
package etcd
import (
"math/rand"
"strings"
)
type Cluster struct {
Leader string `json:"leader"`
Machines []string `json:"machines"`
picked int
}
func NewCluster(machines []string) *Cluster {
// if an empty slice was sent in then just assume HTTP 4001 on localhost
if len(machines) == 0 {
machines = []string{"http://127.0.0.1:4001"}
}
// default leader and machines
return &Cluster{
Leader: "",
Machines: machines,
picked: rand.Intn(len(machines)),
}
}
func (cl *Cluster) failure() { cl.picked = rand.Intn(len(cl.Machines)) }
func (cl *Cluster) pick() string { return cl.Machines[cl.picked] }
func (cl *Cluster) updateFromStr(machines string) {
cl.Machines = strings.Split(machines, ",")
for i := range cl.Machines {
cl.Machines[i] = strings.TrimSpace(cl.Machines[i])
}
cl.picked = rand.Intn(len(cl.Machines))
}

View File

@ -0,0 +1,34 @@
package etcd
import "fmt"
func (c *Client) CompareAndDelete(key string, prevValue string, prevIndex uint64) (*Response, error) {
raw, err := c.RawCompareAndDelete(key, prevValue, prevIndex)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
func (c *Client) RawCompareAndDelete(key string, prevValue string, prevIndex uint64) (*RawResponse, error) {
if prevValue == "" && prevIndex == 0 {
return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
}
options := Options{}
if prevValue != "" {
options["prevValue"] = prevValue
}
if prevIndex != 0 {
options["prevIndex"] = prevIndex
}
raw, err := c.delete(key, options)
if err != nil {
return nil, err
}
return raw, err
}

View File

@ -0,0 +1,46 @@
package etcd
import (
"testing"
)
func TestCompareAndDelete(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
}()
c.Set("foo", "bar", 5)
// This should succeed with a correct prevValue
resp, err := c.CompareAndDelete("foo", "bar", 0)
if err != nil {
t.Fatal(err)
}
if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
t.Fatalf("CompareAndDelete 1 prevNode failed: %#v", resp)
}
resp, _ = c.Set("foo", "bar", 5)
// This should fail because it gives an incorrect prevValue
_, err = c.CompareAndDelete("foo", "xxx", 0)
if err == nil {
t.Fatalf("CompareAndDelete 2 should have failed. The response is: %#v", resp)
}
// This should succeed because it gives a correct prevIndex
resp, err = c.CompareAndDelete("foo", "", resp.Node.ModifiedIndex)
if err != nil {
t.Fatal(err)
}
if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp)
}
c.Set("foo", "bar", 5)
// This should fail because it gives an incorrect prevIndex
resp, err = c.CompareAndDelete("foo", "", 29817514)
if err == nil {
t.Fatalf("CompareAndDelete 4 should have failed. The response is: %#v", resp)
}
}

View File

@ -0,0 +1,36 @@
package etcd
import "fmt"
func (c *Client) CompareAndSwap(key string, value string, ttl uint64,
prevValue string, prevIndex uint64) (*Response, error) {
raw, err := c.RawCompareAndSwap(key, value, ttl, prevValue, prevIndex)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
func (c *Client) RawCompareAndSwap(key string, value string, ttl uint64,
prevValue string, prevIndex uint64) (*RawResponse, error) {
if prevValue == "" && prevIndex == 0 {
return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
}
options := Options{}
if prevValue != "" {
options["prevValue"] = prevValue
}
if prevIndex != 0 {
options["prevIndex"] = prevIndex
}
raw, err := c.put(key, value, ttl, options)
if err != nil {
return nil, err
}
return raw, err
}

View File

@ -0,0 +1,57 @@
package etcd
import (
"testing"
)
func TestCompareAndSwap(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
}()
c.Set("foo", "bar", 5)
// This should succeed
resp, err := c.CompareAndSwap("foo", "bar2", 5, "bar", 0)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
t.Fatalf("CompareAndSwap 1 failed: %#v", resp)
}
if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
t.Fatalf("CompareAndSwap 1 prevNode failed: %#v", resp)
}
// This should fail because it gives an incorrect prevValue
resp, err = c.CompareAndSwap("foo", "bar3", 5, "xxx", 0)
if err == nil {
t.Fatalf("CompareAndSwap 2 should have failed. The response is: %#v", resp)
}
resp, err = c.Set("foo", "bar", 5)
if err != nil {
t.Fatal(err)
}
// This should succeed
resp, err = c.CompareAndSwap("foo", "bar2", 5, "", resp.Node.ModifiedIndex)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
t.Fatalf("CompareAndSwap 3 failed: %#v", resp)
}
if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp)
}
// This should fail because it gives an incorrect prevIndex
resp, err = c.CompareAndSwap("foo", "bar3", 5, "", 29817514)
if err == nil {
t.Fatalf("CompareAndSwap 4 should have failed. The response is: %#v", resp)
}
}

View File

@ -0,0 +1,55 @@
package etcd
import (
"fmt"
"io/ioutil"
"log"
"strings"
)
var logger *etcdLogger
func SetLogger(l *log.Logger) {
logger = &etcdLogger{l}
}
func GetLogger() *log.Logger {
return logger.log
}
type etcdLogger struct {
log *log.Logger
}
func (p *etcdLogger) Debug(args ...interface{}) {
msg := "DEBUG: " + fmt.Sprint(args...)
p.log.Println(msg)
}
func (p *etcdLogger) Debugf(f string, args ...interface{}) {
msg := "DEBUG: " + fmt.Sprintf(f, args...)
// Append newline if necessary
if !strings.HasSuffix(msg, "\n") {
msg = msg + "\n"
}
p.log.Print(msg)
}
func (p *etcdLogger) Warning(args ...interface{}) {
msg := "WARNING: " + fmt.Sprint(args...)
p.log.Println(msg)
}
func (p *etcdLogger) Warningf(f string, args ...interface{}) {
msg := "WARNING: " + fmt.Sprintf(f, args...)
// Append newline if necessary
if !strings.HasSuffix(msg, "\n") {
msg = msg + "\n"
}
p.log.Print(msg)
}
func init() {
// Default logger uses the go default log.
SetLogger(log.New(ioutil.Discard, "go-etcd", log.LstdFlags))
}

View File

@ -0,0 +1,28 @@
package etcd
import (
"testing"
)
type Foo struct{}
type Bar struct {
one string
two int
}
// Tests that logs don't panic with arbitrary interfaces
func TestDebug(t *testing.T) {
f := &Foo{}
b := &Bar{"asfd", 3}
for _, test := range []interface{}{
1234,
"asdf",
f,
b,
} {
logger.Debug(test)
logger.Debugf("something, %s", test)
logger.Warning(test)
logger.Warningf("something, %s", test)
}
}

View File

@ -0,0 +1,40 @@
package etcd
// Delete deletes the given key.
//
// When recursive is set to false, if the key points to a
// directory, the method will fail.
//
// When recursive is set to true, if the key points to a file,
// the file will be deleted; if the key points to a directory,
// then everything under the directory (including all child directories)
// will be deleted.
func (c *Client) Delete(key string, recursive bool) (*Response, error) {
raw, err := c.RawDelete(key, recursive, false)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// DeleteDir deletes an empty directory or a key value pair
func (c *Client) DeleteDir(key string) (*Response, error) {
raw, err := c.RawDelete(key, false, true)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
func (c *Client) RawDelete(key string, recursive bool, dir bool) (*RawResponse, error) {
ops := Options{
"recursive": recursive,
"dir": dir,
}
return c.delete(key, ops)
}
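// Illustrative usage sketch (not part of the upstream go-etcd package):
// DeleteDir only removes an empty directory, while Delete with
// recursive=true removes a directory and everything beneath it, as the
// comments above describe. The function name and key are assumptions.
func exampleCleanup(c *Client) error {
	// Fails if "app/config" still has children.
	if _, err := c.DeleteDir("app/config"); err != nil {
		// Fall back to a recursive delete of the whole subtree.
		_, err = c.Delete("app/config", true)
		return err
	}
	return nil
}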

View File

@ -0,0 +1,81 @@
package etcd
import (
"testing"
)
func TestDelete(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
}()
c.Set("foo", "bar", 5)
resp, err := c.Delete("foo", false)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Value == "") {
t.Fatalf("Delete failed with %s", resp.Node.Value)
}
if !(resp.PrevNode.Value == "bar") {
t.Fatalf("Delete PrevNode failed with %s", resp.Node.Value)
}
resp, err = c.Delete("foo", false)
if err == nil {
t.Fatalf("Delete should have failed because the key foo did not exist. "+
"The response was: %v", resp)
}
}
func TestDeleteAll(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
c.Delete("fooDir", true)
}()
c.SetDir("foo", 5)
// test delete an empty dir
resp, err := c.DeleteDir("foo")
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Value == "") {
t.Fatalf("DeleteAll 1 failed: %#v", resp)
}
if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
t.Fatalf("DeleteAll 1 PrevNode failed: %#v", resp)
}
c.CreateDir("fooDir", 5)
c.Set("fooDir/foo", "bar", 5)
_, err = c.DeleteDir("fooDir")
if err == nil {
t.Fatal("should not able to delete a non-empty dir with deletedir")
}
resp, err = c.Delete("fooDir", true)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Value == "") {
t.Fatalf("DeleteAll 2 failed: %#v", resp)
}
if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
t.Fatalf("DeleteAll 2 PrevNode failed: %#v", resp)
}
resp, err = c.Delete("foo", true)
if err == nil {
t.Fatalf("DeleteAll should have failed because the key foo did not exist. "+
"The response was: %v", resp)
}
}

View File

@ -0,0 +1,49 @@
package etcd
import (
"encoding/json"
"fmt"
)
const (
ErrCodeEtcdNotReachable = 501
ErrCodeUnhandledHTTPStatus = 502
)
var (
errorMap = map[int]string{
ErrCodeEtcdNotReachable: "All the given peers are not reachable",
}
)
type EtcdError struct {
ErrorCode int `json:"errorCode"`
Message string `json:"message"`
Cause string `json:"cause,omitempty"`
Index uint64 `json:"index"`
}
func (e EtcdError) Error() string {
return fmt.Sprintf("%v: %v (%v) [%v]", e.ErrorCode, e.Message, e.Cause, e.Index)
}
func newError(errorCode int, cause string, index uint64) *EtcdError {
return &EtcdError{
ErrorCode: errorCode,
Message: errorMap[errorCode],
Cause: cause,
Index: index,
}
}
func handleError(b []byte) error {
etcdErr := new(EtcdError)
err := json.Unmarshal(b, etcdErr)
if err != nil {
logger.Warningf("cannot unmarshal etcd error: %v", err)
return err
}
return etcdErr
}

View File

@ -0,0 +1,32 @@
package etcd
// Get gets the file or directory associated with the given key.
// If the key points to a directory, files and directories under
// it will be returned in sorted or unsorted order, depending on
// the sort flag.
// If recursive is set to false, contents under child directories
// will not be returned.
// If recursive is set to true, all the contents will be returned.
func (c *Client) Get(key string, sort, recursive bool) (*Response, error) {
raw, err := c.RawGet(key, sort, recursive)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
func (c *Client) RawGet(key string, sort, recursive bool) (*RawResponse, error) {
var q bool
if c.config.Consistency == STRONG_CONSISTENCY {
q = true
}
ops := Options{
"recursive": recursive,
"sorted": sort,
"quorum": q,
}
return c.get(key, ops)
}
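// Illustrative usage sketch (not part of the upstream go-etcd package):
// list a directory sorted and recursively, then walk the returned nodes,
// per the Get documentation above. The function name and key are assumptions.
func exampleListDir(c *Client) ([]string, error) {
	resp, err := c.Get("services", true, true)
	if err != nil {
		return nil, err
	}
	var keys []string
	for _, n := range resp.Node.Nodes {
		keys = append(keys, n.Key)
	}
	return keys, nil
}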

View File

@ -0,0 +1,131 @@
package etcd
import (
"reflect"
"testing"
)
// cleanNode scrubs Expiration, ModifiedIndex and CreatedIndex of a node.
func cleanNode(n *Node) {
n.Expiration = nil
n.ModifiedIndex = 0
n.CreatedIndex = 0
}
// cleanResult scrubs a result object two levels deep of Expiration,
// ModifiedIndex and CreatedIndex.
func cleanResult(result *Response) {
// TODO(philips): make this recursive.
cleanNode(result.Node)
for i, _ := range result.Node.Nodes {
cleanNode(result.Node.Nodes[i])
for j, _ := range result.Node.Nodes[i].Nodes {
cleanNode(result.Node.Nodes[i].Nodes[j])
}
}
}
func TestGet(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
}()
c.Set("foo", "bar", 5)
result, err := c.Get("foo", false, false)
if err != nil {
t.Fatal(err)
}
if result.Node.Key != "/foo" || result.Node.Value != "bar" {
t.Fatalf("Get failed with %s %s %v", result.Node.Key, result.Node.Value, result.Node.TTL)
}
result, err = c.Get("goo", false, false)
if err == nil {
t.Fatalf("should not be able to get non-exist key")
}
}
func TestGetAll(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("fooDir", true)
}()
c.CreateDir("fooDir", 5)
c.Set("fooDir/k0", "v0", 5)
c.Set("fooDir/k1", "v1", 5)
// Return kv-pairs in sorted order
result, err := c.Get("fooDir", true, false)
if err != nil {
t.Fatal(err)
}
expected := Nodes{
&Node{
Key: "/fooDir/k0",
Value: "v0",
TTL: 5,
},
&Node{
Key: "/fooDir/k1",
Value: "v1",
TTL: 5,
},
}
cleanResult(result)
if !reflect.DeepEqual(result.Node.Nodes, expected) {
t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
}
// Test the `recursive` option
c.CreateDir("fooDir/childDir", 5)
c.Set("fooDir/childDir/k2", "v2", 5)
// Return kv-pairs in sorted order
result, err = c.Get("fooDir", true, true)
cleanResult(result)
if err != nil {
t.Fatal(err)
}
expected = Nodes{
&Node{
Key: "/fooDir/childDir",
Dir: true,
Nodes: Nodes{
&Node{
Key: "/fooDir/childDir/k2",
Value: "v2",
TTL: 5,
},
},
TTL: 5,
},
&Node{
Key: "/fooDir/k0",
Value: "v0",
TTL: 5,
},
&Node{
Key: "/fooDir/k1",
Value: "v1",
TTL: 5,
},
}
cleanResult(result)
if !reflect.DeepEqual(result.Node.Nodes, expected) {
t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
}
}

View File

@ -0,0 +1,30 @@
package etcd
import "encoding/json"
type Member struct {
ID string `json:"id"`
Name string `json:"name"`
PeerURLs []string `json:"peerURLs"`
ClientURLs []string `json:"clientURLs"`
}
type memberCollection []Member
func (c *memberCollection) UnmarshalJSON(data []byte) error {
d := struct {
Members []Member
}{}
if err := json.Unmarshal(data, &d); err != nil {
return err
}
if d.Members == nil {
*c = make([]Member, 0)
return nil
}
*c = d.Members
return nil
}

View File

@ -0,0 +1,71 @@
package etcd
import (
"encoding/json"
"reflect"
"testing"
)
func TestMemberCollectionUnmarshal(t *testing.T) {
tests := []struct {
body []byte
want memberCollection
}{
{
body: []byte(`{"members":[]}`),
want: memberCollection([]Member{}),
},
{
body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
want: memberCollection(
[]Member{
{
ID: "2745e2525fce8fe",
Name: "node3",
PeerURLs: []string{
"http://127.0.0.1:7003",
},
ClientURLs: []string{
"http://127.0.0.1:4003",
},
},
{
ID: "42134f434382925",
Name: "node1",
PeerURLs: []string{
"http://127.0.0.1:2380",
"http://127.0.0.1:7001",
},
ClientURLs: []string{
"http://127.0.0.1:2379",
"http://127.0.0.1:4001",
},
},
{
ID: "94088180e21eb87b",
Name: "node2",
PeerURLs: []string{
"http://127.0.0.1:7002",
},
ClientURLs: []string{
"http://127.0.0.1:4002",
},
},
},
),
},
}
for i, tt := range tests {
var got memberCollection
err := json.Unmarshal(tt.body, &got)
if err != nil {
t.Errorf("#%d: unexpected error: %v", i, err)
continue
}
if !reflect.DeepEqual(tt.want, got) {
t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.want, got)
}
}
}

View File

@ -0,0 +1,72 @@
package etcd
import (
"fmt"
"net/url"
"reflect"
)
type Options map[string]interface{}
// An internally-used data structure that represents a mapping
// between valid options and their kinds
type validOptions map[string]reflect.Kind
// Valid options for GET, PUT, POST, DELETE
// Using CAPITALIZED_UNDERSCORE to emphasize that these
// values are meant to be used as constants.
var (
VALID_GET_OPTIONS = validOptions{
"recursive": reflect.Bool,
"quorum": reflect.Bool,
"sorted": reflect.Bool,
"wait": reflect.Bool,
"waitIndex": reflect.Uint64,
}
VALID_PUT_OPTIONS = validOptions{
"prevValue": reflect.String,
"prevIndex": reflect.Uint64,
"prevExist": reflect.Bool,
"dir": reflect.Bool,
}
VALID_POST_OPTIONS = validOptions{}
VALID_DELETE_OPTIONS = validOptions{
"recursive": reflect.Bool,
"dir": reflect.Bool,
"prevValue": reflect.String,
"prevIndex": reflect.Uint64,
}
)
// toParameters converts options to an HTTP query string
func (ops Options) toParameters(validOps validOptions) (string, error) {
p := "?"
values := url.Values{}
if ops == nil {
return "", nil
}
for k, v := range ops {
// Check if the given option is valid (that it exists)
kind := validOps[k]
if kind == reflect.Invalid {
return "", fmt.Errorf("Invalid option: %v", k)
}
// Check if the given option is of the valid type
t := reflect.TypeOf(v)
if kind != t.Kind() {
return "", fmt.Errorf("Option %s should be of %v kind, not of %v kind.",
k, kind, t.Kind())
}
values.Set(k, fmt.Sprintf("%v", v))
}
p += values.Encode()
return p, nil
}
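A small usage sketch, not taken from the vendored source: since toParameters is unexported, this assumes a hypothetical test placed alongside options.go inside the etcd package; the expected string relies on url.Values.Encode sorting keys alphabetically.
package etcd

import "testing"

// Hypothetical test; toParameters is unexported, so this would have to live
// next to options.go in the etcd package.
func TestOptionsToParametersSketch(t *testing.T) {
	ops := Options{"recursive": true, "sorted": true}
	got, err := ops.toParameters(VALID_GET_OPTIONS)
	if err != nil {
		t.Fatal(err)
	}
	// url.Values.Encode sorts keys, so the result is deterministic.
	if got != "?recursive=true&sorted=true" {
		t.Fatalf("unexpected query string: %s", got)
	}
}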

View File

@ -0,0 +1,403 @@
package etcd
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
"strings"
"sync"
"time"
)
// Errors introduced by handling requests
var (
ErrRequestCancelled = errors.New("sending request is cancelled")
)
type RawRequest struct {
Method string
RelativePath string
Values url.Values
Cancel <-chan bool
}
// NewRawRequest returns a new RawRequest
func NewRawRequest(method, relativePath string, values url.Values, cancel <-chan bool) *RawRequest {
return &RawRequest{
Method: method,
RelativePath: relativePath,
Values: values,
Cancel: cancel,
}
}
// getCancelable issues a cancelable GET request
func (c *Client) getCancelable(key string, options Options,
cancel <-chan bool) (*RawResponse, error) {
logger.Debugf("get %s [%s]", key, c.cluster.pick())
p := keyToPath(key)
str, err := options.toParameters(VALID_GET_OPTIONS)
if err != nil {
return nil, err
}
p += str
req := NewRawRequest("GET", p, nil, cancel)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
// get issues a GET request
func (c *Client) get(key string, options Options) (*RawResponse, error) {
return c.getCancelable(key, options, nil)
}
// put issues a PUT request
func (c *Client) put(key string, value string, ttl uint64,
options Options) (*RawResponse, error) {
logger.Debugf("put %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.pick())
p := keyToPath(key)
str, err := options.toParameters(VALID_PUT_OPTIONS)
if err != nil {
return nil, err
}
p += str
req := NewRawRequest("PUT", p, buildValues(value, ttl), nil)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
// post issues a POST request
func (c *Client) post(key string, value string, ttl uint64) (*RawResponse, error) {
logger.Debugf("post %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.pick())
p := keyToPath(key)
req := NewRawRequest("POST", p, buildValues(value, ttl), nil)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
// delete issues a DELETE request
func (c *Client) delete(key string, options Options) (*RawResponse, error) {
logger.Debugf("delete %s [%s]", key, c.cluster.pick())
p := keyToPath(key)
str, err := options.toParameters(VALID_DELETE_OPTIONS)
if err != nil {
return nil, err
}
p += str
req := NewRawRequest("DELETE", p, nil, nil)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
// SendRequest sends an HTTP request and returns a Response as defined by etcd
func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) {
var req *http.Request
var resp *http.Response
var httpPath string
var err error
var respBody []byte
var numReqs = 1
checkRetry := c.CheckRetry
if checkRetry == nil {
checkRetry = DefaultCheckRetry
}
cancelled := make(chan bool, 1)
reqLock := new(sync.Mutex)
if rr.Cancel != nil {
cancelRoutine := make(chan bool)
defer close(cancelRoutine)
go func() {
select {
case <-rr.Cancel:
cancelled <- true
logger.Debug("send.request is cancelled")
case <-cancelRoutine:
return
}
// Keep cancelling the request until this goroutine is stopped,
// because we have no way of knowing whether the cancellation succeeded.
for {
reqLock.Lock()
c.httpClient.Transport.(*http.Transport).CancelRequest(req)
reqLock.Unlock()
select {
case <-time.After(100 * time.Millisecond):
case <-cancelRoutine:
return
}
}
}()
}
// If we connect to a follower and consistency is required, retry until
// we connect to a leader
sleep := 25 * time.Millisecond
maxSleep := time.Second
for attempt := 0; ; attempt++ {
if attempt > 0 {
select {
case <-cancelled:
return nil, ErrRequestCancelled
case <-time.After(sleep):
sleep = sleep * 2
if sleep > maxSleep {
sleep = maxSleep
}
}
}
logger.Debug("Connecting to etcd: attempt ", attempt+1, " for ", rr.RelativePath)
// get httpPath if not set
if httpPath == "" {
httpPath = c.getHttpPath(rr.RelativePath)
}
// Return a cURL command if curlChan is set
if c.cURLch != nil {
command := fmt.Sprintf("curl -X %s %s", rr.Method, httpPath)
for key, value := range rr.Values {
command += fmt.Sprintf(" -d %s=%s", key, value[0])
}
if c.credentials != nil {
command += fmt.Sprintf(" -u %s", c.credentials.username)
}
c.sendCURL(command)
}
logger.Debug("send.request.to ", httpPath, " | method ", rr.Method)
req, err := func() (*http.Request, error) {
reqLock.Lock()
defer reqLock.Unlock()
if rr.Values == nil {
if req, err = http.NewRequest(rr.Method, httpPath, nil); err != nil {
return nil, err
}
} else {
body := strings.NewReader(rr.Values.Encode())
if req, err = http.NewRequest(rr.Method, httpPath, body); err != nil {
return nil, err
}
req.Header.Set("Content-Type",
"application/x-www-form-urlencoded; param=value")
}
return req, nil
}()
if err != nil {
return nil, err
}
if c.credentials != nil {
req.SetBasicAuth(c.credentials.username, c.credentials.password)
}
resp, err = c.httpClient.Do(req)
// clear previous httpPath
httpPath = ""
defer func() {
if resp != nil {
resp.Body.Close()
}
}()
// If the request was cancelled, return ErrRequestCancelled directly
select {
case <-cancelled:
return nil, ErrRequestCancelled
default:
}
numReqs++
// network error, change a machine!
if err != nil {
logger.Debug("network error: ", err.Error())
lastResp := http.Response{}
if checkErr := checkRetry(c.cluster, numReqs, lastResp, err); checkErr != nil {
return nil, checkErr
}
c.cluster.failure()
continue
}
// if there is no error, we should have received a response
logger.Debug("recv.response.from ", httpPath)
if validHttpStatusCode[resp.StatusCode] {
// try to read byte code and break the loop
respBody, err = ioutil.ReadAll(resp.Body)
if err == nil {
logger.Debug("recv.success ", httpPath)
break
}
// a ReadAll error may be caused by a cancelled request
select {
case <-cancelled:
return nil, ErrRequestCancelled
default:
}
if err == io.ErrUnexpectedEOF {
// underlying connection was closed prematurely, probably by timeout
// TODO: empty body or unexpectedEOF can cause http.Transport to get hosed;
// this allows the client to detect that and take evasive action. Need
// to revisit once code.google.com/p/go/issues/detail?id=8648 gets fixed.
respBody = []byte{}
break
}
}
if resp.StatusCode == http.StatusTemporaryRedirect {
u, err := resp.Location()
if err != nil {
logger.Warning(err)
} else {
// set httpPath for following redirection
httpPath = u.String()
}
resp.Body.Close()
continue
}
if checkErr := checkRetry(c.cluster, numReqs, *resp,
errors.New("Unexpected HTTP status code")); checkErr != nil {
return nil, checkErr
}
resp.Body.Close()
}
r := &RawResponse{
StatusCode: resp.StatusCode,
Body: respBody,
Header: resp.Header,
}
return r, nil
}
// DefaultCheckRetry defines the retry behaviour for failed HTTP requests.
// If we have retried 2 * (number of machines) times, stop retrying.
// If the status code is InternalServerError, sleep for 200ms.
func DefaultCheckRetry(cluster *Cluster, numReqs int, lastResp http.Response,
err error) error {
if numReqs > 2*len(cluster.Machines) {
errStr := fmt.Sprintf("failed to propose on members %v twice [last error: %v]", cluster.Machines, err)
return newError(ErrCodeEtcdNotReachable, errStr, 0)
}
if isEmptyResponse(lastResp) {
// always retry if it failed to get response from one machine
return nil
}
if !shouldRetry(lastResp) {
body := []byte("nil")
if lastResp.Body != nil {
if b, err := ioutil.ReadAll(lastResp.Body); err == nil {
body = b
}
}
errStr := fmt.Sprintf("unhandled http status [%s] with body [%s]", http.StatusText(lastResp.StatusCode), body)
return newError(ErrCodeUnhandledHTTPStatus, errStr, 0)
}
// sleep for a while and wait for the leader election to finish
time.Sleep(time.Millisecond * 200)
logger.Warning("bad response status code", lastResp.StatusCode)
return nil
}
func isEmptyResponse(r http.Response) bool { return r.StatusCode == 0 }
// shouldRetry returns whether the response deserves a retry.
func shouldRetry(r http.Response) bool {
// TODO: only retry when the cluster is in leader election
// We cannot do it exactly because etcd doesn't support it well.
return r.StatusCode == http.StatusInternalServerError
}
func (c *Client) getHttpPath(s ...string) string {
fullPath := c.cluster.pick() + "/" + version
for _, seg := range s {
fullPath = fullPath + "/" + seg
}
return fullPath
}
// buildValues builds a url.Values map according to the given value and ttl
func buildValues(value string, ttl uint64) url.Values {
v := url.Values{}
if value != "" {
v.Set("value", value)
}
if ttl > 0 {
v.Set("ttl", fmt.Sprintf("%v", ttl))
}
return v
}
// keyToPath converts a key string to an HTTP path (excluding the version prefix), with URL escaping
// for example: key[foo] -> path[keys/foo]
// key[/%z] -> path[keys/%25z]
// key[/] -> path[keys/]
func keyToPath(key string) string {
// URL-escape our key, except for slashes
p := strings.Replace(url.QueryEscape(path.Join("keys", key)), "%2F", "/", -1)
// corner case: if the key is "/", "//", etc.,
// path.Join will strip the trailing "/",
// so we need to add it back
if p == "keys" {
p = "keys/"
}
return p
}
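An illustrative sketch of the exported raw-request path (editor-added, not part of the vendored file): it assumes a reachable etcd node behind the default client and a hypothetical key "foo"; the hand-built relative path mirrors what keyToPath plus a recursive=false option would produce.
package etcd

import "fmt"

// rawGetExample is a hypothetical helper showing NewRawRequest, SendRequest
// and Unmarshal wired together by hand.
func rawGetExample() error {
	c := NewClient(nil)
	req := NewRawRequest("GET", "keys/foo?recursive=false", nil, nil)
	raw, err := c.SendRequest(req)
	if err != nil {
		return err
	}
	resp, err := raw.Unmarshal()
	if err != nil {
		return err
	}
	fmt.Println(resp.Node.Key, resp.Node.Value)
	return nil
}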

View File

@ -0,0 +1,22 @@
package etcd
import "testing"
func TestKeyToPath(t *testing.T) {
tests := []struct {
key string
wpath string
}{
{"", "keys/"},
{"foo", "keys/foo"},
{"foo/bar", "keys/foo/bar"},
{"%z", "keys/%25z"},
{"/", "keys/"},
}
for i, tt := range tests {
path := keyToPath(tt.key)
if path != tt.wpath {
t.Errorf("#%d: path = %s, want %s", i, path, tt.wpath)
}
}
}

View File

@ -0,0 +1,89 @@
package etcd
import (
"encoding/json"
"net/http"
"strconv"
"time"
)
const (
rawResponse = iota
normalResponse
)
type responseType int
type RawResponse struct {
StatusCode int
Body []byte
Header http.Header
}
var (
validHttpStatusCode = map[int]bool{
http.StatusCreated: true,
http.StatusOK: true,
http.StatusBadRequest: true,
http.StatusNotFound: true,
http.StatusPreconditionFailed: true,
http.StatusForbidden: true,
}
)
// Unmarshal parses RawResponse and stores the result in Response
func (rr *RawResponse) Unmarshal() (*Response, error) {
if rr.StatusCode != http.StatusOK && rr.StatusCode != http.StatusCreated {
return nil, handleError(rr.Body)
}
resp := new(Response)
err := json.Unmarshal(rr.Body, resp)
if err != nil {
return nil, err
}
// attach index and term to response
resp.EtcdIndex, _ = strconv.ParseUint(rr.Header.Get("X-Etcd-Index"), 10, 64)
resp.RaftIndex, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Index"), 10, 64)
resp.RaftTerm, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Term"), 10, 64)
return resp, nil
}
type Response struct {
Action string `json:"action"`
Node *Node `json:"node"`
PrevNode *Node `json:"prevNode,omitempty"`
EtcdIndex uint64 `json:"etcdIndex"`
RaftIndex uint64 `json:"raftIndex"`
RaftTerm uint64 `json:"raftTerm"`
}
type Node struct {
Key string `json:"key,omitempty"`
Value string `json:"value,omitempty"`
Dir bool `json:"dir,omitempty"`
Expiration *time.Time `json:"expiration,omitempty"`
TTL int64 `json:"ttl,omitempty"`
Nodes Nodes `json:"nodes,omitempty"`
ModifiedIndex uint64 `json:"modifiedIndex,omitempty"`
CreatedIndex uint64 `json:"createdIndex,omitempty"`
}
type Nodes []*Node
// interfaces for sorting
func (ns Nodes) Len() int {
return len(ns)
}
func (ns Nodes) Less(i, j int) bool {
return ns[i].Key < ns[j].Key
}
func (ns Nodes) Swap(i, j int) {
ns[i], ns[j] = ns[j], ns[i]
}
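A hedged example (not from the vendored source) of unmarshalling a RawResponse and sorting a directory's children locally; it assumes raw came from one of the client's request helpers and uses the fact that Nodes implements sort.Interface.
package etcd

import "sort"

// unmarshalExample is a hypothetical helper; raw is assumed to come from one
// of the client's request methods, e.g. a GET on a directory.
func unmarshalExample(raw *RawResponse) (*Response, error) {
	resp, err := raw.Unmarshal()
	if err != nil {
		// Non-2xx bodies are turned into an etcd error here.
		return nil, err
	}
	if resp.Node != nil && resp.Node.Dir {
		// Nodes implements sort.Interface, so children can be ordered by key.
		sort.Sort(resp.Node.Nodes)
	}
	return resp, nil
}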

View File

@ -0,0 +1,42 @@
package etcd
import (
"fmt"
"testing"
)
func TestSetCurlChan(t *testing.T) {
c := NewClient(nil)
c.OpenCURL()
defer func() {
c.Delete("foo", true)
}()
_, err := c.Set("foo", "bar", 5)
if err != nil {
t.Fatal(err)
}
expected := fmt.Sprintf("curl -X PUT %s/v2/keys/foo -d value=bar -d ttl=5",
c.cluster.pick())
actual := c.RecvCURL()
if expected != actual {
t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
actual, expected)
}
c.SetConsistency(STRONG_CONSISTENCY)
_, err = c.Get("foo", false, false)
if err != nil {
t.Fatal(err)
}
expected = fmt.Sprintf("curl -X GET %s/v2/keys/foo?quorum=true&recursive=false&sorted=false",
c.cluster.pick())
actual = c.RecvCURL()
if expected != actual {
t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
actual, expected)
}
}

View File

@ -0,0 +1,137 @@
package etcd
// Set sets the given key to the given value.
// It will create a new key value pair or replace the old one.
// It will not replace an existing directory.
func (c *Client) Set(key string, value string, ttl uint64) (*Response, error) {
raw, err := c.RawSet(key, value, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// SetDir sets the given key to a directory.
// It will create a new directory or replace the old key value pair by a directory.
// It will not replace an existing directory.
func (c *Client) SetDir(key string, ttl uint64) (*Response, error) {
raw, err := c.RawSetDir(key, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// CreateDir creates a directory. It succeeds only if
// the given key does not yet exist.
func (c *Client) CreateDir(key string, ttl uint64) (*Response, error) {
raw, err := c.RawCreateDir(key, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// UpdateDir updates the given directory. It succeeds only if the
// given key already exists.
func (c *Client) UpdateDir(key string, ttl uint64) (*Response, error) {
raw, err := c.RawUpdateDir(key, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// Create creates a file with the given value under the given key. It succeeds
// only if the given key does not yet exist.
func (c *Client) Create(key string, value string, ttl uint64) (*Response, error) {
raw, err := c.RawCreate(key, value, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// CreateInOrder creates a file with a key that's guaranteed to be higher than other
// keys in the given directory. It is useful for creating queues.
func (c *Client) CreateInOrder(dir string, value string, ttl uint64) (*Response, error) {
raw, err := c.RawCreateInOrder(dir, value, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// Update updates the given key to the given value. It succeeds only if the
// given key already exists.
func (c *Client) Update(key string, value string, ttl uint64) (*Response, error) {
raw, err := c.RawUpdate(key, value, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
func (c *Client) RawUpdateDir(key string, ttl uint64) (*RawResponse, error) {
ops := Options{
"prevExist": true,
"dir": true,
}
return c.put(key, "", ttl, ops)
}
func (c *Client) RawCreateDir(key string, ttl uint64) (*RawResponse, error) {
ops := Options{
"prevExist": false,
"dir": true,
}
return c.put(key, "", ttl, ops)
}
func (c *Client) RawSet(key string, value string, ttl uint64) (*RawResponse, error) {
return c.put(key, value, ttl, nil)
}
func (c *Client) RawSetDir(key string, ttl uint64) (*RawResponse, error) {
ops := Options{
"dir": true,
}
return c.put(key, "", ttl, ops)
}
func (c *Client) RawUpdate(key string, value string, ttl uint64) (*RawResponse, error) {
ops := Options{
"prevExist": true,
}
return c.put(key, value, ttl, ops)
}
func (c *Client) RawCreate(key string, value string, ttl uint64) (*RawResponse, error) {
ops := Options{
"prevExist": false,
}
return c.put(key, value, ttl, ops)
}
func (c *Client) RawCreateInOrder(dir string, value string, ttl uint64) (*RawResponse, error) {
return c.post(dir, value, ttl)
}
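An editor-added sketch of the create-versus-update distinction, assuming a reachable cluster and a hypothetical key /app/port: Create succeeds only if the key is absent (prevExist=false), Update only if it already exists (prevExist=true), and Set writes unconditionally.
package etcd

// setCreateUpdateExample is a hypothetical helper; the key /app/port is
// illustrative only.
func setCreateUpdateExample(c *Client) error {
	if _, err := c.Create("/app/port", "8080", 0); err != nil {
		return err // fails if /app/port already exists (prevExist=false)
	}
	if _, err := c.Update("/app/port", "9090", 0); err != nil {
		return err // fails if /app/port does not exist (prevExist=true)
	}
	_, err := c.Set("/app/port", "7070", 0) // unconditional write
	return err
}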

View File

@ -0,0 +1,241 @@
package etcd
import (
"testing"
)
func TestSet(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
}()
resp, err := c.Set("foo", "bar", 5)
if err != nil {
t.Fatal(err)
}
if resp.Node.Key != "/foo" || resp.Node.Value != "bar" || resp.Node.TTL != 5 {
t.Fatalf("Set 1 failed: %#v", resp)
}
if resp.PrevNode != nil {
t.Fatalf("Set 1 PrevNode failed: %#v", resp)
}
resp, err = c.Set("foo", "bar2", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/foo" && resp.Node.Value == "bar2" && resp.Node.TTL == 5) {
t.Fatalf("Set 2 failed: %#v", resp)
}
if resp.PrevNode.Key != "/foo" || resp.PrevNode.Value != "bar" || resp.Node.TTL != 5 {
t.Fatalf("Set 2 PrevNode failed: %#v", resp)
}
}
func TestUpdate(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
c.Delete("nonexistent", true)
}()
resp, err := c.Set("foo", "bar", 5)
if err != nil {
t.Fatal(err)
}
// This should succeed.
resp, err = c.Update("foo", "wakawaka", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "update" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
t.Fatalf("Update 1 failed: %#v", resp)
}
if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.Node.TTL == 5) {
t.Fatalf("Update 1 prevValue failed: %#v", resp)
}
// This should fail because the key does not exist.
resp, err = c.Update("nonexistent", "whatever", 5)
if err == nil {
t.Fatalf("The key %v did not exist, so the update should have failed."+
"The response was: %#v", resp.Node.Key, resp)
}
}
func TestCreate(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("newKey", true)
}()
newKey := "/newKey"
newValue := "/newValue"
// This should succeed
resp, err := c.Create(newKey, newValue, 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "create" && resp.Node.Key == newKey &&
resp.Node.Value == newValue && resp.Node.TTL == 5) {
t.Fatalf("Create 1 failed: %#v", resp)
}
if resp.PrevNode != nil {
t.Fatalf("Create 1 PrevNode failed: %#v", resp)
}
// This should fail, because the key is already there
resp, err = c.Create(newKey, newValue, 5)
if err == nil {
t.Fatalf("The key %v did exist, so the creation should have failed."+
"The response was: %#v", resp.Node.Key, resp)
}
}
func TestCreateInOrder(t *testing.T) {
c := NewClient(nil)
dir := "/queue"
defer func() {
c.DeleteDir(dir)
}()
var firstKey, secondKey string
resp, err := c.CreateInOrder(dir, "1", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "create" && resp.Node.Value == "1" && resp.Node.TTL == 5) {
t.Fatalf("Create 1 failed: %#v", resp)
}
firstKey = resp.Node.Key
resp, err = c.CreateInOrder(dir, "2", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "create" && resp.Node.Value == "2" && resp.Node.TTL == 5) {
t.Fatalf("Create 2 failed: %#v", resp)
}
secondKey = resp.Node.Key
if firstKey >= secondKey {
t.Fatalf("Expected first key to be greater than second key, but %s is not greater than %s",
firstKey, secondKey)
}
}
func TestSetDir(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
c.Delete("fooDir", true)
}()
resp, err := c.CreateDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/fooDir" && resp.Node.Value == "" && resp.Node.TTL == 5) {
t.Fatalf("SetDir 1 failed: %#v", resp)
}
if resp.PrevNode != nil {
t.Fatalf("SetDir 1 PrevNode failed: %#v", resp)
}
// This should fail because /fooDir already points to a directory
resp, err = c.CreateDir("/fooDir", 5)
if err == nil {
t.Fatalf("fooDir already points to a directory, so SetDir should have failed."+
"The response was: %#v", resp)
}
_, err = c.Set("foo", "bar", 5)
if err != nil {
t.Fatal(err)
}
// This should succeed
// It should replace the key
resp, err = c.SetDir("foo", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/foo" && resp.Node.Value == "" && resp.Node.TTL == 5) {
t.Fatalf("SetDir 2 failed: %#v", resp)
}
if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.PrevNode.TTL == 5) {
t.Fatalf("SetDir 2 failed: %#v", resp)
}
}
func TestUpdateDir(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("fooDir", true)
}()
resp, err := c.CreateDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
// This should succeed.
resp, err = c.UpdateDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "update" && resp.Node.Key == "/fooDir" &&
resp.Node.Value == "" && resp.Node.TTL == 5) {
t.Fatalf("UpdateDir 1 failed: %#v", resp)
}
if !(resp.PrevNode.Key == "/fooDir" && resp.PrevNode.Dir == true && resp.PrevNode.TTL == 5) {
t.Fatalf("UpdateDir 1 PrevNode failed: %#v", resp)
}
// This should fail because the key does not exist.
resp, err = c.UpdateDir("nonexistentDir", 5)
if err == nil {
t.Fatalf("The key %v did not exist, so the update should have failed."+
"The response was: %#v", resp.Node.Key, resp)
}
}
func TestCreateDir(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("fooDir", true)
}()
// This should succeed
resp, err := c.CreateDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "create" && resp.Node.Key == "/fooDir" &&
resp.Node.Value == "" && resp.Node.TTL == 5) {
t.Fatalf("CreateDir 1 failed: %#v", resp)
}
if resp.PrevNode != nil {
t.Fatalf("CreateDir 1 PrevNode failed: %#v", resp)
}
// This should fail, because the key is already there
resp, err = c.CreateDir("fooDir", 5)
if err == nil {
t.Fatalf("The key %v did exist, so the creation should have failed."+
"The response was: %#v", resp.Node.Key, resp)
}
}

View File

@ -0,0 +1,6 @@
package etcd
const (
version = "v2"
packageVersion = "v2.0.0+git"
)

View File

@ -0,0 +1,103 @@
package etcd
import (
"errors"
)
// Errors introduced by the Watch command.
var (
ErrWatchStoppedByUser = errors.New("Watch stopped by the user via stop channel")
)
// If recursive is set to true the watch returns the first change under the given
// prefix since the given index.
//
// If recursive is set to false the watch returns the first change to the given key
// since the given index.
//
// To watch for the latest change, set waitIndex = 0.
//
// If a receiver channel is given, it will be a long-term watch. Watch will block on
// sending to the channel. After someone receives from the channel, it will go on to
// watch that prefix again. If a stop channel is given, the client can close the
// long-term watch using the stop channel.
func (c *Client) Watch(prefix string, waitIndex uint64, recursive bool,
receiver chan *Response, stop chan bool) (*Response, error) {
logger.Debugf("watch %s [%s]", prefix, c.cluster.Leader)
if receiver == nil {
raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
defer close(receiver)
for {
raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
if err != nil {
return nil, err
}
resp, err := raw.Unmarshal()
if err != nil {
return nil, err
}
waitIndex = resp.Node.ModifiedIndex + 1
receiver <- resp
}
}
func (c *Client) RawWatch(prefix string, waitIndex uint64, recursive bool,
receiver chan *RawResponse, stop chan bool) (*RawResponse, error) {
logger.Debugf("rawWatch %s [%s]", prefix, c.cluster.Leader)
if receiver == nil {
return c.watchOnce(prefix, waitIndex, recursive, stop)
}
for {
raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
if err != nil {
return nil, err
}
resp, err := raw.Unmarshal()
if err != nil {
return nil, err
}
waitIndex = resp.Node.ModifiedIndex + 1
receiver <- raw
}
}
// helper func
// returns when there is a change under the given prefix
func (c *Client) watchOnce(key string, waitIndex uint64, recursive bool, stop chan bool) (*RawResponse, error) {
options := Options{
"wait": true,
}
if waitIndex > 0 {
options["waitIndex"] = waitIndex
}
if recursive {
options["recursive"] = true
}
resp, err := c.getCancelable(key, options, stop)
if err == ErrRequestCancelled {
return nil, ErrWatchStoppedByUser
}
return resp, err
}
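A hedged, editor-added sketch of a long-term watch, assuming a reachable cluster and a hypothetical prefix /app: responses stream on the receiver channel until a value is sent on the stop channel, after which Watch returns ErrWatchStoppedByUser.
package etcd

import "fmt"

// watchExample is a hypothetical helper; the prefix /app is illustrative only.
func watchExample(c *Client) {
	receiver := make(chan *Response, 10)
	stop := make(chan bool, 1)

	// Consume a handful of change notifications, then stop the watch.
	go func() {
		for i := 0; i < 5; i++ {
			resp := <-receiver
			fmt.Println("changed:", resp.Node.Key, "->", resp.Node.Value)
		}
		stop <- true
	}()

	_, err := c.Watch("/app", 0, true, receiver, stop)
	if err != ErrWatchStoppedByUser {
		fmt.Println("watch ended unexpectedly:", err)
	}
}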

View File

@ -0,0 +1,119 @@
package etcd
import (
"fmt"
"runtime"
"testing"
"time"
)
func TestWatch(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("watch_foo", true)
}()
go setHelper("watch_foo", "bar", c)
resp, err := c.Watch("watch_foo", 0, false, nil, nil)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
t.Fatalf("Watch 1 failed: %#v", resp)
}
go setHelper("watch_foo", "bar", c)
resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, false, nil, nil)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
t.Fatalf("Watch 2 failed: %#v", resp)
}
routineNum := runtime.NumGoroutine()
ch := make(chan *Response, 10)
stop := make(chan bool, 1)
go setLoop("watch_foo", "bar", c)
go receiver(ch, stop)
_, err = c.Watch("watch_foo", 0, false, ch, stop)
if err != ErrWatchStoppedByUser {
t.Fatalf("Watch returned a non-user stop error")
}
if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
}
}
func TestWatchAll(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("watch_foo", true)
}()
go setHelper("watch_foo/foo", "bar", c)
resp, err := c.Watch("watch_foo", 0, true, nil, nil)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
t.Fatalf("WatchAll 1 failed: %#v", resp)
}
go setHelper("watch_foo/foo", "bar", c)
resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, true, nil, nil)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
t.Fatalf("WatchAll 2 failed: %#v", resp)
}
ch := make(chan *Response, 10)
stop := make(chan bool, 1)
routineNum := runtime.NumGoroutine()
go setLoop("watch_foo/foo", "bar", c)
go receiver(ch, stop)
_, err = c.Watch("watch_foo", 0, true, ch, stop)
if err != ErrWatchStoppedByUser {
t.Fatalf("Watch returned a non-user stop error")
}
if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
}
}
func setHelper(key, value string, c *Client) {
time.Sleep(time.Second)
c.Set(key, value, 100)
}
func setLoop(key, value string, c *Client) {
time.Sleep(time.Second)
for i := 0; i < 10; i++ {
newValue := fmt.Sprintf("%s_%v", value, i)
c.Set(key, newValue, 100)
time.Sleep(time.Second / 10)
}
}
func receiver(c chan *Response, stop chan bool) {
for i := 0; i < 10; i++ {
<-c
}
stop <- true
}

View File

@ -0,0 +1,4 @@
[568].out
_go*
_test*
_obj

21
Godeps/_workspace/src/github.com/kr/pretty/License generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright 2012 Keith Rarick
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

9
Godeps/_workspace/src/github.com/kr/pretty/Readme generated vendored Normal file
View File

@ -0,0 +1,9 @@
package pretty
import "github.com/kr/pretty"
Package pretty provides pretty-printing for Go values.
Documentation
http://godoc.org/github.com/kr/pretty

158
Godeps/_workspace/src/github.com/kr/pretty/diff.go generated vendored Normal file
View File

@ -0,0 +1,158 @@
package pretty
import (
"fmt"
"io"
"reflect"
)
type sbuf []string
func (s *sbuf) Write(b []byte) (int, error) {
*s = append(*s, string(b))
return len(b), nil
}
// Diff returns a slice where each element describes
// a difference between a and b.
func Diff(a, b interface{}) (desc []string) {
Fdiff((*sbuf)(&desc), a, b)
return desc
}
// Fdiff writes to w a description of the differences between a and b.
func Fdiff(w io.Writer, a, b interface{}) {
diffWriter{w: w}.diff(reflect.ValueOf(a), reflect.ValueOf(b))
}
type diffWriter struct {
w io.Writer
l string // label
}
func (w diffWriter) printf(f string, a ...interface{}) {
var l string
if w.l != "" {
l = w.l + ": "
}
fmt.Fprintf(w.w, l+f, a...)
}
func (w diffWriter) diff(av, bv reflect.Value) {
if !av.IsValid() && bv.IsValid() {
w.printf("nil != %#v", bv.Interface())
return
}
if av.IsValid() && !bv.IsValid() {
w.printf("%#v != nil", av.Interface())
return
}
if !av.IsValid() && !bv.IsValid() {
return
}
at := av.Type()
bt := bv.Type()
if at != bt {
w.printf("%v != %v", at, bt)
return
}
// numeric types, including bool
if at.Kind() < reflect.Array {
a, b := av.Interface(), bv.Interface()
if a != b {
w.printf("%#v != %#v", a, b)
}
return
}
switch at.Kind() {
case reflect.String:
a, b := av.Interface(), bv.Interface()
if a != b {
w.printf("%q != %q", a, b)
}
case reflect.Ptr:
switch {
case av.IsNil() && !bv.IsNil():
w.printf("nil != %v", bv.Interface())
case !av.IsNil() && bv.IsNil():
w.printf("%v != nil", av.Interface())
case !av.IsNil() && !bv.IsNil():
w.diff(av.Elem(), bv.Elem())
}
case reflect.Struct:
for i := 0; i < av.NumField(); i++ {
w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i))
}
case reflect.Slice:
lenA := av.Len()
lenB := bv.Len()
if lenA != lenB {
w.printf("%s[%d] != %s[%d]", av.Type(), lenA, bv.Type(), lenB)
break
}
for i := 0; i < lenA; i++ {
w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i))
}
case reflect.Map:
ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys())
for _, k := range ak {
w := w.relabel(fmt.Sprintf("[%#v]", k.Interface()))
w.printf("%q != (missing)", av.MapIndex(k))
}
for _, k := range both {
w := w.relabel(fmt.Sprintf("[%#v]", k.Interface()))
w.diff(av.MapIndex(k), bv.MapIndex(k))
}
for _, k := range bk {
w := w.relabel(fmt.Sprintf("[%#v]", k.Interface()))
w.printf("(missing) != %q", bv.MapIndex(k))
}
case reflect.Interface:
w.diff(reflect.ValueOf(av.Interface()), reflect.ValueOf(bv.Interface()))
default:
if !reflect.DeepEqual(av.Interface(), bv.Interface()) {
w.printf("%# v != %# v", Formatter(av.Interface()), Formatter(bv.Interface()))
}
}
}
func (d diffWriter) relabel(name string) (d1 diffWriter) {
d1 = d
if d.l != "" && name[0] != '[' {
d1.l += "."
}
d1.l += name
return d1
}
func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) {
for _, av := range a {
inBoth := false
for _, bv := range b {
if reflect.DeepEqual(av.Interface(), bv.Interface()) {
inBoth = true
both = append(both, av)
break
}
}
if !inBoth {
ak = append(ak, av)
}
}
for _, bv := range b {
inBoth := false
for _, av := range a {
if reflect.DeepEqual(av.Interface(), bv.Interface()) {
inBoth = true
break
}
}
if !inBoth {
bk = append(bk, bv)
}
}
return
}
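A small illustrative sketch (not part of the vendored file) of pretty.Diff on two struct values; the config type and its values are made up, and the import path follows the rewritten vendored path used elsewhere in this commit.
package main

import (
	"fmt"

	"github.com/khlieng/name_pending/Godeps/_workspace/src/github.com/kr/pretty"
)

// The config type and its values are hypothetical.
type config struct {
	Host string
	Port int
}

func main() {
	a := config{Host: "localhost", Port: 8080}
	b := config{Host: "localhost", Port: 9090}
	// Each element names the field (or index/key) where the values disagree.
	for _, d := range pretty.Diff(a, b) {
		fmt.Println(d) // e.g. "Port: 8080 != 9090"
	}
}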

View File

@ -0,0 +1,74 @@
package pretty
import (
"testing"
)
type difftest struct {
a interface{}
b interface{}
exp []string
}
type S struct {
A int
S *S
I interface{}
C []int
}
var diffs = []difftest{
{a: nil, b: nil},
{a: S{A: 1}, b: S{A: 1}},
{0, "", []string{`int != string`}},
{0, 1, []string{`0 != 1`}},
{S{}, new(S), []string{`pretty.S != *pretty.S`}},
{"a", "b", []string{`"a" != "b"`}},
{S{}, S{A: 1}, []string{`A: 0 != 1`}},
{new(S), &S{A: 1}, []string{`A: 0 != 1`}},
{S{S: new(S)}, S{S: &S{A: 1}}, []string{`S.A: 0 != 1`}},
{S{}, S{I: 0}, []string{`I: nil != 0`}},
{S{I: 1}, S{I: "x"}, []string{`I: int != string`}},
{S{}, S{C: []int{1}}, []string{`C: []int[0] != []int[1]`}},
{S{C: []int{}}, S{C: []int{1}}, []string{`C: []int[0] != []int[1]`}},
{S{C: []int{1, 2, 3}}, S{C: []int{1, 2, 4}}, []string{`C[2]: 3 != 4`}},
{S{}, S{A: 1, S: new(S)}, []string{`A: 0 != 1`, `S: nil != &{0 <nil> <nil> []}`}},
}
func TestDiff(t *testing.T) {
for _, tt := range diffs {
got := Diff(tt.a, tt.b)
eq := len(got) == len(tt.exp)
if eq {
for i := range got {
eq = eq && got[i] == tt.exp[i]
}
}
if !eq {
t.Errorf("diffing % #v", tt.a)
t.Errorf("with % #v", tt.b)
diffdiff(t, got, tt.exp)
continue
}
}
}
func diffdiff(t *testing.T, got, exp []string) {
minus(t, "unexpected:", got, exp)
minus(t, "missing:", exp, got)
}
func minus(t *testing.T, s string, a, b []string) {
var i, j int
for i = 0; i < len(a); i++ {
for j = 0; j < len(b); j++ {
if a[i] == b[j] {
break
}
}
if j == len(b) {
t.Error(s, a[i])
}
}
}

View File

@ -0,0 +1,20 @@
package pretty_test
import (
"fmt"
"github.com/khlieng/name_pending/Godeps/_workspace/src/github.com/kr/pretty"
)
func Example() {
type myType struct {
a, b int
}
var x = []myType{{1, 2}, {3, 4}, {5, 6}}
fmt.Printf("%# v", pretty.Formatter(x))
// output:
// []pretty_test.myType{
// {a:1, b:2},
// {a:3, b:4},
// {a:5, b:6},
// }
}

337
Godeps/_workspace/src/github.com/kr/pretty/formatter.go generated vendored Normal file
View File

@ -0,0 +1,337 @@
package pretty
import (
"fmt"
"io"
"reflect"
"strconv"
"text/tabwriter"
"github.com/khlieng/name_pending/Godeps/_workspace/src/github.com/kr/text"
)
const (
limit = 50
)
type formatter struct {
x interface{}
force bool
quote bool
}
// Formatter makes a wrapper, f, that will format x as go source with line
// breaks and tabs. Object f responds to the "%v" formatting verb when both the
// "#" and " " (space) flags are set, for example:
//
// fmt.Sprintf("%# v", Formatter(x))
//
// If one of these two flags is not set, or any other verb is used, f will
// format x according to the usual rules of package fmt.
// In particular, if x satisfies fmt.Formatter, then x.Format will be called.
func Formatter(x interface{}) (f fmt.Formatter) {
return formatter{x: x, quote: true}
}
func (fo formatter) String() string {
return fmt.Sprint(fo.x) // unwrap it
}
func (fo formatter) passThrough(f fmt.State, c rune) {
s := "%"
for i := 0; i < 128; i++ {
if f.Flag(i) {
s += string(i)
}
}
if w, ok := f.Width(); ok {
s += fmt.Sprintf("%d", w)
}
if p, ok := f.Precision(); ok {
s += fmt.Sprintf(".%d", p)
}
s += string(c)
fmt.Fprintf(f, s, fo.x)
}
func (fo formatter) Format(f fmt.State, c rune) {
if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') {
w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0)
p := &printer{tw: w, Writer: w, visited: make(map[visit]int)}
p.printValue(reflect.ValueOf(fo.x), true, fo.quote)
w.Flush()
return
}
fo.passThrough(f, c)
}
type printer struct {
io.Writer
tw *tabwriter.Writer
visited map[visit]int
depth int
}
func (p *printer) indent() *printer {
q := *p
q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0)
q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'})
return &q
}
func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) {
if showType {
io.WriteString(p, v.Type().String())
fmt.Fprintf(p, "(%#v)", x)
} else {
fmt.Fprintf(p, "%#v", x)
}
}
// printValue must keep track of already-printed pointer values to avoid
// infinite recursion.
type visit struct {
v uintptr
typ reflect.Type
}
func (p *printer) printValue(v reflect.Value, showType, quote bool) {
if p.depth > 10 {
io.WriteString(p, "!%v(DEPTH EXCEEDED)")
return
}
switch v.Kind() {
case reflect.Bool:
p.printInline(v, v.Bool(), showType)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
p.printInline(v, v.Int(), showType)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
p.printInline(v, v.Uint(), showType)
case reflect.Float32, reflect.Float64:
p.printInline(v, v.Float(), showType)
case reflect.Complex64, reflect.Complex128:
fmt.Fprintf(p, "%#v", v.Complex())
case reflect.String:
p.fmtString(v.String(), quote)
case reflect.Map:
t := v.Type()
if showType {
io.WriteString(p, t.String())
}
writeByte(p, '{')
if nonzero(v) {
expand := !canInline(v.Type())
pp := p
if expand {
writeByte(p, '\n')
pp = p.indent()
}
keys := v.MapKeys()
for i := 0; i < v.Len(); i++ {
showTypeInStruct := true
k := keys[i]
mv := v.MapIndex(k)
pp.printValue(k, false, true)
writeByte(pp, ':')
if expand {
writeByte(pp, '\t')
}
showTypeInStruct = t.Elem().Kind() == reflect.Interface
pp.printValue(mv, showTypeInStruct, true)
if expand {
io.WriteString(pp, ",\n")
} else if i < v.Len()-1 {
io.WriteString(pp, ", ")
}
}
if expand {
pp.tw.Flush()
}
}
writeByte(p, '}')
case reflect.Struct:
t := v.Type()
if v.CanAddr() {
addr := v.UnsafeAddr()
vis := visit{addr, t}
if vd, ok := p.visited[vis]; ok && vd < p.depth {
p.fmtString(t.String()+"{(CYCLIC REFERENCE)}", false)
break // don't print v again
}
p.visited[vis] = p.depth
}
if showType {
io.WriteString(p, t.String())
}
writeByte(p, '{')
if nonzero(v) {
expand := !canInline(v.Type())
pp := p
if expand {
writeByte(p, '\n')
pp = p.indent()
}
for i := 0; i < v.NumField(); i++ {
showTypeInStruct := true
if f := t.Field(i); f.Name != "" {
io.WriteString(pp, f.Name)
writeByte(pp, ':')
if expand {
writeByte(pp, '\t')
}
showTypeInStruct = labelType(f.Type)
}
pp.printValue(getField(v, i), showTypeInStruct, true)
if expand {
io.WriteString(pp, ",\n")
} else if i < v.NumField()-1 {
io.WriteString(pp, ", ")
}
}
if expand {
pp.tw.Flush()
}
}
writeByte(p, '}')
case reflect.Interface:
switch e := v.Elem(); {
case e.Kind() == reflect.Invalid:
io.WriteString(p, "nil")
case e.IsValid():
pp := *p
pp.depth++
pp.printValue(e, showType, true)
default:
io.WriteString(p, v.Type().String())
io.WriteString(p, "(nil)")
}
case reflect.Array, reflect.Slice:
t := v.Type()
if showType {
io.WriteString(p, t.String())
}
if v.Kind() == reflect.Slice && v.IsNil() && showType {
io.WriteString(p, "(nil)")
break
}
if v.Kind() == reflect.Slice && v.IsNil() {
io.WriteString(p, "nil")
break
}
writeByte(p, '{')
expand := !canInline(v.Type())
pp := p
if expand {
writeByte(p, '\n')
pp = p.indent()
}
for i := 0; i < v.Len(); i++ {
showTypeInSlice := t.Elem().Kind() == reflect.Interface
pp.printValue(v.Index(i), showTypeInSlice, true)
if expand {
io.WriteString(pp, ",\n")
} else if i < v.Len()-1 {
io.WriteString(pp, ", ")
}
}
if expand {
pp.tw.Flush()
}
writeByte(p, '}')
case reflect.Ptr:
e := v.Elem()
if !e.IsValid() {
writeByte(p, '(')
io.WriteString(p, v.Type().String())
io.WriteString(p, ")(nil)")
} else {
pp := *p
pp.depth++
writeByte(pp, '&')
pp.printValue(e, true, true)
}
case reflect.Chan:
x := v.Pointer()
if showType {
writeByte(p, '(')
io.WriteString(p, v.Type().String())
fmt.Fprintf(p, ")(%#v)", x)
} else {
fmt.Fprintf(p, "%#v", x)
}
case reflect.Func:
io.WriteString(p, v.Type().String())
io.WriteString(p, " {...}")
case reflect.UnsafePointer:
p.printInline(v, v.Pointer(), showType)
case reflect.Invalid:
io.WriteString(p, "nil")
}
}
func canInline(t reflect.Type) bool {
switch t.Kind() {
case reflect.Map:
return !canExpand(t.Elem())
case reflect.Struct:
for i := 0; i < t.NumField(); i++ {
if canExpand(t.Field(i).Type) {
return false
}
}
return true
case reflect.Interface:
return false
case reflect.Array, reflect.Slice:
return !canExpand(t.Elem())
case reflect.Ptr:
return false
case reflect.Chan, reflect.Func, reflect.UnsafePointer:
return false
}
return true
}
func canExpand(t reflect.Type) bool {
switch t.Kind() {
case reflect.Map, reflect.Struct,
reflect.Interface, reflect.Array, reflect.Slice,
reflect.Ptr:
return true
}
return false
}
func labelType(t reflect.Type) bool {
switch t.Kind() {
case reflect.Interface, reflect.Struct:
return true
}
return false
}
func (p *printer) fmtString(s string, quote bool) {
if quote {
s = strconv.Quote(s)
}
io.WriteString(p, s)
}
func tryDeepEqual(a, b interface{}) bool {
defer func() { recover() }()
return reflect.DeepEqual(a, b)
}
func writeByte(w io.Writer, b byte) {
w.Write([]byte{b})
}
func getField(v reflect.Value, i int) reflect.Value {
val := v.Field(i)
if val.Kind() == reflect.Interface && !val.IsNil() {
val = val.Elem()
}
return val
}

View File

@ -0,0 +1,261 @@
package pretty
import (
"fmt"
"io"
"strings"
"testing"
"unsafe"
)
type test struct {
v interface{}
s string
}
type LongStructTypeName struct {
longFieldName interface{}
otherLongFieldName interface{}
}
type SA struct {
t *T
v T
}
type T struct {
x, y int
}
type F int
func (f F) Format(s fmt.State, c rune) {
fmt.Fprintf(s, "F(%d)", int(f))
}
var long = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
var gosyntax = []test{
{nil, `nil`},
{"", `""`},
{"a", `"a"`},
{1, "int(1)"},
{1.0, "float64(1)"},
{[]int(nil), "[]int(nil)"},
{[0]int{}, "[0]int{}"},
{complex(1, 0), "(1+0i)"},
//{make(chan int), "(chan int)(0x1234)"},
{unsafe.Pointer(uintptr(unsafe.Pointer(&long))), fmt.Sprintf("unsafe.Pointer(0x%02x)", uintptr(unsafe.Pointer(&long)))},
{func(int) {}, "func(int) {...}"},
{map[int]int{1: 1}, "map[int]int{1:1}"},
{int32(1), "int32(1)"},
{io.EOF, `&errors.errorString{s:"EOF"}`},
{[]string{"a"}, `[]string{"a"}`},
{
[]string{long},
`[]string{"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"}`,
},
{F(5), "pretty.F(5)"},
{
SA{&T{1, 2}, T{3, 4}},
`pretty.SA{
t: &pretty.T{x:1, y:2},
v: pretty.T{x:3, y:4},
}`,
},
{
map[int][]byte{1: {}},
`map[int][]uint8{
1: {},
}`,
},
{
map[int]T{1: {}},
`map[int]pretty.T{
1: {},
}`,
},
{
long,
`"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"`,
},
{
LongStructTypeName{
longFieldName: LongStructTypeName{},
otherLongFieldName: long,
},
`pretty.LongStructTypeName{
longFieldName: pretty.LongStructTypeName{},
otherLongFieldName: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
}`,
},
{
&LongStructTypeName{
longFieldName: &LongStructTypeName{},
otherLongFieldName: (*LongStructTypeName)(nil),
},
`&pretty.LongStructTypeName{
longFieldName: &pretty.LongStructTypeName{},
otherLongFieldName: (*pretty.LongStructTypeName)(nil),
}`,
},
{
[]LongStructTypeName{
{nil, nil},
{3, 3},
{long, nil},
},
`[]pretty.LongStructTypeName{
{},
{
longFieldName: int(3),
otherLongFieldName: int(3),
},
{
longFieldName: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
otherLongFieldName: nil,
},
}`,
},
{
[]interface{}{
LongStructTypeName{nil, nil},
[]byte{1, 2, 3},
T{3, 4},
LongStructTypeName{long, nil},
},
`[]interface {}{
pretty.LongStructTypeName{},
[]uint8{0x1, 0x2, 0x3},
pretty.T{x:3, y:4},
pretty.LongStructTypeName{
longFieldName: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
otherLongFieldName: nil,
},
}`,
},
}
func TestGoSyntax(t *testing.T) {
for _, tt := range gosyntax {
s := fmt.Sprintf("%# v", Formatter(tt.v))
if tt.s != s {
t.Errorf("expected %q", tt.s)
t.Errorf("got %q", s)
t.Errorf("expraw\n%s", tt.s)
t.Errorf("gotraw\n%s", s)
}
}
}
type I struct {
i int
R interface{}
}
func (i *I) I() *I { return i.R.(*I) }
func TestCycle(t *testing.T) {
type A struct{ *A }
v := &A{}
v.A = v
// panics from stack overflow without cycle detection
t.Logf("Example cycle:\n%# v", Formatter(v))
p := &A{}
s := fmt.Sprintf("%# v", Formatter([]*A{p, p}))
if strings.Contains(s, "CYCLIC") {
t.Errorf("Repeated address detected as cyclic reference:\n%s", s)
}
type R struct {
i int
*R
}
r := &R{
i: 1,
R: &R{
i: 2,
R: &R{
i: 3,
},
},
}
r.R.R.R = r
t.Logf("Example longer cycle:\n%# v", Formatter(r))
r = &R{
i: 1,
R: &R{
i: 2,
R: &R{
i: 3,
R: &R{
i: 4,
R: &R{
i: 5,
R: &R{
i: 6,
R: &R{
i: 7,
R: &R{
i: 8,
R: &R{
i: 9,
R: &R{
i: 10,
R: &R{
i: 11,
},
},
},
},
},
},
},
},
},
},
}
// here be pirates
r.R.R.R.R.R.R.R.R.R.R.R = r
t.Logf("Example very long cycle:\n%# v", Formatter(r))
i := &I{
i: 1,
R: &I{
i: 2,
R: &I{
i: 3,
R: &I{
i: 4,
R: &I{
i: 5,
R: &I{
i: 6,
R: &I{
i: 7,
R: &I{
i: 8,
R: &I{
i: 9,
R: &I{
i: 10,
R: &I{
i: 11,
},
},
},
},
},
},
},
},
},
},
}
iv := i.I().I().I().I().I().I().I().I().I().I()
*iv = *i
t.Logf("Example long interface cycle:\n%# v", Formatter(i))
}

98
Godeps/_workspace/src/github.com/kr/pretty/pretty.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
// Package pretty provides pretty-printing for Go values. This is
// useful during debugging, to avoid wrapping long output lines in
// the terminal.
//
// It provides a function, Formatter, that can be used with any
// function that accepts a format string. It also provides
// convenience wrappers for functions in packages fmt and log.
package pretty
import (
"fmt"
"io"
"log"
)
// Errorf is a convenience wrapper for fmt.Errorf.
//
// Calling Errorf(f, x, y) is equivalent to
// fmt.Errorf(f, Formatter(x), Formatter(y)).
func Errorf(format string, a ...interface{}) error {
return fmt.Errorf(format, wrap(a, false)...)
}
// Fprintf is a convenience wrapper for fmt.Fprintf.
//
// Calling Fprintf(w, f, x, y) is equivalent to
// fmt.Fprintf(w, f, Formatter(x), Formatter(y)).
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) {
return fmt.Fprintf(w, format, wrap(a, false)...)
}
// Log is a convenience wrapper for log.Printf.
//
// Calling Log(x, y) is equivalent to
// log.Print(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Log(a ...interface{}) {
log.Print(wrap(a, true)...)
}
// Logf is a convenience wrapper for log.Printf.
//
// Calling Logf(f, x, y) is equivalent to
// log.Printf(f, Formatter(x), Formatter(y)).
func Logf(format string, a ...interface{}) {
log.Printf(format, wrap(a, false)...)
}
// Logln is a convenience wrapper for log.Printf.
//
// Calling Logln(x, y) is equivalent to
// log.Println(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Logln(a ...interface{}) {
log.Println(wrap(a, true)...)
}
// Print pretty-prints its operands and writes to standard output.
//
// Calling Print(x, y) is equivalent to
// fmt.Print(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Print(a ...interface{}) (n int, errno error) {
return fmt.Print(wrap(a, true)...)
}
// Printf is a convenience wrapper for fmt.Printf.
//
// Calling Printf(f, x, y) is equivalent to
// fmt.Printf(f, Formatter(x), Formatter(y)).
func Printf(format string, a ...interface{}) (n int, errno error) {
return fmt.Printf(format, wrap(a, false)...)
}
// Println pretty-prints its operands and writes to standard output.
//
// Calling Print(x, y) is equivalent to
// fmt.Println(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Println(a ...interface{}) (n int, errno error) {
return fmt.Println(wrap(a, true)...)
}
// Sprintf is a convenience wrapper for fmt.Sprintf.
//
// Calling Sprintf(f, x, y) is equivalent to
// fmt.Sprintf(f, Formatter(x), Formatter(y)).
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, wrap(a, false)...)
}
func wrap(a []interface{}, force bool) []interface{} {
w := make([]interface{}, len(a))
for i, x := range a {
w[i] = formatter{x: x, force: force}
}
return w
}
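A hedged usage sketch of the convenience wrappers (editor-added): Println formats every operand with "%# v", while Printf only pretty-prints when that verb and flag combination is used; the endpoint type is hypothetical.
package main

import "github.com/khlieng/name_pending/Godeps/_workspace/src/github.com/kr/pretty"

// The endpoint type and its values are hypothetical.
type endpoint struct {
	Name string
	Tags []string
}

func main() {
	eps := []endpoint{
		{Name: "api", Tags: []string{"http", "public"}},
		{Name: "db", Tags: []string{"internal"}},
	}
	// Println formats each operand with "%# v", producing indented output.
	pretty.Println(eps)
	// Printf passes other verbs straight through to fmt.
	pretty.Printf("short form: %v\n", eps)
}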

41
Godeps/_workspace/src/github.com/kr/pretty/zero.go generated vendored Normal file
View File

@ -0,0 +1,41 @@
package pretty
import (
"reflect"
)
func nonzero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() != 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() != 0
case reflect.Float32, reflect.Float64:
return v.Float() != 0
case reflect.Complex64, reflect.Complex128:
return v.Complex() != complex(0, 0)
case reflect.String:
return v.String() != ""
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
if nonzero(getField(v, i)) {
return true
}
}
return false
case reflect.Array:
for i := 0; i < v.Len(); i++ {
if nonzero(v.Index(i)) {
return true
}
}
return false
case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func:
return !v.IsNil()
case reflect.UnsafePointer:
return v.Pointer() != 0
}
return true
}

19
Godeps/_workspace/src/github.com/kr/text/License generated vendored Normal file
View File

@ -0,0 +1,19 @@
Copyright 2012 Keith Rarick
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

3
Godeps/_workspace/src/github.com/kr/text/Readme generated vendored Normal file
View File

@ -0,0 +1,3 @@
This is a Go package for manipulating paragraphs of text.
See http://go.pkgdoc.org/github.com/kr/text for full documentation.

View File

@ -0,0 +1,5 @@
Package colwriter provides a write filter that formats
input lines in multiple columns.
The package is a straightforward translation from
/src/cmd/draw/mc.c in Plan 9 from User Space.

View File

@ -0,0 +1,147 @@
// Package colwriter provides a write filter that formats
// input lines in multiple columns.
//
// The package is a straightforward translation from
// /src/cmd/draw/mc.c in Plan 9 from User Space.
package colwriter
import (
"bytes"
"io"
"unicode/utf8"
)
const (
tab = 4
)
const (
// Print each input line ending in a colon ':' separately.
BreakOnColon uint = 1 << iota
)
// A Writer is a filter that arranges input lines in as many columns as will
// fit in its width. Tab '\t' chars in the input are translated to sequences
// of spaces ending at multiples of 4 positions.
//
// If BreakOnColon is set, each input line ending in a colon ':' is written
// separately.
//
// The Writer assumes that all Unicode code points have the same width; this
// may not be true in some fonts.
type Writer struct {
w io.Writer
buf []byte
width int
flag uint
}
// NewWriter allocates and initializes a new Writer writing to w.
// Parameter width controls the total number of characters on each line
// across all columns.
func NewWriter(w io.Writer, width int, flag uint) *Writer {
return &Writer{
w: w,
width: width,
flag: flag,
}
}
// Write writes p to the writer w. The only errors returned are ones
// encountered while writing to the underlying output stream.
func (w *Writer) Write(p []byte) (n int, err error) {
var linelen int
var lastWasColon bool
for i, c := range p {
w.buf = append(w.buf, c)
linelen++
if c == '\t' {
w.buf[len(w.buf)-1] = ' '
for linelen%tab != 0 {
w.buf = append(w.buf, ' ')
linelen++
}
}
if w.flag&BreakOnColon != 0 && c == ':' {
lastWasColon = true
} else if lastWasColon {
if c == '\n' {
pos := bytes.LastIndex(w.buf[:len(w.buf)-1], []byte{'\n'})
if pos < 0 {
pos = 0
}
line := w.buf[pos:]
w.buf = w.buf[:pos]
if err = w.columnate(); err != nil {
if len(line) < i {
return i - len(line), err
}
return 0, err
}
if n, err := w.w.Write(line); err != nil {
if r := len(line) - n; r < i {
return i - r, err
}
return 0, err
}
}
lastWasColon = false
}
if c == '\n' {
linelen = 0
}
}
return len(p), nil
}
// Flush should be called after the last call to Write to ensure that any data
// buffered in the Writer is written to output.
func (w *Writer) Flush() error {
return w.columnate()
}
func (w *Writer) columnate() error {
words := bytes.Split(w.buf, []byte{'\n'})
w.buf = nil
if len(words[len(words)-1]) == 0 {
words = words[:len(words)-1]
}
maxwidth := 0
for _, wd := range words {
if n := utf8.RuneCount(wd); n > maxwidth {
maxwidth = n
}
}
maxwidth++ // space char
wordsPerLine := w.width / maxwidth
if wordsPerLine <= 0 {
wordsPerLine = 1
}
nlines := (len(words) + wordsPerLine - 1) / wordsPerLine
for i := 0; i < nlines; i++ {
col := 0
endcol := 0
for j := i; j < len(words); j += nlines {
endcol += maxwidth
_, err := w.w.Write(words[j])
if err != nil {
return err
}
col += utf8.RuneCount(words[j])
if j+nlines < len(words) {
for col < endcol {
_, err := w.w.Write([]byte{' '})
if err != nil {
return err
}
col++
}
}
}
_, err := w.w.Write([]byte{'\n'})
if err != nil {
return err
}
}
return nil
}
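A short editor-added sketch of columnating a word list into an 80-character layout; the word list is made up, and Flush must be called so buffered output is written.
package main

import (
	"log"
	"os"

	"github.com/khlieng/name_pending/Godeps/_workspace/src/github.com/kr/text/colwriter"
)

func main() {
	// Arrange the words into as many columns as fit in 80 characters.
	w := colwriter.NewWriter(os.Stdout, 80, 0)
	if _, err := w.Write([]byte("alpha\nbeta\ngamma\ndelta\nepsilon\n")); err != nil {
		log.Fatal(err)
	}
	// Flush writes anything still buffered in the Writer.
	if err := w.Flush(); err != nil {
		log.Fatal(err)
	}
}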

View File

@ -0,0 +1,90 @@
package colwriter
import (
"bytes"
"testing"
)
var src = `
.git
.gitignore
.godir
Procfile:
README.md
api.go
apps.go
auth.go
darwin.go
data.go
dyno.go:
env.go
git.go
help.go
hkdist
linux.go
ls.go
main.go
plugin.go
run.go
scale.go
ssh.go
tail.go
term
unix.go
update.go
version.go
windows.go
`[1:]
var tests = []struct {
wid int
flag uint
src string
want string
}{
{80, 0, "", ""},
{80, 0, src, `
.git README.md darwin.go git.go ls.go scale.go unix.go
.gitignore api.go data.go help.go main.go ssh.go update.go
.godir apps.go dyno.go: hkdist plugin.go tail.go version.go
Procfile: auth.go env.go linux.go run.go term windows.go
`[1:]},
{80, BreakOnColon, src, `
.git .gitignore .godir
Procfile:
README.md api.go apps.go auth.go darwin.go data.go
dyno.go:
env.go hkdist main.go scale.go term version.go
git.go linux.go plugin.go ssh.go unix.go windows.go
help.go ls.go run.go tail.go update.go
`[1:]},
{20, 0, `
Hello
Γειά σου
안녕
今日は
`[1:], `
Hello 안녕
Γειά σου 今日は
`[1:]},
}
func TestWriter(t *testing.T) {
for _, test := range tests {
b := new(bytes.Buffer)
w := NewWriter(b, test.wid, test.flag)
if _, err := w.Write([]byte(test.src)); err != nil {
t.Error(err)
}
if err := w.Flush(); err != nil {
t.Error(err)
}
if g := b.String(); test.want != g {
t.Log("\n" + test.want)
t.Log("\n" + g)
t.Errorf("%q != %q", test.want, g)
}
}
}

3
Godeps/_workspace/src/github.com/kr/text/doc.go generated vendored Normal file
View File

@ -0,0 +1,3 @@
// Package text provides rudimentary functions for manipulating text in
// paragraphs.
package text

74
Godeps/_workspace/src/github.com/kr/text/indent.go generated vendored Normal file
View File

@ -0,0 +1,74 @@
package text
import (
"io"
)
// Indent inserts prefix at the beginning of each non-empty line of s. The
// end-of-line marker is NL.
func Indent(s, prefix string) string {
return string(IndentBytes([]byte(s), []byte(prefix)))
}
// IndentBytes inserts prefix at the beginning of each non-empty line of b.
// The end-of-line marker is NL.
func IndentBytes(b, prefix []byte) []byte {
var res []byte
bol := true
for _, c := range b {
if bol && c != '\n' {
res = append(res, prefix...)
}
res = append(res, c)
bol = c == '\n'
}
return res
}
// Writer indents each line of its input.
type indentWriter struct {
w io.Writer
bol bool
pre [][]byte
sel int
off int
}
// NewIndentWriter makes a new write filter that indents the input
// lines. Each line is prefixed in order with the corresponding
// element of pre. If there are more lines than elements, the last
// element of pre is repeated for each subsequent line.
func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer {
return &indentWriter{
w: w,
pre: pre,
bol: true,
}
}
// The only errors returned are from the underlying indentWriter.
func (w *indentWriter) Write(p []byte) (n int, err error) {
for _, c := range p {
if w.bol {
var i int
i, err = w.w.Write(w.pre[w.sel][w.off:])
w.off += i
if err != nil {
return n, err
}
}
_, err = w.w.Write([]byte{c})
if err != nil {
return n, err
}
n++
w.bol = c == '\n'
if w.bol {
w.off = 0
if w.sel < len(w.pre)-1 {
w.sel++
}
}
}
return n, nil
}
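An illustrative, editor-added sketch of the two indentation entry points, with made-up input text: Indent for strings and NewIndentWriter for streaming output where the first line gets a different prefix than the rest.
package main

import (
	"fmt"
	"os"

	"github.com/khlieng/name_pending/Godeps/_workspace/src/github.com/kr/text"
)

func main() {
	// Indent prefixes every non-empty line of the string.
	fmt.Print(text.Indent("one\ntwo\n", "    "))

	// NewIndentWriter uses the first prefix for the first line and the last
	// prefix for every following line.
	w := text.NewIndentWriter(os.Stdout, []byte("  * "), []byte("    "))
	fmt.Fprint(w, "first line\nsecond line\n")
}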

119
Godeps/_workspace/src/github.com/kr/text/indent_test.go generated vendored Normal file
View File

@ -0,0 +1,119 @@
package text
import (
"bytes"
"testing"
)
type T struct {
inp, exp, pre string
}
var tests = []T{
{
"The quick brown fox\njumps over the lazy\ndog.\nBut not quickly.\n",
"xxxThe quick brown fox\nxxxjumps over the lazy\nxxxdog.\nxxxBut not quickly.\n",
"xxx",
},
{
"The quick brown fox\njumps over the lazy\ndog.\n\nBut not quickly.",
"xxxThe quick brown fox\nxxxjumps over the lazy\nxxxdog.\n\nxxxBut not quickly.",
"xxx",
},
}
func TestIndent(t *testing.T) {
for _, test := range tests {
got := Indent(test.inp, test.pre)
if got != test.exp {
t.Errorf("mismatch %q != %q", got, test.exp)
}
}
}
type IndentWriterTest struct {
inp, exp string
pre []string
}
var ts = []IndentWriterTest{
{
`
The quick brown fox
jumps over the lazy
dog.
But not quickly.
`[1:],
`
xxxThe quick brown fox
xxxjumps over the lazy
xxxdog.
xxxBut not quickly.
`[1:],
[]string{"xxx"},
},
{
`
The quick brown fox
jumps over the lazy
dog.
But not quickly.
`[1:],
`
xxaThe quick brown fox
xxxjumps over the lazy
xxxdog.
xxxBut not quickly.
`[1:],
[]string{"xxa", "xxx"},
},
{
`
The quick brown fox
jumps over the lazy
dog.
But not quickly.
`[1:],
`
xxaThe quick brown fox
xxbjumps over the lazy
xxcdog.
xxxBut not quickly.
`[1:],
[]string{"xxa", "xxb", "xxc", "xxx"},
},
{
`
The quick brown fox
jumps over the lazy
dog.
But not quickly.`[1:],
`
xxaThe quick brown fox
xxxjumps over the lazy
xxxdog.
xxx
xxxBut not quickly.`[1:],
[]string{"xxa", "xxx"},
},
}
func TestIndentWriter(t *testing.T) {
for _, test := range ts {
b := new(bytes.Buffer)
pre := make([][]byte, len(test.pre))
for i := range test.pre {
pre[i] = []byte(test.pre[i])
}
w := NewIndentWriter(b, pre...)
if _, err := w.Write([]byte(test.inp)); err != nil {
t.Error(err)
}
if got := b.String(); got != test.exp {
t.Errorf("mismatch %q != %q", got, test.exp)
t.Log(got)
t.Log(test.exp)
}
}
}

9
Godeps/_workspace/src/github.com/kr/text/mc/Readme generated vendored Normal file
View File

@ -0,0 +1,9 @@
Command mc prints in multiple columns.
Usage: mc [-] [-N] [file...]
Mc splits the input into as many columns as will fit in N
print positions. If the output is a tty, the default N is
the number of characters in a terminal line; otherwise the
default N is 80. Under option - each input line ending in
a colon ':' is printed separately.
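Illustrative invocations (assumed from the usage line above, not part
of the vendored file):

    mc -60 names.txt    arrange the lines of names.txt in columns
                        fitting 60 print positions
    mc - names.txt      additionally print each input line ending in
                        ':' on a line of its own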

62
Godeps/_workspace/src/github.com/kr/text/mc/mc.go generated vendored Normal file
View File

@ -0,0 +1,62 @@
// Command mc prints in multiple columns.
//
// Usage: mc [-] [-N] [file...]
//
// Mc splits the input into as many columns as will fit in N
// print positions. If the output is a tty, the default N is
// the number of characters in a terminal line; otherwise the
// default N is 80. Under option - each input line ending in
// a colon ':' is printed separately.
package main
import (
"github.com/khlieng/name_pending/Godeps/_workspace/src/github.com/kr/text/colwriter"
"github.com/kr/pty"
"io"
"log"
"os"
"strconv"
)
func main() {
var width int
var flag uint
args := os.Args[1:]
for len(args) > 0 && len(args[0]) > 0 && args[0][0] == '-' {
if len(args[0]) > 1 {
width, _ = strconv.Atoi(args[0][1:])
} else {
flag |= colwriter.BreakOnColon
}
args = args[1:]
}
if width < 1 {
_, width, _ = pty.Getsize(os.Stdout)
}
if width < 1 {
width = 80
}
w := colwriter.NewWriter(os.Stdout, width, flag)
if len(args) > 0 {
for _, s := range args {
if f, err := os.Open(s); err == nil {
copyin(w, f)
f.Close()
} else {
log.Println(err)
}
}
} else {
copyin(w, os.Stdin)
}
}
func copyin(w *colwriter.Writer, r io.Reader) {
if _, err := io.Copy(w, r); err != nil {
log.Println(err)
}
if err := w.Flush(); err != nil {
log.Println(err)
}
}

86
Godeps/_workspace/src/github.com/kr/text/wrap.go generated vendored Normal file
View File

@ -0,0 +1,86 @@
package text
import (
"bytes"
"math"
)
var (
nl = []byte{'\n'}
sp = []byte{' '}
)
const defaultPenalty = 1e5
// Wrap wraps s into a paragraph of lines of length lim, with minimal
// raggedness.
func Wrap(s string, lim int) string {
return string(WrapBytes([]byte(s), lim))
}
// WrapBytes wraps b into a paragraph of lines of length lim, with minimal
// raggedness.
func WrapBytes(b []byte, lim int) []byte {
words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp)
var lines [][]byte
for _, line := range WrapWords(words, 1, lim, defaultPenalty) {
lines = append(lines, bytes.Join(line, sp))
}
return bytes.Join(lines, nl)
}
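// Hypothetical usage sketch (editorial, not part of the vendored source),
// consistent with TestWrap in wrap_test.go below:
//
//	Wrap("The quick brown fox jumps over the lazy dog.", 24)
//	// == "The quick brown fox\njumps over the lazy dog."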
// WrapWords is the low-level line-breaking algorithm, useful if you need more
// control over the details of the text wrapping process. For most uses, either
// Wrap or WrapBytes will be sufficient and more convenient.
//
// WrapWords splits a list of words into lines with minimal "raggedness",
// treating each byte as one unit, accounting for spc units between adjacent
// words on each line, and attempting to limit lines to lim units. Raggedness
// is the total error over all lines, where error is the square of the
// difference of the length of the line and lim. Too-long lines (which only
// happen when a single word is longer than lim units) have pen penalty units
// added to the error.
func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte {
n := len(words)
length := make([][]int, n)
for i := 0; i < n; i++ {
length[i] = make([]int, n)
length[i][i] = len(words[i])
for j := i + 1; j < n; j++ {
length[i][j] = length[i][j-1] + spc + len(words[j])
}
}
nbrk := make([]int, n)
cost := make([]int, n)
for i := range cost {
cost[i] = math.MaxInt32
}
for i := n - 1; i >= 0; i-- {
if length[i][n-1] <= lim {
cost[i] = 0
nbrk[i] = n
} else {
for j := i + 1; j < n; j++ {
d := lim - length[i][j-1]
c := d*d + cost[j]
if length[i][j-1] > lim {
c += pen // too-long lines get a worse penalty
}
if c < cost[i] {
cost[i] = c
nbrk[i] = j
}
}
}
}
var lines [][][]byte
i := 0
for i < n {
lines = append(lines, words[i:nbrk[i]])
i = nbrk[i]
}
return lines
}
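// Worked example (editorial, not part of the vendored source): wrapping
// "The quick brown fox jumps over the lazy dog." with spc=1 and lim=24
// yields the lines "The quick brown fox" (19 units) and
// "jumps over the lazy dog." (24 units); the first line contributes
// (24-19)^2 = 25 to the cost and no alternative break point does better,
// so WrapWords picks this split (see TestWrap in wrap_test.go below).
// Note that the cost[i] = 0 branch above gives the final line of the
// paragraph zero cost, so only non-final lines add to the raggedness.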

44
Godeps/_workspace/src/github.com/kr/text/wrap_test.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
package text
import (
"bytes"
"testing"
)
var text = "The quick brown fox jumps over the lazy dog."
func TestWrap(t *testing.T) {
exp := [][]string{
{"The", "quick", "brown", "fox"},
{"jumps", "over", "the", "lazy", "dog."},
}
words := bytes.Split([]byte(text), sp)
got := WrapWords(words, 1, 24, defaultPenalty)
if len(exp) != len(got) {
t.Fail()
}
for i := range exp {
if len(exp[i]) != len(got[i]) {
t.Fail()
}
for j := range exp[i] {
if exp[i][j] != string(got[i][j]) {
t.Fatal(i, exp[i][j], got[i][j])
}
}
}
}
func TestWrapNarrow(t *testing.T) {
exp := "The\nquick\nbrown\nfox\njumps\nover\nthe\nlazy\ndog."
if Wrap(text, 5) != exp {
t.Fail()
}
}
func TestWrapOneLine(t *testing.T) {
exp := "The quick brown fox jumps over the lazy dog."
if Wrap(text, 500) != exp {
t.Fail()
}
}

Some files were not shown because too many files have changed in this diff.