Vendor changed child dependencies

Ken-Håvard Lieng 2016-01-15 19:53:01 +01:00
parent f1c3326af1
commit 383ca39354
268 changed files with 12194 additions and 17552 deletions

Godeps/Godeps.json generated
View File

@@ -10,10 +6,6 @@
"Comment": "v0.1.0-21-g056c9bc",
"Rev": "056c9bc7be7190eaa7715723883caffa5f8fa3e4"
},
{
"ImportPath": "github.com/armon/consul-api",
"Rev": "dcfedd50ed5334f96adee43fc88518a4f095e15c"
},
{
"ImportPath": "github.com/blevesearch/bleve",
"Rev": "16f538d7b76dd85c935a3104c390307cae5cbf79"
@@ -32,11 +28,6 @@
"Comment": "v1.1.0-61-g6465994",
"Rev": "6465994716bf6400605746e79224cf1e7ed68725"
},
{
"ImportPath": "github.com/coreos/go-etcd/etcd",
"Comment": "v2.0.0-7-g73a8ef7",
"Rev": "73a8ef737e8ea002281a28b4cb92a1de121ad4c6"
},
{
"ImportPath": "github.com/cznic/b",
"Rev": "c4adf3a58579a2d57cd3097f455dcdf75edcdfd8"
@@ -55,8 +46,8 @@
"Rev": "3986be78bf859e01f01af631ad76da5b269d270c"
},
{
"ImportPath": "github.com/inconshreveable/mousetrap",
"Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
"ImportPath": "github.com/hashicorp/hcl",
"Rev": "197e8d3cf42199cfd53cd775deb37f3637234635"
},
{
"ImportPath": "github.com/jpillora/backoff",
@@ -143,33 +134,10 @@
"Comment": "v0.2.0-6-gdb3a956",
"Rev": "db3a956d52bf23cc5201fe98bc9c9787d3b32c2d"
},
{
"ImportPath": "github.com/xordataexchange/crypt/backend",
"Comment": "v0.0.2-17-g749e360",
"Rev": "749e360c8f236773f28fc6d3ddfce4a470795227"
},
{
"ImportPath": "github.com/xordataexchange/crypt/config",
"Comment": "v0.0.2-17-g749e360",
"Rev": "749e360c8f236773f28fc6d3ddfce4a470795227"
},
{
"ImportPath": "github.com/xordataexchange/crypt/encoding/secconf",
"Comment": "v0.0.2-17-g749e360",
"Rev": "749e360c8f236773f28fc6d3ddfce4a470795227"
},
{
"ImportPath": "golang.org/x/crypto/cast5",
"Rev": "644910e6da851dcd66a424c71d068d971cfacba5"
},
{
"ImportPath": "golang.org/x/crypto/ocsp",
"Rev": "644910e6da851dcd66a424c71d068d971cfacba5"
},
{
"ImportPath": "golang.org/x/crypto/openpgp",
"Rev": "644910e6da851dcd66a424c71d068d971cfacba5"
},
{
"ImportPath": "golang.org/x/crypto/sha3",
"Rev": "644910e6da851dcd66a424c71d068d971cfacba5"
@@ -186,6 +154,11 @@
"ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "c92eb3cd6e70951a111680995e651ea4b2c35539"
},
{
"ImportPath": "gopkg.in/fsnotify.v1",
"Comment": "v1.2.9",
"Rev": "8611c35ab31c1c28aa903d33cf8b6e44a399b09e"
},
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "49c95bdc21843256fb6c4e0d370a05f24a0bf213"

View File

@@ -1,23 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test

View File

@@ -1,42 +0,0 @@
consul-api
==========
*DEPRECATED* Please use [consul api package](https://github.com/hashicorp/consul/tree/master/api) instead.
Godocs for that package [are here](http://godoc.org/github.com/hashicorp/consul/api).
This package provides the `consulapi` package which attempts to
provide programmatic access to the full Consul API.
Currently, all of the Consul APIs included in version 0.4 are supported.
Documentation
=============
The full documentation is available on [Godoc](http://godoc.org/github.com/armon/consul-api)
Usage
=====
Below is an example of using the Consul client:
```go
// Get a new client, with KV endpoints
client, _ := consulapi.NewClient(consulapi.DefaultConfig())
kv := client.KV()
// PUT a new KV pair
p := &consulapi.KVPair{Key: "foo", Value: []byte("test")}
_, err := kv.Put(p, nil)
if err != nil {
panic(err)
}
// Lookup the pair
pair, _, err := kv.Get("foo", nil)
if err != nil {
panic(err)
}
fmt.Printf("KV: %v", pair)
```
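The deprecation note above points to github.com/hashicorp/consul/api as the replacement. For comparison, here is a minimal sketch of the same KV round trip against that package, under the assumption that its KV surface mirrors `consulapi` (client construction, `KV()`, `KVPair`, `Put`, `Get`):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Build a client against the local agent (127.0.0.1:8500 by default).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	kv := client.KV()

	// PUT a new KV pair
	p := &api.KVPair{Key: "foo", Value: []byte("test")}
	if _, err := kv.Put(p, nil); err != nil {
		panic(err)
	}

	// Lookup the pair
	pair, _, err := kv.Get("foo", nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("KV: %v\n", pair)
}
```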

View File

@@ -1,140 +0,0 @@
package consulapi
const (
// ACLClientType is the client type token
ACLClientType = "client"
// ACLManagementType is the management type token
ACLManagementType = "management"
)
// ACLEntry is used to represent an ACL entry
type ACLEntry struct {
CreateIndex uint64
ModifyIndex uint64
ID string
Name string
Type string
Rules string
}
// ACL can be used to query the ACL endpoints
type ACL struct {
c *Client
}
// ACL returns a handle to the ACL endpoints
func (c *Client) ACL() *ACL {
return &ACL{c}
}
// Create is used to generate a new token with the given parameters
func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
r := a.c.newRequest("PUT", "/v1/acl/create")
r.setWriteOptions(q)
r.obj = acl
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// Update is used to update the rules of an existing token
func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
r := a.c.newRequest("PUT", "/v1/acl/update")
r.setWriteOptions(q)
r.obj = acl
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}
// Destroy is used to destroy a given ACL token ID
func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id)
r.setWriteOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}
// Clone is used to return a new token cloned from an existing one
func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
r := a.c.newRequest("PUT", "/v1/acl/clone/"+id)
r.setWriteOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// Info is used to query for information about an ACL token
func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/acl/info/"+id)
r.setQueryOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*ACLEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
if len(entries) > 0 {
return entries[0], qm, nil
}
return nil, qm, nil
}
// List is used to get all the ACL tokens
func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/acl/list")
r.setQueryOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*ACLEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}

View File

@@ -1,140 +0,0 @@
package consulapi
import (
"os"
"testing"
)
// CONSUL_ROOT is a management token for the tests
var CONSUL_ROOT string
func init() {
CONSUL_ROOT = os.Getenv("CONSUL_ROOT")
}
func TestACL_CreateDestroy(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
ae := ACLEntry{
Name: "API test",
Type: ACLClientType,
Rules: `key "" { policy = "deny" }`,
}
id, wm, err := acl.Create(&ae, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
ae2, _, err := acl.Info(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if ae2.Name != ae.Name || ae2.Type != ae.Type || ae2.Rules != ae.Rules {
t.Fatalf("Bad: %#v", ae2)
}
wm, err = acl.Destroy(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
}
func TestACL_CloneDestroy(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
id, wm, err := acl.Clone(CONSUL_ROOT, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
wm, err = acl.Destroy(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
}
func TestACL_Info(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
ae, qm, err := acl.Info(CONSUL_ROOT, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
if ae == nil || ae.ID != CONSUL_ROOT || ae.Type != ACLManagementType {
t.Fatalf("bad: %#v", ae)
}
}
func TestACL_List(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
acls, qm, err := acl.List(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(acls) < 2 {
t.Fatalf("bad: %v", acls)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
}

View File

@@ -1,272 +0,0 @@
package consulapi
import (
"fmt"
)
// AgentCheck represents a check known to the agent
type AgentCheck struct {
Node string
CheckID string
Name string
Status string
Notes string
Output string
ServiceID string
ServiceName string
}
// AgentService represents a service known to the agent
type AgentService struct {
ID string
Service string
Tags []string
Port int
}
// AgentMember represents a cluster member known to the agent
type AgentMember struct {
Name string
Addr string
Port uint16
Tags map[string]string
Status int
ProtocolMin uint8
ProtocolMax uint8
ProtocolCur uint8
DelegateMin uint8
DelegateMax uint8
DelegateCur uint8
}
// AgentServiceRegistration is used to register a new service
type AgentServiceRegistration struct {
ID string `json:",omitempty"`
Name string `json:",omitempty"`
Tags []string `json:",omitempty"`
Port int `json:",omitempty"`
Check *AgentServiceCheck
}
// AgentCheckRegistration is used to register a new check
type AgentCheckRegistration struct {
ID string `json:",omitempty"`
Name string `json:",omitempty"`
Notes string `json:",omitempty"`
AgentServiceCheck
}
// AgentServiceCheck is used to create an associated
// check for a service
type AgentServiceCheck struct {
Script string `json:",omitempty"`
Interval string `json:",omitempty"`
TTL string `json:",omitempty"`
}
// Agent can be used to query the Agent endpoints
type Agent struct {
c *Client
// cache the node name
nodeName string
}
// Agent returns a handle to the agent endpoints
func (c *Client) Agent() *Agent {
return &Agent{c: c}
}
// Self is used to query the agent we are speaking to for
// information about itself
func (a *Agent) Self() (map[string]map[string]interface{}, error) {
r := a.c.newRequest("GET", "/v1/agent/self")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out map[string]map[string]interface{}
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// NodeName is used to get the node name of the agent
func (a *Agent) NodeName() (string, error) {
if a.nodeName != "" {
return a.nodeName, nil
}
info, err := a.Self()
if err != nil {
return "", err
}
name := info["Config"]["NodeName"].(string)
a.nodeName = name
return name, nil
}
// Checks returns the locally registered checks
func (a *Agent) Checks() (map[string]*AgentCheck, error) {
r := a.c.newRequest("GET", "/v1/agent/checks")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out map[string]*AgentCheck
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Services returns the locally registered services
func (a *Agent) Services() (map[string]*AgentService, error) {
r := a.c.newRequest("GET", "/v1/agent/services")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out map[string]*AgentService
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Members returns the known gossip members. The WAN
// flag can be used to query a server for WAN members.
func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
r := a.c.newRequest("GET", "/v1/agent/members")
if wan {
r.params.Set("wan", "1")
}
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []*AgentMember
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// ServiceRegister is used to register a new service with
// the local agent
func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
r := a.c.newRequest("PUT", "/v1/agent/service/register")
r.obj = service
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// ServiceDeregister is used to deregister a service with
// the local agent
func (a *Agent) ServiceDeregister(serviceID string) error {
r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// PassTTL is used to set a TTL check to the passing state
func (a *Agent) PassTTL(checkID, note string) error {
return a.UpdateTTL(checkID, note, "pass")
}
// WarnTTL is used to set a TTL check to the warning state
func (a *Agent) WarnTTL(checkID, note string) error {
return a.UpdateTTL(checkID, note, "warn")
}
// FailTTL is used to set a TTL check to the failing state
func (a *Agent) FailTTL(checkID, note string) error {
return a.UpdateTTL(checkID, note, "fail")
}
// UpdateTTL is used to update the TTL of a check
func (a *Agent) UpdateTTL(checkID, note, status string) error {
switch status {
case "pass":
case "warn":
case "fail":
default:
return fmt.Errorf("Invalid status: %s", status)
}
endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID)
r := a.c.newRequest("PUT", endpoint)
r.params.Set("note", note)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// CheckRegister is used to register a new check with
// the local agent
func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
r := a.c.newRequest("PUT", "/v1/agent/check/register")
r.obj = check
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// CheckDeregister is used to deregister a check with
// the local agent
func (a *Agent) CheckDeregister(checkID string) error {
r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// Join is used to instruct the agent to attempt a join to
// another cluster member
func (a *Agent) Join(addr string, wan bool) error {
r := a.c.newRequest("PUT", "/v1/agent/join/"+addr)
if wan {
r.params.Set("wan", "1")
}
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// ForceLeave is used to have the agent eject a failed node
func (a *Agent) ForceLeave(node string) error {
r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}

View File

@@ -1,162 +0,0 @@
package consulapi
import (
"testing"
)
func TestAgent_Self(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
info, err := agent.Self()
if err != nil {
t.Fatalf("err: %v", err)
}
name := info["Config"]["NodeName"]
if name == "" {
t.Fatalf("bad: %v", info)
}
}
func TestAgent_Members(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
members, err := agent.Members(false)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(members) != 1 {
t.Fatalf("bad: %v", members)
}
}
func TestAgent_Services(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
reg := &AgentServiceRegistration{
Name: "foo",
Tags: []string{"bar", "baz"},
Port: 8000,
Check: &AgentServiceCheck{
TTL: "15s",
},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
services, err := agent.Services()
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := services["foo"]; !ok {
t.Fatalf("missing service: %v", services)
}
checks, err := agent.Checks()
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := checks["service:foo"]; !ok {
t.Fatalf("missing check: %v", checks)
}
if err := agent.ServiceDeregister("foo"); err != nil {
t.Fatalf("err: %v", err)
}
}
func TestAgent_SetTTLStatus(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
reg := &AgentServiceRegistration{
Name: "foo",
Check: &AgentServiceCheck{
TTL: "15s",
},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
if err := agent.WarnTTL("service:foo", "test"); err != nil {
t.Fatalf("err: %v", err)
}
checks, err := agent.Checks()
if err != nil {
t.Fatalf("err: %v", err)
}
chk, ok := checks["service:foo"]
if !ok {
t.Fatalf("missing check: %v", checks)
}
if chk.Status != "warning" {
t.Fatalf("Bad: %#v", chk)
}
if chk.Output != "test" {
t.Fatalf("Bad: %#v", chk)
}
if err := agent.ServiceDeregister("foo"); err != nil {
t.Fatalf("err: %v", err)
}
}
func TestAgent_Checks(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
reg := &AgentCheckRegistration{
Name: "foo",
}
reg.TTL = "15s"
if err := agent.CheckRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
checks, err := agent.Checks()
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := checks["foo"]; !ok {
t.Fatalf("missing check: %v", checks)
}
if err := agent.CheckDeregister("foo"); err != nil {
t.Fatalf("err: %v", err)
}
}
func TestAgent_Join(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
info, err := agent.Self()
if err != nil {
t.Fatalf("err: %v", err)
}
// Join ourself
addr := info["Config"]["AdvertiseAddr"].(string)
err = agent.Join(addr, false)
if err != nil {
t.Fatalf("err: %v", err)
}
}
func TestAgent_ForceLeave(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
// Eject somebody
err := agent.ForceLeave("foo")
if err != nil {
t.Fatalf("err: %v", err)
}
}

View File

@@ -1,323 +0,0 @@
package consulapi
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"time"
)
// QueryOptions are used to parameterize a query
type QueryOptions struct {
// Providing a datacenter overwrites the DC provided
// by the Config
Datacenter string
// AllowStale allows any Consul server (non-leader) to service
// a read. This allows for lower latency and higher throughput
AllowStale bool
// RequireConsistent forces the read to be fully consistent.
// This is more expensive but prevents ever performing a stale
// read.
RequireConsistent bool
// WaitIndex is used to enable a blocking query. Waits
// until the timeout or the next index is reached
WaitIndex uint64
// WaitTime is used to bound the duration of a wait.
// Defaults to that of the Config, but can be overridden.
WaitTime time.Duration
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
}
// WriteOptions are used to parameterize a write
type WriteOptions struct {
// Providing a datacenter overwrites the DC provided
// by the Config
Datacenter string
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
}
// QueryMeta is used to return meta data about a query
type QueryMeta struct {
// LastIndex. This can be used as a WaitIndex to perform
// a blocking query
LastIndex uint64
// Time of last contact from the leader for the
// server servicing the request
LastContact time.Duration
// Is there a known leader
KnownLeader bool
// How long did the request take
RequestTime time.Duration
}
// WriteMeta is used to return meta data about a write
type WriteMeta struct {
// How long did the request take
RequestTime time.Duration
}
// HttpBasicAuth is used to authenticate the HTTP client with HTTP Basic Authentication
type HttpBasicAuth struct {
// Username to use for HTTP Basic Authentication
Username string
// Password to use for HTTP Basic Authentication
Password string
}
// Config is used to configure the creation of a client
type Config struct {
// Address is the address of the Consul server
Address string
// Scheme is the URI scheme for the Consul server
Scheme string
// Datacenter to use. If not provided, the default agent datacenter is used.
Datacenter string
// HttpClient is the client to use. Default will be
// used if not provided.
HttpClient *http.Client
// HttpAuth is the auth info to use for http access.
HttpAuth *HttpBasicAuth
// WaitTime limits how long a Watch will block. If not provided,
// the agent default values will be used.
WaitTime time.Duration
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
}
// DefaultConfig returns a default configuration for the client
func DefaultConfig() *Config {
return &Config{
Address: "127.0.0.1:8500",
Scheme: "http",
HttpClient: http.DefaultClient,
}
}
// Client provides a client to the Consul API
type Client struct {
config Config
}
// NewClient returns a new client
func NewClient(config *Config) (*Client, error) {
// bootstrap the config
defConfig := DefaultConfig()
if len(config.Address) == 0 {
config.Address = defConfig.Address
}
if len(config.Scheme) == 0 {
config.Scheme = defConfig.Scheme
}
if config.HttpClient == nil {
config.HttpClient = defConfig.HttpClient
}
client := &Client{
config: *config,
}
return client, nil
}
// request is used to help build up a request
type request struct {
config *Config
method string
url *url.URL
params url.Values
body io.Reader
obj interface{}
}
// setQueryOptions is used to annotate the request with
// additional query options
func (r *request) setQueryOptions(q *QueryOptions) {
if q == nil {
return
}
if q.Datacenter != "" {
r.params.Set("dc", q.Datacenter)
}
if q.AllowStale {
r.params.Set("stale", "")
}
if q.RequireConsistent {
r.params.Set("consistent", "")
}
if q.WaitIndex != 0 {
r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
}
if q.WaitTime != 0 {
r.params.Set("wait", durToMsec(q.WaitTime))
}
if q.Token != "" {
r.params.Set("token", q.Token)
}
}
// durToMsec converts a duration to a millisecond specified string
func durToMsec(dur time.Duration) string {
return fmt.Sprintf("%dms", dur/time.Millisecond)
}
// setWriteOptions is used to annotate the request with
// additional write options
func (r *request) setWriteOptions(q *WriteOptions) {
if q == nil {
return
}
if q.Datacenter != "" {
r.params.Set("dc", q.Datacenter)
}
if q.Token != "" {
r.params.Set("token", q.Token)
}
}
// toHTTP converts the request to an HTTP request
func (r *request) toHTTP() (*http.Request, error) {
// Encode the query parameters
r.url.RawQuery = r.params.Encode()
// Get the URL string
urlRaw := r.url.String()
// Check if we should encode the body
if r.body == nil && r.obj != nil {
if b, err := encodeBody(r.obj); err != nil {
return nil, err
} else {
r.body = b
}
}
// Create the HTTP request
req, err := http.NewRequest(r.method, urlRaw, r.body)
// Setup auth
if err == nil && r.config.HttpAuth != nil {
req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
}
return req, err
}
// newRequest is used to create a new request
func (c *Client) newRequest(method, path string) *request {
r := &request{
config: &c.config,
method: method,
url: &url.URL{
Scheme: c.config.Scheme,
Host: c.config.Address,
Path: path,
},
params: make(map[string][]string),
}
if c.config.Datacenter != "" {
r.params.Set("dc", c.config.Datacenter)
}
if c.config.WaitTime != 0 {
r.params.Set("wait", durToMsec(r.config.WaitTime))
}
if c.config.Token != "" {
r.params.Set("token", r.config.Token)
}
return r
}
// doRequest runs a request with our client
func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
req, err := r.toHTTP()
if err != nil {
return 0, nil, err
}
start := time.Now()
resp, err := c.config.HttpClient.Do(req)
diff := time.Now().Sub(start)
return diff, resp, err
}
// parseQueryMeta is used to help parse query meta-data
func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
header := resp.Header
// Parse the X-Consul-Index
index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64)
if err != nil {
return fmt.Errorf("Failed to parse X-Consul-Index: %v", err)
}
q.LastIndex = index
// Parse the X-Consul-LastContact
last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64)
if err != nil {
return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err)
}
q.LastContact = time.Duration(last) * time.Millisecond
// Parse the X-Consul-KnownLeader
switch header.Get("X-Consul-KnownLeader") {
case "true":
q.KnownLeader = true
default:
q.KnownLeader = false
}
return nil
}
// decodeBody is used to JSON decode a body
func decodeBody(resp *http.Response, out interface{}) error {
dec := json.NewDecoder(resp.Body)
return dec.Decode(out)
}
// encodeBody is used to encode a request body
func encodeBody(obj interface{}) (io.Reader, error) {
buf := bytes.NewBuffer(nil)
enc := json.NewEncoder(buf)
if err := enc.Encode(obj); err != nil {
return nil, err
}
return buf, nil
}
// requireOK is used to wrap doRequest and check for a 200
func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
if e != nil {
return d, resp, e
}
if resp.StatusCode != 200 {
var buf bytes.Buffer
io.Copy(&buf, resp.Body)
return d, resp, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
}
return d, resp, e
}
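The `QueryOptions` and `QueryMeta` fields above are what drive blocking queries: a caller re-issues the same read with `WaitIndex` set to the `LastIndex` of the previous response, and the server holds the request until that index advances or `WaitTime` expires. A minimal watch loop sketched with this package's own KV endpoint (the helper name `watchKey` is illustrative, not part of the package):

```go
package main

import (
	"fmt"
	"time"

	consulapi "github.com/armon/consul-api"
)

// watchKey re-issues a blocking Get, feeding each response's LastIndex back
// in as the next WaitIndex, which is the pattern these options exist for.
func watchKey(kv *consulapi.KV, key string) {
	var lastIndex uint64
	for {
		opts := &consulapi.QueryOptions{
			WaitIndex: lastIndex,        // block until this index advances
			WaitTime:  30 * time.Second, // cap how long the server holds the request
		}
		pair, meta, err := kv.Get(key, opts)
		if err != nil {
			time.Sleep(time.Second) // back off briefly on transient errors
			continue
		}
		if meta.LastIndex != lastIndex {
			lastIndex = meta.LastIndex
			if pair != nil {
				fmt.Printf("%s changed to %q\n", key, pair.Value)
			}
		}
	}
}

func main() {
	client, _ := consulapi.NewClient(consulapi.DefaultConfig())
	watchKey(client.KV(), "foo")
}
```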

View File

@@ -1,126 +0,0 @@
package consulapi
import (
crand "crypto/rand"
"fmt"
"net/http"
"testing"
"time"
)
func makeClient(t *testing.T) *Client {
conf := DefaultConfig()
client, err := NewClient(conf)
if err != nil {
t.Fatalf("err: %v", err)
}
return client
}
func testKey() string {
buf := make([]byte, 16)
if _, err := crand.Read(buf); err != nil {
panic(fmt.Errorf("Failed to read random bytes: %v", err))
}
return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
buf[0:4],
buf[4:6],
buf[6:8],
buf[8:10],
buf[10:16])
}
func TestSetQueryOptions(t *testing.T) {
c := makeClient(t)
r := c.newRequest("GET", "/v1/kv/foo")
q := &QueryOptions{
Datacenter: "foo",
AllowStale: true,
RequireConsistent: true,
WaitIndex: 1000,
WaitTime: 100 * time.Second,
Token: "12345",
}
r.setQueryOptions(q)
if r.params.Get("dc") != "foo" {
t.Fatalf("bad: %v", r.params)
}
if _, ok := r.params["stale"]; !ok {
t.Fatalf("bad: %v", r.params)
}
if _, ok := r.params["consistent"]; !ok {
t.Fatalf("bad: %v", r.params)
}
if r.params.Get("index") != "1000" {
t.Fatalf("bad: %v", r.params)
}
if r.params.Get("wait") != "100000ms" {
t.Fatalf("bad: %v", r.params)
}
if r.params.Get("token") != "12345" {
t.Fatalf("bad: %v", r.params)
}
}
func TestSetWriteOptions(t *testing.T) {
c := makeClient(t)
r := c.newRequest("GET", "/v1/kv/foo")
q := &WriteOptions{
Datacenter: "foo",
Token: "23456",
}
r.setWriteOptions(q)
if r.params.Get("dc") != "foo" {
t.Fatalf("bad: %v", r.params)
}
if r.params.Get("token") != "23456" {
t.Fatalf("bad: %v", r.params)
}
}
func TestRequestToHTTP(t *testing.T) {
c := makeClient(t)
r := c.newRequest("DELETE", "/v1/kv/foo")
q := &QueryOptions{
Datacenter: "foo",
}
r.setQueryOptions(q)
req, err := r.toHTTP()
if err != nil {
t.Fatalf("err: %v", err)
}
if req.Method != "DELETE" {
t.Fatalf("bad: %v", req)
}
if req.URL.String() != "http://127.0.0.1:8500/v1/kv/foo?dc=foo" {
t.Fatalf("bad: %v", req)
}
}
func TestParseQueryMeta(t *testing.T) {
resp := &http.Response{
Header: make(map[string][]string),
}
resp.Header.Set("X-Consul-Index", "12345")
resp.Header.Set("X-Consul-LastContact", "80")
resp.Header.Set("X-Consul-KnownLeader", "true")
qm := &QueryMeta{}
if err := parseQueryMeta(resp, qm); err != nil {
t.Fatalf("err: %v", err)
}
if qm.LastIndex != 12345 {
t.Fatalf("Bad: %v", qm)
}
if qm.LastContact != 80*time.Millisecond {
t.Fatalf("Bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("Bad: %v", qm)
}
}

View File

@@ -1,181 +0,0 @@
package consulapi
type Node struct {
Node string
Address string
}
type CatalogService struct {
Node string
Address string
ServiceID string
ServiceName string
ServiceTags []string
ServicePort int
}
type CatalogNode struct {
Node *Node
Services map[string]*AgentService
}
type CatalogRegistration struct {
Node string
Address string
Datacenter string
Service *AgentService
Check *AgentCheck
}
type CatalogDeregistration struct {
Node string
Address string
Datacenter string
ServiceID string
CheckID string
}
// Catalog can be used to query the Catalog endpoints
type Catalog struct {
c *Client
}
// Catalog returns a handle to the catalog endpoints
func (c *Client) Catalog() *Catalog {
return &Catalog{c}
}
func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("PUT", "/v1/catalog/register")
r.setWriteOptions(q)
r.obj = reg
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("PUT", "/v1/catalog/deregister")
r.setWriteOptions(q)
r.obj = dereg
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
// Datacenters is used to query for all the known datacenters
func (c *Catalog) Datacenters() ([]string, error) {
r := c.c.newRequest("GET", "/v1/catalog/datacenters")
_, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []string
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Nodes is used to query all the known nodes
func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/nodes")
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*Node
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Services is used to query for all known services
func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/services")
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out map[string][]string
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Service is used to query catalog entries for a given service
func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/service/"+service)
r.setQueryOptions(q)
if tag != "" {
r.params.Set("tag", tag)
}
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*CatalogService
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Node is used to query for service information about a single node
func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out *CatalogNode
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}

View File

@@ -1,219 +0,0 @@
package consulapi
import (
"testing"
)
func TestCatalog_Datacenters(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
datacenters, err := catalog.Datacenters()
if err != nil {
t.Fatalf("err: %v", err)
}
if len(datacenters) == 0 {
t.Fatalf("Bad: %v", datacenters)
}
}
func TestCatalog_Nodes(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
nodes, meta, err := catalog.Nodes(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("Bad: %v", meta)
}
if len(nodes) == 0 {
t.Fatalf("Bad: %v", nodes)
}
}
func TestCatalog_Services(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
services, meta, err := catalog.Services(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("Bad: %v", meta)
}
if len(services) == 0 {
t.Fatalf("Bad: %v", services)
}
}
func TestCatalog_Service(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
services, meta, err := catalog.Service("consul", "", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("Bad: %v", meta)
}
if len(services) == 0 {
t.Fatalf("Bad: %v", services)
}
}
func TestCatalog_Node(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
name, _ := c.Agent().NodeName()
info, meta, err := catalog.Node(name, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("Bad: %v", meta)
}
if len(info.Services) == 0 {
t.Fatalf("Bad: %v", info)
}
}
func TestCatalog_Registration(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
service := &AgentService{
ID: "redis1",
Service: "redis",
Tags: []string{"master", "v1"},
Port: 8000,
}
check := &AgentCheck{
Node: "foobar",
CheckID: "service:redis1",
Name: "Redis health check",
Notes: "Script based health check",
Status: "passing",
ServiceID: "redis1",
}
reg := &CatalogRegistration{
Datacenter: "dc1",
Node: "foobar",
Address: "192.168.10.10",
Service: service,
Check: check,
}
_, err := catalog.Register(reg, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
node, _, err := catalog.Node("foobar", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := node.Services["redis1"]; !ok {
t.Fatalf("missing service: redis1")
}
health, _, err := c.Health().Node("foobar", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if health[0].CheckID != "service:redis1" {
t.Fatalf("missing checkid service:redis1")
}
}
func TestCatalog_Deregistration(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
dereg := &CatalogDeregistration{
Datacenter: "dc1",
Node: "foobar",
Address: "192.168.10.10",
ServiceID: "redis1",
}
_, err := catalog.Deregister(dereg, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
node, _, err := catalog.Node("foobar", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := node.Services["redis1"]; ok {
t.Fatalf("ServiceID:redis1 is not deregistered")
}
dereg = &CatalogDeregistration{
Datacenter: "dc1",
Node: "foobar",
Address: "192.168.10.10",
CheckID: "service:redis1",
}
_, err = catalog.Deregister(dereg, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
health, _, err := c.Health().Node("foobar", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(health) != 0 {
t.Fatalf("CheckID:service:redis1 is not deregistered")
}
dereg = &CatalogDeregistration{
Datacenter: "dc1",
Node: "foobar",
Address: "192.168.10.10",
}
_, err = catalog.Deregister(dereg, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
node, _, err = catalog.Node("foobar", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if node != nil {
t.Fatalf("node is not deregistered: %v", node)
}
}

View File

@@ -1,104 +0,0 @@
package consulapi
import (
"bytes"
"strconv"
)
// Event can be used to query the Event endpoints
type Event struct {
c *Client
}
// UserEvent represents an event that was fired by the user
type UserEvent struct {
ID string
Name string
Payload []byte
NodeFilter string
ServiceFilter string
TagFilter string
Version int
LTime uint64
}
// Event returns a handle to the event endpoints
func (c *Client) Event() *Event {
return &Event{c}
}
// Fire is used to fire a new user event. Only the Name, Payload and Filters
// are respected. This returns the ID or an associated error. Cross DC requests
// are supported.
func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
r.setWriteOptions(q)
if params.NodeFilter != "" {
r.params.Set("node", params.NodeFilter)
}
if params.ServiceFilter != "" {
r.params.Set("service", params.ServiceFilter)
}
if params.TagFilter != "" {
r.params.Set("tag", params.TagFilter)
}
if params.Payload != nil {
r.body = bytes.NewReader(params.Payload)
}
rtt, resp, err := requireOK(e.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out UserEvent
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// List is used to get the most recent events an agent has received.
// This list can be optionally filtered by the name. This endpoint supports
// quasi-blocking queries. The index is not monotonic, nor does it provide
// LastContact or KnownLeader.
func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
r := e.c.newRequest("GET", "/v1/event/list")
r.setQueryOptions(q)
if name != "" {
r.params.Set("name", name)
}
rtt, resp, err := requireOK(e.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*UserEvent
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// IDToIndex is a bit of a hack. This simulates the index generation to
// convert an event ID into a WaitIndex.
func (e *Event) IDToIndex(uuid string) uint64 {
lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
upper := uuid[19:23] + uuid[24:36]
lowVal, err := strconv.ParseUint(lower, 16, 64)
if err != nil {
panic("Failed to convert " + lower)
}
highVal, err := strconv.ParseUint(upper, 16, 64)
if err != nil {
panic("Failed to convert " + upper)
}
return lowVal ^ highVal
}

View File

@@ -1,37 +0,0 @@
package consulapi
import (
"testing"
)
func TestEvent_FireList(t *testing.T) {
c := makeClient(t)
event := c.Event()
params := &UserEvent{Name: "foo"}
id, meta, err := event.Fire(params, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.RequestTime == 0 {
t.Fatalf("bad: %v", meta)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
events, qm, err := event.List("", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if qm.LastIndex != event.IDToIndex(id) {
t.Fatalf("Bad: %#v", qm)
}
if events[len(events)-1].ID != id {
t.Fatalf("bad: %#v", events)
}
}

View File

@@ -1,136 +0,0 @@
package consulapi
import (
"fmt"
)
// HealthCheck is used to represent a single check
type HealthCheck struct {
Node string
CheckID string
Name string
Status string
Notes string
Output string
ServiceID string
ServiceName string
}
// ServiceEntry is used for the health service endpoint
type ServiceEntry struct {
Node *Node
Service *AgentService
Checks []*HealthCheck
}
// Health can be used to query the Health endpoints
type Health struct {
c *Client
}
// Health returns a handle to the health endpoints
func (c *Client) Health() *Health {
return &Health{c}
}
// Node is used to query for checks belonging to a given node
func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/health/node/"+node)
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*HealthCheck
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Checks is used to return the checks associated with a service
func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/health/checks/"+service)
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*HealthCheck
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Service is used to query health information along with service info
// for a given service. It can optionally do server-side filtering on a tag
// or nodes with passing health checks only.
func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/health/service/"+service)
r.setQueryOptions(q)
if tag != "" {
r.params.Set("tag", tag)
}
if passingOnly {
r.params.Set("passing", "1")
}
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*ServiceEntry
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// State is used to retrieve all the checks in a given state.
// The wildcard "any" state can also be used for all checks.
func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
switch state {
case "any":
case "warning":
case "critical":
case "passing":
case "unknown":
default:
return nil, nil, fmt.Errorf("Unsupported state: %v", state)
}
r := h.c.newRequest("GET", "/v1/health/state/"+state)
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*HealthCheck
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}

View File

@@ -1,98 +0,0 @@
package consulapi
import (
"testing"
"time"
)
func TestHealth_Node(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
health := c.Health()
info, err := agent.Self()
if err != nil {
t.Fatalf("err: %v", err)
}
name := info["Config"]["NodeName"].(string)
checks, meta, err := health.Node(name, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
t.Fatalf("Bad: %v", checks)
}
}
func TestHealth_Checks(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
health := c.Health()
// Make a service with a check
reg := &AgentServiceRegistration{
Name: "foo",
Check: &AgentServiceCheck{
TTL: "15s",
},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
defer agent.ServiceDeregister("foo")
// Wait for the registration...
time.Sleep(20 * time.Millisecond)
checks, meta, err := health.Checks("foo", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
t.Fatalf("Bad: %v", checks)
}
}
func TestHealth_Service(t *testing.T) {
c := makeClient(t)
health := c.Health()
// consul service should always exist...
checks, meta, err := health.Service("consul", "", true, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
t.Fatalf("Bad: %v", checks)
}
}
func TestHealth_State(t *testing.T) {
c := makeClient(t)
health := c.Health()
checks, meta, err := health.State("any", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
t.Fatalf("Bad: %v", checks)
}
}

View File

@@ -1,219 +0,0 @@
package consulapi
import (
"bytes"
"fmt"
"io"
"net/http"
"strconv"
"strings"
)
// KVPair is used to represent a single K/V entry
type KVPair struct {
Key string
CreateIndex uint64
ModifyIndex uint64
LockIndex uint64
Flags uint64
Value []byte
Session string
}
// KVPairs is a list of KVPair objects
type KVPairs []*KVPair
// KV is used to manipulate the K/V API
type KV struct {
c *Client
}
// KV is used to return a handle to the K/V APIs
func (c *Client) KV() *KV {
return &KV{c}
}
// Get is used to lookup a single key
func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
resp, qm, err := k.getInternal(key, nil, q)
if err != nil {
return nil, nil, err
}
if resp == nil {
return nil, qm, nil
}
defer resp.Body.Close()
var entries []*KVPair
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
if len(entries) > 0 {
return entries[0], qm, nil
}
return nil, qm, nil
}
// List is used to lookup all keys under a prefix
func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
if err != nil {
return nil, nil, err
}
if resp == nil {
return nil, qm, nil
}
defer resp.Body.Close()
var entries []*KVPair
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// Keys is used to list all the keys under a prefix. Optionally,
// a separator can be used to limit the responses.
func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
params := map[string]string{"keys": ""}
if separator != "" {
params["separator"] = separator
}
resp, qm, err := k.getInternal(prefix, params, q)
if err != nil {
return nil, nil, err
}
if resp == nil {
return nil, qm, nil
}
defer resp.Body.Close()
var entries []string
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
r := k.c.newRequest("GET", "/v1/kv/"+key)
r.setQueryOptions(q)
for param, val := range params {
r.params.Set(param, val)
}
rtt, resp, err := k.c.doRequest(r)
if err != nil {
return nil, nil, err
}
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if resp.StatusCode == 404 {
resp.Body.Close()
return nil, qm, nil
} else if resp.StatusCode != 200 {
resp.Body.Close()
return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
}
return resp, qm, nil
}
// Put is used to write a new value. Only the
// Key, Flags and Value are respected.
func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
params := make(map[string]string, 1)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
_, wm, err := k.put(p.Key, params, p.Value, q)
return wm, err
}
// CAS is used for a Check-And-Set operation. The Key,
// ModifyIndex, Flags and Value are respected. Returns true
// on success or false on failures.
func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := make(map[string]string, 2)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
return k.put(p.Key, params, p.Value, q)
}
// Acquire is used for a lock acquisition operation. The Key,
// Flags, Value and Session are respected. Returns true
// on success or false on failures.
func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := make(map[string]string, 2)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
params["acquire"] = p.Session
return k.put(p.Key, params, p.Value, q)
}
// Release is used for a lock release operation. The Key,
// Flags, Value and Session are respected. Returns true
// on success or false on failures.
func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := make(map[string]string, 2)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
params["release"] = p.Session
return k.put(p.Key, params, p.Value, q)
}
func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
r := k.c.newRequest("PUT", "/v1/kv/"+key)
r.setWriteOptions(q)
for param, val := range params {
r.params.Set(param, val)
}
r.body = bytes.NewReader(body)
rtt, resp, err := requireOK(k.c.doRequest(r))
if err != nil {
return false, nil, err
}
defer resp.Body.Close()
qm := &WriteMeta{}
qm.RequestTime = rtt
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, nil, fmt.Errorf("Failed to read response: %v", err)
}
res := strings.Contains(string(buf.Bytes()), "true")
return res, qm, nil
}
// Delete is used to delete a single key
func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
return k.deleteInternal(key, nil, w)
}
// DeleteTree is used to delete all keys under a prefix
func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
return k.deleteInternal(prefix, []string{"recurse"}, w)
}
func (k *KV) deleteInternal(key string, params []string, q *WriteOptions) (*WriteMeta, error) {
r := k.c.newRequest("DELETE", "/v1/kv/"+key)
r.setWriteOptions(q)
for _, param := range params {
r.params.Set(param, "")
}
rtt, resp, err := requireOK(k.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
qm := &WriteMeta{}
qm.RequestTime = rtt
return qm, nil
}

View File

@@ -1,374 +0,0 @@
package consulapi
import (
"bytes"
"path"
"testing"
"time"
)
func TestClientPutGetDelete(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// Get a key that does not exist yet
key := testKey()
pair, _, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair != nil {
t.Fatalf("unexpected value: %#v", pair)
}
// Put the key
value := []byte("test")
p := &KVPair{Key: key, Flags: 42, Value: value}
if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err)
}
// Get should work
pair, meta, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if !bytes.Equal(pair.Value, value) {
t.Fatalf("unexpected value: %#v", pair)
}
if pair.Flags != 42 {
t.Fatalf("unexpected value: %#v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Delete
if _, err := kv.Delete(key, nil); err != nil {
t.Fatalf("err: %v", err)
}
// Get should fail
pair, _, err = kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair != nil {
t.Fatalf("unexpected value: %#v", pair)
}
}
func TestClient_List_DeleteRecurse(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// Generate some test keys
prefix := testKey()
var keys []string
for i := 0; i < 100; i++ {
keys = append(keys, path.Join(prefix, testKey()))
}
// Set values
value := []byte("test")
for _, key := range keys {
p := &KVPair{Key: key, Value: value}
if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err)
}
}
// List the values
pairs, meta, err := kv.List(prefix, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(pairs) != len(keys) {
t.Fatalf("got %d keys", len(pairs))
}
for _, pair := range pairs {
if !bytes.Equal(pair.Value, value) {
t.Fatalf("unexpected value: %#v", pair)
}
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Delete all
if _, err := kv.DeleteTree(prefix, nil); err != nil {
t.Fatalf("err: %v", err)
}
// List the values
pairs, _, err = kv.List(prefix, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(pairs) != 0 {
t.Fatalf("got %d keys", len(pairs))
}
}
func TestClient_CAS(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// Put the key
key := testKey()
value := []byte("test")
p := &KVPair{Key: key, Value: value}
if work, _, err := kv.CAS(p, nil); err != nil {
t.Fatalf("err: %v", err)
} else if !work {
t.Fatalf("CAS failure")
}
// Get should work
pair, meta, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// CAS update with bad index
newVal := []byte("foo")
p.Value = newVal
p.ModifyIndex = 1
if work, _, err := kv.CAS(p, nil); err != nil {
t.Fatalf("err: %v", err)
} else if work {
t.Fatalf("unexpected CAS")
}
// CAS update with valid index
p.ModifyIndex = meta.LastIndex
if work, _, err := kv.CAS(p, nil); err != nil {
t.Fatalf("err: %v", err)
} else if !work {
t.Fatalf("unexpected CAS failure")
}
}
func TestClient_WatchGet(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// Get a key that does not exist yet
key := testKey()
pair, meta, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair != nil {
t.Fatalf("unexpected value: %#v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Put the key
value := []byte("test")
go func() {
c := makeClient(t)
kv := c.KV()
time.Sleep(100 * time.Millisecond)
p := &KVPair{Key: key, Flags: 42, Value: value}
if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err)
}
}()
// Get should work
options := &QueryOptions{WaitIndex: meta.LastIndex}
pair, meta2, err := kv.Get(key, options)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if !bytes.Equal(pair.Value, value) {
t.Fatalf("unexpected value: %#v", pair)
}
if pair.Flags != 42 {
t.Fatalf("unexpected value: %#v", pair)
}
if meta2.LastIndex <= meta.LastIndex {
t.Fatalf("unexpected value: %#v", meta2)
}
}
func TestClient_WatchList(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// List a prefix with no keys yet
prefix := testKey()
key := path.Join(prefix, testKey())
pairs, meta, err := kv.List(prefix, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(pairs) != 0 {
t.Fatalf("unexpected value: %#v", pairs)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Put the key
value := []byte("test")
go func() {
c := makeClient(t)
kv := c.KV()
time.Sleep(100 * time.Millisecond)
p := &KVPair{Key: key, Flags: 42, Value: value}
if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err)
}
}()
// Get should work
options := &QueryOptions{WaitIndex: meta.LastIndex}
pairs, meta2, err := kv.List(prefix, options)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(pairs) != 1 {
t.Fatalf("expected value: %#v", pairs)
}
if !bytes.Equal(pairs[0].Value, value) {
t.Fatalf("unexpected value: %#v", pairs)
}
if pairs[0].Flags != 42 {
t.Fatalf("unexpected value: %#v", pairs)
}
if meta2.LastIndex <= meta.LastIndex {
t.Fatalf("unexpected value: %#v", meta2)
}
}
func TestClient_Keys_DeleteRecurse(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// Generate some test keys
prefix := testKey()
var keys []string
for i := 0; i < 100; i++ {
keys = append(keys, path.Join(prefix, testKey()))
}
// Set values
value := []byte("test")
for _, key := range keys {
p := &KVPair{Key: key, Value: value}
if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err)
}
}
// List the values
out, meta, err := kv.Keys(prefix, "", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(out) != len(keys) {
t.Fatalf("got %d keys", len(out))
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Delete all
if _, err := kv.DeleteTree(prefix, nil); err != nil {
t.Fatalf("err: %v", err)
}
// List the values
out, _, err = kv.Keys(prefix, "", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(out) != 0 {
t.Fatalf("got %d keys", len(out))
}
}
func TestClient_AcquireRelease(t *testing.T) {
c := makeClient(t)
session := c.Session()
kv := c.KV()
// Make a session
id, _, err := session.CreateNoChecks(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
defer session.Destroy(id, nil)
// Acquire the key
key := testKey()
value := []byte("test")
p := &KVPair{Key: key, Value: value, Session: id}
if work, _, err := kv.Acquire(p, nil); err != nil {
t.Fatalf("err: %v", err)
} else if !work {
t.Fatalf("Lock failure")
}
// Get should work
pair, meta, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if pair.LockIndex != 1 {
t.Fatalf("Expected lock: %v", pair)
}
if pair.Session != id {
t.Fatalf("Expected lock: %v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Release
if work, _, err := kv.Release(p, nil); err != nil {
t.Fatalf("err: %v", err)
} else if !work {
t.Fatalf("Release fail")
}
// Get should work
pair, meta, err = kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if pair.LockIndex != 1 {
t.Fatalf("Expected lock: %v", pair)
}
if pair.Session != "" {
t.Fatalf("Expected unlock: %v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
}

View File

@@ -1,204 +0,0 @@
package consulapi
import (
"time"
)
// SessionEntry represents a session in consul
type SessionEntry struct {
CreateIndex uint64
ID string
Name string
Node string
Checks []string
LockDelay time.Duration
Behavior string
TTL string
}
// Session can be used to query the Session endpoints
type Session struct {
c *Client
}
// Session returns a handle to the session endpoints
func (c *Client) Session() *Session {
return &Session{c}
}
// CreateNoChecks is like Create but is used specifically to create
// a session with no associated health checks.
func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
body := make(map[string]interface{})
body["Checks"] = []string{}
if se != nil {
if se.Name != "" {
body["Name"] = se.Name
}
if se.Node != "" {
body["Node"] = se.Node
}
if se.LockDelay != 0 {
body["LockDelay"] = durToMsec(se.LockDelay)
}
if se.Behavior != "" {
body["Behavior"] = se.Behavior
}
if se.TTL != "" {
body["TTL"] = se.TTL
}
}
return s.create(body, q)
}
// Create makes a new session. Providing a session entry can
// customize the session. It can also be nil to use defaults.
func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
var obj interface{}
if se != nil {
body := make(map[string]interface{})
obj = body
if se.Name != "" {
body["Name"] = se.Name
}
if se.Node != "" {
body["Node"] = se.Node
}
if se.LockDelay != 0 {
body["LockDelay"] = durToMsec(se.LockDelay)
}
if len(se.Checks) > 0 {
body["Checks"] = se.Checks
}
if se.Behavior != "" {
body["Behavior"] = se.Behavior
}
if se.TTL != "" {
body["TTL"] = se.TTL
}
}
return s.create(obj, q)
}
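// A minimal usage sketch, assuming a local Consul agent is reachable;
// keepSessionAlive is a hypothetical helper and not part of this package.
// It creates a TTL session, renews it once, and destroys it when done.
func keepSessionAlive(c *Client) error {
	session := c.Session()

	// Create a session that expires unless renewed within its TTL.
	id, _, err := session.Create(&SessionEntry{TTL: "15s", Behavior: "release"}, nil)
	if err != nil {
		return err
	}
	defer session.Destroy(id, nil)

	// Renew the TTL before it elapses to keep the session (and any locks
	// acquired with it) alive.
	_, _, err = session.Renew(id, nil)
	return err
}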
func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
r := s.c.newRequest("PUT", "/v1/session/create")
r.setWriteOptions(q)
r.obj = obj
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// Destroy invalidates a given session
func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
r := s.c.newRequest("PUT", "/v1/session/destroy/"+id)
r.setWriteOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}
// Renew renews the TTL on a given session
func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
r := s.c.newRequest("PUT", "/v1/session/renew/"+id)
r.setWriteOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var entries []*SessionEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, wm, err
}
if len(entries) > 0 {
return entries[0], wm, nil
}
return nil, wm, nil
}
// Info looks up a single session
func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
r := s.c.newRequest("GET", "/v1/session/info/"+id)
r.setQueryOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*SessionEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
if len(entries) > 0 {
return entries[0], qm, nil
}
return nil, qm, nil
}
// Node gets the active sessions for a node
func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
r := s.c.newRequest("GET", "/v1/session/node/"+node)
r.setQueryOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*SessionEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// List gets all active sessions
func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
r := s.c.newRequest("GET", "/v1/session/list")
r.setQueryOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*SessionEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}

View File

@ -1,190 +0,0 @@
package consulapi
import (
"testing"
)
func TestSession_CreateDestroy(t *testing.T) {
c := makeClient(t)
session := c.Session()
id, meta, err := session.Create(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.RequestTime == 0 {
t.Fatalf("bad: %v", meta)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
meta, err = session.Destroy(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.RequestTime == 0 {
t.Fatalf("bad: %v", meta)
}
}
func TestSession_CreateRenewDestroy(t *testing.T) {
c := makeClient(t)
session := c.Session()
se := &SessionEntry{
TTL: "10s",
}
id, meta, err := session.Create(se, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
defer session.Destroy(id, nil)
if meta.RequestTime == 0 {
t.Fatalf("bad: %v", meta)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
if meta.RequestTime == 0 {
t.Fatalf("bad: %v", meta)
}
renew, meta, err := session.Renew(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.RequestTime == 0 {
t.Fatalf("bad: %v", meta)
}
if renew == nil {
t.Fatalf("should get session")
}
if renew.ID != id {
t.Fatalf("should have matching id")
}
if renew.TTL != "10s" {
t.Fatalf("should get session with TTL")
}
}
func TestSession_Info(t *testing.T) {
c := makeClient(t)
session := c.Session()
id, _, err := session.Create(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
defer session.Destroy(id, nil)
info, qm, err := session.Info(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
if info == nil {
t.Fatalf("should get session")
}
if info.CreateIndex == 0 {
t.Fatalf("bad: %v", info)
}
if info.ID != id {
t.Fatalf("bad: %v", info)
}
if info.Name != "" {
t.Fatalf("bad: %v", info)
}
if info.Node == "" {
t.Fatalf("bad: %v", info)
}
if len(info.Checks) == 0 {
t.Fatalf("bad: %v", info)
}
if info.LockDelay == 0 {
t.Fatalf("bad: %v", info)
}
if info.Behavior != "release" {
t.Fatalf("bad: %v", info)
}
if info.TTL != "" {
t.Fatalf("bad: %v", info)
}
}
func TestSession_Node(t *testing.T) {
c := makeClient(t)
session := c.Session()
id, _, err := session.Create(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
defer session.Destroy(id, nil)
info, qm, err := session.Info(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
sessions, qm, err := session.Node(info.Node, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(sessions) != 1 {
t.Fatalf("bad: %v", sessions)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
}
func TestSession_List(t *testing.T) {
c := makeClient(t)
session := c.Session()
id, _, err := session.Create(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
defer session.Destroy(id, nil)
sessions, qm, err := session.List(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(sessions) != 1 {
t.Fatalf("bad: %v", sessions)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
}

View File

@ -1,43 +0,0 @@
package consulapi
// Status can be used to query the Status endpoints
type Status struct {
c *Client
}
// Status returns a handle to the status endpoints
func (c *Client) Status() *Status {
return &Status{c}
}
// Leader is used to query for a known leader
func (s *Status) Leader() (string, error) {
r := s.c.newRequest("GET", "/v1/status/leader")
_, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return "", err
}
defer resp.Body.Close()
var leader string
if err := decodeBody(resp, &leader); err != nil {
return "", err
}
return leader, nil
}
// Peers is used to query for the known raft peers
func (s *Status) Peers() ([]string, error) {
r := s.c.newRequest("GET", "/v1/status/peers")
_, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var peers []string
if err := decodeBody(resp, &peers); err != nil {
return nil, err
}
return peers, nil
}
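// A minimal usage sketch; clusterReady is a hypothetical helper, not part of
// this package. It reports whether the agent currently knows a raft leader
// and at least one peer.
func clusterReady(c *Client) (bool, error) {
	status := c.Status()
	leader, err := status.Leader()
	if err != nil {
		return false, err
	}
	peers, err := status.Peers()
	if err != nil {
		return false, err
	}
	return leader != "" && len(peers) > 0, nil
}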

View File

@ -1,31 +0,0 @@
package consulapi
import (
"testing"
)
func TestStatusLeader(t *testing.T) {
c := makeClient(t)
status := c.Status()
leader, err := status.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
if leader == "" {
t.Fatalf("Expected leader")
}
}
func TestStatusPeers(t *testing.T) {
c := makeClient(t)
status := c.Status()
peers, err := status.Peers()
if err != nil {
t.Fatalf("err: %v", err)
}
if len(peers) == 0 {
t.Fatalf("Expected peers ")
}
}

View File

@ -1,23 +0,0 @@
package etcd
// AddChildDir adds a new directory with a random etcd-generated key under the given path.
func (c *Client) AddChildDir(key string, ttl uint64) (*Response, error) {
raw, err := c.post(key, "", ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// AddChild adds a new file with a random etcd-generated key under the given path.
func (c *Client) AddChild(key string, value string, ttl uint64) (*Response, error) {
raw, err := c.post(key, value, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
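// A minimal usage sketch, assuming a reachable etcd server; enqueueJobs is a
// hypothetical helper, not part of this package. Because etcd assigns
// increasing keys to children, the first value added sorts before the second.
func enqueueJobs(c *Client) error {
	// A ttl of 0 means the directory and its children never expire.
	if _, err := c.CreateDir("jobs", 0); err != nil {
		return err
	}
	if _, err := c.AddChild("jobs", "job-1", 0); err != nil {
		return err
	}
	_, err := c.AddChild("jobs", "job-2", 0)
	return err
}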

View File

@ -1,73 +0,0 @@
package etcd
import "testing"
func TestAddChild(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("fooDir", true)
c.Delete("nonexistentDir", true)
}()
c.CreateDir("fooDir", 5)
_, err := c.AddChild("fooDir", "v0", 5)
if err != nil {
t.Fatal(err)
}
_, err = c.AddChild("fooDir", "v1", 5)
if err != nil {
t.Fatal(err)
}
resp, err := c.Get("fooDir", true, false)
// The child with v0 should precede the child with v1 because it was added
// earlier, so it should have a lower key.
if !(len(resp.Node.Nodes) == 2 && (resp.Node.Nodes[0].Value == "v0" && resp.Node.Nodes[1].Value == "v1")) {
t.Fatalf("AddChild 1 failed. There should be two children whose values are v0 and v1, respectively."+
" The response was: %#v", resp)
}
// Creating a child under a nonexistent directory should succeed.
// The directory should be created.
resp, err = c.AddChild("nonexistentDir", "foo", 5)
if err != nil {
t.Fatal(err)
}
}
func TestAddChildDir(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("fooDir", true)
c.Delete("nonexistentDir", true)
}()
c.CreateDir("fooDir", 5)
_, err := c.AddChildDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
_, err = c.AddChildDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
resp, err := c.Get("fooDir", true, false)
// Both children should be empty directories; the one added first should have
// the lower key.
if !(len(resp.Node.Nodes) == 2 && (len(resp.Node.Nodes[0].Nodes) == 0 && len(resp.Node.Nodes[1].Nodes) == 0)) {
t.Fatalf("AddChildDir 1 failed. There should be two empty child directories."+
" The response was: %#v", resp)
}
// Creating a child under a nonexistent directory should succeed.
// The directory should be created.
resp, err = c.AddChildDir("nonexistentDir", 5)
if err != nil {
t.Fatal(err)
}
}

View File

@ -1,481 +0,0 @@
package etcd
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"path"
"strings"
"time"
)
// See SetConsistency for how to use these constants.
const (
// Using strings rather than iota because the consistency level
// could be persisted to disk, so it'd be better to use
// human-readable values.
STRONG_CONSISTENCY = "STRONG"
WEAK_CONSISTENCY = "WEAK"
)
const (
defaultBufferSize = 10
)
func init() {
rand.Seed(int64(time.Now().Nanosecond()))
}
type Config struct {
CertFile string `json:"certFile"`
KeyFile string `json:"keyFile"`
CaCertFile []string `json:"caCertFiles"`
DialTimeout time.Duration `json:"timeout"`
Consistency string `json:"consistency"`
}
type credentials struct {
username string
password string
}
type Client struct {
config Config `json:"config"`
cluster *Cluster `json:"cluster"`
httpClient *http.Client
credentials *credentials
transport *http.Transport
persistence io.Writer
cURLch chan string
// CheckRetry can be used to control the policy for failed requests
// and modify the cluster if needed.
// The client calls it before sending requests again, and
// stops retrying if CheckRetry returns some error. The cases that
// this function needs to handle include getting no response and
// receiving an unexpected HTTP status code in the response.
// If CheckRetry is nil, client will call the default one
// `DefaultCheckRetry`.
// Argument cluster is the etcd.Cluster object that these requests have been made on.
// Argument numReqs is the number of http.Requests that have been made so far.
// Argument lastResp is the http.Response from the last request.
// Argument err is the reason of the failure.
CheckRetry func(cluster *Cluster, numReqs int,
lastResp http.Response, err error) error
}
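// A minimal sketch of wiring up a custom retry policy; newClientWithOnePass is
// a hypothetical helper, not part of this package. It stops retrying after a
// single pass over the cluster and otherwise defers to DefaultCheckRetry.
func newClientWithOnePass(machines []string) *Client {
	c := NewClient(machines)
	c.CheckRetry = func(cluster *Cluster, numReqs int,
		lastResp http.Response, err error) error {
		if numReqs > len(cluster.Machines) {
			return newError(ErrCodeEtcdNotReachable, "gave up after one pass", 0)
		}
		return DefaultCheckRetry(cluster, numReqs, lastResp, err)
	}
	return c
}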
// NewClient creates a basic client that is configured to be used
// with the given machine list.
func NewClient(machines []string) *Client {
config := Config{
// default timeout is one second
DialTimeout: time.Second,
Consistency: WEAK_CONSISTENCY,
}
client := &Client{
cluster: NewCluster(machines),
config: config,
}
client.initHTTPClient()
client.saveConfig()
return client
}
// NewTLSClient creates a basic client with TLS configuration
func NewTLSClient(machines []string, cert, key, caCert string) (*Client, error) {
// overwrite the default machine to use https
if len(machines) == 0 {
machines = []string{"https://127.0.0.1:4001"}
}
config := Config{
// default timeout is one second
DialTimeout: time.Second,
Consistency: WEAK_CONSISTENCY,
CertFile: cert,
KeyFile: key,
CaCertFile: make([]string, 0),
}
client := &Client{
cluster: NewCluster(machines),
config: config,
}
err := client.initHTTPSClient(cert, key)
if err != nil {
return nil, err
}
err = client.AddRootCA(caCert)
client.saveConfig()
return client, nil
}
// NewClientFromFile creates a client from a given file path.
// The given file is expected to use the JSON format.
func NewClientFromFile(fpath string) (*Client, error) {
fi, err := os.Open(fpath)
if err != nil {
return nil, err
}
defer func() {
if err := fi.Close(); err != nil {
panic(err)
}
}()
return NewClientFromReader(fi)
}
// NewClientFromReader creates a Client configured from a given reader.
// The configuration is expected to use the JSON format.
func NewClientFromReader(reader io.Reader) (*Client, error) {
c := new(Client)
b, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
}
err = json.Unmarshal(b, c)
if err != nil {
return nil, err
}
if c.config.CertFile == "" {
c.initHTTPClient()
} else {
err = c.initHTTPSClient(c.config.CertFile, c.config.KeyFile)
}
if err != nil {
return nil, err
}
for _, caCert := range c.config.CaCertFile {
if err := c.AddRootCA(caCert); err != nil {
return nil, err
}
}
return c, nil
}
// Override the Client's HTTP Transport object
func (c *Client) SetTransport(tr *http.Transport) {
c.httpClient.Transport = tr
c.transport = tr
}
func (c *Client) SetCredentials(username, password string) {
c.credentials = &credentials{username, password}
}
func (c *Client) Close() {
c.transport.DisableKeepAlives = true
c.transport.CloseIdleConnections()
}
// initHTTPClient initializes an HTTP client for the etcd client
func (c *Client) initHTTPClient() {
c.transport = &http.Transport{
Dial: c.dial,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
c.httpClient = &http.Client{Transport: c.transport}
}
// initHTTPSClient initializes an HTTPS client for the etcd client
func (c *Client) initHTTPSClient(cert, key string) error {
if cert == "" || key == "" {
return errors.New("Require both cert and key path")
}
tlsCert, err := tls.LoadX509KeyPair(cert, key)
if err != nil {
return err
}
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{tlsCert},
InsecureSkipVerify: true,
}
tr := &http.Transport{
TLSClientConfig: tlsConfig,
Dial: c.dial,
}
c.httpClient = &http.Client{Transport: tr}
return nil
}
// SetPersistence sets a writer to which the config will be
// written every time it's changed.
func (c *Client) SetPersistence(writer io.Writer) {
c.persistence = writer
}
// SetConsistency changes the consistency level of the client.
//
// When consistency is set to STRONG_CONSISTENCY, all requests,
// including GET, are sent to the leader. This means that, assuming
// the absence of leader failures, GET requests are guaranteed to see
// the changes made by previous requests.
//
// When consistency is set to WEAK_CONSISTENCY, other requests
// are still sent to the leader, but GET requests are sent to a
// random server from the server pool. This reduces the read
// load on the leader, but it's not guaranteed that the GET requests
// will see changes made by previous requests (they might have not
// yet been committed on non-leader servers).
func (c *Client) SetConsistency(consistency string) error {
if !(consistency == STRONG_CONSISTENCY || consistency == WEAK_CONSISTENCY) {
return errors.New("The argument must be either STRONG_CONSISTENCY or WEAK_CONSISTENCY.")
}
c.config.Consistency = consistency
return nil
}
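// A minimal usage sketch; newStrongClient is a hypothetical helper, not part
// of this package. With STRONG_CONSISTENCY every GET is routed to the leader,
// trading read throughput for read-your-writes behaviour.
func newStrongClient(machines []string) (*Client, error) {
	c := NewClient(machines)
	if err := c.SetConsistency(STRONG_CONSISTENCY); err != nil {
		return nil, err
	}
	return c, nil
}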
// Sets the DialTimeout value
func (c *Client) SetDialTimeout(d time.Duration) {
c.config.DialTimeout = d
}
// AddRootCA adds a root CA cert for the etcd client
func (c *Client) AddRootCA(caCert string) error {
if c.httpClient == nil {
return errors.New("Client has not been initialized yet!")
}
certBytes, err := ioutil.ReadFile(caCert)
if err != nil {
return err
}
tr, ok := c.httpClient.Transport.(*http.Transport)
if !ok {
panic("AddRootCA(): Transport type assert should not fail")
}
if tr.TLSClientConfig.RootCAs == nil {
caCertPool := x509.NewCertPool()
ok = caCertPool.AppendCertsFromPEM(certBytes)
if ok {
tr.TLSClientConfig.RootCAs = caCertPool
}
tr.TLSClientConfig.InsecureSkipVerify = false
} else {
ok = tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(certBytes)
}
if !ok {
err = errors.New("Unable to load caCert")
}
c.config.CaCertFile = append(c.config.CaCertFile, caCert)
c.saveConfig()
return err
}
// SetCluster updates cluster information using the given machine list.
func (c *Client) SetCluster(machines []string) bool {
success := c.internalSyncCluster(machines)
return success
}
func (c *Client) GetCluster() []string {
return c.cluster.Machines
}
// SyncCluster updates the cluster information using the internal machine list.
func (c *Client) SyncCluster() bool {
return c.internalSyncCluster(c.cluster.Machines)
}
// internalSyncCluster syncs cluster information using the given machine list.
func (c *Client) internalSyncCluster(machines []string) bool {
for _, machine := range machines {
httpPath := c.createHttpPath(machine, path.Join(version, "members"))
resp, err := c.httpClient.Get(httpPath)
if err != nil {
// try another machine in the cluster
continue
}
if resp.StatusCode != http.StatusOK { // fall-back to old endpoint
httpPath := c.createHttpPath(machine, path.Join(version, "machines"))
resp, err := c.httpClient.Get(httpPath)
if err != nil {
// try another machine in the cluster
continue
}
b, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
// try another machine in the cluster
continue
}
// update Machines List
c.cluster.updateFromStr(string(b))
} else {
b, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
// try another machine in the cluster
continue
}
var mCollection memberCollection
if err := json.Unmarshal(b, &mCollection); err != nil {
// try another machine
continue
}
urls := make([]string, 0)
for _, m := range mCollection {
urls = append(urls, m.ClientURLs...)
}
// update Machines List
c.cluster.updateFromStr(strings.Join(urls, ","))
}
logger.Debug("sync.machines ", c.cluster.Machines)
c.saveConfig()
return true
}
return false
}
// createHttpPath creates a complete HTTP URL.
// serverName should contain both the host name and a port number, if any.
func (c *Client) createHttpPath(serverName string, _path string) string {
u, err := url.Parse(serverName)
if err != nil {
panic(err)
}
u.Path = path.Join(u.Path, _path)
if u.Scheme == "" {
u.Scheme = "http"
}
return u.String()
}
// dial attempts to open a TCP connection to the provided address, explicitly
// enabling keep-alives with a one-second interval.
func (c *Client) dial(network, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(network, addr, c.config.DialTimeout)
if err != nil {
return nil, err
}
tcpConn, ok := conn.(*net.TCPConn)
if !ok {
return nil, errors.New("Failed type-assertion of net.Conn as *net.TCPConn")
}
// Keep TCP alive to check whether or not the remote machine is down
if err = tcpConn.SetKeepAlive(true); err != nil {
return nil, err
}
if err = tcpConn.SetKeepAlivePeriod(time.Second); err != nil {
return nil, err
}
return tcpConn, nil
}
func (c *Client) OpenCURL() {
c.cURLch = make(chan string, defaultBufferSize)
}
func (c *Client) CloseCURL() {
c.cURLch = nil
}
func (c *Client) sendCURL(command string) {
go func() {
select {
case c.cURLch <- command:
default:
}
}()
}
func (c *Client) RecvCURL() string {
return <-c.cURLch
}
// saveConfig saves the current config using c.persistence.
func (c *Client) saveConfig() error {
if c.persistence != nil {
b, err := json.Marshal(c)
if err != nil {
return err
}
_, err = c.persistence.Write(b)
if err != nil {
return err
}
}
return nil
}
// MarshalJSON implements the Marshaller interface
// as defined by the standard JSON package.
func (c *Client) MarshalJSON() ([]byte, error) {
b, err := json.Marshal(struct {
Config Config `json:"config"`
Cluster *Cluster `json:"cluster"`
}{
Config: c.config,
Cluster: c.cluster,
})
if err != nil {
return nil, err
}
return b, nil
}
// UnmarshalJSON implements the Unmarshaller interface
// as defined by the standard JSON package.
func (c *Client) UnmarshalJSON(b []byte) error {
temp := struct {
Config Config `json:"config"`
Cluster *Cluster `json:"cluster"`
}{}
err := json.Unmarshal(b, &temp)
if err != nil {
return err
}
c.cluster = temp.Cluster
c.config = temp.Config
return nil
}

View File

@ -1,108 +0,0 @@
package etcd
import (
"encoding/json"
"fmt"
"net"
"net/url"
"os"
"testing"
)
// To pass this test, we need to create a cluster of 3 machines
// The server should be listening on localhost:4001, 4002, 4003
func TestSync(t *testing.T) {
fmt.Println("Make sure there are three nodes at 0.0.0.0:4001-4003")
// Explicit trailing slash to ensure this doesn't reproduce:
// https://github.com/coreos/go-etcd/issues/82
c := NewClient([]string{"http://127.0.0.1:4001/"})
success := c.SyncCluster()
if !success {
t.Fatal("cannot sync machines")
}
for _, m := range c.GetCluster() {
u, err := url.Parse(m)
if err != nil {
t.Fatal(err)
}
if u.Scheme != "http" {
t.Fatal("scheme must be http")
}
host, _, err := net.SplitHostPort(u.Host)
if err != nil {
t.Fatal(err)
}
if host != "localhost" {
t.Fatal("Host must be localhost")
}
}
badMachines := []string{"abc", "edef"}
success = c.SetCluster(badMachines)
if success {
t.Fatal("should not sync on bad machines")
}
goodMachines := []string{"127.0.0.1:4002"}
success = c.SetCluster(goodMachines)
if !success {
t.Fatal("cannot sync machines")
} else {
fmt.Println(c.cluster.Machines)
}
}
func TestPersistence(t *testing.T) {
c := NewClient(nil)
c.SyncCluster()
fo, err := os.Create("config.json")
if err != nil {
t.Fatal(err)
}
defer func() {
if err := fo.Close(); err != nil {
panic(err)
}
}()
c.SetPersistence(fo)
err = c.saveConfig()
if err != nil {
t.Fatal(err)
}
c2, err := NewClientFromFile("config.json")
if err != nil {
t.Fatal(err)
}
// Verify that the two clients have the same config
b1, _ := json.Marshal(c)
b2, _ := json.Marshal(c2)
if string(b1) != string(b2) {
t.Fatalf("The two configs should be equal!")
}
}
func TestClientRetry(t *testing.T) {
c := NewClient([]string{"http://strange", "http://127.0.0.1:4001"})
// use first endpoint as the picked url
c.cluster.picked = 0
if _, err := c.Set("foo", "bar", 5); err != nil {
t.Fatal(err)
}
if _, err := c.Delete("foo", true); err != nil {
t.Fatal(err)
}
}

View File

@ -1,37 +0,0 @@
package etcd
import (
"math/rand"
"strings"
)
type Cluster struct {
Leader string `json:"leader"`
Machines []string `json:"machines"`
picked int
}
func NewCluster(machines []string) *Cluster {
// if an empty slice was sent in then just assume HTTP 4001 on localhost
if len(machines) == 0 {
machines = []string{"http://127.0.0.1:4001"}
}
// default leader and machines
return &Cluster{
Leader: "",
Machines: machines,
picked: rand.Intn(len(machines)),
}
}
func (cl *Cluster) failure() { cl.picked = rand.Intn(len(cl.Machines)) }
func (cl *Cluster) pick() string { return cl.Machines[cl.picked] }
func (cl *Cluster) updateFromStr(machines string) {
cl.Machines = strings.Split(machines, ",")
for i := range cl.Machines {
cl.Machines[i] = strings.TrimSpace(cl.Machines[i])
}
cl.picked = rand.Intn(len(cl.Machines))
}

View File

@ -1,34 +0,0 @@
package etcd
import "fmt"
func (c *Client) CompareAndDelete(key string, prevValue string, prevIndex uint64) (*Response, error) {
raw, err := c.RawCompareAndDelete(key, prevValue, prevIndex)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
func (c *Client) RawCompareAndDelete(key string, prevValue string, prevIndex uint64) (*RawResponse, error) {
if prevValue == "" && prevIndex == 0 {
return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
}
options := Options{}
if prevValue != "" {
options["prevValue"] = prevValue
}
if prevIndex != 0 {
options["prevIndex"] = prevIndex
}
raw, err := c.delete(key, options)
if err != nil {
return nil, err
}
return raw, err
}

View File

@ -1,46 +0,0 @@
package etcd
import (
"testing"
)
func TestCompareAndDelete(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
}()
c.Set("foo", "bar", 5)
// This should succeed with a correct prevValue
resp, err := c.CompareAndDelete("foo", "bar", 0)
if err != nil {
t.Fatal(err)
}
if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
t.Fatalf("CompareAndDelete 1 prevNode failed: %#v", resp)
}
resp, _ = c.Set("foo", "bar", 5)
// This should fail because it gives an incorrect prevValue
_, err = c.CompareAndDelete("foo", "xxx", 0)
if err == nil {
t.Fatalf("CompareAndDelete 2 should have failed. The response is: %#v", resp)
}
// This should succeed because it gives a correct prevIndex
resp, err = c.CompareAndDelete("foo", "", resp.Node.ModifiedIndex)
if err != nil {
t.Fatal(err)
}
if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp)
}
c.Set("foo", "bar", 5)
// This should fail because it gives an incorrect prevIndex
resp, err = c.CompareAndDelete("foo", "", 29817514)
if err == nil {
t.Fatalf("CompareAndDelete 4 should have failed. The response is: %#v", resp)
}
}

View File

@ -1,36 +0,0 @@
package etcd
import "fmt"
func (c *Client) CompareAndSwap(key string, value string, ttl uint64,
prevValue string, prevIndex uint64) (*Response, error) {
raw, err := c.RawCompareAndSwap(key, value, ttl, prevValue, prevIndex)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
func (c *Client) RawCompareAndSwap(key string, value string, ttl uint64,
prevValue string, prevIndex uint64) (*RawResponse, error) {
if prevValue == "" && prevIndex == 0 {
return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
}
options := Options{}
if prevValue != "" {
options["prevValue"] = prevValue
}
if prevIndex != 0 {
options["prevIndex"] = prevIndex
}
raw, err := c.put(key, value, ttl, options)
if err != nil {
return nil, err
}
return raw, err
}
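// A minimal usage sketch; casReplace is a hypothetical helper, not part of
// this package. It updates key only if its current value is still oldValue,
// which is the optimistic-concurrency pattern these methods exist for.
func casReplace(c *Client, key, oldValue, newValue string, ttl uint64) error {
	_, err := c.CompareAndSwap(key, newValue, ttl, oldValue, 0)
	return err
}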

View File

@ -1,57 +0,0 @@
package etcd
import (
"testing"
)
func TestCompareAndSwap(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
}()
c.Set("foo", "bar", 5)
// This should succeed
resp, err := c.CompareAndSwap("foo", "bar2", 5, "bar", 0)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
t.Fatalf("CompareAndSwap 1 failed: %#v", resp)
}
if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
t.Fatalf("CompareAndSwap 1 prevNode failed: %#v", resp)
}
// This should fail because it gives an incorrect prevValue
resp, err = c.CompareAndSwap("foo", "bar3", 5, "xxx", 0)
if err == nil {
t.Fatalf("CompareAndSwap 2 should have failed. The response is: %#v", resp)
}
resp, err = c.Set("foo", "bar", 5)
if err != nil {
t.Fatal(err)
}
// This should succeed
resp, err = c.CompareAndSwap("foo", "bar2", 5, "", resp.Node.ModifiedIndex)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
t.Fatalf("CompareAndSwap 3 failed: %#v", resp)
}
if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp)
}
// This should fail because it gives an incorrect prevIndex
resp, err = c.CompareAndSwap("foo", "bar3", 5, "", 29817514)
if err == nil {
t.Fatalf("CompareAndSwap 4 should have failed. The response is: %#v", resp)
}
}

View File

@ -1,55 +0,0 @@
package etcd
import (
"fmt"
"io/ioutil"
"log"
"strings"
)
var logger *etcdLogger
func SetLogger(l *log.Logger) {
logger = &etcdLogger{l}
}
func GetLogger() *log.Logger {
return logger.log
}
type etcdLogger struct {
log *log.Logger
}
func (p *etcdLogger) Debug(args ...interface{}) {
msg := "DEBUG: " + fmt.Sprint(args...)
p.log.Println(msg)
}
func (p *etcdLogger) Debugf(f string, args ...interface{}) {
msg := "DEBUG: " + fmt.Sprintf(f, args...)
// Append newline if necessary
if !strings.HasSuffix(msg, "\n") {
msg = msg + "\n"
}
p.log.Print(msg)
}
func (p *etcdLogger) Warning(args ...interface{}) {
msg := "WARNING: " + fmt.Sprint(args...)
p.log.Println(msg)
}
func (p *etcdLogger) Warningf(f string, args ...interface{}) {
msg := "WARNING: " + fmt.Sprintf(f, args...)
// Append newline if necessary
if !strings.HasSuffix(msg, "\n") {
msg = msg + "\n"
}
p.log.Print(msg)
}
func init() {
// Default logger uses the go default log.
SetLogger(log.New(ioutil.Discard, "go-etcd", log.LstdFlags))
}

View File

@ -1,28 +0,0 @@
package etcd
import (
"testing"
)
type Foo struct{}
type Bar struct {
one string
two int
}
// Tests that logs don't panic with arbitrary interfaces
func TestDebug(t *testing.T) {
f := &Foo{}
b := &Bar{"asfd", 3}
for _, test := range []interface{}{
1234,
"asdf",
f,
b,
} {
logger.Debug(test)
logger.Debugf("something, %s", test)
logger.Warning(test)
logger.Warningf("something, %s", test)
}
}

View File

@ -1,40 +0,0 @@
package etcd
// Delete deletes the given key.
//
// When recursive is set to false, if the key points to a
// directory the method will fail.
//
// When recursive is set to true, if the key points to a file,
// the file will be deleted; if the key points to a directory,
// then everything under the directory (including all child directories)
// will be deleted.
func (c *Client) Delete(key string, recursive bool) (*Response, error) {
raw, err := c.RawDelete(key, recursive, false)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// DeleteDir deletes an empty directory or a key-value pair
func (c *Client) DeleteDir(key string) (*Response, error) {
raw, err := c.RawDelete(key, false, true)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
func (c *Client) RawDelete(key string, recursive bool, dir bool) (*RawResponse, error) {
ops := Options{
"recursive": recursive,
"dir": dir,
}
return c.delete(key, ops)
}
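// A minimal usage sketch; removeSubtree is a hypothetical helper, not part of
// this package. Deleting with recursive=false removes a single key, while
// recursive=true removes a directory and everything beneath it.
func removeSubtree(c *Client, key string) error {
	// Try a plain delete first; this fails if key is a directory.
	if _, err := c.Delete(key, false); err == nil {
		return nil
	}
	_, err := c.Delete(key, true)
	return err
}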

View File

@ -1,81 +0,0 @@
package etcd
import (
"testing"
)
func TestDelete(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
}()
c.Set("foo", "bar", 5)
resp, err := c.Delete("foo", false)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Value == "") {
t.Fatalf("Delete failed with %s", resp.Node.Value)
}
if !(resp.PrevNode.Value == "bar") {
t.Fatalf("Delete PrevNode failed with %s", resp.Node.Value)
}
resp, err = c.Delete("foo", false)
if err == nil {
t.Fatalf("Delete should have failed because the key foo did not exist. "+
"The response was: %v", resp)
}
}
func TestDeleteAll(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
c.Delete("fooDir", true)
}()
c.SetDir("foo", 5)
// test delete an empty dir
resp, err := c.DeleteDir("foo")
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Value == "") {
t.Fatalf("DeleteAll 1 failed: %#v", resp)
}
if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
t.Fatalf("DeleteAll 1 PrevNode failed: %#v", resp)
}
c.CreateDir("fooDir", 5)
c.Set("fooDir/foo", "bar", 5)
_, err = c.DeleteDir("fooDir")
if err == nil {
t.Fatal("should not able to delete a non-empty dir with deletedir")
}
resp, err = c.Delete("fooDir", true)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Value == "") {
t.Fatalf("DeleteAll 2 failed: %#v", resp)
}
if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
t.Fatalf("DeleteAll 2 PrevNode failed: %#v", resp)
}
resp, err = c.Delete("foo", true)
if err == nil {
t.Fatalf("DeleteAll should have failed because the key foo did not exist. "+
"The response was: %v", resp)
}
}

View File

@ -1,49 +0,0 @@
package etcd
import (
"encoding/json"
"fmt"
)
const (
ErrCodeEtcdNotReachable = 501
ErrCodeUnhandledHTTPStatus = 502
)
var (
errorMap = map[int]string{
ErrCodeEtcdNotReachable: "All the given peers are not reachable",
}
)
type EtcdError struct {
ErrorCode int `json:"errorCode"`
Message string `json:"message"`
Cause string `json:"cause,omitempty"`
Index uint64 `json:"index"`
}
func (e EtcdError) Error() string {
return fmt.Sprintf("%v: %v (%v) [%v]", e.ErrorCode, e.Message, e.Cause, e.Index)
}
func newError(errorCode int, cause string, index uint64) *EtcdError {
return &EtcdError{
ErrorCode: errorCode,
Message: errorMap[errorCode],
Cause: cause,
Index: index,
}
}
func handleError(b []byte) error {
etcdErr := new(EtcdError)
err := json.Unmarshal(b, etcdErr)
if err != nil {
logger.Warningf("cannot unmarshal etcd error: %v", err)
return err
}
return etcdErr
}

View File

@ -1,32 +0,0 @@
package etcd
// Get gets the file or directory associated with the given key.
// If the key points to a directory, files and directories under
// it will be returned in sorted or unsorted order, depending on
// the sort flag.
// If recursive is set to false, contents under child directories
// will not be returned.
// If recursive is set to true, all the contents will be returned.
func (c *Client) Get(key string, sort, recursive bool) (*Response, error) {
raw, err := c.RawGet(key, sort, recursive)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
func (c *Client) RawGet(key string, sort, recursive bool) (*RawResponse, error) {
var q bool
if c.config.Consistency == STRONG_CONSISTENCY {
q = true
}
ops := Options{
"recursive": recursive,
"sorted": sort,
"quorum": q,
}
return c.get(key, ops)
}
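// A minimal usage sketch; listDir is a hypothetical helper, not part of this
// package. It fetches a directory in sorted order without recursing and
// returns the keys of its immediate children.
func listDir(c *Client, dir string) ([]string, error) {
	resp, err := c.Get(dir, true, false)
	if err != nil {
		return nil, err
	}
	keys := make([]string, 0, len(resp.Node.Nodes))
	for _, n := range resp.Node.Nodes {
		keys = append(keys, n.Key)
	}
	return keys, nil
}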

View File

@ -1,131 +0,0 @@
package etcd
import (
"reflect"
"testing"
)
// cleanNode scrubs Expiration, ModifiedIndex and CreatedIndex of a node.
func cleanNode(n *Node) {
n.Expiration = nil
n.ModifiedIndex = 0
n.CreatedIndex = 0
}
// cleanResult scrubs a result object two levels deep of Expiration,
// ModifiedIndex and CreatedIndex.
func cleanResult(result *Response) {
// TODO(philips): make this recursive.
cleanNode(result.Node)
for i := range result.Node.Nodes {
cleanNode(result.Node.Nodes[i])
for j := range result.Node.Nodes[i].Nodes {
cleanNode(result.Node.Nodes[i].Nodes[j])
}
}
}
func TestGet(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
}()
c.Set("foo", "bar", 5)
result, err := c.Get("foo", false, false)
if err != nil {
t.Fatal(err)
}
if result.Node.Key != "/foo" || result.Node.Value != "bar" {
t.Fatalf("Get failed with %s %s %v", result.Node.Key, result.Node.Value, result.Node.TTL)
}
result, err = c.Get("goo", false, false)
if err == nil {
t.Fatalf("should not be able to get non-exist key")
}
}
func TestGetAll(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("fooDir", true)
}()
c.CreateDir("fooDir", 5)
c.Set("fooDir/k0", "v0", 5)
c.Set("fooDir/k1", "v1", 5)
// Return kv-pairs in sorted order
result, err := c.Get("fooDir", true, false)
if err != nil {
t.Fatal(err)
}
expected := Nodes{
&Node{
Key: "/fooDir/k0",
Value: "v0",
TTL: 5,
},
&Node{
Key: "/fooDir/k1",
Value: "v1",
TTL: 5,
},
}
cleanResult(result)
if !reflect.DeepEqual(result.Node.Nodes, expected) {
t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
}
// Test the `recursive` option
c.CreateDir("fooDir/childDir", 5)
c.Set("fooDir/childDir/k2", "v2", 5)
// Return kv-pairs in sorted order
result, err = c.Get("fooDir", true, true)
cleanResult(result)
if err != nil {
t.Fatal(err)
}
expected = Nodes{
&Node{
Key: "/fooDir/childDir",
Dir: true,
Nodes: Nodes{
&Node{
Key: "/fooDir/childDir/k2",
Value: "v2",
TTL: 5,
},
},
TTL: 5,
},
&Node{
Key: "/fooDir/k0",
Value: "v0",
TTL: 5,
},
&Node{
Key: "/fooDir/k1",
Value: "v1",
TTL: 5,
},
}
cleanResult(result)
if !reflect.DeepEqual(result.Node.Nodes, expected) {
t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
}
}

View File

@ -1,30 +0,0 @@
package etcd
import "encoding/json"
type Member struct {
ID string `json:"id"`
Name string `json:"name"`
PeerURLs []string `json:"peerURLs"`
ClientURLs []string `json:"clientURLs"`
}
type memberCollection []Member
func (c *memberCollection) UnmarshalJSON(data []byte) error {
d := struct {
Members []Member
}{}
if err := json.Unmarshal(data, &d); err != nil {
return err
}
if d.Members == nil {
*c = make([]Member, 0)
return nil
}
*c = d.Members
return nil
}

View File

@ -1,71 +0,0 @@
package etcd
import (
"encoding/json"
"reflect"
"testing"
)
func TestMemberCollectionUnmarshal(t *testing.T) {
tests := []struct {
body []byte
want memberCollection
}{
{
body: []byte(`{"members":[]}`),
want: memberCollection([]Member{}),
},
{
body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
want: memberCollection(
[]Member{
{
ID: "2745e2525fce8fe",
Name: "node3",
PeerURLs: []string{
"http://127.0.0.1:7003",
},
ClientURLs: []string{
"http://127.0.0.1:4003",
},
},
{
ID: "42134f434382925",
Name: "node1",
PeerURLs: []string{
"http://127.0.0.1:2380",
"http://127.0.0.1:7001",
},
ClientURLs: []string{
"http://127.0.0.1:2379",
"http://127.0.0.1:4001",
},
},
{
ID: "94088180e21eb87b",
Name: "node2",
PeerURLs: []string{
"http://127.0.0.1:7002",
},
ClientURLs: []string{
"http://127.0.0.1:4002",
},
},
},
),
},
}
for i, tt := range tests {
var got memberCollection
err := json.Unmarshal(tt.body, &got)
if err != nil {
t.Errorf("#%d: unexpected error: %v", i, err)
continue
}
if !reflect.DeepEqual(tt.want, got) {
t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.want, got)
}
}
}

View File

@ -1,72 +0,0 @@
package etcd
import (
"fmt"
"net/url"
"reflect"
)
type Options map[string]interface{}
// An internally-used data structure that represents a mapping
// between valid options and their kinds
type validOptions map[string]reflect.Kind
// Valid options for GET, PUT, POST, DELETE
// Using CAPITALIZED_UNDERSCORE to emphasize that these
// values are meant to be used as constants.
var (
VALID_GET_OPTIONS = validOptions{
"recursive": reflect.Bool,
"quorum": reflect.Bool,
"sorted": reflect.Bool,
"wait": reflect.Bool,
"waitIndex": reflect.Uint64,
}
VALID_PUT_OPTIONS = validOptions{
"prevValue": reflect.String,
"prevIndex": reflect.Uint64,
"prevExist": reflect.Bool,
"dir": reflect.Bool,
}
VALID_POST_OPTIONS = validOptions{}
VALID_DELETE_OPTIONS = validOptions{
"recursive": reflect.Bool,
"dir": reflect.Bool,
"prevValue": reflect.String,
"prevIndex": reflect.Uint64,
}
)
// toParameters converts options to a URL query string
func (ops Options) toParameters(validOps validOptions) (string, error) {
p := "?"
values := url.Values{}
if ops == nil {
return "", nil
}
for k, v := range ops {
// Check if the given option is valid (that it exists)
kind := validOps[k]
if kind == reflect.Invalid {
return "", fmt.Errorf("Invalid option: %v", k)
}
// Check if the given option is of the valid type
t := reflect.TypeOf(v)
if kind != t.Kind() {
return "", fmt.Errorf("Option %s should be of %v kind, not of %v kind.",
k, kind, t.Kind())
}
values.Set(k, fmt.Sprintf("%v", v))
}
p += values.Encode()
return p, nil
}
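// A minimal internal sketch; watchQuery is a hypothetical helper, not part of
// this package. It shows how a GET query string is assembled: every option is
// checked against VALID_GET_OPTIONS for both its name and its kind.
func watchQuery(waitIndex uint64) (string, error) {
	ops := Options{
		"wait":      true,
		"waitIndex": waitIndex,
		"recursive": true,
	}
	return ops.toParameters(VALID_GET_OPTIONS)
}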

View File

@ -1,403 +0,0 @@
package etcd
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
"strings"
"sync"
"time"
)
// Errors introduced by handling requests
var (
ErrRequestCancelled = errors.New("sending request is cancelled")
)
type RawRequest struct {
Method string
RelativePath string
Values url.Values
Cancel <-chan bool
}
// NewRawRequest returns a new RawRequest
func NewRawRequest(method, relativePath string, values url.Values, cancel <-chan bool) *RawRequest {
return &RawRequest{
Method: method,
RelativePath: relativePath,
Values: values,
Cancel: cancel,
}
}
// getCancelable issues a cancelable GET request
func (c *Client) getCancelable(key string, options Options,
cancel <-chan bool) (*RawResponse, error) {
logger.Debugf("get %s [%s]", key, c.cluster.pick())
p := keyToPath(key)
str, err := options.toParameters(VALID_GET_OPTIONS)
if err != nil {
return nil, err
}
p += str
req := NewRawRequest("GET", p, nil, cancel)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
// get issues a GET request
func (c *Client) get(key string, options Options) (*RawResponse, error) {
return c.getCancelable(key, options, nil)
}
// put issues a PUT request
func (c *Client) put(key string, value string, ttl uint64,
options Options) (*RawResponse, error) {
logger.Debugf("put %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.pick())
p := keyToPath(key)
str, err := options.toParameters(VALID_PUT_OPTIONS)
if err != nil {
return nil, err
}
p += str
req := NewRawRequest("PUT", p, buildValues(value, ttl), nil)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
// post issues a POST request
func (c *Client) post(key string, value string, ttl uint64) (*RawResponse, error) {
logger.Debugf("post %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.pick())
p := keyToPath(key)
req := NewRawRequest("POST", p, buildValues(value, ttl), nil)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
// delete issues a DELETE request
func (c *Client) delete(key string, options Options) (*RawResponse, error) {
logger.Debugf("delete %s [%s]", key, c.cluster.pick())
p := keyToPath(key)
str, err := options.toParameters(VALID_DELETE_OPTIONS)
if err != nil {
return nil, err
}
p += str
req := NewRawRequest("DELETE", p, nil, nil)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
// SendRequest sends an HTTP request and returns a Response as defined by etcd
func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) {
var req *http.Request
var resp *http.Response
var httpPath string
var err error
var respBody []byte
var numReqs = 1
checkRetry := c.CheckRetry
if checkRetry == nil {
checkRetry = DefaultCheckRetry
}
cancelled := make(chan bool, 1)
reqLock := new(sync.Mutex)
if rr.Cancel != nil {
cancelRoutine := make(chan bool)
defer close(cancelRoutine)
go func() {
select {
case <-rr.Cancel:
cancelled <- true
logger.Debug("send.request is cancelled")
case <-cancelRoutine:
return
}
// Keep canceling the request until this goroutine is stopped,
// because we have no way of knowing whether a cancel succeeded.
for {
reqLock.Lock()
c.httpClient.Transport.(*http.Transport).CancelRequest(req)
reqLock.Unlock()
select {
case <-time.After(100 * time.Millisecond):
case <-cancelRoutine:
return
}
}
}()
}
// If we connect to a follower and consistency is required, retry until
// we connect to a leader
sleep := 25 * time.Millisecond
maxSleep := time.Second
for attempt := 0; ; attempt++ {
if attempt > 0 {
select {
case <-cancelled:
return nil, ErrRequestCancelled
case <-time.After(sleep):
sleep = sleep * 2
if sleep > maxSleep {
sleep = maxSleep
}
}
}
logger.Debug("Connecting to etcd: attempt ", attempt+1, " for ", rr.RelativePath)
// get httpPath if not set
if httpPath == "" {
httpPath = c.getHttpPath(rr.RelativePath)
}
// Return a cURL command if curlChan is set
if c.cURLch != nil {
command := fmt.Sprintf("curl -X %s %s", rr.Method, httpPath)
for key, value := range rr.Values {
command += fmt.Sprintf(" -d %s=%s", key, value[0])
}
if c.credentials != nil {
command += fmt.Sprintf(" -u %s", c.credentials.username)
}
c.sendCURL(command)
}
logger.Debug("send.request.to ", httpPath, " | method ", rr.Method)
req, err := func() (*http.Request, error) {
reqLock.Lock()
defer reqLock.Unlock()
if rr.Values == nil {
if req, err = http.NewRequest(rr.Method, httpPath, nil); err != nil {
return nil, err
}
} else {
body := strings.NewReader(rr.Values.Encode())
if req, err = http.NewRequest(rr.Method, httpPath, body); err != nil {
return nil, err
}
req.Header.Set("Content-Type",
"application/x-www-form-urlencoded; param=value")
}
return req, nil
}()
if err != nil {
return nil, err
}
if c.credentials != nil {
req.SetBasicAuth(c.credentials.username, c.credentials.password)
}
resp, err = c.httpClient.Do(req)
// clear previous httpPath
httpPath = ""
defer func() {
if resp != nil {
resp.Body.Close()
}
}()
// If the request was cancelled, return ErrRequestCancelled directly
select {
case <-cancelled:
return nil, ErrRequestCancelled
default:
}
numReqs++
// network error, change a machine!
if err != nil {
logger.Debug("network error: ", err.Error())
lastResp := http.Response{}
if checkErr := checkRetry(c.cluster, numReqs, lastResp, err); checkErr != nil {
return nil, checkErr
}
c.cluster.failure()
continue
}
// if there is no error, it should receive response
logger.Debug("recv.response.from ", httpPath)
if validHttpStatusCode[resp.StatusCode] {
// try to read byte code and break the loop
respBody, err = ioutil.ReadAll(resp.Body)
if err == nil {
logger.Debug("recv.success ", httpPath)
break
}
// A ReadAll error may be caused by a cancelled request
select {
case <-cancelled:
return nil, ErrRequestCancelled
default:
}
if err == io.ErrUnexpectedEOF {
// underlying connection was closed prematurely, probably by timeout
// TODO: empty body or unexpectedEOF can cause http.Transport to get hosed;
// this allows the client to detect that and take evasive action. Need
// to revisit once code.google.com/p/go/issues/detail?id=8648 gets fixed.
respBody = []byte{}
break
}
}
if resp.StatusCode == http.StatusTemporaryRedirect {
u, err := resp.Location()
if err != nil {
logger.Warning(err)
} else {
// set httpPath for following redirection
httpPath = u.String()
}
resp.Body.Close()
continue
}
if checkErr := checkRetry(c.cluster, numReqs, *resp,
errors.New("Unexpected HTTP status code")); checkErr != nil {
return nil, checkErr
}
resp.Body.Close()
}
r := &RawResponse{
StatusCode: resp.StatusCode,
Body: respBody,
Header: resp.Header,
}
return r, nil
}
// DefaultCheckRetry defines the retrying behaviour for bad HTTP requests
// If we have retried twice the number of machines, stop retrying.
// If status code is InternalServerError, sleep for 200ms.
func DefaultCheckRetry(cluster *Cluster, numReqs int, lastResp http.Response,
err error) error {
if numReqs > 2*len(cluster.Machines) {
errStr := fmt.Sprintf("failed to propose on members %v twice [last error: %v]", cluster.Machines, err)
return newError(ErrCodeEtcdNotReachable, errStr, 0)
}
if isEmptyResponse(lastResp) {
// always retry if it failed to get response from one machine
return nil
}
if !shouldRetry(lastResp) {
body := []byte("nil")
if lastResp.Body != nil {
if b, err := ioutil.ReadAll(lastResp.Body); err == nil {
body = b
}
}
errStr := fmt.Sprintf("unhandled http status [%s] with body [%s]", http.StatusText(lastResp.StatusCode), body)
return newError(ErrCodeUnhandledHTTPStatus, errStr, 0)
}
// sleep some time and expect leader election finish
time.Sleep(time.Millisecond * 200)
logger.Warning("bad response status code", lastResp.StatusCode)
return nil
}
func isEmptyResponse(r http.Response) bool { return r.StatusCode == 0 }
// shouldRetry returns whether the response deserves a retry.
func shouldRetry(r http.Response) bool {
// TODO: only retry when the cluster is in leader election
// We cannot do it exactly because etcd doesn't support it well.
return r.StatusCode == http.StatusInternalServerError
}
func (c *Client) getHttpPath(s ...string) string {
fullPath := c.cluster.pick() + "/" + version
for _, seg := range s {
fullPath = fullPath + "/" + seg
}
return fullPath
}
// buildValues builds a url.Values map according to the given value and ttl
func buildValues(value string, ttl uint64) url.Values {
v := url.Values{}
if value != "" {
v.Set("value", value)
}
if ttl > 0 {
v.Set("ttl", fmt.Sprintf("%v", ttl))
}
return v
}
// keyToPath converts a key string to an HTTP path, excluding the version prefix and including URL escaping.
// for example: key[foo] -> path[keys/foo]
// key[/%z] -> path[keys/%25z]
// key[/] -> path[keys/]
func keyToPath(key string) string {
// URL-escape our key, except for slashes
p := strings.Replace(url.QueryEscape(path.Join("keys", key)), "%2F", "/", -1)
// Corner case: if the key is "/", "//", etc.,
// path.Join will strip the trailing "/",
// so we need to add it back.
if p == "keys" {
p = "keys/"
}
return p
}

View File

@ -1,22 +0,0 @@
package etcd
import "testing"
func TestKeyToPath(t *testing.T) {
tests := []struct {
key string
wpath string
}{
{"", "keys/"},
{"foo", "keys/foo"},
{"foo/bar", "keys/foo/bar"},
{"%z", "keys/%25z"},
{"/", "keys/"},
}
for i, tt := range tests {
path := keyToPath(tt.key)
if path != tt.wpath {
t.Errorf("#%d: path = %s, want %s", i, path, tt.wpath)
}
}
}

View File

@ -1,89 +0,0 @@
package etcd
import (
"encoding/json"
"net/http"
"strconv"
"time"
)
const (
rawResponse = iota
normalResponse
)
type responseType int
type RawResponse struct {
StatusCode int
Body []byte
Header http.Header
}
var (
validHttpStatusCode = map[int]bool{
http.StatusCreated: true,
http.StatusOK: true,
http.StatusBadRequest: true,
http.StatusNotFound: true,
http.StatusPreconditionFailed: true,
http.StatusForbidden: true,
}
)
// Unmarshal parses RawResponse and stores the result in Response
func (rr *RawResponse) Unmarshal() (*Response, error) {
if rr.StatusCode != http.StatusOK && rr.StatusCode != http.StatusCreated {
return nil, handleError(rr.Body)
}
resp := new(Response)
err := json.Unmarshal(rr.Body, resp)
if err != nil {
return nil, err
}
// attach index and term to response
resp.EtcdIndex, _ = strconv.ParseUint(rr.Header.Get("X-Etcd-Index"), 10, 64)
resp.RaftIndex, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Index"), 10, 64)
resp.RaftTerm, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Term"), 10, 64)
return resp, nil
}
type Response struct {
Action string `json:"action"`
Node *Node `json:"node"`
PrevNode *Node `json:"prevNode,omitempty"`
EtcdIndex uint64 `json:"etcdIndex"`
RaftIndex uint64 `json:"raftIndex"`
RaftTerm uint64 `json:"raftTerm"`
}
type Node struct {
Key string `json:"key,omitempty"`
Value string `json:"value,omitempty"`
Dir bool `json:"dir,omitempty"`
Expiration *time.Time `json:"expiration,omitempty"`
TTL int64 `json:"ttl,omitempty"`
Nodes Nodes `json:"nodes,omitempty"`
ModifiedIndex uint64 `json:"modifiedIndex,omitempty"`
CreatedIndex uint64 `json:"createdIndex,omitempty"`
}
type Nodes []*Node
// interfaces for sorting
func (ns Nodes) Len() int {
return len(ns)
}
func (ns Nodes) Less(i, j int) bool {
return ns[i].Key < ns[j].Key
}
func (ns Nodes) Swap(i, j int) {
ns[i], ns[j] = ns[j], ns[i]
}

View File

@ -1,42 +0,0 @@
package etcd
import (
"fmt"
"testing"
)
func TestSetCurlChan(t *testing.T) {
c := NewClient(nil)
c.OpenCURL()
defer func() {
c.Delete("foo", true)
}()
_, err := c.Set("foo", "bar", 5)
if err != nil {
t.Fatal(err)
}
expected := fmt.Sprintf("curl -X PUT %s/v2/keys/foo -d value=bar -d ttl=5",
c.cluster.pick())
actual := c.RecvCURL()
if expected != actual {
t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
actual, expected)
}
c.SetConsistency(STRONG_CONSISTENCY)
_, err = c.Get("foo", false, false)
if err != nil {
t.Fatal(err)
}
expected = fmt.Sprintf("curl -X GET %s/v2/keys/foo?quorum=true&recursive=false&sorted=false",
c.cluster.pick())
actual = c.RecvCURL()
if expected != actual {
t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
actual, expected)
}
}

View File

@ -1,137 +0,0 @@
package etcd
// Set sets the given key to the given value.
// It will create a new key-value pair or replace the old one.
// It will not replace an existing directory.
func (c *Client) Set(key string, value string, ttl uint64) (*Response, error) {
raw, err := c.RawSet(key, value, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// SetDir sets the given key to a directory.
// It will create a new directory or replace an old key-value pair with a directory.
// It will not replace an existing directory.
func (c *Client) SetDir(key string, ttl uint64) (*Response, error) {
raw, err := c.RawSetDir(key, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// CreateDir creates a directory. It succeeds only if
// the given key does not yet exist.
func (c *Client) CreateDir(key string, ttl uint64) (*Response, error) {
raw, err := c.RawCreateDir(key, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// UpdateDir updates the given directory. It succeeds only if the
// given key already exists.
func (c *Client) UpdateDir(key string, ttl uint64) (*Response, error) {
raw, err := c.RawUpdateDir(key, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// Create creates a file with the given value under the given key. It succeeds
// only if the given key does not yet exist.
func (c *Client) Create(key string, value string, ttl uint64) (*Response, error) {
raw, err := c.RawCreate(key, value, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// CreateInOrder creates a file with a key that's guaranteed to be higher than other
// keys in the given directory. It is useful for creating queues.
func (c *Client) CreateInOrder(dir string, value string, ttl uint64) (*Response, error) {
raw, err := c.RawCreateInOrder(dir, value, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
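// A minimal usage sketch; pushJob is a hypothetical helper, not part of this
// package. Keys created by CreateInOrder sort in creation order, so a consumer
// can Get("queue", true, false) and process the returned nodes front to back.
func pushJob(c *Client, payload string) (string, error) {
	resp, err := c.CreateInOrder("queue", payload, 0)
	if err != nil {
		return "", err
	}
	return resp.Node.Key, nil
}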
// Update updates the given key to the given value. It succeeds only if the
// given key already exists.
func (c *Client) Update(key string, value string, ttl uint64) (*Response, error) {
raw, err := c.RawUpdate(key, value, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
func (c *Client) RawUpdateDir(key string, ttl uint64) (*RawResponse, error) {
ops := Options{
"prevExist": true,
"dir": true,
}
return c.put(key, "", ttl, ops)
}
func (c *Client) RawCreateDir(key string, ttl uint64) (*RawResponse, error) {
ops := Options{
"prevExist": false,
"dir": true,
}
return c.put(key, "", ttl, ops)
}
func (c *Client) RawSet(key string, value string, ttl uint64) (*RawResponse, error) {
return c.put(key, value, ttl, nil)
}
func (c *Client) RawSetDir(key string, ttl uint64) (*RawResponse, error) {
ops := Options{
"dir": true,
}
return c.put(key, "", ttl, ops)
}
func (c *Client) RawUpdate(key string, value string, ttl uint64) (*RawResponse, error) {
ops := Options{
"prevExist": true,
}
return c.put(key, value, ttl, ops)
}
func (c *Client) RawCreate(key string, value string, ttl uint64) (*RawResponse, error) {
ops := Options{
"prevExist": false,
}
return c.put(key, value, ttl, ops)
}
func (c *Client) RawCreateInOrder(dir string, value string, ttl uint64) (*RawResponse, error) {
return c.post(dir, value, ttl)
}

View File

@ -1,241 +0,0 @@
package etcd
import (
"testing"
)
func TestSet(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
}()
resp, err := c.Set("foo", "bar", 5)
if err != nil {
t.Fatal(err)
}
if resp.Node.Key != "/foo" || resp.Node.Value != "bar" || resp.Node.TTL != 5 {
t.Fatalf("Set 1 failed: %#v", resp)
}
if resp.PrevNode != nil {
t.Fatalf("Set 1 PrevNode failed: %#v", resp)
}
resp, err = c.Set("foo", "bar2", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/foo" && resp.Node.Value == "bar2" && resp.Node.TTL == 5) {
t.Fatalf("Set 2 failed: %#v", resp)
}
if resp.PrevNode.Key != "/foo" || resp.PrevNode.Value != "bar" || resp.Node.TTL != 5 {
t.Fatalf("Set 2 PrevNode failed: %#v", resp)
}
}
func TestUpdate(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
c.Delete("nonexistent", true)
}()
resp, err := c.Set("foo", "bar", 5)
if err != nil {
t.Fatal(err)
}
// This should succeed.
resp, err = c.Update("foo", "wakawaka", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "update" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
t.Fatalf("Update 1 failed: %#v", resp)
}
if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.Node.TTL == 5) {
t.Fatalf("Update 1 prevValue failed: %#v", resp)
}
// This should fail because the key does not exist.
resp, err = c.Update("nonexistent", "whatever", 5)
if err == nil {
t.Fatalf("The key %v did not exist, so the update should have failed."+
"The response was: %#v", resp.Node.Key, resp)
}
}
func TestCreate(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("newKey", true)
}()
newKey := "/newKey"
newValue := "/newValue"
// This should succeed
resp, err := c.Create(newKey, newValue, 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "create" && resp.Node.Key == newKey &&
resp.Node.Value == newValue && resp.Node.TTL == 5) {
t.Fatalf("Create 1 failed: %#v", resp)
}
if resp.PrevNode != nil {
t.Fatalf("Create 1 PrevNode failed: %#v", resp)
}
// This should fail, because the key is already there
resp, err = c.Create(newKey, newValue, 5)
if err == nil {
t.Fatalf("The key %v did exist, so the creation should have failed."+
"The response was: %#v", resp.Node.Key, resp)
}
}
func TestCreateInOrder(t *testing.T) {
c := NewClient(nil)
dir := "/queue"
defer func() {
c.DeleteDir(dir)
}()
var firstKey, secondKey string
resp, err := c.CreateInOrder(dir, "1", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "create" && resp.Node.Value == "1" && resp.Node.TTL == 5) {
t.Fatalf("Create 1 failed: %#v", resp)
}
firstKey = resp.Node.Key
resp, err = c.CreateInOrder(dir, "2", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "create" && resp.Node.Value == "2" && resp.Node.TTL == 5) {
t.Fatalf("Create 2 failed: %#v", resp)
}
secondKey = resp.Node.Key
if firstKey >= secondKey {
t.Fatalf("Expected first key to be greater than second key, but %s is not greater than %s",
firstKey, secondKey)
}
}
func TestSetDir(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
c.Delete("fooDir", true)
}()
resp, err := c.CreateDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/fooDir" && resp.Node.Value == "" && resp.Node.TTL == 5) {
t.Fatalf("SetDir 1 failed: %#v", resp)
}
if resp.PrevNode != nil {
t.Fatalf("SetDir 1 PrevNode failed: %#v", resp)
}
// This should fail because /fooDir already points to a directory
resp, err = c.CreateDir("/fooDir", 5)
if err == nil {
t.Fatalf("fooDir already points to a directory, so SetDir should have failed."+
"The response was: %#v", resp)
}
_, err = c.Set("foo", "bar", 5)
if err != nil {
t.Fatal(err)
}
// This should succeed
// It should replace the key
resp, err = c.SetDir("foo", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/foo" && resp.Node.Value == "" && resp.Node.TTL == 5) {
t.Fatalf("SetDir 2 failed: %#v", resp)
}
if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.PrevNode.TTL == 5) {
t.Fatalf("SetDir 2 failed: %#v", resp)
}
}
func TestUpdateDir(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("fooDir", true)
}()
resp, err := c.CreateDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
// This should succeed.
resp, err = c.UpdateDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "update" && resp.Node.Key == "/fooDir" &&
resp.Node.Value == "" && resp.Node.TTL == 5) {
t.Fatalf("UpdateDir 1 failed: %#v", resp)
}
if !(resp.PrevNode.Key == "/fooDir" && resp.PrevNode.Dir == true && resp.PrevNode.TTL == 5) {
t.Fatalf("UpdateDir 1 PrevNode failed: %#v", resp)
}
// This should fail because the key does not exist.
resp, err = c.UpdateDir("nonexistentDir", 5)
if err == nil {
t.Fatalf("The key %v did not exist, so the update should have failed."+
"The response was: %#v", resp.Node.Key, resp)
}
}
func TestCreateDir(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("fooDir", true)
}()
// This should succeed
resp, err := c.CreateDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "create" && resp.Node.Key == "/fooDir" &&
resp.Node.Value == "" && resp.Node.TTL == 5) {
t.Fatalf("CreateDir 1 failed: %#v", resp)
}
if resp.PrevNode != nil {
t.Fatalf("CreateDir 1 PrevNode failed: %#v", resp)
}
// This should fail, because the key is already there
resp, err = c.CreateDir("fooDir", 5)
if err == nil {
t.Fatalf("The key %v did exist, so the creation should have failed."+
"The response was: %#v", resp.Node.Key, resp)
}
}

View File

@ -1,6 +0,0 @@
package etcd
const (
version = "v2"
packageVersion = "v2.0.0+git"
)

View File

@ -1,103 +0,0 @@
package etcd
import (
"errors"
)
// Errors introduced by the Watch command.
var (
ErrWatchStoppedByUser = errors.New("Watch stopped by the user via stop channel")
)
// If recursive is set to true the watch returns the first change under the given
// prefix since the given index.
//
// If recursive is set to false the watch returns the first change to the given key
// since the given index.
//
// To watch for the latest change, set waitIndex = 0.
//
// If a receiver channel is given, it becomes a long-term watch: each change is
// sent to the channel (blocking until the value is received), after which the
// watch continues on the given prefix. If a stop channel is given, the client
// can end the long-term watch by signalling the stop channel.
func (c *Client) Watch(prefix string, waitIndex uint64, recursive bool,
receiver chan *Response, stop chan bool) (*Response, error) {
logger.Debugf("watch %s [%s]", prefix, c.cluster.Leader)
if receiver == nil {
raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
defer close(receiver)
for {
raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
if err != nil {
return nil, err
}
resp, err := raw.Unmarshal()
if err != nil {
return nil, err
}
waitIndex = resp.Node.ModifiedIndex + 1
receiver <- resp
}
}
func (c *Client) RawWatch(prefix string, waitIndex uint64, recursive bool,
receiver chan *RawResponse, stop chan bool) (*RawResponse, error) {
logger.Debugf("rawWatch %s [%s]", prefix, c.cluster.Leader)
if receiver == nil {
return c.watchOnce(prefix, waitIndex, recursive, stop)
}
for {
raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
if err != nil {
return nil, err
}
resp, err := raw.Unmarshal()
if err != nil {
return nil, err
}
waitIndex = resp.Node.ModifiedIndex + 1
receiver <- raw
}
}
// watchOnce is a helper that returns when there is a change under the given
// prefix, or when the watch is cancelled via the stop channel.
func (c *Client) watchOnce(key string, waitIndex uint64, recursive bool, stop chan bool) (*RawResponse, error) {
options := Options{
"wait": true,
}
if waitIndex > 0 {
options["waitIndex"] = waitIndex
}
if recursive {
options["recursive"] = true
}
resp, err := c.getCancelable(key, options, stop)
if err == ErrRequestCancelled {
return nil, ErrWatchStoppedByUser
}
return resp, err
}
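The long-term watch described in the comment above pairs a receiver channel with a stop channel. A sketch of that pattern, assuming a running etcd server and an illustrative `/watch_demo` prefix:

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	c := etcd.NewClient(nil)

	receiver := make(chan *etcd.Response, 10)
	stop := make(chan bool, 1)

	// Consume a few changes in the background, then signal the stop channel.
	go func() {
		for i := 0; i < 3; i++ {
			resp := <-receiver
			fmt.Println("changed:", resp.Node.Key, "=", resp.Node.Value)
		}
		stop <- true
	}()

	// Blocks, delivering every change under /watch_demo to receiver,
	// until the stop channel is signalled.
	_, err := c.Watch("/watch_demo", 0, true, receiver, stop)
	if err == etcd.ErrWatchStoppedByUser {
		fmt.Println("watch stopped")
	} else if err != nil {
		log.Fatal(err)
	}
}
```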

View File

@ -1,119 +0,0 @@
package etcd
import (
"fmt"
"runtime"
"testing"
"time"
)
func TestWatch(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("watch_foo", true)
}()
go setHelper("watch_foo", "bar", c)
resp, err := c.Watch("watch_foo", 0, false, nil, nil)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
t.Fatalf("Watch 1 failed: %#v", resp)
}
go setHelper("watch_foo", "bar", c)
resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, false, nil, nil)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
t.Fatalf("Watch 2 failed: %#v", resp)
}
routineNum := runtime.NumGoroutine()
ch := make(chan *Response, 10)
stop := make(chan bool, 1)
go setLoop("watch_foo", "bar", c)
go receiver(ch, stop)
_, err = c.Watch("watch_foo", 0, false, ch, stop)
if err != ErrWatchStoppedByUser {
t.Fatalf("Watch returned a non-user stop error")
}
if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
}
}
func TestWatchAll(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("watch_foo", true)
}()
go setHelper("watch_foo/foo", "bar", c)
resp, err := c.Watch("watch_foo", 0, true, nil, nil)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
t.Fatalf("WatchAll 1 failed: %#v", resp)
}
go setHelper("watch_foo/foo", "bar", c)
resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, true, nil, nil)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
t.Fatalf("WatchAll 2 failed: %#v", resp)
}
ch := make(chan *Response, 10)
stop := make(chan bool, 1)
routineNum := runtime.NumGoroutine()
go setLoop("watch_foo/foo", "bar", c)
go receiver(ch, stop)
_, err = c.Watch("watch_foo", 0, true, ch, stop)
if err != ErrWatchStoppedByUser {
t.Fatalf("Watch returned a non-user stop error")
}
if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
}
}
func setHelper(key, value string, c *Client) {
time.Sleep(time.Second)
c.Set(key, value, 100)
}
func setLoop(key, value string, c *Client) {
time.Sleep(time.Second)
for i := 0; i < 10; i++ {
newValue := fmt.Sprintf("%s_%v", value, i)
c.Set(key, newValue, 100)
time.Sleep(time.Second / 10)
}
}
func receiver(c chan *Response, stop chan bool) {
for i := 0; i < 10; i++ {
<-c
}
stop <- true
}

View File

@ -0,0 +1,7 @@
y.output
# ignore intellij files
.idea
*.iml
*.ipr
*.iws

View File

@ -0,0 +1,3 @@
sudo: false
language: go
go: 1.5

View File

@ -2,89 +2,89 @@ Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
Contributor and that particular Contributors Contribution.
1.3. "Contribution"
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. "Covered Software"
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. "Executable Form"
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. "Larger Work"
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. "License"
1.8. “License”
means this document.
1.9. "Licensable"
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. "Modifications"
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. "Secondary License"
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
@ -100,59 +100,57 @@ Mozilla Public License, version 2.0
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
b. for infringements caused by: (i) Your and any other third partys
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
@ -165,12 +163,11 @@ Mozilla Public License, version 2.0
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients rights in the Source Code Form.
3.2. Distribution of Executable Form
@ -182,40 +179,39 @@ Mozilla Public License, version 2.0
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
@ -224,14 +220,14 @@ Mozilla Public License, version 2.0
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
@ -239,22 +235,21 @@ Mozilla Public License, version 2.0
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
@ -263,16 +258,16 @@ Mozilla Public License, version 2.0
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
@ -284,29 +279,27 @@ Mozilla Public License, version 2.0
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
shall not apply to liability for death or personal injury resulting from such
partys negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a partys ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
@ -320,24 +313,23 @@ Mozilla Public License, version 2.0
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
@ -348,15 +340,15 @@ Exhibit A - Source Code Form License Notice
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
This Source Code Form is Incompatible
With Secondary Licenses, as defined by
the Mozilla Public License, v. 2.0.

View File

@ -0,0 +1,17 @@
TEST?=./...
default: test
fmt: generate
go fmt ./...
test: generate
go test $(TEST) $(TESTARGS)
generate:
go generate ./...
updatedeps:
go get -u golang.org/x/tools/cmd/stringer
.PHONY: default generate test updatedeps

View File

@ -0,0 +1,94 @@
# HCL
[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
HCL (HashiCorp Configuration Language) is a configuration language built
by HashiCorp. The goal of HCL is to build a structured configuration language
that is both human and machine friendly for use with command-line tools, but
specifically targeted towards DevOps tools, servers, etc.
HCL is also fully JSON compatible. That is, JSON can be used as completely
valid input to a system expecting HCL. This helps make systems
interoperable with other systems.
HCL is heavily inspired by
[libucl](https://github.com/vstakhov/libucl),
nginx configuration, and other similar formats.
## Why?
A common question when viewing HCL is to ask the question: why not
JSON, YAML, etc.?
Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
used a variety of configuration languages from full programming languages
such as Ruby to complete data structure languages such as JSON. What we
learned is that some people wanted human-friendly configuration languages
and some people wanted machine-friendly languages.
JSON strikes a nice balance here, but is fairly verbose and, most
importantly, doesn't support comments. With YAML, we found that beginners
had a really hard time determining what the actual structure was, and
ended up guessing more often than not whether to use a hyphen, a colon, etc.
to represent some configuration key.
Full programming languages such as Ruby enable complex behavior
a configuration language shouldn't usually allow, and also force
people to learn some subset of Ruby.
Because of this, we decided to create our own configuration language
that is JSON-compatible. Our configuration language (HCL) is designed
to be written and modified by humans. The API for HCL allows JSON
as an input so that it is also machine-friendly (machines can generate
JSON instead of trying to generate HCL).
Our goal with HCL is not to alienate other configuration languages.
It is instead to provide HCL as a specialized language for our tools,
and JSON as the interoperability layer.
## Syntax
For a complete grammar, please see the parser itself. A high-level overview
of the syntax and grammar is listed here.
* Single line comments start with `#` or `//`
* Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
are not allowed. A multi-line comment (also known as a block comment)
terminates at the first `*/` found.
* Values are assigned with the syntax `key = value` (whitespace doesn't
matter). The value can be a primitive (string, number, boolean), an
object, or a list.
* Strings are double-quoted and can contain any UTF-8 characters.
Example: `"Hello, World"`
* Numbers are assumed to be base 10. If you prefix a number with 0x,
it is treated as a hexadecimal. If it is prefixed with 0, it is
treated as an octal. Numbers can be in scientific notation: "1e10".
* Boolean values: `true`, `false`
* Arrays are made by wrapping values in `[]`. Example:
`["foo", "bar", 42]`. Arrays can contain primitives
and other arrays, but cannot contain objects. Objects must
use the block syntax shown below.
Objects and nested objects are created using the structure shown below:
```
variable "ami" {
description = "the AMI to use"
}
```
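From Go, the same structures are read with `hcl.Decode`, which accepts either HCL or JSON input. A minimal sketch, assuming the upstream `github.com/hashicorp/hcl` import path (the copy vendored in this commit lives under `Godeps/_workspace/`); the config string and key names are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

const config = `
# comments are allowed
listen_addr = ":8080"
verbose     = true

variable "ami" {
  description = "the AMI to use"
}
`

func main() {
	var out map[string]interface{}
	if err := hcl.Decode(&out, config); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out["listen_addr"], out["verbose"])
}
```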
## Thanks
Thanks to:
* [@vstakhov](https://github.com/vstakhov) - The original libucl parser
and syntax that HCL was based off of.
* [@fatih](https://github.com/fatih) - The rewritten HCL parser
in pure Go (no goyacc) and support for a printer.

View File

@ -0,0 +1,627 @@
package hcl
import (
"errors"
"fmt"
"reflect"
"sort"
"strconv"
"strings"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/ast"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/parser"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/token"
)
// tagName is the struct tag used to configure HCL decoding of struct fields
const tagName = "hcl"
var (
// nodeType holds a reference to the type of ast.Node
nodeType reflect.Type = findNodeType()
)
// Decode reads the given input and decodes it into the structure
// given by `out`.
func Decode(out interface{}, in string) error {
obj, err := Parse(in)
if err != nil {
return err
}
return DecodeObject(out, obj)
}
// DecodeObject is a lower-level version of Decode. It decodes a
// raw Object into the given output.
func DecodeObject(out interface{}, n ast.Node) error {
val := reflect.ValueOf(out)
if val.Kind() != reflect.Ptr {
return errors.New("result must be a pointer")
}
// If we have the file, we really decode the root node
if f, ok := n.(*ast.File); ok {
n = f.Node
}
var d decoder
return d.decode("root", n, val.Elem())
}
type decoder struct {
stack []reflect.Kind
}
func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
k := result
// If we have an interface with a valid value, we use that
// for the check.
if result.Kind() == reflect.Interface {
elem := result.Elem()
if elem.IsValid() {
k = elem
}
}
// Push current onto stack unless it is an interface.
if k.Kind() != reflect.Interface {
d.stack = append(d.stack, k.Kind())
// Schedule a pop
defer func() {
d.stack = d.stack[:len(d.stack)-1]
}()
}
switch k.Kind() {
case reflect.Bool:
return d.decodeBool(name, node, result)
case reflect.Float64:
return d.decodeFloat(name, node, result)
case reflect.Int:
return d.decodeInt(name, node, result)
case reflect.Interface:
// When we see an interface, we make our own thing
return d.decodeInterface(name, node, result)
case reflect.Map:
return d.decodeMap(name, node, result)
case reflect.Ptr:
return d.decodePtr(name, node, result)
case reflect.Slice:
return d.decodeSlice(name, node, result)
case reflect.String:
return d.decodeString(name, node, result)
case reflect.Struct:
return d.decodeStruct(name, node, result)
default:
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
}
}
}
func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
switch n := node.(type) {
case *ast.LiteralType:
if n.Token.Type == token.BOOL {
v, err := strconv.ParseBool(n.Token.Text)
if err != nil {
return err
}
result.Set(reflect.ValueOf(v))
return nil
}
}
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: unknown type %T", name, node),
}
}
func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
switch n := node.(type) {
case *ast.LiteralType:
if n.Token.Type == token.FLOAT {
v, err := strconv.ParseFloat(n.Token.Text, 64)
if err != nil {
return err
}
result.Set(reflect.ValueOf(v))
return nil
}
}
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: unknown type %T", name, node),
}
}
func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
switch n := node.(type) {
case *ast.LiteralType:
switch n.Token.Type {
case token.NUMBER:
v, err := strconv.ParseInt(n.Token.Text, 0, 0)
if err != nil {
return err
}
result.Set(reflect.ValueOf(int(v)))
return nil
case token.STRING:
v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
if err != nil {
return err
}
result.Set(reflect.ValueOf(int(v)))
return nil
}
}
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: unknown type %T", name, node),
}
}
func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
// When we see an ast.Node, we retain the value to enable deferred decoding.
// Very useful in situations where we want to preserve ast.Node information
// like Pos
if result.Type() == nodeType && result.CanSet() {
result.Set(reflect.ValueOf(node))
return nil
}
var set reflect.Value
redecode := true
// For testing types, ObjectType should just be treated as a list. We
// set this to a temporary var because we want to pass in the real node.
testNode := node
if ot, ok := node.(*ast.ObjectType); ok {
testNode = ot.List
}
switch n := testNode.(type) {
case *ast.ObjectList:
// If we're at the root or we're directly within a slice, then we
// decode objects into map[string]interface{}, otherwise we decode
// them into lists.
if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
var temp map[string]interface{}
tempVal := reflect.ValueOf(temp)
result := reflect.MakeMap(
reflect.MapOf(
reflect.TypeOf(""),
tempVal.Type().Elem()))
set = result
} else {
var temp []map[string]interface{}
tempVal := reflect.ValueOf(temp)
result := reflect.MakeSlice(
reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
set = result
}
case *ast.ObjectType:
// If we're at the root or we're directly within a slice, then we
// decode objects into map[string]interface{}, otherwise we decode
// them into lists.
if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
var temp map[string]interface{}
tempVal := reflect.ValueOf(temp)
result := reflect.MakeMap(
reflect.MapOf(
reflect.TypeOf(""),
tempVal.Type().Elem()))
set = result
} else {
var temp []map[string]interface{}
tempVal := reflect.ValueOf(temp)
result := reflect.MakeSlice(
reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
set = result
}
case *ast.ListType:
var temp []interface{}
tempVal := reflect.ValueOf(temp)
result := reflect.MakeSlice(
reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
set = result
case *ast.LiteralType:
switch n.Token.Type {
case token.BOOL:
var result bool
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
case token.FLOAT:
var result float64
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
case token.NUMBER:
var result int
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
case token.STRING, token.HEREDOC:
set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
default:
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
}
}
default:
return fmt.Errorf(
"%s: cannot decode into interface: %T",
name, node)
}
// Set the result to what it's supposed to be, then reset
// result so we don't reflect into this method anymore.
result.Set(set)
if redecode {
// Revisit the node so that we can use the newly instantiated
// thing and populate it.
if err := d.decode(name, node, result); err != nil {
return err
}
}
return nil
}
func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
if item, ok := node.(*ast.ObjectItem); ok {
node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
}
if ot, ok := node.(*ast.ObjectType); ok {
node = ot.List
}
n, ok := node.(*ast.ObjectList)
if !ok {
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
}
}
// If we have an interface, then we can address the interface,
// but not the slice itself, so get the element but set the interface
set := result
if result.Kind() == reflect.Interface {
result = result.Elem()
}
resultType := result.Type()
resultElemType := resultType.Elem()
resultKeyType := resultType.Key()
if resultKeyType.Kind() != reflect.String {
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: map must have string keys", name),
}
}
// Make a map if it is nil
resultMap := result
if result.IsNil() {
resultMap = reflect.MakeMap(
reflect.MapOf(resultKeyType, resultElemType))
}
// Go through each element and decode it.
done := make(map[string]struct{})
for _, item := range n.Items {
if item.Val == nil {
continue
}
// Get the key we're dealing with, which is the first item
keyStr := item.Keys[0].Token.Value().(string)
// If we've already processed this key, then ignore it
if _, ok := done[keyStr]; ok {
continue
}
// Determine the value. If we have more than one key, then we
// get the objectlist of only these keys.
itemVal := item.Val
if len(item.Keys) > 1 {
itemVal = n.Filter(keyStr)
done[keyStr] = struct{}{}
}
// Make the field name
fieldName := fmt.Sprintf("%s.%s", name, keyStr)
// Get the key/value as reflection values
key := reflect.ValueOf(keyStr)
val := reflect.Indirect(reflect.New(resultElemType))
// If we have a pre-existing value in the map, use that
oldVal := resultMap.MapIndex(key)
if oldVal.IsValid() {
val.Set(oldVal)
}
// Decode!
if err := d.decode(fieldName, itemVal, val); err != nil {
return err
}
// Set the value on the map
resultMap.SetMapIndex(key, val)
}
// Set the final map if we can
set.Set(resultMap)
return nil
}
func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
// Create an element of the concrete (non pointer) type and decode
// into that. Then set the value of the pointer to this type.
resultType := result.Type()
resultElemType := resultType.Elem()
val := reflect.New(resultElemType)
if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
return err
}
result.Set(val)
return nil
}
func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
// If we have an interface, then we can address the interface,
// but not the slice itself, so get the element but set the interface
set := result
if result.Kind() == reflect.Interface {
result = result.Elem()
}
// Create the slice if it is nil
resultType := result.Type()
resultElemType := resultType.Elem()
if result.IsNil() {
resultSliceType := reflect.SliceOf(resultElemType)
result = reflect.MakeSlice(
resultSliceType, 0, 0)
}
// Figure out the items we'll be copying into the slice
var items []ast.Node
switch n := node.(type) {
case *ast.ObjectList:
items = make([]ast.Node, len(n.Items))
for i, item := range n.Items {
items[i] = item
}
case *ast.ObjectType:
items = []ast.Node{n}
case *ast.ListType:
items = n.List
default:
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("unknown slice type: %T", node),
}
}
for i, item := range items {
fieldName := fmt.Sprintf("%s[%d]", name, i)
// Decode
val := reflect.Indirect(reflect.New(resultElemType))
if err := d.decode(fieldName, item, val); err != nil {
return err
}
// Append it onto the slice
result = reflect.Append(result, val)
}
set.Set(result)
return nil
}
func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
switch n := node.(type) {
case *ast.LiteralType:
switch n.Token.Type {
case token.NUMBER:
result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
return nil
case token.STRING, token.HEREDOC:
result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
return nil
}
}
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: unknown type for string %T", name, node),
}
}
func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
var item *ast.ObjectItem
if it, ok := node.(*ast.ObjectItem); ok {
item = it
node = it.Val
}
if ot, ok := node.(*ast.ObjectType); ok {
node = ot.List
}
list, ok := node.(*ast.ObjectList)
if !ok {
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
}
}
// This slice will keep track of all the structs we'll be decoding.
// There can be more than one struct if there are embedded structs
// that are squashed.
structs := make([]reflect.Value, 1, 5)
structs[0] = result
// Compile the list of all the fields that we're going to be decoding
// from all the structs.
fields := make(map[*reflect.StructField]reflect.Value)
for len(structs) > 0 {
structVal := structs[0]
structs = structs[1:]
structType := structVal.Type()
for i := 0; i < structType.NumField(); i++ {
fieldType := structType.Field(i)
if fieldType.Anonymous {
fieldKind := fieldType.Type.Kind()
if fieldKind != reflect.Struct {
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: unsupported type to struct: %s",
fieldType.Name, fieldKind),
}
}
// We have an embedded field. We "squash" the fields down
// if specified in the tag.
squash := false
tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
for _, tag := range tagParts[1:] {
if tag == "squash" {
squash = true
break
}
}
if squash {
structs = append(
structs, result.FieldByName(fieldType.Name))
continue
}
}
// Normal struct field, store it away
fields[&fieldType] = structVal.Field(i)
}
}
usedKeys := make(map[string]struct{})
decodedFields := make([]string, 0, len(fields))
decodedFieldsVal := make([]reflect.Value, 0)
unusedKeysVal := make([]reflect.Value, 0)
for fieldType, field := range fields {
if !field.IsValid() {
// This should never happen
panic("field is not valid")
}
// If we can't set the field, then it is unexported or something,
// and we just continue onwards.
if !field.CanSet() {
continue
}
fieldName := fieldType.Name
tagValue := fieldType.Tag.Get(tagName)
tagParts := strings.SplitN(tagValue, ",", 2)
if len(tagParts) >= 2 {
switch tagParts[1] {
case "decodedFields":
decodedFieldsVal = append(decodedFieldsVal, field)
continue
case "key":
if item == nil {
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: %s asked for 'key', impossible",
name, fieldName),
}
}
field.SetString(item.Keys[0].Token.Value().(string))
continue
case "unusedKeys":
unusedKeysVal = append(unusedKeysVal, field)
continue
}
}
if tagParts[0] != "" {
fieldName = tagParts[0]
}
// Determine the element we'll use to decode. If it is a single
// match (only object with the field), then we decode it exactly.
// If it is a prefix match, then we decode the matches.
filter := list.Filter(fieldName)
prefixMatches := filter.Children()
matches := filter.Elem()
if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
continue
}
// Track the used key
usedKeys[fieldName] = struct{}{}
// Create the field name and decode. We range over the elements
// because we actually want the value.
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
if len(prefixMatches.Items) > 0 {
if err := d.decode(fieldName, prefixMatches, field); err != nil {
return err
}
}
for _, match := range matches.Items {
var decodeNode ast.Node = match.Val
if ot, ok := decodeNode.(*ast.ObjectType); ok {
decodeNode = &ast.ObjectList{Items: ot.List.Items}
}
if err := d.decode(fieldName, decodeNode, field); err != nil {
return err
}
}
decodedFields = append(decodedFields, fieldType.Name)
}
if len(decodedFieldsVal) > 0 {
// Sort it so that it is deterministic
sort.Strings(decodedFields)
for _, v := range decodedFieldsVal {
v.Set(reflect.ValueOf(decodedFields))
}
}
return nil
}
// findNodeType returns the type of ast.Node
func findNodeType() reflect.Type {
var nodeContainer struct {
Node ast.Node
}
value := reflect.ValueOf(nodeContainer).FieldByName("Node")
return value.Type()
}
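The struct tags that `decodeStruct` understands (`",key"`, `",decodedFields"`, `",unusedKeys"`, and `,squash` on embedded structs) control how blocks map onto Go fields. A small sketch, assuming the upstream `github.com/hashicorp/hcl` import path, that decodes repeated named blocks into a slice and captures each block label through the `",key"` tag (the `Service`/`Config` types and the input are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

type Service struct {
	Name string `hcl:",key"` // receives the block label, e.g. "web"
	Addr string
	Port int
}

type Config struct {
	Services []Service `hcl:"service,expand"`
}

const input = `
service "web" {
  addr = "127.0.0.1"
  port = 80
}

service "api" {
  addr = "127.0.0.1"
  port = 8080
}
`

func main() {
	var cfg Config
	if err := hcl.Decode(&cfg, input); err != nil {
		log.Fatal(err)
	}
	for _, s := range cfg.Services {
		fmt.Printf("%s -> %s:%d\n", s.Name, s.Addr, s.Port)
	}
}
```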

View File

@ -0,0 +1,663 @@
package hcl
import (
"io/ioutil"
"path/filepath"
"reflect"
"testing"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/ast"
)
func TestDecode_interface(t *testing.T) {
cases := []struct {
File string
Err bool
Out interface{}
}{
{
"basic.hcl",
false,
map[string]interface{}{
"foo": "bar",
"bar": "${file(\"bing/bong.txt\")}",
},
},
{
"basic_squish.hcl",
false,
map[string]interface{}{
"foo": "bar",
"bar": "${file(\"bing/bong.txt\")}",
"foo-bar": "baz",
},
},
{
"empty.hcl",
false,
map[string]interface{}{
"resource": []map[string]interface{}{
map[string]interface{}{
"foo": []map[string]interface{}{
map[string]interface{}{},
},
},
},
},
},
{
"tfvars.hcl",
false,
map[string]interface{}{
"regularvar": "Should work",
"map.key1": "Value",
"map.key2": "Other value",
},
},
{
"escape.hcl",
false,
map[string]interface{}{
"foo": "bar\"baz\\n",
},
},
{
"interpolate_escape.hcl",
false,
map[string]interface{}{
"foo": "${file(\"bing/bong.txt\")}",
},
},
{
"float.hcl",
false,
map[string]interface{}{
"a": 1.02,
},
},
{
"multiline_bad.hcl",
true,
nil,
},
{
"multiline_no_marker.hcl",
true,
nil,
},
{
"multiline.hcl",
false,
map[string]interface{}{"foo": "bar\nbaz\n"},
},
{
"multiline_no_eof.hcl",
false,
map[string]interface{}{"foo": "bar\nbaz\n", "key": "value"},
},
{
"multiline.json",
false,
map[string]interface{}{"foo": "bar\nbaz"},
},
{
"scientific.json",
false,
map[string]interface{}{
"a": 1e-10,
"b": 1e+10,
"c": 1e10,
"d": 1.2e-10,
"e": 1.2e+10,
"f": 1.2e10,
},
},
{
"scientific.hcl",
false,
map[string]interface{}{
"a": 1e-10,
"b": 1e+10,
"c": 1e10,
"d": 1.2e-10,
"e": 1.2e+10,
"f": 1.2e10,
},
},
{
"terraform_heroku.hcl",
false,
map[string]interface{}{
"name": "terraform-test-app",
"config_vars": []map[string]interface{}{
map[string]interface{}{
"FOO": "bar",
},
},
},
},
{
"structure_multi.hcl",
false,
map[string]interface{}{
"foo": []map[string]interface{}{
map[string]interface{}{
"baz": []map[string]interface{}{
map[string]interface{}{"key": 7},
},
},
map[string]interface{}{
"bar": []map[string]interface{}{
map[string]interface{}{"key": 12},
},
},
},
},
},
{
"structure_multi.json",
false,
map[string]interface{}{
"foo": []map[string]interface{}{
map[string]interface{}{
"baz": []map[string]interface{}{
map[string]interface{}{"key": 7},
},
},
map[string]interface{}{
"bar": []map[string]interface{}{
map[string]interface{}{"key": 12},
},
},
},
},
},
{
"structure_list.hcl",
false,
map[string]interface{}{
"foo": []map[string]interface{}{
map[string]interface{}{
"key": 7,
},
map[string]interface{}{
"key": 12,
},
},
},
},
{
"structure_list.json",
false,
map[string]interface{}{
"foo": []map[string]interface{}{
map[string]interface{}{
"key": 7,
},
map[string]interface{}{
"key": 12,
},
},
},
},
{
"structure_list_deep.json",
false,
map[string]interface{}{
"bar": []map[string]interface{}{
map[string]interface{}{
"foo": []map[string]interface{}{
map[string]interface{}{
"name": "terraform_example",
"ingress": []map[string]interface{}{
map[string]interface{}{
"from_port": 22,
},
map[string]interface{}{
"from_port": 80,
},
},
},
},
},
},
},
},
{
"nested_block_comment.hcl",
false,
map[string]interface{}{
"bar": "value",
},
},
{
"unterminated_block_comment.hcl",
true,
nil,
},
{
"object_list.json",
false,
map[string]interface{}{
"resource": []map[string]interface{}{
map[string]interface{}{
"aws_instance": []map[string]interface{}{
map[string]interface{}{
"db": []map[string]interface{}{
map[string]interface{}{
"vpc": "foo",
"provisioner": []map[string]interface{}{
map[string]interface{}{
"file": []map[string]interface{}{
map[string]interface{}{
"source": "foo",
"destination": "bar",
},
},
},
},
},
},
},
},
},
},
},
},
}
for _, tc := range cases {
t.Logf("Testing: %s", tc.File)
d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File))
if err != nil {
t.Fatalf("err: %s", err)
}
var out interface{}
err = Decode(&out, string(d))
if (err != nil) != tc.Err {
t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
}
if !reflect.DeepEqual(out, tc.Out) {
t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
}
}
}
func TestDecode_equal(t *testing.T) {
cases := []struct {
One, Two string
}{
{
"basic.hcl",
"basic.json",
},
{
"float.hcl",
"float.json",
},
/*
{
"structure.hcl",
"structure.json",
},
*/
{
"structure.hcl",
"structure_flat.json",
},
{
"terraform_heroku.hcl",
"terraform_heroku.json",
},
}
for _, tc := range cases {
p1 := filepath.Join(fixtureDir, tc.One)
p2 := filepath.Join(fixtureDir, tc.Two)
d1, err := ioutil.ReadFile(p1)
if err != nil {
t.Fatalf("err: %s", err)
}
d2, err := ioutil.ReadFile(p2)
if err != nil {
t.Fatalf("err: %s", err)
}
var i1, i2 interface{}
err = Decode(&i1, string(d1))
if err != nil {
t.Fatalf("err: %s", err)
}
err = Decode(&i2, string(d2))
if err != nil {
t.Fatalf("err: %s", err)
}
if !reflect.DeepEqual(i1, i2) {
t.Fatalf(
"%s != %s\n\n%#v\n\n%#v",
tc.One, tc.Two,
i1, i2)
}
}
}
func TestDecode_flatMap(t *testing.T) {
var val map[string]map[string]string
err := Decode(&val, testReadFile(t, "structure_flatmap.hcl"))
if err != nil {
t.Fatalf("err: %s", err)
}
expected := map[string]map[string]string{
"foo": map[string]string{
"foo": "bar",
"key": "7",
},
}
if !reflect.DeepEqual(val, expected) {
t.Fatalf("Actual: %#v\n\nExpected: %#v", val, expected)
}
}
func TestDecode_structure(t *testing.T) {
type V struct {
Key int
Foo string
}
var actual V
err := Decode(&actual, testReadFile(t, "flat.hcl"))
if err != nil {
t.Fatalf("err: %s", err)
}
expected := V{
Key: 7,
Foo: "bar",
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected)
}
}
func TestDecode_structurePtr(t *testing.T) {
type V struct {
Key int
Foo string
}
var actual *V
err := Decode(&actual, testReadFile(t, "flat.hcl"))
if err != nil {
t.Fatalf("err: %s", err)
}
expected := &V{
Key: 7,
Foo: "bar",
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected)
}
}
func TestDecode_structureArray(t *testing.T) {
// This test is extracted from a failure in Consul (consul.io),
// hence the interesting structure naming.
type KeyPolicyType string
type KeyPolicy struct {
Prefix string `hcl:",key"`
Policy KeyPolicyType
}
type Policy struct {
Keys []KeyPolicy `hcl:"key,expand"`
}
expected := Policy{
Keys: []KeyPolicy{
KeyPolicy{
Prefix: "",
Policy: "read",
},
KeyPolicy{
Prefix: "foo/",
Policy: "write",
},
KeyPolicy{
Prefix: "foo/bar/",
Policy: "read",
},
KeyPolicy{
Prefix: "foo/bar/baz",
Policy: "deny",
},
},
}
files := []string{
"decode_policy.hcl",
"decode_policy.json",
}
for _, f := range files {
var actual Policy
err := Decode(&actual, testReadFile(t, f))
if err != nil {
t.Fatalf("Input: %s\n\nerr: %s", f, err)
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
}
}
}
func TestDecode_sliceExpand(t *testing.T) {
type testInner struct {
Name string `hcl:",key"`
Key string
}
type testStruct struct {
Services []testInner `hcl:"service,expand"`
}
expected := testStruct{
Services: []testInner{
testInner{
Name: "my-service-0",
Key: "value",
},
testInner{
Name: "my-service-1",
Key: "value",
},
},
}
files := []string{
"slice_expand.hcl",
}
for _, f := range files {
t.Logf("Testing: %s", f)
var actual testStruct
err := Decode(&actual, testReadFile(t, f))
if err != nil {
t.Fatalf("Input: %s\n\nerr: %s", f, err)
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
}
}
}
func TestDecode_structureMap(t *testing.T) {
// This test is extracted from a failure in Terraform (terraform.io),
// hence the interesting structure naming.
type hclVariable struct {
Default interface{}
Description string
Fields []string `hcl:",decodedFields"`
}
type rawConfig struct {
Variable map[string]hclVariable
}
expected := rawConfig{
Variable: map[string]hclVariable{
"foo": hclVariable{
Default: "bar",
Description: "bar",
Fields: []string{"Default", "Description"},
},
"amis": hclVariable{
Default: []map[string]interface{}{
map[string]interface{}{
"east": "foo",
},
},
Fields: []string{"Default"},
},
},
}
files := []string{
"decode_tf_variable.hcl",
"decode_tf_variable.json",
}
for _, f := range files {
t.Logf("Testing: %s", f)
var actual rawConfig
err := Decode(&actual, testReadFile(t, f))
if err != nil {
t.Fatalf("Input: %s\n\nerr: %s", f, err)
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
}
}
}
func TestDecode_interfaceNonPointer(t *testing.T) {
var value interface{}
err := Decode(value, testReadFile(t, "basic_int_string.hcl"))
if err == nil {
t.Fatal("should error")
}
}
func TestDecode_intString(t *testing.T) {
var value struct {
Count int
}
err := Decode(&value, testReadFile(t, "basic_int_string.hcl"))
if err != nil {
t.Fatalf("err: %s", err)
}
if value.Count != 3 {
t.Fatalf("bad: %#v", value.Count)
}
}
func TestDecode_Node(t *testing.T) {
// given
var value struct {
Content ast.Node
Nested struct {
Content ast.Node
}
}
content := `
content {
hello = "world"
}
`
// when
err := Decode(&value, content)
// then
if err != nil {
t.Errorf("unable to decode content, %v", err)
return
}
// verify ast.Node can be decoded later
var v map[string]interface{}
err = DecodeObject(&v, value.Content)
if err != nil {
t.Errorf("unable to decode content, %v", err)
return
}
if v["hello"] != "world" {
t.Errorf("expected mapping to be returned")
}
}
func TestDecode_NestedNode(t *testing.T) {
// given
var value struct {
Nested struct {
Content ast.Node
}
}
content := `
nested "content" {
hello = "world"
}
`
// when
err := Decode(&value, content)
// then
if err != nil {
t.Errorf("unable to decode content, %v", err)
return
}
// verify ast.Node can be decoded later
var v map[string]interface{}
err = DecodeObject(&v, value.Nested.Content)
if err != nil {
t.Errorf("unable to decode content, %v", err)
return
}
if v["hello"] != "world" {
t.Errorf("expected mapping to be returned")
}
}

11
Godeps/_workspace/src/github.com/hashicorp/hcl/hcl.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
// Package hcl decodes HCL into usable Go structures.
//
// HCL input can come in either pure HCL format or JSON format.
// It can be parsed into an AST, and then decoded into a structure,
// or it can be decoded directly from a string into a structure.
//
// If you choose to parse HCL into a raw AST, the benefit is that you
// can write custom visitor implementations to implement custom
// semantic checks. By default, HCL does not perform any semantic
// checks.
package hcl
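// Editor's sketch (not part of the upstream source): the two usage paths the
// package comment above describes, using only Decode and DecodeObject as
// exercised by the decoder tests in this commit. The struct, variable names
// and input string are hypothetical.
//
//	type config struct {
//		Content ast.Node // keep the raw AST around for later decoding
//	}
//
//	var c config
//	if err := Decode(&c, `content { hello = "world" }`); err != nil {
//		// handle decode error
//	}
//
//	var m map[string]interface{}
//	if err := DecodeObject(&m, c.Content); err != nil {
//		// handle decode error
//	}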

View File

@ -0,0 +1,204 @@
// Package ast declares the types used to represent syntax trees for HCL
// (HashiCorp Configuration Language)
package ast
import (
"strings"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/token"
)
// Node is an element in the abstract syntax tree.
type Node interface {
node()
Pos() token.Pos
}
func (File) node() {}
func (ObjectList) node() {}
func (ObjectKey) node() {}
func (ObjectItem) node() {}
func (Comment) node() {}
func (CommentGroup) node() {}
func (ObjectType) node() {}
func (LiteralType) node() {}
func (ListType) node() {}
// File represents a single HCL file
type File struct {
Node Node // usually a *ObjectList
Comments []*CommentGroup // list of all comments in the source
}
func (f *File) Pos() token.Pos {
return f.Node.Pos()
}
// ObjectList represents a list of ObjectItems. An HCL file itself is an
// ObjectList.
type ObjectList struct {
Items []*ObjectItem
}
func (o *ObjectList) Add(item *ObjectItem) {
o.Items = append(o.Items, item)
}
// Filter filters out the objects with the given key list as a prefix.
//
// The returned list of objects contains ObjectItems where the keys have
// this prefix already stripped off. This might result in objects with
// zero-length key lists if they have no children.
//
// If no matches are found, an empty ObjectList (non-nil) is returned.
func (o *ObjectList) Filter(keys ...string) *ObjectList {
var result ObjectList
for _, item := range o.Items {
// If there aren't enough keys, then ignore this
if len(item.Keys) < len(keys) {
continue
}
match := true
for i, key := range item.Keys[:len(keys)] {
key := key.Token.Value().(string)
if key != keys[i] && !strings.EqualFold(key, keys[i]) {
match = false
break
}
}
if !match {
continue
}
// Strip off the prefix from the children
newItem := *item
newItem.Keys = newItem.Keys[len(keys):]
result.Add(&newItem)
}
return &result
}
// Children returns further nested objects (key length > 0) within this
// ObjectList. This should be used with Filter to get at child items.
func (o *ObjectList) Children() *ObjectList {
var result ObjectList
for _, item := range o.Items {
if len(item.Keys) > 0 {
result.Add(item)
}
}
return &result
}
// Elem returns items in the list that are direct element assignments
// (key length == 0). This should be used with Filter to get at elements.
func (o *ObjectList) Elem() *ObjectList {
var result ObjectList
for _, item := range o.Items {
if len(item.Keys) == 0 {
result.Add(item)
}
}
return &result
}
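// Editor's sketch (illustrative only, not vendored code): Filter, Children
// and Elem are typically chained to navigate nested blocks such as
// `resource "aws_instance" "web" { ami = "foo" }`. Filter strips the matched
// key prefix, Children keeps items that still have keys left, and Elem keeps
// plain assignments. The variable `list` is an assumed *ObjectList.
//
//	instances := list.Filter("resource", "aws_instance") // keys left: "web"
//	blocks := instances.Children()                        // the "web" block
//	attrs := instances.Elem()                             // direct assignments, none here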
func (o *ObjectList) Pos() token.Pos {
// Pos returns the position of the first item in the list.
return o.Items[0].Pos()
}
// ObjectItem represents an HCL Object Item. An item is represented with a key
// (or keys). It can be an assignment or an object (both normal and nested).
type ObjectItem struct {
// Keys has length one if the item is an assignment. If it's a
// nested object it can be longer than one. In that case "assign" is
// invalid, as there are no assignments for a nested object.
Keys []*ObjectKey
// assign contains the position of "=", if any
Assign token.Pos
// val is the item itself. It can be an object, list, number, bool or a
// string. If the key length is larger than one, val can only be of type
// Object.
Val Node
LeadComment *CommentGroup // associated lead comment
LineComment *CommentGroup // associated line comment
}
func (o *ObjectItem) Pos() token.Pos {
return o.Keys[0].Pos()
}
// ObjectKey represents a key, which is either an identifier or a string.
type ObjectKey struct {
Token token.Token
}
func (o *ObjectKey) Pos() token.Pos {
return o.Token.Pos
}
// LiteralType represents a literal of basic type. Valid types are:
// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
type LiteralType struct {
Token token.Token
// associated line comment, only when used in a list
LineComment *CommentGroup
}
func (l *LiteralType) Pos() token.Pos {
return l.Token.Pos
}
// ListType represents an HCL list type
type ListType struct {
Lbrack token.Pos // position of "["
Rbrack token.Pos // position of "]"
List []Node // the elements in lexical order
}
func (l *ListType) Pos() token.Pos {
return l.Lbrack
}
func (l *ListType) Add(node Node) {
l.List = append(l.List, node)
}
// ObjectType represents an HCL Object Type
type ObjectType struct {
Lbrace token.Pos // position of "{"
Rbrace token.Pos // position of "}"
List *ObjectList // the nodes in lexical order
}
func (o *ObjectType) Pos() token.Pos {
return o.Lbrace
}
// Comment node represents a single //, #-style or /*-style comment
type Comment struct {
Start token.Pos // position of / or #
Text string
}
func (c *Comment) Pos() token.Pos {
return c.Start
}
// CommentGroup node represents a sequence of comments with no other tokens and
// no empty lines between.
type CommentGroup struct {
List []*Comment // len(List) > 0
}
func (c *CommentGroup) Pos() token.Pos {
return c.List[0].Pos()
}

View File

@ -0,0 +1,200 @@
package ast
import (
"reflect"
"strings"
"testing"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/token"
)
func TestObjectListFilter(t *testing.T) {
var cases = []struct {
Filter []string
Input []*ObjectItem
Output []*ObjectItem
}{
{
[]string{"foo"},
[]*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{
Token: token.Token{Type: token.STRING, Text: `"foo"`},
},
},
},
},
[]*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{},
},
},
},
{
[]string{"foo"},
[]*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
},
},
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
},
},
},
[]*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
},
},
},
},
}
for _, tc := range cases {
input := &ObjectList{Items: tc.Input}
expected := &ObjectList{Items: tc.Output}
if actual := input.Filter(tc.Filter...); !reflect.DeepEqual(actual, expected) {
t.Fatalf("in order: input, expected, actual\n\n%#v\n\n%#v\n\n%#v", input, expected, actual)
}
}
}
func TestWalk(t *testing.T) {
items := []*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
},
Val: &LiteralType{Token: token.Token{Type: token.STRING, Text: `"example"`}},
},
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
},
},
}
node := &ObjectList{Items: items}
order := []string{
"*ast.ObjectList",
"*ast.ObjectItem",
"*ast.ObjectKey",
"*ast.ObjectKey",
"*ast.LiteralType",
"*ast.ObjectItem",
"*ast.ObjectKey",
}
count := 0
Walk(node, func(n Node) (Node, bool) {
if n == nil {
return n, false
}
typeName := reflect.TypeOf(n).String()
if order[count] != typeName {
t.Errorf("expected '%s' got: '%s'", order[count], typeName)
}
count++
return n, true
})
}
func TestWalkEquality(t *testing.T) {
items := []*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
},
},
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
},
},
}
node := &ObjectList{Items: items}
rewritten := Walk(node, func(n Node) (Node, bool) { return n, true })
newNode, ok := rewritten.(*ObjectList)
if !ok {
t.Fatalf("expected Objectlist, got %T", rewritten)
}
if !reflect.DeepEqual(node, newNode) {
t.Fatal("rewritten node is not equal to the given node")
}
if len(newNode.Items) != 2 {
t.Error("expected newNode length 2, got: %d", len(newNode.Items))
}
expected := []string{
`"foo"`,
`"bar"`,
}
for i, item := range newNode.Items {
if len(item.Keys) != 1 {
t.Error("expected keys newNode length 1, got: %d", len(item.Keys))
}
if item.Keys[0].Token.Text != expected[i] {
t.Errorf("expected key %s, got %s", expected[i], item.Keys[0].Token.Text)
}
if item.Val != nil {
t.Errorf("expected item value should be nil")
}
}
}
func TestWalkRewrite(t *testing.T) {
items := []*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
},
},
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
},
},
}
node := &ObjectList{Items: items}
suffix := "_example"
node = Walk(node, func(n Node) (Node, bool) {
switch i := n.(type) {
case *ObjectKey:
i.Token.Text = i.Token.Text + suffix
n = i
}
return n, true
}).(*ObjectList)
Walk(node, func(n Node) (Node, bool) {
switch i := n.(type) {
case *ObjectKey:
if !strings.HasSuffix(i.Token.Text, suffix) {
t.Errorf("Token '%s' should have suffix: %s", i.Token.Text, suffix)
}
}
return n, true
})
}

View File

@ -0,0 +1,52 @@
package ast
import "fmt"
// WalkFunc describes a function to be called for each node during a Walk. The
// returned node can be used to rewrite the AST. Walking stops if the returned
// bool is false.
type WalkFunc func(Node) (Node, bool)
// Walk traverses an AST in depth-first order: it starts by calling fn(node);
// node must not be nil. If the bool returned by fn is true, Walk invokes fn
// recursively for each of the non-nil children of node, followed by a call of
// fn(nil). The node returned by fn can be used to rewrite the node that was
// passed to it.
func Walk(node Node, fn WalkFunc) Node {
rewritten, ok := fn(node)
if !ok {
return rewritten
}
switch n := node.(type) {
case *File:
n.Node = Walk(n.Node, fn)
case *ObjectList:
for i, item := range n.Items {
n.Items[i] = Walk(item, fn).(*ObjectItem)
}
case *ObjectKey:
// nothing to do
case *ObjectItem:
for i, k := range n.Keys {
n.Keys[i] = Walk(k, fn).(*ObjectKey)
}
if n.Val != nil {
n.Val = Walk(n.Val, fn)
}
case *LiteralType:
// nothing to do
case *ListType:
for i, l := range n.List {
n.List[i] = Walk(l, fn)
}
case *ObjectType:
n.List = Walk(n.List, fn).(*ObjectList)
default:
// should we panic here?
fmt.Printf("unknown type: %T\n", n)
}
fn(nil)
return rewritten
}
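// Editor's sketch (illustrative only): rewriting nodes with Walk, mirroring
// TestWalkRewrite in this commit. The variable `root` is an assumed Node.
//
//	rewritten := Walk(root, func(n Node) (Node, bool) {
//		if k, ok := n.(*ObjectKey); ok {
//			k.Token.Text += "_suffix"
//			return k, true
//		}
//		return n, true
//	})
//	_ = rewritten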

View File

@ -0,0 +1,17 @@
package parser
import (
"fmt"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/token"
)
// PosError is a parse error that contains a position.
type PosError struct {
Pos token.Pos
Err error
}
func (e *PosError) Error() string {
return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
}

View File

@ -0,0 +1,9 @@
package parser
import (
"testing"
)
func TestPosError_impl(t *testing.T) {
var _ error = new(PosError)
}

View File

@ -0,0 +1,417 @@
// Package parser implements a parser for HCL (HashiCorp Configuration
// Language)
package parser
import (
"errors"
"fmt"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/ast"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/scanner"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/token"
)
type Parser struct {
sc *scanner.Scanner
// Last read token
tok token.Token
commaPrev token.Token
comments []*ast.CommentGroup
leadComment *ast.CommentGroup // last lead comment
lineComment *ast.CommentGroup // last line comment
enableTrace bool
indent int
n int // buffer size (max = 1)
}
func newParser(src []byte) *Parser {
return &Parser{
sc: scanner.New(src),
}
}
// Parse parses the given source and returns the abstract syntax tree.
func Parse(src []byte) (*ast.File, error) {
p := newParser(src)
return p.Parse()
}
var errEofToken = errors.New("EOF token found")
// Parse parses the given source and returns the abstract syntax tree.
func (p *Parser) Parse() (*ast.File, error) {
f := &ast.File{}
var err, scerr error
p.sc.Error = func(pos token.Pos, msg string) {
scerr = &PosError{Pos: pos, Err: errors.New(msg)}
}
f.Node, err = p.objectList()
if scerr != nil {
return nil, scerr
}
if err != nil {
return nil, err
}
f.Comments = p.comments
return f, nil
}
func (p *Parser) objectList() (*ast.ObjectList, error) {
defer un(trace(p, "ParseObjectList"))
node := &ast.ObjectList{}
for {
n, err := p.objectItem()
if err == errEofToken {
break // we are finished
}
// we don't return a nil node, because the caller might want to use the
// already collected items.
if err != nil {
return node, err
}
node.Add(n)
}
return node, nil
}
func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
endline = p.tok.Pos.Line
// count the end line if it's a multiline comment, i.e. starting with /*
if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
// don't use range here - no need to decode Unicode code points
for i := 0; i < len(p.tok.Text); i++ {
if p.tok.Text[i] == '\n' {
endline++
}
}
}
comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
p.tok = p.sc.Scan()
return
}
func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
var list []*ast.Comment
endline = p.tok.Pos.Line
for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
var comment *ast.Comment
comment, endline = p.consumeComment()
list = append(list, comment)
}
// add comment group to the comments list
comments = &ast.CommentGroup{List: list}
p.comments = append(p.comments, comments)
return
}
// objectItem parses a single object item
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
defer un(trace(p, "ParseObjectItem"))
keys, err := p.objectKey()
if err != nil {
return nil, err
}
o := &ast.ObjectItem{
Keys: keys,
}
if p.leadComment != nil {
o.LeadComment = p.leadComment
p.leadComment = nil
}
switch p.tok.Type {
case token.ASSIGN:
o.Assign = p.tok.Pos
o.Val, err = p.object()
if err != nil {
return nil, err
}
case token.LBRACE:
o.Val, err = p.objectType()
if err != nil {
return nil, err
}
}
// do a look-ahead for line comment
p.scan()
if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
o.LineComment = p.lineComment
p.lineComment = nil
}
p.unscan()
return o, nil
}
// objectKey parses an object key and returns an ObjectKey AST
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
keyCount := 0
keys := make([]*ast.ObjectKey, 0)
for {
tok := p.scan()
switch tok.Type {
case token.EOF:
return nil, errEofToken
case token.ASSIGN:
// assignment or object only, but not nested objects. This is not
// allowed: `foo bar = {}`
if keyCount > 1 {
return nil, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
}
}
if keyCount == 0 {
return nil, &PosError{
Pos: p.tok.Pos,
Err: errors.New("no object keys found!"),
}
}
return keys, nil
case token.LBRACE:
// object
return keys, nil
case token.IDENT, token.STRING:
keyCount++
keys = append(keys, &ast.ObjectKey{Token: p.tok})
case token.ILLEGAL:
fmt.Println("illegal")
default:
return nil, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
}
}
}
}
// object parses any type of value, such as a number, bool, string, object or
// list.
func (p *Parser) object() (ast.Node, error) {
defer un(trace(p, "ParseType"))
tok := p.scan()
switch tok.Type {
case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
return p.literalType()
case token.LBRACE:
return p.objectType()
case token.LBRACK:
return p.listType()
case token.COMMENT:
// implement comment
case token.EOF:
return nil, errEofToken
}
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("Unknown token: %+v", tok),
}
}
// objectType parses an object type and returns an ObjectType AST
func (p *Parser) objectType() (*ast.ObjectType, error) {
defer un(trace(p, "ParseObjectType"))
// we assume that the currently scanned token is a LBRACE
o := &ast.ObjectType{
Lbrace: p.tok.Pos,
}
l, err := p.objectList()
// if we hit RBRACE, we are good to go (it means we parsed all items); if it's
// not an RBRACE, it's a syntax error and we just return it.
if err != nil && p.tok.Type != token.RBRACE {
return nil, err
}
o.List = l
o.Rbrace = p.tok.Pos // advanced via parseObjectList
return o, nil
}
// listType parses a list type and returns a ListType AST
func (p *Parser) listType() (*ast.ListType, error) {
defer un(trace(p, "ParseListType"))
// we assume that the currently scanned token is a LBRACK
l := &ast.ListType{
Lbrack: p.tok.Pos,
}
needComma := false
for {
tok := p.scan()
switch tok.Type {
case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
if needComma {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("unexpected token: %s. Expecting %s", tok.Type, token.COMMA),
}
}
node, err := p.literalType()
if err != nil {
return nil, err
}
l.Add(node)
needComma = true
case token.COMMA:
// get next list item or we are at the end
// do a look-ahead for line comment
p.scan()
if p.lineComment != nil {
lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
if ok {
lit.LineComment = p.lineComment
l.List[len(l.List)-1] = lit
p.lineComment = nil
}
}
p.unscan()
needComma = false
continue
case token.BOOL:
// TODO(arslan) should we support? not supported by HCL yet
case token.LBRACK:
// TODO(arslan) should we support nested lists? Even though it's
// written in README of HCL, it's not a part of the grammar
// (not defined in parse.y)
case token.RBRACK:
// finished
l.Rbrack = p.tok.Pos
return l, nil
default:
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
}
}
}
}
// literalType parses a literal type and returns a LiteralType AST
func (p *Parser) literalType() (*ast.LiteralType, error) {
defer un(trace(p, "ParseLiteral"))
return &ast.LiteralType{
Token: p.tok,
}, nil
}
// scan returns the next token from the underlying scanner. If a token has
// been unscanned then read that instead. In the process, it collects any
// comment groups encountered, and remembers the last lead and line comments.
func (p *Parser) scan() token.Token {
// If we have a token on the buffer, then return it.
if p.n != 0 {
p.n = 0
return p.tok
}
// Otherwise read the next token from the scanner and save it to the buffer
// in case we unscan later.
prev := p.tok
p.tok = p.sc.Scan()
if p.tok.Type == token.COMMENT {
var comment *ast.CommentGroup
var endline int
// fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
// p.tok.Pos.Line, prev.Pos.Line, endline)
if p.tok.Pos.Line == prev.Pos.Line {
// The comment is on the same line as the previous token; it
// cannot be a lead comment but may be a line comment.
comment, endline = p.consumeCommentGroup(0)
if p.tok.Pos.Line != endline {
// The next token is on a different line, thus
// the last comment group is a line comment.
p.lineComment = comment
}
}
// consume successor comments, if any
endline = -1
for p.tok.Type == token.COMMENT {
comment, endline = p.consumeCommentGroup(1)
}
if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
switch p.tok.Type {
case token.RBRACE, token.RBRACK:
// Do not count for these cases
default:
// The next token is following on the line immediately after the
// comment group, thus the last comment group is a lead comment.
p.leadComment = comment
}
}
}
return p.tok
}
// unscan pushes the previously read token back onto the buffer.
func (p *Parser) unscan() {
p.n = 1
}
// ----------------------------------------------------------------------------
// Parsing support
func (p *Parser) printTrace(a ...interface{}) {
if !p.enableTrace {
return
}
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
const n = len(dots)
fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
i := 2 * p.indent
for i > n {
fmt.Print(dots)
i -= n
}
// i <= n
fmt.Print(dots[0:i])
fmt.Println(a...)
}
func trace(p *Parser, msg string) *Parser {
p.printTrace(msg, "(")
p.indent++
return p
}
// Usage pattern: defer un(trace(p, "..."))
func un(p *Parser) {
p.indent--
p.printTrace(")")
}
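// Editor's sketch (illustrative only): parsing HCL source with the Parse
// function defined above. The input string is hypothetical.
//
//	f, err := Parse([]byte(`foo = "bar"`))
//	if err != nil {
//		// handle parse error
//	}
//	if list, ok := f.Node.(*ast.ObjectList); ok {
//		_ = list.Items // walk or decode the items as needed
//	}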

View File

@ -0,0 +1,323 @@
package parser
import (
"fmt"
"io/ioutil"
"path/filepath"
"reflect"
"runtime"
"testing"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/ast"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/token"
)
func TestType(t *testing.T) {
var literals = []struct {
typ token.Type
src string
}{
{token.STRING, `foo = "foo"`},
{token.NUMBER, `foo = 123`},
{token.FLOAT, `foo = 123.12`},
{token.FLOAT, `foo = -123.12`},
{token.BOOL, `foo = true`},
{token.HEREDOC, "foo = <<EOF\nHello\nWorld\nEOF"},
}
for _, l := range literals {
p := newParser([]byte(l.src))
item, err := p.objectItem()
if err != nil {
t.Error(err)
}
lit, ok := item.Val.(*ast.LiteralType)
if !ok {
t.Errorf("node should be of type LiteralType, got: %T", item.Val)
}
if lit.Token.Type != l.typ {
t.Errorf("want: %s, got: %s", l.typ, lit.Token.Type)
}
}
}
func TestListType(t *testing.T) {
var literals = []struct {
src string
tokens []token.Type
}{
{
`foo = ["123", 123]`,
[]token.Type{token.STRING, token.NUMBER},
},
{
`foo = [123, "123",]`,
[]token.Type{token.NUMBER, token.STRING},
},
{
`foo = []`,
[]token.Type{},
},
{
`foo = ["123", 123]`,
[]token.Type{token.STRING, token.NUMBER},
},
{
`foo = [1,
"string",
<<EOF
heredoc contents
EOF
]`,
[]token.Type{token.NUMBER, token.STRING, token.HEREDOC},
},
}
for _, l := range literals {
p := newParser([]byte(l.src))
item, err := p.objectItem()
if err != nil {
t.Error(err)
}
list, ok := item.Val.(*ast.ListType)
if !ok {
t.Errorf("node should be of type LiteralType, got: %T", item.Val)
}
tokens := []token.Type{}
for _, li := range list.List {
if tp, ok := li.(*ast.LiteralType); ok {
tokens = append(tokens, tp.Token.Type)
}
}
equals(t, l.tokens, tokens)
}
}
func TestObjectType(t *testing.T) {
var literals = []struct {
src string
nodeType []ast.Node
itemLen int
}{
{
`foo = {}`,
nil,
0,
},
{
`foo = {
bar = "fatih"
}`,
[]ast.Node{&ast.LiteralType{}},
1,
},
{
`foo = {
bar = "fatih"
baz = ["arslan"]
}`,
[]ast.Node{
&ast.LiteralType{},
&ast.ListType{},
},
2,
},
{
`foo = {
bar {}
}`,
[]ast.Node{
&ast.ObjectType{},
},
1,
},
{
`foo {
bar {}
foo = true
}`,
[]ast.Node{
&ast.ObjectType{},
&ast.LiteralType{},
},
2,
},
}
for _, l := range literals {
p := newParser([]byte(l.src))
// p.enableTrace = true
item, err := p.objectItem()
if err != nil {
t.Error(err)
}
// we know that the ObjectKey name is foo for all cases, what matters
// is the object
obj, ok := item.Val.(*ast.ObjectType)
if !ok {
t.Errorf("node should be of type LiteralType, got: %T", item.Val)
}
// check if the total length of items are correct
equals(t, l.itemLen, len(obj.List.Items))
// check if the types are correct
for i, item := range obj.List.Items {
equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
}
}
}
func TestObjectKey(t *testing.T) {
keys := []struct {
exp []token.Type
src string
}{
{[]token.Type{token.IDENT}, `foo {}`},
{[]token.Type{token.IDENT}, `foo = {}`},
{[]token.Type{token.IDENT}, `foo = bar`},
{[]token.Type{token.IDENT}, `foo = 123`},
{[]token.Type{token.IDENT}, `foo = "${var.bar}`},
{[]token.Type{token.STRING}, `"foo" {}`},
{[]token.Type{token.STRING}, `"foo" = {}`},
{[]token.Type{token.STRING}, `"foo" = "${var.bar}`},
{[]token.Type{token.IDENT, token.IDENT}, `foo bar {}`},
{[]token.Type{token.IDENT, token.STRING}, `foo "bar" {}`},
{[]token.Type{token.STRING, token.IDENT}, `"foo" bar {}`},
{[]token.Type{token.IDENT, token.IDENT, token.IDENT}, `foo bar baz {}`},
}
for _, k := range keys {
p := newParser([]byte(k.src))
keys, err := p.objectKey()
if err != nil {
t.Fatal(err)
}
tokens := []token.Type{}
for _, o := range keys {
tokens = append(tokens, o.Token.Type)
}
equals(t, k.exp, tokens)
}
errKeys := []struct {
src string
}{
{`foo 12 {}`},
{`foo bar = {}`},
{`foo []`},
{`12 {}`},
}
for _, k := range errKeys {
p := newParser([]byte(k.src))
_, err := p.objectKey()
if err == nil {
t.Errorf("case '%s' should give an error", k.src)
}
}
}
// Official HCL tests
func TestParse(t *testing.T) {
cases := []struct {
Name string
Err bool
}{
{
"assign_colon.hcl",
true,
},
{
"comment.hcl",
false,
},
{
"comment_lastline.hcl",
false,
},
{
"comment_single.hcl",
false,
},
{
"empty.hcl",
false,
},
{
"list_comma.hcl",
false,
},
{
"multiple.hcl",
false,
},
{
"structure.hcl",
false,
},
{
"structure_basic.hcl",
false,
},
{
"structure_empty.hcl",
false,
},
{
"complex.hcl",
false,
},
{
"assign_deep.hcl",
true,
},
{
"types.hcl",
false,
},
{
"array_comment.hcl",
false,
},
{
"array_comment_2.hcl",
true,
},
{
"missing_braces.hcl",
true,
},
}
const fixtureDir = "./test-fixtures"
for _, tc := range cases {
d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
if err != nil {
t.Fatalf("err: %s", err)
}
_, err = Parse(d)
if (err != nil) != tc.Err {
t.Fatalf("Input: %s\n\nError: %s", tc.Name, err)
}
}
}
// equals fails the test if exp is not equal to act.
func equals(tb testing.TB, exp, act interface{}) {
if !reflect.DeepEqual(exp, act) {
_, file, line, _ := runtime.Caller(1)
fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
tb.FailNow()
}
}

View File

@ -0,0 +1,4 @@
foo = [
"1",
"2", # comment
]

View File

@ -0,0 +1,6 @@
provisioner "remote-exec" {
scripts = [
"${path.module}/scripts/install-consul.sh" // missing comma
"${path.module}/scripts/install-haproxy.sh"
]
}

View File

@ -0,0 +1,6 @@
resource = [{
"foo": {
"bar": {},
"baz": [1, 2, "foo"],
}
}]

View File

@ -0,0 +1,5 @@
resource = [{
foo = [{
bar = {}
}]
}]

View File

@ -0,0 +1,15 @@
// Foo
/* Bar */
/*
/*
Baz
*/
# Another
# Multiple
# Lines
foo = "bar"

View File

@ -0,0 +1 @@
#foo

View File

@ -0,0 +1 @@
# Hello

View File

@ -0,0 +1,42 @@
variable "foo" {
default = "bar"
description = "bar"
}
variable "groups" { }
provider "aws" {
access_key = "foo"
secret_key = "bar"
}
provider "do" {
api_key = "${var.foo}"
}
resource "aws_security_group" "firewall" {
count = 5
}
resource aws_instance "web" {
ami = "${var.foo}"
security_groups = [
"foo",
"${aws_security_group.firewall.foo}",
"${element(split(\",\", var.groups)}",
]
network_interface = {
device_index = 0
description = "Main network interface"
}
}
resource "aws_instance" "db" {
security_groups = "${aws_security_group.firewall.*.id}"
VPC = "foo"
depends_on = ["aws_instance.web"]
}
output "web_ip" {
value = "${aws_instance.web.private_ip}"
}

View File

@ -0,0 +1 @@
foo.bar = "baz"

View File

@ -0,0 +1 @@
foo = [1, 2, "foo"]

View File

@ -0,0 +1 @@
foo = [1, 2, "foo",]

View File

@ -0,0 +1,4 @@
# should error, but not crash
resource "template_file" "cloud_config" {
template = "$file("${path.module}/some/path")"
}

View File

@ -0,0 +1,2 @@
foo = "bar"
key = 7

View File

@ -0,0 +1,3 @@
default = {
"eu-west-1": "ami-b1cf19c6",
}

View File

@ -0,0 +1,5 @@
// This is a test structure for the lexer
foo bar "baz" {
key = 7
foo = "bar"
}

View File

@ -0,0 +1,5 @@
foo {
value = 7
"value" = 8
"complex::value" = 9
}

View File

@ -0,0 +1 @@
resource "foo" "bar" {}

View File

@ -0,0 +1,7 @@
foo = "bar"
bar = 7
baz = [1,2,3]
foo = -12
bar = 3.14159
foo = true
bar = false

View File

@ -0,0 +1,573 @@
package printer
import (
"bytes"
"fmt"
"sort"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/ast"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/token"
)
const (
blank = byte(' ')
newline = byte('\n')
tab = byte('\t')
infinity = 1 << 30 // offset or line
)
var (
unindent = []byte("\uE123") // in the private use space
)
type printer struct {
cfg Config
prev token.Pos
comments []*ast.CommentGroup // may be nil, contains all comments
standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)
enableTrace bool
indentTrace int
}
type ByPosition []*ast.CommentGroup
func (b ByPosition) Len() int { return len(b) }
func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
// collectComments collects all standalone comments, i.e. comments which are
// not attached to a node as a lead or line comment
func (p *printer) collectComments(node ast.Node) {
// first collect all comments. This is already stored in
// ast.File.(comments)
ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
switch t := nn.(type) {
case *ast.File:
p.comments = t.Comments
return nn, false
}
return nn, true
})
standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0)
for _, c := range p.comments {
standaloneComments[c.Pos()] = c
}
// next remove all lead and line comments from the overall comment map.
// This will give us comments which are standalone, comments which are not
// assigned to any kind of node.
ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
switch t := nn.(type) {
case *ast.LiteralType:
if t.LineComment != nil {
for _, comment := range t.LineComment.List {
if _, ok := standaloneComments[comment.Pos()]; ok {
delete(standaloneComments, comment.Pos())
}
}
}
case *ast.ObjectItem:
if t.LeadComment != nil {
for _, comment := range t.LeadComment.List {
if _, ok := standaloneComments[comment.Pos()]; ok {
delete(standaloneComments, comment.Pos())
}
}
}
if t.LineComment != nil {
for _, comment := range t.LineComment.List {
if _, ok := standaloneComments[comment.Pos()]; ok {
delete(standaloneComments, comment.Pos())
}
}
}
}
return nn, true
})
for _, c := range standaloneComments {
p.standaloneComments = append(p.standaloneComments, c)
}
sort.Sort(ByPosition(p.standaloneComments))
}
// output creates printable HCL output for the given node and returns it.
func (p *printer) output(n interface{}) []byte {
var buf bytes.Buffer
switch t := n.(type) {
case *ast.File:
return p.output(t.Node)
case *ast.ObjectList:
var index int
var nextItem token.Pos
var commented bool
for {
// TODO(arslan): refactor below comment printing, we have the same in objectType
for _, c := range p.standaloneComments {
for _, comment := range c.List {
if index != len(t.Items) {
nextItem = t.Items[index].Pos()
} else {
nextItem = token.Pos{Offset: infinity, Line: infinity}
}
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
// if we hit the end add newlines so we can print the comment
if index == len(t.Items) {
buf.Write([]byte{newline, newline})
}
buf.WriteString(comment.Text)
buf.WriteByte(newline)
if index != len(t.Items) {
buf.WriteByte(newline)
}
}
}
}
if index == len(t.Items) {
break
}
buf.Write(p.output(t.Items[index]))
if !commented && index != len(t.Items)-1 {
buf.Write([]byte{newline, newline})
}
index++
}
case *ast.ObjectKey:
buf.WriteString(t.Token.Text)
case *ast.ObjectItem:
p.prev = t.Pos()
buf.Write(p.objectItem(t))
case *ast.LiteralType:
buf.Write(p.literalType(t))
case *ast.ListType:
buf.Write(p.list(t))
case *ast.ObjectType:
buf.Write(p.objectType(t))
default:
fmt.Printf(" unknown type: %T\n", n)
}
return buf.Bytes()
}
func (p *printer) literalType(lit *ast.LiteralType) []byte {
result := []byte(lit.Token.Text)
if lit.Token.Type == token.HEREDOC {
// Clear the trailing newline from heredocs
if result[len(result)-1] == '\n' {
result = result[:len(result)-1]
}
// Poison lines 2+ so that we don't indent them
result = p.heredocIndent(result)
}
return result
}
// objectItem returns the printable HCL form of an object item. An object item
// starts with one or more keys and has a value. The value might be of any
// type.
func (p *printer) objectItem(o *ast.ObjectItem) []byte {
defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
var buf bytes.Buffer
if o.LeadComment != nil {
for _, comment := range o.LeadComment.List {
buf.WriteString(comment.Text)
buf.WriteByte(newline)
}
}
for i, k := range o.Keys {
buf.WriteString(k.Token.Text)
buf.WriteByte(blank)
// reach end of key
if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
buf.WriteString("=")
buf.WriteByte(blank)
}
}
buf.Write(p.output(o.Val))
if o.Val.Pos().Line == o.Keys[0].Pos().Line && o.LineComment != nil {
buf.WriteByte(blank)
for _, comment := range o.LineComment.List {
buf.WriteString(comment.Text)
}
}
return buf.Bytes()
}
// objectType returns the printable HCL form of an object type. An object type
// begins with a brace and ends with a brace.
func (p *printer) objectType(o *ast.ObjectType) []byte {
defer un(trace(p, "ObjectType"))
var buf bytes.Buffer
buf.WriteString("{")
buf.WriteByte(newline)
var index int
var nextItem token.Pos
var commented bool
for {
// Print standalone comments
for _, c := range p.standaloneComments {
for _, comment := range c.List {
// if we hit the end, last item should be the brace
if index != len(o.List.Items) {
nextItem = o.List.Items[index].Pos()
} else {
nextItem = o.Rbrace
}
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
// add newline if it's between other printed nodes
if index > 0 {
commented = true
buf.WriteByte(newline)
}
buf.Write(p.indent([]byte(comment.Text)))
buf.WriteByte(newline)
if index != len(o.List.Items) {
buf.WriteByte(newline) // do not print on the end
}
}
}
}
if index == len(o.List.Items) {
p.prev = o.Rbrace
break
}
// check if we have adjacent one-liner items. If so, we're going to align
// the comments.
var aligned []*ast.ObjectItem
for _, item := range o.List.Items[index:] {
// we don't group one line lists
if len(o.List.Items) == 1 {
break
}
// one line means a one-liner without any lead comment
// two lines means a one-liner with a lead comment
// anything else might be something else
cur := lines(string(p.objectItem(item)))
if cur > 2 {
break
}
curPos := item.Pos()
nextPos := token.Pos{}
if index != len(o.List.Items)-1 {
nextPos = o.List.Items[index+1].Pos()
}
prevPos := token.Pos{}
if index != 0 {
prevPos = o.List.Items[index-1].Pos()
}
// fmt.Println("DEBUG ----------------")
// fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
// fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
// fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
if curPos.Line+1 == nextPos.Line {
aligned = append(aligned, item)
index++
continue
}
if curPos.Line-1 == prevPos.Line {
aligned = append(aligned, item)
index++
// finish if we have a new line or comment next. This happens
// if the next item is not adjacent
if curPos.Line+1 != nextPos.Line {
break
}
continue
}
break
}
// put newlines if the items are between other non-aligned items.
// newlines are also added if there is a standalone comment already, so
// check that too
if !commented && index != len(aligned) {
buf.WriteByte(newline)
}
if len(aligned) >= 1 {
p.prev = aligned[len(aligned)-1].Pos()
items := p.alignedItems(aligned)
buf.Write(p.indent(items))
} else {
p.prev = o.List.Items[index].Pos()
buf.Write(p.indent(p.objectItem(o.List.Items[index])))
index++
}
buf.WriteByte(newline)
}
buf.WriteString("}")
return buf.Bytes()
}
func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
var buf bytes.Buffer
// find the longest key and value length, needed for alignment
var longestKeyLen int // longest key length
var longestValLen int // longest value length
for _, item := range items {
key := len(item.Keys[0].Token.Text)
val := len(p.output(item.Val))
if key > longestKeyLen {
longestKeyLen = key
}
if val > longestValLen {
longestValLen = val
}
}
for i, item := range items {
if item.LeadComment != nil {
for _, comment := range item.LeadComment.List {
buf.WriteString(comment.Text)
buf.WriteByte(newline)
}
}
for i, k := range item.Keys {
keyLen := len(k.Token.Text)
buf.WriteString(k.Token.Text)
for i := 0; i < longestKeyLen-keyLen+1; i++ {
buf.WriteByte(blank)
}
// reach end of key
if i == len(item.Keys)-1 && len(item.Keys) == 1 {
buf.WriteString("=")
buf.WriteByte(blank)
}
}
val := p.output(item.Val)
valLen := len(val)
buf.Write(val)
if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
for i := 0; i < longestValLen-valLen+1; i++ {
buf.WriteByte(blank)
}
for _, comment := range item.LineComment.List {
buf.WriteString(comment.Text)
}
}
// do not print for the last item
if i != len(items)-1 {
buf.WriteByte(newline)
}
}
return buf.Bytes()
}
// list returns the printable HCL form of a list type.
func (p *printer) list(l *ast.ListType) []byte {
var buf bytes.Buffer
buf.WriteString("[")
var longestLine int
for _, item := range l.List {
// for now we assume that the list only contains literal types
if lit, ok := item.(*ast.LiteralType); ok {
lineLen := len(lit.Token.Text)
if lineLen > longestLine {
longestLine = lineLen
}
}
}
for i, item := range l.List {
if item.Pos().Line != l.Lbrack.Line {
// multiline list, add newline before we add each item
buf.WriteByte(newline)
// also indent each line
val := p.output(item)
curLen := len(val)
buf.Write(p.indent(val))
buf.WriteString(",")
if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
// if the next item doesn't have any comments, do not align
buf.WriteByte(blank) // align one space
if i != len(l.List)-1 {
if lit, ok := l.List[i+1].(*ast.LiteralType); ok && lit.LineComment != nil {
for i := 0; i < longestLine-curLen; i++ {
buf.WriteByte(blank)
}
}
}
for _, comment := range lit.LineComment.List {
buf.WriteString(comment.Text)
}
}
if i == len(l.List)-1 {
buf.WriteByte(newline)
}
} else {
buf.Write(p.output(item))
if i != len(l.List)-1 {
buf.WriteString(",")
buf.WriteByte(blank)
}
}
}
buf.WriteString("]")
return buf.Bytes()
}
// indent indents each non-empty line of the given buffer
func (p *printer) indent(buf []byte) []byte {
var prefix []byte
if p.cfg.SpacesWidth != 0 {
for i := 0; i < p.cfg.SpacesWidth; i++ {
prefix = append(prefix, blank)
}
} else {
prefix = []byte{tab}
}
var res []byte
bol := true
for _, c := range buf {
if bol && c != '\n' {
res = append(res, prefix...)
}
res = append(res, c)
bol = c == '\n'
}
return res
}
// unindent removes all the indentation from the tombstoned lines
func (p *printer) unindent(buf []byte) []byte {
var res []byte
for i := 0; i < len(buf); i++ {
skip := len(buf)-i <= len(unindent)
if !skip {
skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
}
if skip {
res = append(res, buf[i])
continue
}
// We have a marker. We have to backtrack here and clean out
// any whitespace ahead of our tombstone up to a \n
for j := len(res) - 1; j >= 0; j-- {
if res[j] == '\n' {
break
}
res = res[:j]
}
// Skip the entire unindent marker
i += len(unindent) - 1
}
return res
}
// heredocIndent marks all the 2nd and further lines as unindentable
func (p *printer) heredocIndent(buf []byte) []byte {
var res []byte
bol := false
for _, c := range buf {
if bol && c != '\n' {
res = append(res, unindent...)
}
res = append(res, c)
bol = c == '\n'
}
return res
}
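// Editor's note on how the three helpers above fit together: heredocIndent
// prefixes every line after the first of a heredoc with the private-use
// marker, indent later prepends the normal prefix to every non-empty line,
// and unindent finally strips both the marker and any indentation inserted
// before it, so heredoc bodies are emitted unchanged.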
func lines(txt string) int {
endline := 1
for i := 0; i < len(txt); i++ {
if txt[i] == '\n' {
endline++
}
}
return endline
}
// ----------------------------------------------------------------------------
// Tracing support
func (p *printer) printTrace(a ...interface{}) {
if !p.enableTrace {
return
}
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
const n = len(dots)
i := 2 * p.indentTrace
for i > n {
fmt.Print(dots)
i -= n
}
// i <= n
fmt.Print(dots[0:i])
fmt.Println(a...)
}
func trace(p *printer, msg string) *printer {
p.printTrace(msg, "(")
p.indentTrace++
return p
}
// Usage pattern: defer un(trace(p, "..."))
func un(p *printer) {
p.indentTrace--
p.printTrace(")")
}

View File

@ -0,0 +1,64 @@
// Package printer implements printing of AST nodes to HCL format.
package printer
import (
"bytes"
"io"
"text/tabwriter"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/ast"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/parser"
)
var DefaultConfig = Config{
SpacesWidth: 2,
}
// A Config node controls the output of Fprint.
type Config struct {
SpacesWidth int // if set, it will use spaces instead of tabs for alignment
}
func (c *Config) Fprint(output io.Writer, node ast.Node) error {
p := &printer{
cfg: *c,
comments: make([]*ast.CommentGroup, 0),
standaloneComments: make([]*ast.CommentGroup, 0),
// enableTrace: true,
}
p.collectComments(node)
if _, err := output.Write(p.unindent(p.output(node))); err != nil {
return err
}
// flush tabwriter, if any
var err error
if tw, _ := output.(*tabwriter.Writer); tw != nil {
err = tw.Flush()
}
return err
}
// Fprint "pretty-prints" an HCL node to output.
// It calls Config.Fprint with default settings.
func Fprint(output io.Writer, node ast.Node) error {
return DefaultConfig.Fprint(output, node)
}
// Format formats src HCL and returns the result.
func Format(src []byte) ([]byte, error) {
node, err := parser.Parse(src)
if err != nil {
return nil, err
}
var buf bytes.Buffer
if err := DefaultConfig.Fprint(&buf, node); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
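// Editor's sketch (illustrative only): formatting HCL source with the Format
// helper defined above. The input string is hypothetical.
//
//	formatted, err := Format([]byte(`foo    =    "bar"`))
//	if err != nil {
//		// handle parse or print error
//	}
//	_ = formatted // e.g. write to a file or stdout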

View File

@ -0,0 +1,143 @@
package printer
import (
"bytes"
"errors"
"flag"
"fmt"
"io/ioutil"
"path/filepath"
"testing"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/parser"
)
var update = flag.Bool("update", false, "update golden files")
const (
dataDir = "testdata"
)
type entry struct {
source, golden string
}
// Use go test -update to create/update the respective golden files.
var data = []entry{
{"complexhcl.input", "complexhcl.golden"},
{"list.input", "list.golden"},
{"comment.input", "comment.golden"},
{"comment_aligned.input", "comment_aligned.golden"},
{"comment_standalone.input", "comment_standalone.golden"},
}
func TestFiles(t *testing.T) {
for _, e := range data {
source := filepath.Join(dataDir, e.source)
golden := filepath.Join(dataDir, e.golden)
check(t, source, golden)
}
}
func check(t *testing.T, source, golden string) {
src, err := ioutil.ReadFile(source)
if err != nil {
t.Error(err)
return
}
res, err := format(src)
if err != nil {
t.Error(err)
return
}
// update golden files if necessary
if *update {
if err := ioutil.WriteFile(golden, res, 0644); err != nil {
t.Error(err)
}
return
}
// get golden
gld, err := ioutil.ReadFile(golden)
if err != nil {
t.Error(err)
return
}
// formatted source and golden must be the same
if err := diff(source, golden, res, gld); err != nil {
t.Error(err)
return
}
}
// diff compares a and b.
func diff(aname, bname string, a, b []byte) error {
var buf bytes.Buffer // holding long error message
// compare lengths
if len(a) != len(b) {
fmt.Fprintf(&buf, "\nlength changed: len(%s) = %d, len(%s) = %d", aname, len(a), bname, len(b))
}
// compare contents
line := 1
offs := 1
for i := 0; i < len(a) && i < len(b); i++ {
ch := a[i]
if ch != b[i] {
fmt.Fprintf(&buf, "\n%s:%d:%d: %s", aname, line, i-offs+1, lineAt(a, offs))
fmt.Fprintf(&buf, "\n%s:%d:%d: %s", bname, line, i-offs+1, lineAt(b, offs))
fmt.Fprintf(&buf, "\n\n")
break
}
if ch == '\n' {
line++
offs = i + 1
}
}
if buf.Len() > 0 {
return errors.New(buf.String())
}
return nil
}
// format parses src, prints the corresponding AST, verifies the resulting
// src is syntactically correct, and returns the resulting src or an error
// if any.
func format(src []byte) ([]byte, error) {
// parse src
node, err := parser.Parse(src)
if err != nil {
return nil, fmt.Errorf("parse: %s\n%s", err, src)
}
var buf bytes.Buffer
cfg := &Config{}
if err := cfg.Fprint(&buf, node); err != nil {
return nil, fmt.Errorf("print: %s", err)
}
// make sure formatted output is syntactically correct
res := buf.Bytes()
if _, err := parser.Parse(res); err != nil {
return nil, fmt.Errorf("parse: %s\n%s", err, res)
}
return res, nil
}
// lineAt returns the line in text starting at offset offs.
func lineAt(text []byte, offs int) []byte {
i := offs
for i < len(text) && text[i] != '\n' {
i++
}
return text[offs:i]
}

View File

@ -0,0 +1,36 @@
// A standalone comment is a comment which is not attached to any kind of node
// This comes from Terraform, as a test
variable "foo" {
# Standalone comment should be still here
default = "bar"
description = "bar" # yooo
}
/* This is a multi line standalone
comment*/
// fatih arslan
/* This is a developer test
account and a multine comment */
developer = ["fatih", "arslan"] // fatih arslan
# One line here
numbers = [1, 2] // another line here
# Another comment
variable = {
description = "bar" # another yooo
foo {
# Nested standalone
bar = "fatih"
}
}
// lead comment
foo {
bar = "fatih" // line comment 2
} // line comment 3

View File

@ -0,0 +1,37 @@
// A standalone comment is a comment which is not attached to any kind of node
// This comes from Terraform, as a test
variable "foo" {
# Standalone comment should be still here
default = "bar"
description = "bar" # yooo
}
/* This is a multi line standalone
comment*/
// fatih arslan
/* This is a developer test
account and a multine comment */
developer = [ "fatih", "arslan"] // fatih arslan
# One line here
numbers = [1,2] // another line here
# Another comment
variable = {
description = "bar" # another yooo
foo {
# Nested standalone
bar = "fatih"
}
}
// lead comment
foo {
bar = "fatih" // line comment 2
} // line comment 3

View File

@ -0,0 +1,25 @@
aligned {
# We have some aligned items below
foo = "fatih" # yoo1
default = "bar" # yoo2
bar = "bar and foo" # yoo3
default = {
bar = "example"
}
#deneme arslan
fatih = ["fatih"] # yoo4
#fatih arslan
fatiharslan = ["arslan"] // yoo5
default = {
bar = "example"
}
security_groups = [
"foo", # kenya 1
"${aws_security_group.firewall.foo}", # kenya 2
]
}

View File

@ -0,0 +1,21 @@
aligned {
# We have some aligned items below
foo = "fatih" # yoo1
default = "bar" # yoo2
bar = "bar and foo" # yoo3
default = {
bar = "example"
}
#deneme arslan
fatih = ["fatih"] # yoo4
#fatih arslan
fatiharslan = ["arslan"] // yoo5
default = {
bar = "example"
}
security_groups = [
"foo", # kenya 1
"${aws_security_group.firewall.foo}", # kenya 2
]
}

View File

@ -0,0 +1,16 @@
// A standalone comment
aligned {
# Standalone 1
a = "bar" # yoo1
default = "bar" # yoo2
# Standalone 2
}
# Standalone 3
numbers = [1, 2] // another line here
# Standalone 4

View File

@ -0,0 +1,16 @@
// A standalone comment
aligned {
# Standalone 1
a = "bar" # yoo1
default = "bar" # yoo2
# Standalone 2
}
# Standalone 3
numbers = [1,2] // another line here
# Standalone 4

View File

@ -0,0 +1,54 @@
variable "foo" {
default = "bar"
description = "bar"
}
developer = ["fatih", "arslan"]
provider "aws" {
access_key = "foo"
secret_key = "bar"
}
provider "do" {
api_key = "${var.foo}"
}
resource "aws_security_group" "firewall" {
count = 5
}
resource aws_instance "web" {
ami = "${var.foo}"
security_groups = [
"foo",
"${aws_security_group.firewall.foo}",
]
network_interface {
device_index = 0
description = "Main network interface"
}
network_interface = {
device_index = 1
description = <<EOF
ANOTHER NETWORK INTERFACE
EOF
}
}
resource "aws_instance" "db" {
security_groups = "${aws_security_group.firewall.*.id}"
VPC = "foo"
depends_on = ["aws_instance.web"]
}
output "web_ip" {
value = <<EOF
TUBES
EOF
}

View File

@ -0,0 +1,53 @@
variable "foo" {
default = "bar"
description = "bar"
}
developer = [ "fatih", "arslan"]
provider "aws" {
access_key ="foo"
secret_key = "bar"
}
provider "do" {
api_key = "${var.foo}"
}
resource "aws_security_group" "firewall" {
count = 5
}
resource aws_instance "web" {
ami = "${var.foo}"
security_groups = [
"foo",
"${aws_security_group.firewall.foo}"
]
network_interface {
device_index = 0
description = "Main network interface"
}
network_interface = {
device_index = 1
description = <<EOF
ANOTHER NETWORK INTERFACE
EOF
}
}
resource "aws_instance" "db" {
security_groups = "${aws_security_group.firewall.*.id}"
VPC = "foo"
depends_on = ["aws_instance.web"]
}
output "web_ip" {
value=<<EOF
TUBES
EOF
}

View File

@ -0,0 +1,27 @@
foo = ["fatih", "arslan"]
foo = ["bar", "qaz"]
foo = ["zeynep",
"arslan",
]
foo = ["fatih", "zeynep",
"arslan",
]
foo = [
"vim-go",
"golang",
"hcl",
]
foo = []
foo = [1, 2, 3, 4]
foo = [
"kenya",
"ethiopia",
"columbia",
]

View File

@ -0,0 +1,21 @@
foo = ["fatih", "arslan" ]
foo = [ "bar", "qaz", ]
foo = [ "zeynep",
"arslan", ]
foo = ["fatih", "zeynep",
"arslan", ]
foo = [
"vim-go",
"golang", "hcl"]
foo = []
foo = [1, 2,3, 4]
foo = [
"kenya", "ethiopia",
"columbia"]

View File

@ -0,0 +1,612 @@
// Package scanner implements a scanner for HCL (HashiCorp Configuration
// Language) source text.
package scanner
import (
"bytes"
"fmt"
"os"
"unicode"
"unicode/utf8"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/token"
)
// eof represents a marker rune for the end of the reader.
const eof = rune(0)
// Scanner defines a lexical scanner
type Scanner struct {
buf *bytes.Buffer // Source buffer for advancing and scanning
src []byte // Source buffer for immutable access
// Source Position
srcPos token.Pos // current position
prevPos token.Pos // previous position, used for peek() method
lastCharLen int // length of last character in bytes
lastLineLen int // length of last line in characters (for correct column reporting)
tokStart int // token text start position
tokEnd int // token text end position
// Error is called for each error encountered. If no Error
// function is set, the error is reported to os.Stderr.
Error func(pos token.Pos, msg string)
// ErrorCount is incremented by one for each error encountered.
ErrorCount int
// tokPos is the start position of most recently scanned token; set by
// Scan. The Filename field is always left untouched by the Scanner. If
// an error is reported (via Error) and Position is invalid, the scanner is
// not inside a token.
tokPos token.Pos
}
// New creates and initializes a new instance of Scanner using src as
// its source content.
func New(src []byte) *Scanner {
// even though we accept a src, we read from an io.Reader-compatible type
// (*bytes.Buffer), so in the future we could easily change it to a streaming
// read.
b := bytes.NewBuffer(src)
s := &Scanner{
buf: b,
src: src,
}
// srcPosition always starts with 1
s.srcPos.Line = 1
return s
}
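// Editor's sketch (illustrative only): tokenizing a small input by calling
// Scan until token.EOF, using only the API defined in this file and the
// token package. The input string is hypothetical.
//
//	s := New([]byte(`foo = "bar"`))
//	for {
//		tok := s.Scan()
//		if tok.Type == token.EOF {
//			break
//		}
//		// tok.Type and tok.Text describe each scanned lexeme
//	}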
// next reads the next rune from the buffered reader. Returns rune(0) if
// an error occurs (or io.EOF is returned).
func (s *Scanner) next() rune {
ch, size, err := s.buf.ReadRune()
if err != nil {
// advance for error reporting
s.srcPos.Column++
s.srcPos.Offset += size
s.lastCharLen = size
return eof
}
if ch == utf8.RuneError && size == 1 {
s.srcPos.Column++
s.srcPos.Offset += size
s.lastCharLen = size
s.err("illegal UTF-8 encoding")
return ch
}
// remember last position
s.prevPos = s.srcPos
s.srcPos.Column++
s.lastCharLen = size
s.srcPos.Offset += size
if ch == '\n' {
s.srcPos.Line++
s.lastLineLen = s.srcPos.Column
s.srcPos.Column = 0
}
// debug
// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
return ch
}
// unread unreads the previously read rune and updates the source position
func (s *Scanner) unread() {
if err := s.buf.UnreadRune(); err != nil {
panic(err) // this is user fault, we should catch it
}
s.srcPos = s.prevPos // put back last position
}
// peek returns the next rune without advancing the reader.
func (s *Scanner) peek() rune {
peek, _, err := s.buf.ReadRune()
if err != nil {
return eof
}
s.buf.UnreadRune()
return peek
}
// Scan scans the next token and returns the token.
func (s *Scanner) Scan() token.Token {
ch := s.next()
// skip white space
for isWhitespace(ch) {
ch = s.next()
}
var tok token.Type
// token text markings
s.tokStart = s.srcPos.Offset - s.lastCharLen
// token position: the initial next() moves the offset by one (the size of the
// rune, actually), though we are interested in the starting point
s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
if s.srcPos.Column > 0 {
// common case: last character was not a '\n'
s.tokPos.Line = s.srcPos.Line
s.tokPos.Column = s.srcPos.Column
} else {
// last character was a '\n'
// (we cannot be at the beginning of the source
// since we have called next() at least once)
s.tokPos.Line = s.srcPos.Line - 1
s.tokPos.Column = s.lastLineLen
}
switch {
case isLetter(ch):
tok = token.IDENT
lit := s.scanIdentifier()
if lit == "true" || lit == "false" {
tok = token.BOOL
}
case isDecimal(ch):
tok = s.scanNumber(ch)
default:
switch ch {
case eof:
tok = token.EOF
case '"':
tok = token.STRING
s.scanString()
case '#', '/':
tok = token.COMMENT
s.scanComment(ch)
case '.':
tok = token.PERIOD
ch = s.peek()
if isDecimal(ch) {
tok = token.FLOAT
ch = s.scanMantissa(ch)
ch = s.scanExponent(ch)
}
case '<':
tok = token.HEREDOC
s.scanHeredoc()
case '[':
tok = token.LBRACK
case ']':
tok = token.RBRACK
case '{':
tok = token.LBRACE
case '}':
tok = token.RBRACE
case ',':
tok = token.COMMA
case '=':
tok = token.ASSIGN
case '+':
tok = token.ADD
case '-':
if isDecimal(s.peek()) {
ch := s.next()
tok = s.scanNumber(ch)
} else {
tok = token.SUB
}
default:
s.err("illegal char")
}
}
// finish token ending
s.tokEnd = s.srcPos.Offset
// create token literal
var tokenText string
if s.tokStart >= 0 {
tokenText = string(s.src[s.tokStart:s.tokEnd])
}
s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
return token.Token{
Type: tok,
Pos: s.tokPos,
Text: tokenText,
}
}
func (s *Scanner) scanComment(ch rune) {
// single line comments
if ch == '#' || (ch == '/' && s.peek() != '*') {
ch = s.next()
for ch != '\n' && ch >= 0 && ch != eof {
ch = s.next()
}
if ch != eof && ch >= 0 {
s.unread()
}
return
}
// be sure we get the character after /*. This allows us to find comments
// that are not terminated
if ch == '/' {
s.next()
ch = s.next() // read character after "/*"
}
// look for the terminating */ of the block comment
for {
if ch < 0 || ch == eof {
s.err("comment not terminated")
break
}
ch0 := ch
ch = s.next()
if ch0 == '*' && ch == '/' {
break
}
}
}
// scanNumber scans an HCL number definition starting with the given rune
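// Examples exercised by the scanner tests below: 42, 0x42 and 042 produce
// token.NUMBER, while 42.0, .42 and 1e-10 produce token.FLOAT.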
func (s *Scanner) scanNumber(ch rune) token.Type {
if ch == '0' {
// check for hexadecimal, octal or float
ch = s.next()
if ch == 'x' || ch == 'X' {
// hexadecimal
ch = s.next()
found := false
for isHexadecimal(ch) {
ch = s.next()
found = true
}
if !found {
s.err("illegal hexadecimal number")
}
if ch != eof {
s.unread()
}
return token.NUMBER
}
// now it's either something like 0421 (octal) or 0.1231 (float)
illegalOctal := false
for isDecimal(ch) {
ch = s.next()
if ch == '8' || ch == '9' {
// this is just a possibility. For example 0159 is illegal, but
// 0159.23 is valid. So we mark a possible illegal octal. If
// the next character is not a period, we'll print the error.
illegalOctal = true
}
}
if ch == 'e' || ch == 'E' {
ch = s.scanExponent(ch)
return token.FLOAT
}
if ch == '.' {
ch = s.scanFraction(ch)
if ch == 'e' || ch == 'E' {
ch = s.next()
ch = s.scanExponent(ch)
}
return token.FLOAT
}
if illegalOctal {
s.err("illegal octal number")
}
if ch != eof {
s.unread()
}
return token.NUMBER
}
s.scanMantissa(ch)
ch = s.next() // seek forward
if ch == 'e' || ch == 'E' {
ch = s.scanExponent(ch)
return token.FLOAT
}
if ch == '.' {
ch = s.scanFraction(ch)
if ch == 'e' || ch == 'E' {
ch = s.next()
ch = s.scanExponent(ch)
}
return token.FLOAT
}
if ch != eof {
s.unread()
}
return token.NUMBER
}
// scanMantissa scans the mantissa beginning from the given rune. It returns the next
// non-decimal rune, which is used to determine whether a fraction or exponent follows.
func (s *Scanner) scanMantissa(ch rune) rune {
scanned := false
for isDecimal(ch) {
ch = s.next()
scanned = true
}
if scanned && ch != eof {
s.unread()
}
return ch
}
// scanFraction scans the fraction after the '.' rune
func (s *Scanner) scanFraction(ch rune) rune {
if ch == '.' {
ch = s.peek() // we peek just to see if we can move forward
ch = s.scanMantissa(ch)
}
return ch
}
// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
// rune.
func (s *Scanner) scanExponent(ch rune) rune {
if ch == 'e' || ch == 'E' {
ch = s.next()
if ch == '-' || ch == '+' {
ch = s.next()
}
ch = s.scanMantissa(ch)
}
return ch
}
// scanHeredoc scans a heredoc string.
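// For example, a heredoc like "<<EOF\nhello\nworld\nEOF" is scanned as a
// single HEREDOC token: the identifier after '<<' is recorded and the body
// runs until the first line consisting solely of that identifier (see the
// heredoc cases in the scanner tests).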
func (s *Scanner) scanHeredoc() {
// Scan the second '<' in example: '<<EOF'
if s.next() != '<' {
s.err("heredoc expected second '<', didn't see it")
return
}
// Get the original offset so we can read just the heredoc ident
offs := s.srcPos.Offset
// Scan the identifier
ch := s.next()
for isLetter(ch) || isDigit(ch) {
ch = s.next()
}
// If we reached an EOF then that is not good
if ch == eof {
s.err("heredoc not terminated")
return
}
// Ignore the '\r' in Windows line endings
if ch == '\r' {
if s.peek() == '\n' {
ch = s.next()
}
}
// If we didn't reach a newline then that is also not good
if ch != '\n' {
s.err("invalid characters in heredoc anchor")
return
}
// Read the identifier
identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
// Read the actual string value
lineStart := s.srcPos.Offset
for {
ch := s.next()
// Special newline handling.
if ch == '\n' {
// Math is fast, so we first compare the byte counts to
// see if we have a chance of seeing the same identifier. If those
// match, then we compare the string values directly.
lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
if lineBytesLen == len(identBytes) &&
bytes.Equal(identBytes, s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
break
}
// Not an anchor match, record the start of a new line
lineStart = s.srcPos.Offset
}
if ch == eof {
s.err("heredoc not terminated")
return
}
}
return
}
// scanString scans a quoted string
func (s *Scanner) scanString() {
braces := 0
for {
// '"' opening already consumed
// read character after quote
ch := s.next()
if ch == '\n' || ch < 0 || ch == eof {
s.err("literal not terminated")
return
}
if ch == '"' && braces == 0 {
break
}
// If we're going into a ${} then we can ignore quotes for a while
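// For example, in `"${file("foo")}"` the inner quotes do not terminate the
// string; the braces are tracked so the closing '"' is only honored once the
// interpolation is balanced (see the string cases in the scanner tests).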
if braces == 0 && ch == '$' && s.peek() == '{' {
braces++
s.next()
} else if braces > 0 && ch == '{' {
braces++
}
if braces > 0 && ch == '}' {
braces--
}
if ch == '\\' {
s.scanEscape()
}
}
return
}
// scanEscape scans an escape sequence
func (s *Scanner) scanEscape() rune {
// http://en.cppreference.com/w/cpp/language/escape
ch := s.next() // read character after '\'
switch ch {
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
// nothing to do
case '0', '1', '2', '3', '4', '5', '6', '7':
// octal notation
ch = s.scanDigits(ch, 8, 3)
case 'x':
// hexadecimal notation
ch = s.scanDigits(s.next(), 16, 2)
case 'u':
// universal character name
ch = s.scanDigits(s.next(), 16, 4)
case 'U':
// universal character name
ch = s.scanDigits(s.next(), 16, 8)
default:
s.err("illegal char escape")
}
return ch
}
// scanDigits scans up to n runes in the given base. For example, an octal
// escape such as \042 is consumed with scanDigits(ch, 8, 3)
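// Likewise, after seeing \x, scanEscape above calls scanDigits(s.next(), 16, 2)
// to consume the two hexadecimal digits of the escape.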
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
for n > 0 && digitVal(ch) < base {
ch = s.next()
n--
}
if n > 0 {
s.err("illegal char escape")
}
// we scanned all the digits; put the last non-digit char back
s.unread()
return ch
}
// scanIdentifier scans an identifier and returns the literal string
func (s *Scanner) scanIdentifier() string {
offs := s.srcPos.Offset - s.lastCharLen
ch := s.next()
for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
ch = s.next()
}
if ch != eof {
s.unread() // we got identifier, put back latest char
}
return string(s.src[offs:s.srcPos.Offset])
}
// recentPosition returns the position of the character immediately after the
// character or token returned by the last call to Scan.
func (s *Scanner) recentPosition() (pos token.Pos) {
pos.Offset = s.srcPos.Offset - s.lastCharLen
switch {
case s.srcPos.Column > 0:
// common case: last character was not a '\n'
pos.Line = s.srcPos.Line
pos.Column = s.srcPos.Column
case s.lastLineLen > 0:
// last character was a '\n'
// (we cannot be at the beginning of the source
// since we have called next() at least once)
pos.Line = s.srcPos.Line - 1
pos.Column = s.lastLineLen
default:
// at the beginning of the source
pos.Line = 1
pos.Column = 1
}
return
}
// err reports a scanning error via the s.Error function. If the function is
// not set, the error is printed to os.Stderr by default
func (s *Scanner) err(msg string) {
s.ErrorCount++
pos := s.recentPosition()
if s.Error != nil {
s.Error(pos, msg)
return
}
fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
}
// isLetter returns true if the given rune is a letter
func isLetter(ch rune) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
}
// isDigit returns true if the given rune is a decimal digit
func isDigit(ch rune) bool {
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}
// isDecimal returns true if the given rune is a decimal digit
func isDecimal(ch rune) bool {
return '0' <= ch && ch <= '9'
}
// isHexadecimal returns true if the given rune is a hexadecimal digit
func isHexadecimal(ch rune) bool {
return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
}
// isWhitespace returns true if the rune is a space, tab, newline or carriage return
func isWhitespace(ch rune) bool {
return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
}
// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
func digitVal(ch rune) int {
switch {
case '0' <= ch && ch <= '9':
return int(ch - '0')
case 'a' <= ch && ch <= 'f':
return int(ch - 'a' + 10)
case 'A' <= ch && ch <= 'F':
return int(ch - 'A' + 10)
}
return 16 // larger than any legal digit val
}
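// Illustrative usage sketch, not part of the upstream file: it shows the
// intended way to drive the scanner, calling Scan in a loop until a token of
// type token.EOF is returned. The helper name scanAll is hypothetical.
func scanAll(src []byte) []token.Token {
var toks []token.Token
s := New(src)
for {
tok := s.Scan()
toks = append(toks, tok)
if tok.Type == token.EOF {
break
}
}
return toks
}
// For an input such as `a = 1`, scanAll would return IDENT, ASSIGN, NUMBER
// and EOF tokens, mirroring what the tests in the next file assert.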

View File

@ -0,0 +1,535 @@
package scanner
import (
"bytes"
"fmt"
"strings"
"testing"

"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/token"
)
var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
type tokenPair struct {
tok token.Type
text string
}
var tokenLists = map[string][]tokenPair{
"comment": []tokenPair{
{token.COMMENT, "//"},
{token.COMMENT, "////"},
{token.COMMENT, "// comment"},
{token.COMMENT, "// /* comment */"},
{token.COMMENT, "// // comment //"},
{token.COMMENT, "//" + f100},
{token.COMMENT, "#"},
{token.COMMENT, "##"},
{token.COMMENT, "# comment"},
{token.COMMENT, "# /* comment */"},
{token.COMMENT, "# # comment #"},
{token.COMMENT, "#" + f100},
{token.COMMENT, "/**/"},
{token.COMMENT, "/***/"},
{token.COMMENT, "/* comment */"},
{token.COMMENT, "/* // comment */"},
{token.COMMENT, "/* /* comment */"},
{token.COMMENT, "/*\n comment\n*/"},
{token.COMMENT, "/*" + f100 + "*/"},
},
"operator": []tokenPair{
{token.LBRACK, "["},
{token.LBRACE, "{"},
{token.COMMA, ","},
{token.PERIOD, "."},
{token.RBRACK, "]"},
{token.RBRACE, "}"},
{token.ASSIGN, "="},
{token.ADD, "+"},
{token.SUB, "-"},
},
"bool": []tokenPair{
{token.BOOL, "true"},
{token.BOOL, "false"},
},
"ident": []tokenPair{
{token.IDENT, "a"},
{token.IDENT, "a0"},
{token.IDENT, "foobar"},
{token.IDENT, "foo-bar"},
{token.IDENT, "abc123"},
{token.IDENT, "LGTM"},
{token.IDENT, "_"},
{token.IDENT, "_abc123"},
{token.IDENT, "abc123_"},
{token.IDENT, "_abc_123_"},
{token.IDENT, "_äöü"},
{token.IDENT, "_本"},
{token.IDENT, "äöü"},
{token.IDENT, "本"},
{token.IDENT, "a۰۱۸"},
{token.IDENT, "foo६४"},
{token.IDENT, "bar"},
},
"heredoc": []tokenPair{
{token.HEREDOC, "<<EOF\nhello\nworld\nEOF"},
{token.HEREDOC, "<<EOF123\nhello\nworld\nEOF123"},
},
"string": []tokenPair{
{token.STRING, `" "`},
{token.STRING, `"a"`},
{token.STRING, `"本"`},
{token.STRING, `"${file("foo")}"`},
{token.STRING, `"${file(\"foo\")}"`},
{token.STRING, `"\a"`},
{token.STRING, `"\b"`},
{token.STRING, `"\f"`},
{token.STRING, `"\n"`},
{token.STRING, `"\r"`},
{token.STRING, `"\t"`},
{token.STRING, `"\v"`},
{token.STRING, `"\""`},
{token.STRING, `"\000"`},
{token.STRING, `"\777"`},
{token.STRING, `"\x00"`},
{token.STRING, `"\xff"`},
{token.STRING, `"\u0000"`},
{token.STRING, `"\ufA16"`},
{token.STRING, `"\U00000000"`},
{token.STRING, `"\U0000ffAB"`},
{token.STRING, `"` + f100 + `"`},
},
"number": []tokenPair{
{token.NUMBER, "0"},
{token.NUMBER, "1"},
{token.NUMBER, "9"},
{token.NUMBER, "42"},
{token.NUMBER, "1234567890"},
{token.NUMBER, "00"},
{token.NUMBER, "01"},
{token.NUMBER, "07"},
{token.NUMBER, "042"},
{token.NUMBER, "01234567"},
{token.NUMBER, "0x0"},
{token.NUMBER, "0x1"},
{token.NUMBER, "0xf"},
{token.NUMBER, "0x42"},
{token.NUMBER, "0x123456789abcDEF"},
{token.NUMBER, "0x" + f100},
{token.NUMBER, "0X0"},
{token.NUMBER, "0X1"},
{token.NUMBER, "0XF"},
{token.NUMBER, "0X42"},
{token.NUMBER, "0X123456789abcDEF"},
{token.NUMBER, "0X" + f100},
{token.NUMBER, "-0"},
{token.NUMBER, "-1"},
{token.NUMBER, "-9"},
{token.NUMBER, "-42"},
{token.NUMBER, "-1234567890"},
{token.NUMBER, "-00"},
{token.NUMBER, "-01"},
{token.NUMBER, "-07"},
{token.NUMBER, "-042"},
{token.NUMBER, "-01234567"},
{token.NUMBER, "-0x0"},
{token.NUMBER, "-0x1"},
{token.NUMBER, "-0xf"},
{token.NUMBER, "-0x42"},
{token.NUMBER, "-0x123456789abcDEF"},
{token.NUMBER, "-0x" + f100},
{token.NUMBER, "-0X0"},
{token.NUMBER, "-0X1"},
{token.NUMBER, "-0XF"},
{token.NUMBER, "-0X42"},
{token.NUMBER, "-0X123456789abcDEF"},
{token.NUMBER, "-0X" + f100},
},
"float": []tokenPair{
{token.FLOAT, "0."},
{token.FLOAT, "1."},
{token.FLOAT, "42."},
{token.FLOAT, "01234567890."},
{token.FLOAT, ".0"},
{token.FLOAT, ".1"},
{token.FLOAT, ".42"},
{token.FLOAT, ".0123456789"},
{token.FLOAT, "0.0"},
{token.FLOAT, "1.0"},
{token.FLOAT, "42.0"},
{token.FLOAT, "01234567890.0"},
{token.FLOAT, "0e0"},
{token.FLOAT, "1e0"},
{token.FLOAT, "42e0"},
{token.FLOAT, "01234567890e0"},
{token.FLOAT, "0E0"},
{token.FLOAT, "1E0"},
{token.FLOAT, "42E0"},
{token.FLOAT, "01234567890E0"},
{token.FLOAT, "0e+10"},
{token.FLOAT, "1e-10"},
{token.FLOAT, "42e+10"},
{token.FLOAT, "01234567890e-10"},
{token.FLOAT, "0E+10"},
{token.FLOAT, "1E-10"},
{token.FLOAT, "42E+10"},
{token.FLOAT, "01234567890E-10"},
{token.FLOAT, "01.8e0"},
{token.FLOAT, "1.4e0"},
{token.FLOAT, "42.2e0"},
{token.FLOAT, "01234567890.12e0"},
{token.FLOAT, "0.E0"},
{token.FLOAT, "1.12E0"},
{token.FLOAT, "42.123E0"},
{token.FLOAT, "01234567890.213E0"},
{token.FLOAT, "0.2e+10"},
{token.FLOAT, "1.2e-10"},
{token.FLOAT, "42.54e+10"},
{token.FLOAT, "01234567890.98e-10"},
{token.FLOAT, "0.1E+10"},
{token.FLOAT, "1.1E-10"},
{token.FLOAT, "42.1E+10"},
{token.FLOAT, "01234567890.1E-10"},
{token.FLOAT, "-0.0"},
{token.FLOAT, "-1.0"},
{token.FLOAT, "-42.0"},
{token.FLOAT, "-01234567890.0"},
{token.FLOAT, "-0e0"},
{token.FLOAT, "-1e0"},
{token.FLOAT, "-42e0"},
{token.FLOAT, "-01234567890e0"},
{token.FLOAT, "-0E0"},
{token.FLOAT, "-1E0"},
{token.FLOAT, "-42E0"},
{token.FLOAT, "-01234567890E0"},
{token.FLOAT, "-0e+10"},
{token.FLOAT, "-1e-10"},
{token.FLOAT, "-42e+10"},
{token.FLOAT, "-01234567890e-10"},
{token.FLOAT, "-0E+10"},
{token.FLOAT, "-1E-10"},
{token.FLOAT, "-42E+10"},
{token.FLOAT, "-01234567890E-10"},
{token.FLOAT, "-01.8e0"},
{token.FLOAT, "-1.4e0"},
{token.FLOAT, "-42.2e0"},
{token.FLOAT, "-01234567890.12e0"},
{token.FLOAT, "-0.E0"},
{token.FLOAT, "-1.12E0"},
{token.FLOAT, "-42.123E0"},
{token.FLOAT, "-01234567890.213E0"},
{token.FLOAT, "-0.2e+10"},
{token.FLOAT, "-1.2e-10"},
{token.FLOAT, "-42.54e+10"},
{token.FLOAT, "-01234567890.98e-10"},
{token.FLOAT, "-0.1E+10"},
{token.FLOAT, "-1.1E-10"},
{token.FLOAT, "-42.1E+10"},
{token.FLOAT, "-01234567890.1E-10"},
},
}
var orderedTokenLists = []string{
"comment",
"operator",
"bool",
"ident",
"heredoc",
"string",
"number",
"float",
}
func TestPosition(t *testing.T) {
// create artificial source code
buf := new(bytes.Buffer)
for _, listName := range orderedTokenLists {
for _, ident := range tokenLists[listName] {
fmt.Fprintf(buf, "\t\t\t\t%s\n", ident.text)
}
}
s := New(buf.Bytes())
pos := token.Pos{"", 4, 1, 5}
s.Scan()
for _, listName := range orderedTokenLists {
for _, k := range tokenLists[listName] {
curPos := s.tokPos
// fmt.Printf("[%q] s = %+v:%+v\n", k.text, curPos.Offset, curPos.Column)
if curPos.Offset != pos.Offset {
t.Fatalf("offset = %d, want %d for %q", curPos.Offset, pos.Offset, k.text)
}
if curPos.Line != pos.Line {
t.Fatalf("line = %d, want %d for %q", curPos.Line, pos.Line, k.text)
}
if curPos.Column != pos.Column {
t.Fatalf("column = %d, want %d for %q", curPos.Column, pos.Column, k.text)
}
pos.Offset += 4 + len(k.text) + 1 // 4 tabs + token bytes + newline
pos.Line += countNewlines(k.text) + 1 // each token is on a new line
s.Scan()
}
}
// make sure there were no token-internal errors reported by scanner
if s.ErrorCount != 0 {
t.Errorf("%d errors", s.ErrorCount)
}
}
func TestComment(t *testing.T) {
testTokenList(t, tokenLists["comment"])
}
func TestOperator(t *testing.T) {
testTokenList(t, tokenLists["operator"])
}
func TestBool(t *testing.T) {
testTokenList(t, tokenLists["bool"])
}
func TestIdent(t *testing.T) {
testTokenList(t, tokenLists["ident"])
}
func TestString(t *testing.T) {
testTokenList(t, tokenLists["string"])
}
func TestNumber(t *testing.T) {
testTokenList(t, tokenLists["number"])
}
func TestFloat(t *testing.T) {
testTokenList(t, tokenLists["float"])
}
func TestWindowsLineEndings(t *testing.T) {
hcl := `// This should have Windows line endings
resource "aws_instance" "foo" {
user_data=<<HEREDOC
test script
HEREDOC
}`
hclWindowsEndings := strings.Replace(hcl, "\n", "\r\n", -1)
literals := []struct {
tokenType token.Type
literal string
}{
{token.COMMENT, "// This should have Windows line endings\r"},
{token.IDENT, `resource`},
{token.STRING, `"aws_instance"`},
{token.STRING, `"foo"`},
{token.LBRACE, `{`},
{token.IDENT, `user_data`},
{token.ASSIGN, `=`},
{token.HEREDOC, "<<HEREDOC\r\n test script\r\nHEREDOC\r\n"},
{token.RBRACE, `}`},
}
s := New([]byte(hclWindowsEndings))
for _, l := range literals {
tok := s.Scan()
if l.tokenType != tok.Type {
t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String())
}
if l.literal != tok.Text {
t.Errorf("got:\n%v\nwant:\n%v\n", []byte(tok.Text), []byte(l.literal))
}
}
}
func TestRealExample(t *testing.T) {
complexHCL := `// This comes from Terraform, as a test
variable "foo" {
default = "bar"
description = "bar"
}
provider "aws" {
access_key = "foo"
secret_key = "bar"
}
resource "aws_security_group" "firewall" {
count = 5
}
resource aws_instance "web" {
ami = "${var.foo}"
security_groups = [
"foo",
"${aws_security_group.firewall.foo}"
]
network_interface {
device_index = 0
description = <<EOF
Main interface
EOF
}
}`
literals := []struct {
tokenType token.Type
literal string
}{
{token.COMMENT, `// This comes from Terraform, as a test`},
{token.IDENT, `variable`},
{token.STRING, `"foo"`},
{token.LBRACE, `{`},
{token.IDENT, `default`},
{token.ASSIGN, `=`},
{token.STRING, `"bar"`},
{token.IDENT, `description`},
{token.ASSIGN, `=`},
{token.STRING, `"bar"`},
{token.RBRACE, `}`},
{token.IDENT, `provider`},
{token.STRING, `"aws"`},
{token.LBRACE, `{`},
{token.IDENT, `access_key`},
{token.ASSIGN, `=`},
{token.STRING, `"foo"`},
{token.IDENT, `secret_key`},
{token.ASSIGN, `=`},
{token.STRING, `"bar"`},
{token.RBRACE, `}`},
{token.IDENT, `resource`},
{token.STRING, `"aws_security_group"`},
{token.STRING, `"firewall"`},
{token.LBRACE, `{`},
{token.IDENT, `count`},
{token.ASSIGN, `=`},
{token.NUMBER, `5`},
{token.RBRACE, `}`},
{token.IDENT, `resource`},
{token.IDENT, `aws_instance`},
{token.STRING, `"web"`},
{token.LBRACE, `{`},
{token.IDENT, `ami`},
{token.ASSIGN, `=`},
{token.STRING, `"${var.foo}"`},
{token.IDENT, `security_groups`},
{token.ASSIGN, `=`},
{token.LBRACK, `[`},
{token.STRING, `"foo"`},
{token.COMMA, `,`},
{token.STRING, `"${aws_security_group.firewall.foo}"`},
{token.RBRACK, `]`},
{token.IDENT, `network_interface`},
{token.LBRACE, `{`},
{token.IDENT, `device_index`},
{token.ASSIGN, `=`},
{token.NUMBER, `0`},
{token.IDENT, `description`},
{token.ASSIGN, `=`},
{token.HEREDOC, "<<EOF\nMain interface\nEOF\n"},
{token.RBRACE, `}`},
{token.RBRACE, `}`},
{token.EOF, ``},
}
s := New([]byte(complexHCL))
for _, l := range literals {
tok := s.Scan()
if l.tokenType != tok.Type {
t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String())
}
if l.literal != tok.Text {
t.Errorf("got: %s want %s\n", tok, l.literal)
}
}
}
func TestError(t *testing.T) {
testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
testError(t, "ab\x80", "1:3", "illegal UTF-8 encoding", token.IDENT)
testError(t, "abc\xff", "1:4", "illegal UTF-8 encoding", token.IDENT)
testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", token.STRING)
testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", token.STRING)
testError(t, `01238`, "1:6", "illegal octal number", token.NUMBER)
testError(t, `01238123`, "1:9", "illegal octal number", token.NUMBER)
testError(t, `0x`, "1:3", "illegal hexadecimal number", token.NUMBER)
testError(t, `0xg`, "1:3", "illegal hexadecimal number", token.NUMBER)
testError(t, `'aa'`, "1:1", "illegal char", token.ILLEGAL)
testError(t, `"`, "1:2", "literal not terminated", token.STRING)
testError(t, `"abc`, "1:5", "literal not terminated", token.STRING)
testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING)
testError(t, `/*/`, "1:4", "comment not terminated", token.COMMENT)
}
func testError(t *testing.T, src, pos, msg string, tok token.Type) {
s := New([]byte(src))
errorCalled := false
s.Error = func(p token.Pos, m string) {
if !errorCalled {
if pos != p.String() {
t.Errorf("pos = %q, want %q for %q", p, pos, src)
}
if m != msg {
t.Errorf("msg = %q, want %q for %q", m, msg, src)
}
errorCalled = true
}
}
tk := s.Scan()
if tk.Type != tok {
t.Errorf("tok = %s, want %s for %q", tk, tok, src)
}
if !errorCalled {
t.Errorf("error handler not called for %q", src)
}
if s.ErrorCount == 0 {
t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src)
}
}
func testTokenList(t *testing.T, tokenList []tokenPair) {
// create artificial source code
buf := new(bytes.Buffer)
for _, ident := range tokenList {
fmt.Fprintf(buf, "%s\n", ident.text)
}
s := New(buf.Bytes())
for _, ident := range tokenList {
tok := s.Scan()
if tok.Type != ident.tok {
t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text)
}
if tok.Text != ident.text {
t.Errorf("text = %q want %q", tok.String(), ident.text)
}
}
}
func countNewlines(s string) int {
n := 0
for _, ch := range s {
if ch == '\n' {
n++
}
}
return n
}

View File

@ -0,0 +1,245 @@
package strconv
import (
"errors"
"unicode/utf8"
)
// ErrSyntax indicates that a value does not have the right syntax for the target type.
var ErrSyntax = errors.New("invalid syntax")
// Unquote interprets s as a double-quoted HCL string literal, returning the
// string value that s quotes. Any other quoting (single or back quotes)
// is rejected with ErrSyntax, as are literals containing a bare newline.
func Unquote(s string) (t string, err error) {
n := len(s)
if n < 2 {
return "", ErrSyntax
}
quote := s[0]
if quote != s[n-1] {
return "", ErrSyntax
}
s = s[1 : n-1]
if quote != '"' {
return "", ErrSyntax
}
if contains(s, '\n') {
return "", ErrSyntax
}
// Is it trivial? Avoid allocation.
if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
switch quote {
case '"':
return s, nil
case '\'':
r, size := utf8.DecodeRuneInString(s)
if size == len(s) && (r != utf8.RuneError || size != 1) {
return s, nil
}
}
}
var runeTmp [utf8.UTFMax]byte
buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
for len(s) > 0 {
// If we're starting a '${}' then pass it through without unquoting.
// Specifically: we don't unquote any characters within the `${}`
// section, except for escaped double quotes, which are handled below.
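// For example, `"${file(\"foo\")}"` unquotes to `${file("foo")}` (see the
// unquote tests in the following file).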
if s[0] == '$' && len(s) > 1 && s[1] == '{' {
buf = append(buf, '$', '{')
s = s[2:]
// Continue reading until we find the closing brace, copying as-is
braces := 1
for len(s) > 0 && braces > 0 {
r, size := utf8.DecodeRuneInString(s)
if r == utf8.RuneError {
return "", ErrSyntax
}
s = s[size:]
// We special case escaped double quotes in interpolations, converting
// them to straight double quotes.
if r == '\\' {
if q, _ := utf8.DecodeRuneInString(s); q == '"' {
continue
}
}
n := utf8.EncodeRune(runeTmp[:], r)
buf = append(buf, runeTmp[:n]...)
switch r {
case '{':
braces++
case '}':
braces--
}
}
if braces != 0 {
return "", ErrSyntax
}
if len(s) == 0 {
// If there's no string left, we're done!
break
} else {
// If there's more left, we need to pop back up to the top of the loop
// in case there's another interpolation in this string.
continue
}
}
c, multibyte, ss, err := unquoteChar(s, quote)
if err != nil {
return "", err
}
s = ss
if c < utf8.RuneSelf || !multibyte {
buf = append(buf, byte(c))
} else {
n := utf8.EncodeRune(runeTmp[:], c)
buf = append(buf, runeTmp[:n]...)
}
if quote == '\'' && len(s) != 0 {
// single-quoted must be single character
return "", ErrSyntax
}
}
return string(buf), nil
}
// contains reports whether the string contains the byte c.
func contains(s string, c byte) bool {
for i := 0; i < len(s); i++ {
if s[i] == c {
return true
}
}
return false
}
func unhex(b byte) (v rune, ok bool) {
c := rune(b)
switch {
case '0' <= c && c <= '9':
return c - '0', true
case 'a' <= c && c <= 'f':
return c - 'a' + 10, true
case 'A' <= c && c <= 'F':
return c - 'A' + 10, true
}
return
}
func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
// easy cases
switch c := s[0]; {
case c == quote && (quote == '\'' || quote == '"'):
err = ErrSyntax
return
case c >= utf8.RuneSelf:
r, size := utf8.DecodeRuneInString(s)
return r, true, s[size:], nil
case c != '\\':
return rune(s[0]), false, s[1:], nil
}
// hard case: c is backslash
if len(s) <= 1 {
err = ErrSyntax
return
}
c := s[1]
s = s[2:]
switch c {
case 'a':
value = '\a'
case 'b':
value = '\b'
case 'f':
value = '\f'
case 'n':
value = '\n'
case 'r':
value = '\r'
case 't':
value = '\t'
case 'v':
value = '\v'
case 'x', 'u', 'U':
n := 0
switch c {
case 'x':
n = 2
case 'u':
n = 4
case 'U':
n = 8
}
var v rune
if len(s) < n {
err = ErrSyntax
return
}
for j := 0; j < n; j++ {
x, ok := unhex(s[j])
if !ok {
err = ErrSyntax
return
}
v = v<<4 | x
}
s = s[n:]
if c == 'x' {
// single-byte string, possibly not UTF-8
value = v
break
}
if v > utf8.MaxRune {
err = ErrSyntax
return
}
value = v
multibyte = true
case '0', '1', '2', '3', '4', '5', '6', '7':
v := rune(c) - '0'
if len(s) < 2 {
err = ErrSyntax
return
}
for j := 0; j < 2; j++ { // one digit already; two more
x := rune(s[j]) - '0'
if x < 0 || x > 7 {
err = ErrSyntax
return
}
v = (v << 3) | x
}
s = s[2:]
if v > 255 {
err = ErrSyntax
return
}
value = v
case '\\':
value = '\\'
case '\'', '"':
if c != quote {
err = ErrSyntax
return
}
value = rune(c)
default:
err = ErrSyntax
return
}
tail = s
return
}
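// Illustrative sketch, not part of the upstream package: mustUnquote (a
// hypothetical helper name) shows the intended call pattern for Unquote.
func mustUnquote(s string) string {
v, err := Unquote(s)
if err != nil {
panic(err)
}
return v
}
// For example, mustUnquote(`"hello world"`) returns `hello world`, and an
// interpolated literal such as `"${file(\"foo\")}"` passes through as
// `${file("foo")}`; both cases appear in the unquote tests that follow.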

View File

@ -0,0 +1,93 @@
package strconv
import "testing"
type quoteTest struct {
in string
out string
ascii string
}
var quotetests = []quoteTest{
{"\a\b\f\r\n\t\v", `"\a\b\f\r\n\t\v"`, `"\a\b\f\r\n\t\v"`},
{"\\", `"\\"`, `"\\"`},
{"abc\xffdef", `"abc\xffdef"`, `"abc\xffdef"`},
{"\u263a", `"☺"`, `"\u263a"`},
{"\U0010ffff", `"\U0010ffff"`, `"\U0010ffff"`},
{"\x04", `"\x04"`, `"\x04"`},
}
type unQuoteTest struct {
in string
out string
}
var unquotetests = []unQuoteTest{
{`""`, ""},
{`"a"`, "a"},
{`"abc"`, "abc"},
{`"☺"`, "☺"},
{`"hello world"`, "hello world"},
{`"\xFF"`, "\xFF"},
{`"\377"`, "\377"},
{`"\u1234"`, "\u1234"},
{`"\U00010111"`, "\U00010111"},
{`"\U0001011111"`, "\U0001011111"},
{`"\a\b\f\n\r\t\v\\\""`, "\a\b\f\n\r\t\v\\\""},
{`"'"`, "'"},
{`"${file("foo")}"`, `${file("foo")}`},
{`"${file(\"foo\")}"`, `${file("foo")}`},
{`"echo ${var.region}${element(split(",",var.zones),0)}"`,
`echo ${var.region}${element(split(",",var.zones),0)}`},
}
var misquoted = []string{
``,
`"`,
`"a`,
`"'`,
`b"`,
`"\"`,
`"\9"`,
`"\19"`,
`"\129"`,
`'\'`,
`'\9'`,
`'\19'`,
`'\129'`,
`'ab'`,
`"\x1!"`,
`"\U12345678"`,
`"\z"`,
"`",
"`xxx",
"`\"",
`"\'"`,
`'\"'`,
"\"\n\"",
"\"\\n\n\"",
"'\n'",
`"${"`,
`"${foo{}"`,
}
func TestUnquote(t *testing.T) {
for _, tt := range unquotetests {
if out, err := Unquote(tt.in); err != nil || out != tt.out {
t.Errorf("Unquote(%#q) = %q, %v want %q, nil", tt.in, out, err, tt.out)
}
}
// run the quote tests too, backward
for _, tt := range quotetests {
if in, err := Unquote(tt.out); in != tt.in {
t.Errorf("Unquote(%#q) = %q, %v, want %q, nil", tt.out, in, err, tt.in)
}
}
for _, s := range misquoted {
if out, err := Unquote(s); out != "" || err != ErrSyntax {
t.Errorf("Unquote(%#q) = %q, %v want %q, %v", s, out, err, "", ErrSyntax)
}
}
}

Some files were not shown because too many files have changed in this diff