Upgrade server dependencies, manage them with govendor

This commit is contained in:
Ken-Håvard Lieng 2017-04-18 03:02:51 +02:00
parent ebee2746d6
commit 971278e7e5
1748 changed files with 196165 additions and 194500 deletions

View file

@ -6,6 +6,7 @@ fmt: generate
go fmt ./...
test: generate
go get -t ./...
go test $(TEST) $(TESTARGS)
generate:

View file

@ -29,7 +29,7 @@ and some people wanted machine-friendly languages.
JSON fits a nice balance in this, but is fairly verbose and most
importantly doesn't support comments. With YAML, we found that beginners
had a really hard time determining what the actual structure was, and
ended up guessing more than not whether to use a hyphen, colon, etc.
ended up guessing more often than not whether to use a hyphen, colon, etc.
in order to represent some configuration key.
Full programming languages such as Ruby enable complex behavior
@ -81,9 +81,20 @@ FOO
* Boolean values: `true`, `false`
* Arrays can be made by wrapping it in `[]`. Example:
`["foo", "bar", 42]`. Arrays can contain primitives
and other arrays, but cannot contain objects. Objects must
use the block syntax shown below.
`["foo", "bar", 42]`. Arrays can contain primitives,
other arrays, and objects. As an alternative, lists
of objects can be created with repeated blocks, using
this structure:
```hcl
service {
key = "value"
}
service {
key = "value"
}
```
Objects and nested objects are created using the structure shown below:
@ -92,6 +103,16 @@ variable "ami" {
description = "the AMI to use"
}
```
This would be equivalent to the following JSON:
```json
{
"variable": {
"ami": {
"description": "the AMI to use"
}
}
}
```
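For the repeated-block form shown above, here is a minimal Go sketch of how this package's `Decode` (defined later in this diff) can consume such input; the struct and field names are illustrative, not part of the diff:
```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

// Service mirrors one repeated `service { ... }` block.
type Service struct {
	Key string
}

// Config collects every `service` block into a slice.
type Config struct {
	Services []Service `hcl:"service"`
}

func main() {
	input := `
service {
  key = "value"
}
service {
  key = "value"
}
`
	var cfg Config
	if err := hcl.Decode(&cfg, input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Services:[{Key:value} {Key:value}]}
}
```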
## Thanks

19
vendor/github.com/hashicorp/hcl/appveyor.yml generated vendored Normal file
View file

@ -0,0 +1,19 @@
version: "build-{branch}-{build}"
image: Visual Studio 2015
clone_folder: c:\gopath\src\github.com\hashicorp\hcl
environment:
GOPATH: c:\gopath
init:
- git config --global core.autocrlf true
install:
- cmd: >-
echo %Path%
go version
go env
go get -t ./...
build_script:
- cmd: go test -v ./...

View file

@ -21,6 +21,17 @@ var (
nodeType reflect.Type = findNodeType()
)
// Unmarshal accepts a byte slice as input and writes the
// data to the value pointed to by v.
func Unmarshal(bs []byte, v interface{}) error {
root, err := parse(bs)
if err != nil {
return err
}
return DecodeObject(v, root)
}
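A short usage sketch for the new `Unmarshal` entry point, which mirrors `encoding/json`'s byte-slice signature on top of `DecodeObject`; the config key is illustrative:
```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	src := []byte(`listen_addr = "0.0.0.0:8080"`)

	// Unmarshal parses the bytes and decodes into out, the same way
	// Decode does for string input.
	var out map[string]interface{}
	if err := hcl.Unmarshal(src, &out); err != nil {
		panic(err)
	}
	fmt.Println(out["listen_addr"]) // 0.0.0.0:8080
}
```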
// Decode reads the given input and decodes it into the structure
// given by `out`.
func Decode(out interface{}, in string) error {
@ -80,7 +91,7 @@ func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error
return d.decodeBool(name, node, result)
case reflect.Float64:
return d.decodeFloat(name, node, result)
case reflect.Int:
case reflect.Int, reflect.Int32, reflect.Int64:
return d.decodeInt(name, node, result)
case reflect.Interface:
// When we see an interface, we make our own thing
@ -153,7 +164,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) er
return err
}
result.Set(reflect.ValueOf(int(v)))
if result.Kind() == reflect.Interface {
result.Set(reflect.ValueOf(int(v)))
} else {
result.SetInt(v)
}
return nil
case token.STRING:
v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
@ -161,7 +176,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) er
return err
}
result.Set(reflect.ValueOf(int(v)))
if result.Kind() == reflect.Interface {
result.Set(reflect.ValueOf(int(v)))
} else {
result.SetInt(v)
}
return nil
}
}
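With the `reflect.Int32`/`reflect.Int64` cases and the `SetInt` branch added above, sized integer fields now decode directly instead of only plain `int`. A minimal sketch (struct and keys are illustrative):
```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

type Limits struct {
	MaxConns int64 `hcl:"max_conns"` // sized ints now decode via SetInt
	Timeout  int32 `hcl:"timeout"`
}

func main() {
	var l Limits
	err := hcl.Decode(&l, `
max_conns = 1024
timeout   = 30
`)
	if err != nil {
		panic(err)
	}
	fmt.Println(l.MaxConns, l.Timeout) // 1024 30
}
```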
@ -326,6 +345,14 @@ func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) er
continue
}
// github.com/hashicorp/terraform/issue/5740
if len(item.Keys) == 0 {
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: map must have string keys", name),
}
}
// Get the key we're dealing with, which is the first item
keyStr := item.Keys[0].Token.Value().(string)
@ -390,7 +417,6 @@ func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value)
if result.Kind() == reflect.Interface {
result = result.Elem()
}
// Create the slice if it isn't nil
resultType := result.Type()
resultElemType := resultType.Elem()
@ -424,6 +450,12 @@ func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value)
// Decode
val := reflect.Indirect(reflect.New(resultElemType))
// if item is an object that was decoded from ambiguous JSON and
// flattened, make sure it's expanded if it needs to decode into a
// defined structure.
item := expandObject(item, val)
if err := d.decode(fieldName, item, val); err != nil {
return err
}
@ -436,6 +468,57 @@ func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value)
return nil
}
// expandObject detects if an ambiguous JSON object was flattened to a List which
// should be decoded into a struct, and expands the ast to properly decode.
func expandObject(node ast.Node, result reflect.Value) ast.Node {
item, ok := node.(*ast.ObjectItem)
if !ok {
return node
}
elemType := result.Type()
// our target type must be a struct
switch elemType.Kind() {
case reflect.Ptr:
switch elemType.Elem().Kind() {
case reflect.Struct:
//OK
default:
return node
}
case reflect.Struct:
//OK
default:
return node
}
// A list value will have a key and field name. If it had more fields,
// it wouldn't have been flattened.
if len(item.Keys) != 2 {
return node
}
keyToken := item.Keys[0].Token
item.Keys = item.Keys[1:]
// we need to un-flatten the ast enough to decode
newNode := &ast.ObjectItem{
Keys: []*ast.ObjectKey{
&ast.ObjectKey{
Token: keyToken,
},
},
Val: &ast.ObjectType{
List: &ast.ObjectList{
Items: []*ast.ObjectItem{item},
},
},
}
return newNode
}
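The ambiguity `expandObject` handles comes from the JSON front end, which can flatten a single-key inner object into one item with two keys. A hedged sketch of the Terraform-style input this targets, assuming that flattening occurs; struct names are illustrative:
```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

type Environment struct {
	Variables map[string]string
}

type Config struct {
	Environment []Environment `hcl:"environment"`
}

func main() {
	// The single-key inner object may be flattened by the JSON parser to
	// one item with two keys; expandObject un-flattens it so it can still
	// decode into the struct above.
	src := `{"environment": [{"variables": {"FOO": "bar"}}]}`

	var c Config
	if err := hcl.Decode(&c, src); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c)
}
```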
func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
switch n := node.(type) {
case *ast.LiteralType:
@ -466,6 +549,14 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
node = ot.List
}
// Handle the special case where the object itself is a literal. Previously
// the yacc parser would always ensure top-level elements were arrays. The new
// parser does not make the same guarantees, thus we need to convert any
// top-level literal elements into a list.
if _, ok := node.(*ast.LiteralType); ok && item != nil {
node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
}
list, ok := node.(*ast.ObjectList)
if !ok {
return &parser.PosError{
@ -490,6 +581,12 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
structType := structVal.Type()
for i := 0; i < structType.NumField(); i++ {
fieldType := structType.Field(i)
tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
// Ignore fields with tag name "-"
if tagParts[0] == "-" {
continue
}
if fieldType.Anonymous {
fieldKind := fieldType.Type.Kind()
@ -504,7 +601,6 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
// We have an embedded field. We "squash" the fields down
// if specified in the tag.
squash := false
tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
for _, tag := range tagParts[1:] {
if tag == "squash" {
squash = true
@ -574,6 +670,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
// match (only object with the field), then we decode it exactly.
// If it is a prefix match, then we decode the matches.
filter := list.Filter(fieldName)
prefixMatches := filter.Children()
matches := filter.Elem()
if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
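The `"-"` tag check added at the top of this hunk lets callers exclude fields from decoding, similar in spirit to `encoding/json`. A small sketch (names are illustrative):
```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

type Config struct {
	Name   string `hcl:"name"`
	Secret string `hcl:"-"` // the "-" tag tells the decoder to skip this field
}

func main() {
	var c Config
	err := hcl.Decode(&c, `
name   = "demo"
secret = "ignored"
`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // Secret stays empty
}
```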

View file

@ -1,669 +0,0 @@
package hcl
import (
"io/ioutil"
"path/filepath"
"reflect"
"testing"
"github.com/hashicorp/hcl/hcl/ast"
)
func TestDecode_interface(t *testing.T) {
cases := []struct {
File string
Err bool
Out interface{}
}{
{
"basic.hcl",
false,
map[string]interface{}{
"foo": "bar",
"bar": "${file(\"bing/bong.txt\")}",
},
},
{
"basic_squish.hcl",
false,
map[string]interface{}{
"foo": "bar",
"bar": "${file(\"bing/bong.txt\")}",
"foo-bar": "baz",
},
},
{
"empty.hcl",
false,
map[string]interface{}{
"resource": []map[string]interface{}{
map[string]interface{}{
"foo": []map[string]interface{}{
map[string]interface{}{},
},
},
},
},
},
{
"tfvars.hcl",
false,
map[string]interface{}{
"regularvar": "Should work",
"map.key1": "Value",
"map.key2": "Other value",
},
},
{
"escape.hcl",
false,
map[string]interface{}{
"foo": "bar\"baz\\n",
},
},
{
"interpolate_escape.hcl",
false,
map[string]interface{}{
"foo": "${file(\"bing/bong.txt\")}",
},
},
{
"float.hcl",
false,
map[string]interface{}{
"a": 1.02,
},
},
{
"multiline_bad.hcl",
true,
nil,
},
{
"multiline_no_marker.hcl",
true,
nil,
},
{
"multiline.hcl",
false,
map[string]interface{}{"foo": "bar\nbaz\n"},
},
{
"multiline_no_eof.hcl",
false,
map[string]interface{}{"foo": "bar\nbaz\n", "key": "value"},
},
{
"multiline.json",
false,
map[string]interface{}{"foo": "bar\nbaz"},
},
{
"scientific.json",
false,
map[string]interface{}{
"a": 1e-10,
"b": 1e+10,
"c": 1e10,
"d": 1.2e-10,
"e": 1.2e+10,
"f": 1.2e10,
},
},
{
"scientific.hcl",
false,
map[string]interface{}{
"a": 1e-10,
"b": 1e+10,
"c": 1e10,
"d": 1.2e-10,
"e": 1.2e+10,
"f": 1.2e10,
},
},
{
"terraform_heroku.hcl",
false,
map[string]interface{}{
"name": "terraform-test-app",
"config_vars": []map[string]interface{}{
map[string]interface{}{
"FOO": "bar",
},
},
},
},
{
"structure_multi.hcl",
false,
map[string]interface{}{
"foo": []map[string]interface{}{
map[string]interface{}{
"baz": []map[string]interface{}{
map[string]interface{}{"key": 7},
},
},
map[string]interface{}{
"bar": []map[string]interface{}{
map[string]interface{}{"key": 12},
},
},
},
},
},
{
"structure_multi.json",
false,
map[string]interface{}{
"foo": []map[string]interface{}{
map[string]interface{}{
"baz": []map[string]interface{}{
map[string]interface{}{"key": 7},
},
},
map[string]interface{}{
"bar": []map[string]interface{}{
map[string]interface{}{"key": 12},
},
},
},
},
},
{
"structure_list.hcl",
false,
map[string]interface{}{
"foo": []map[string]interface{}{
map[string]interface{}{
"key": 7,
},
map[string]interface{}{
"key": 12,
},
},
},
},
{
"structure_list.json",
false,
map[string]interface{}{
"foo": []map[string]interface{}{
map[string]interface{}{
"key": 7,
},
map[string]interface{}{
"key": 12,
},
},
},
},
{
"structure_list_deep.json",
false,
map[string]interface{}{
"bar": []map[string]interface{}{
map[string]interface{}{
"foo": []map[string]interface{}{
map[string]interface{}{
"name": "terraform_example",
"ingress": []map[string]interface{}{
map[string]interface{}{
"from_port": 22,
},
map[string]interface{}{
"from_port": 80,
},
},
},
},
},
},
},
},
{
"nested_block_comment.hcl",
false,
map[string]interface{}{
"bar": "value",
},
},
{
"unterminated_block_comment.hcl",
true,
nil,
},
{
"unterminated_brace.hcl",
true,
nil,
},
{
"object_list.json",
false,
map[string]interface{}{
"resource": []map[string]interface{}{
map[string]interface{}{
"aws_instance": []map[string]interface{}{
map[string]interface{}{
"db": []map[string]interface{}{
map[string]interface{}{
"vpc": "foo",
"provisioner": []map[string]interface{}{
map[string]interface{}{
"file": []map[string]interface{}{
map[string]interface{}{
"source": "foo",
"destination": "bar",
},
},
},
},
},
},
},
},
},
},
},
},
}
for _, tc := range cases {
t.Logf("Testing: %s", tc.File)
d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File))
if err != nil {
t.Fatalf("err: %s", err)
}
var out interface{}
err = Decode(&out, string(d))
if (err != nil) != tc.Err {
t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
}
if !reflect.DeepEqual(out, tc.Out) {
t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
}
}
}
func TestDecode_equal(t *testing.T) {
cases := []struct {
One, Two string
}{
{
"basic.hcl",
"basic.json",
},
{
"float.hcl",
"float.json",
},
/*
{
"structure.hcl",
"structure.json",
},
*/
{
"structure.hcl",
"structure_flat.json",
},
{
"terraform_heroku.hcl",
"terraform_heroku.json",
},
}
for _, tc := range cases {
p1 := filepath.Join(fixtureDir, tc.One)
p2 := filepath.Join(fixtureDir, tc.Two)
d1, err := ioutil.ReadFile(p1)
if err != nil {
t.Fatalf("err: %s", err)
}
d2, err := ioutil.ReadFile(p2)
if err != nil {
t.Fatalf("err: %s", err)
}
var i1, i2 interface{}
err = Decode(&i1, string(d1))
if err != nil {
t.Fatalf("err: %s", err)
}
err = Decode(&i2, string(d2))
if err != nil {
t.Fatalf("err: %s", err)
}
if !reflect.DeepEqual(i1, i2) {
t.Fatalf(
"%s != %s\n\n%#v\n\n%#v",
tc.One, tc.Two,
i1, i2)
}
}
}
func TestDecode_flatMap(t *testing.T) {
var val map[string]map[string]string
err := Decode(&val, testReadFile(t, "structure_flatmap.hcl"))
if err != nil {
t.Fatalf("err: %s", err)
}
expected := map[string]map[string]string{
"foo": map[string]string{
"foo": "bar",
"key": "7",
},
}
if !reflect.DeepEqual(val, expected) {
t.Fatalf("Actual: %#v\n\nExpected: %#v", val, expected)
}
}
func TestDecode_structure(t *testing.T) {
type V struct {
Key int
Foo string
}
var actual V
err := Decode(&actual, testReadFile(t, "flat.hcl"))
if err != nil {
t.Fatalf("err: %s", err)
}
expected := V{
Key: 7,
Foo: "bar",
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected)
}
}
func TestDecode_structurePtr(t *testing.T) {
type V struct {
Key int
Foo string
}
var actual *V
err := Decode(&actual, testReadFile(t, "flat.hcl"))
if err != nil {
t.Fatalf("err: %s", err)
}
expected := &V{
Key: 7,
Foo: "bar",
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected)
}
}
func TestDecode_structureArray(t *testing.T) {
// This test is extracted from a failure in Consul (consul.io),
// hence the interesting structure naming.
type KeyPolicyType string
type KeyPolicy struct {
Prefix string `hcl:",key"`
Policy KeyPolicyType
}
type Policy struct {
Keys []KeyPolicy `hcl:"key,expand"`
}
expected := Policy{
Keys: []KeyPolicy{
KeyPolicy{
Prefix: "",
Policy: "read",
},
KeyPolicy{
Prefix: "foo/",
Policy: "write",
},
KeyPolicy{
Prefix: "foo/bar/",
Policy: "read",
},
KeyPolicy{
Prefix: "foo/bar/baz",
Policy: "deny",
},
},
}
files := []string{
"decode_policy.hcl",
"decode_policy.json",
}
for _, f := range files {
var actual Policy
err := Decode(&actual, testReadFile(t, f))
if err != nil {
t.Fatalf("Input: %s\n\nerr: %s", f, err)
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
}
}
}
func TestDecode_sliceExpand(t *testing.T) {
type testInner struct {
Name string `hcl:",key"`
Key string
}
type testStruct struct {
Services []testInner `hcl:"service,expand"`
}
expected := testStruct{
Services: []testInner{
testInner{
Name: "my-service-0",
Key: "value",
},
testInner{
Name: "my-service-1",
Key: "value",
},
},
}
files := []string{
"slice_expand.hcl",
}
for _, f := range files {
t.Logf("Testing: %s", f)
var actual testStruct
err := Decode(&actual, testReadFile(t, f))
if err != nil {
t.Fatalf("Input: %s\n\nerr: %s", f, err)
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
}
}
}
func TestDecode_structureMap(t *testing.T) {
// This test is extracted from a failure in Terraform (terraform.io),
// hence the interesting structure naming.
type hclVariable struct {
Default interface{}
Description string
Fields []string `hcl:",decodedFields"`
}
type rawConfig struct {
Variable map[string]hclVariable
}
expected := rawConfig{
Variable: map[string]hclVariable{
"foo": hclVariable{
Default: "bar",
Description: "bar",
Fields: []string{"Default", "Description"},
},
"amis": hclVariable{
Default: []map[string]interface{}{
map[string]interface{}{
"east": "foo",
},
},
Fields: []string{"Default"},
},
},
}
files := []string{
"decode_tf_variable.hcl",
"decode_tf_variable.json",
}
for _, f := range files {
t.Logf("Testing: %s", f)
var actual rawConfig
err := Decode(&actual, testReadFile(t, f))
if err != nil {
t.Fatalf("Input: %s\n\nerr: %s", f, err)
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
}
}
}
func TestDecode_interfaceNonPointer(t *testing.T) {
var value interface{}
err := Decode(value, testReadFile(t, "basic_int_string.hcl"))
if err == nil {
t.Fatal("should error")
}
}
func TestDecode_intString(t *testing.T) {
var value struct {
Count int
}
err := Decode(&value, testReadFile(t, "basic_int_string.hcl"))
if err != nil {
t.Fatalf("err: %s", err)
}
if value.Count != 3 {
t.Fatalf("bad: %#v", value.Count)
}
}
func TestDecode_Node(t *testing.T) {
// given
var value struct {
Content ast.Node
Nested struct {
Content ast.Node
}
}
content := `
content {
hello = "world"
}
`
// when
err := Decode(&value, content)
// then
if err != nil {
t.Errorf("unable to decode content, %v", err)
return
}
// verify ast.Node can be decoded later
var v map[string]interface{}
err = DecodeObject(&v, value.Content)
if err != nil {
t.Errorf("unable to decode content, %v", err)
return
}
if v["hello"] != "world" {
t.Errorf("expected mapping to be returned")
}
}
func TestDecode_NestedNode(t *testing.T) {
// given
var value struct {
Nested struct {
Content ast.Node
}
}
content := `
nested "content" {
hello = "world"
}
`
// when
err := Decode(&value, content)
// then
if err != nil {
t.Errorf("unable to decode content, %v", err)
return
}
// verify ast.Node can be decoded later
var v map[string]interface{}
err = DecodeObject(&v, value.Nested.Content)
if err != nil {
t.Errorf("unable to decode content, %v", err)
return
}
if v["hello"] != "world" {
t.Errorf("expected mapping to be returned")
}
}

View file

@ -133,6 +133,12 @@ type ObjectItem struct {
}
func (o *ObjectItem) Pos() token.Pos {
// I'm not entirely sure what causes this, but removing this causes
// a test failure. We should investigate at some point.
if len(o.Keys) == 0 {
return token.Pos{}
}
return o.Keys[0].Pos()
}
@ -150,7 +156,8 @@ func (o *ObjectKey) Pos() token.Pos {
type LiteralType struct {
Token token.Token
// associated line comment, only when used in a list
// comment types, only used when in a list
LeadComment *CommentGroup
LineComment *CommentGroup
}
@ -208,4 +215,5 @@ func (c *CommentGroup) Pos() token.Pos {
// GoStringer
//-------------------------------------------------------------------
func (o *ObjectKey) GoString() string  { return fmt.Sprintf("*%#v", *o) }
func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }

View file

@ -1,200 +0,0 @@
package ast
import (
"reflect"
"strings"
"testing"
"github.com/hashicorp/hcl/hcl/token"
)
func TestObjectListFilter(t *testing.T) {
var cases = []struct {
Filter []string
Input []*ObjectItem
Output []*ObjectItem
}{
{
[]string{"foo"},
[]*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{
Token: token.Token{Type: token.STRING, Text: `"foo"`},
},
},
},
},
[]*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{},
},
},
},
{
[]string{"foo"},
[]*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
},
},
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
},
},
},
[]*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
},
},
},
},
}
for _, tc := range cases {
input := &ObjectList{Items: tc.Input}
expected := &ObjectList{Items: tc.Output}
if actual := input.Filter(tc.Filter...); !reflect.DeepEqual(actual, expected) {
t.Fatalf("in order: input, expected, actual\n\n%#v\n\n%#v\n\n%#v", input, expected, actual)
}
}
}
func TestWalk(t *testing.T) {
items := []*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
},
Val: &LiteralType{Token: token.Token{Type: token.STRING, Text: `"example"`}},
},
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
},
},
}
node := &ObjectList{Items: items}
order := []string{
"*ast.ObjectList",
"*ast.ObjectItem",
"*ast.ObjectKey",
"*ast.ObjectKey",
"*ast.LiteralType",
"*ast.ObjectItem",
"*ast.ObjectKey",
}
count := 0
Walk(node, func(n Node) (Node, bool) {
if n == nil {
return n, false
}
typeName := reflect.TypeOf(n).String()
if order[count] != typeName {
t.Errorf("expected '%s' got: '%s'", order[count], typeName)
}
count++
return n, true
})
}
func TestWalkEquality(t *testing.T) {
items := []*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
},
},
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
},
},
}
node := &ObjectList{Items: items}
rewritten := Walk(node, func(n Node) (Node, bool) { return n, true })
newNode, ok := rewritten.(*ObjectList)
if !ok {
t.Fatalf("expected Objectlist, got %T", rewritten)
}
if !reflect.DeepEqual(node, newNode) {
t.Fatal("rewritten node is not equal to the given node")
}
if len(newNode.Items) != 2 {
t.Error("expected newNode length 2, got: %d", len(newNode.Items))
}
expected := []string{
`"foo"`,
`"bar"`,
}
for i, item := range newNode.Items {
if len(item.Keys) != 1 {
t.Error("expected keys newNode length 1, got: %d", len(item.Keys))
}
if item.Keys[0].Token.Text != expected[i] {
t.Errorf("expected key %s, got %s", expected[i], item.Keys[0].Token.Text)
}
if item.Val != nil {
t.Errorf("expected item value should be nil")
}
}
}
func TestWalkRewrite(t *testing.T) {
items := []*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
},
},
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
},
},
}
node := &ObjectList{Items: items}
suffix := "_example"
node = Walk(node, func(n Node) (Node, bool) {
switch i := n.(type) {
case *ObjectKey:
i.Token.Text = i.Token.Text + suffix
n = i
}
return n, true
}).(*ObjectList)
Walk(node, func(n Node) (Node, bool) {
switch i := n.(type) {
case *ObjectKey:
if !strings.HasSuffix(i.Token.Text, suffix) {
t.Errorf("Token '%s' should have suffix: %s", i.Token.Text, suffix)
}
}
return n, true
})
}

View file

@ -1,164 +0,0 @@
// Derivative work from:
// - https://golang.org/src/cmd/gofmt/gofmt.go
// - https://github.com/fatih/hclfmt
package fmtcmd
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/hashicorp/hcl/hcl/printer"
)
var (
ErrWriteStdin = errors.New("cannot use write option with standard input")
)
type Options struct {
List bool // list files whose formatting differs
Write bool // write result to (source) file instead of stdout
Diff bool // display diffs instead of rewriting files
}
func isValidFile(f os.FileInfo, extensions []string) bool {
if !f.IsDir() && !strings.HasPrefix(f.Name(), ".") {
for _, ext := range extensions {
if strings.HasSuffix(f.Name(), "."+ext) {
return true
}
}
}
return false
}
// If in == nil, the source is the contents of the file with the given filename.
func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts Options) error {
if in == nil {
f, err := os.Open(filename)
if err != nil {
return err
}
defer f.Close()
in = f
}
src, err := ioutil.ReadAll(in)
if err != nil {
return err
}
res, err := printer.Format(src)
if err != nil {
return err
}
// Files should end with newlines
res = append(res, []byte("\n")...)
if !bytes.Equal(src, res) {
// formatting has changed
if opts.List {
fmt.Fprintln(out, filename)
}
if opts.Write {
err = ioutil.WriteFile(filename, res, 0644)
if err != nil {
return err
}
}
if opts.Diff {
data, err := diff(src, res)
if err != nil {
return fmt.Errorf("computing diff: %s", err)
}
fmt.Fprintf(out, "diff a/%s b/%s\n", filename, filename)
out.Write(data)
}
}
if !opts.List && !opts.Write && !opts.Diff {
_, err = out.Write(res)
}
return err
}
func walkDir(path string, extensions []string, stdout io.Writer, opts Options) error {
visitFile := func(path string, f os.FileInfo, err error) error {
if err == nil && isValidFile(f, extensions) {
err = processFile(path, nil, stdout, false, opts)
}
return err
}
return filepath.Walk(path, visitFile)
}
func Run(
paths, extensions []string,
stdin io.Reader,
stdout io.Writer,
opts Options,
) error {
if len(paths) == 0 {
if opts.Write {
return ErrWriteStdin
}
if err := processFile("<standard input>", stdin, stdout, true, opts); err != nil {
return err
}
return nil
}
for _, path := range paths {
switch dir, err := os.Stat(path); {
case err != nil:
return err
case dir.IsDir():
if err := walkDir(path, extensions, stdout, opts); err != nil {
return err
}
default:
if err := processFile(path, nil, stdout, false, opts); err != nil {
return err
}
}
}
return nil
}
func diff(b1, b2 []byte) (data []byte, err error) {
f1, err := ioutil.TempFile("", "")
if err != nil {
return
}
defer os.Remove(f1.Name())
defer f1.Close()
f2, err := ioutil.TempFile("", "")
if err != nil {
return
}
defer os.Remove(f2.Name())
defer f2.Close()
f1.Write(b1)
f2.Write(b2)
data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
if len(data) > 0 {
// diff exits with a non-zero status when the files don't match.
// Ignore that failure as long as we get output.
err = nil
}
return
}

View file

@ -1,431 +0,0 @@
package fmtcmd
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"regexp"
"sort"
"syscall"
"testing"
)
var fixtureExtensions = []string{"hcl"}
func init() {
sort.Sort(ByFilename(fixtures))
}
func TestIsValidFile(t *testing.T) {
const fixtureDir = "./test-fixtures"
cases := []struct {
Path string
Expected bool
}{
{"good.hcl", true},
{".hidden.ignore", false},
{"file.ignore", false},
{"dir.ignore", false},
}
for _, tc := range cases {
file, err := os.Stat(filepath.Join(fixtureDir, tc.Path))
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if res := isValidFile(file, fixtureExtensions); res != tc.Expected {
t.Errorf("want: %b, got: %b", tc.Expected, res)
}
}
}
func TestRunMultiplePaths(t *testing.T) {
path1, err := renderFixtures("")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(path1)
path2, err := renderFixtures("")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(path2)
var expectedOut bytes.Buffer
for _, path := range []string{path1, path2} {
for _, fixture := range fixtures {
if !bytes.Equal(fixture.golden, fixture.input) {
expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n")
}
}
}
_, stdout := mockIO()
err = Run(
[]string{path1, path2},
fixtureExtensions,
nil, stdout,
Options{
List: true,
},
)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if stdout.String() != expectedOut.String() {
t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
}
}
func TestRunSubDirectories(t *testing.T) {
pathParent, err := ioutil.TempDir("", "")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(pathParent)
path1, err := renderFixtures(pathParent)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
path2, err := renderFixtures(pathParent)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
paths := []string{path1, path2}
sort.Strings(paths)
var expectedOut bytes.Buffer
for _, path := range paths {
for _, fixture := range fixtures {
if !bytes.Equal(fixture.golden, fixture.input) {
expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n")
}
}
}
_, stdout := mockIO()
err = Run(
[]string{pathParent},
fixtureExtensions,
nil, stdout,
Options{
List: true,
},
)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if stdout.String() != expectedOut.String() {
t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
}
}
func TestRunStdin(t *testing.T) {
var expectedOut bytes.Buffer
for i, fixture := range fixtures {
if i != 0 {
expectedOut.WriteString("\n")
}
expectedOut.Write(fixture.golden)
}
stdin, stdout := mockIO()
for _, fixture := range fixtures {
stdin.Write(fixture.input)
}
err := Run(
[]string{},
fixtureExtensions,
stdin, stdout,
Options{},
)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if !bytes.Equal(stdout.Bytes(), expectedOut.Bytes()) {
t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
}
}
func TestRunStdinAndWrite(t *testing.T) {
var expectedOut = []byte{}
stdin, stdout := mockIO()
stdin.WriteString("")
err := Run(
[]string{}, []string{},
stdin, stdout,
Options{
Write: true,
},
)
if err != ErrWriteStdin {
t.Errorf("error want:\n%s\ngot:\n%s", ErrWriteStdin, err)
}
if !bytes.Equal(stdout.Bytes(), expectedOut) {
t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
}
}
func TestRunFileError(t *testing.T) {
path, err := ioutil.TempDir("", "")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(path)
filename := filepath.Join(path, "unreadable.hcl")
var expectedError = &os.PathError{
Op: "open",
Path: filename,
Err: syscall.EACCES,
}
err = ioutil.WriteFile(filename, []byte{}, 0000)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
_, stdout := mockIO()
err = Run(
[]string{path},
fixtureExtensions,
nil, stdout,
Options{},
)
if !reflect.DeepEqual(err, expectedError) {
t.Errorf("error want: %#v, got: %#v", expectedError, err)
}
}
func TestRunNoOptions(t *testing.T) {
path, err := renderFixtures("")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(path)
var expectedOut bytes.Buffer
for _, fixture := range fixtures {
expectedOut.Write(fixture.golden)
}
_, stdout := mockIO()
err = Run(
[]string{path},
fixtureExtensions,
nil, stdout,
Options{},
)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if stdout.String() != expectedOut.String() {
t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
}
}
func TestRunList(t *testing.T) {
path, err := renderFixtures("")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(path)
var expectedOut bytes.Buffer
for _, fixture := range fixtures {
if !bytes.Equal(fixture.golden, fixture.input) {
expectedOut.WriteString(fmt.Sprintln(filepath.Join(path, fixture.filename)))
}
}
_, stdout := mockIO()
err = Run(
[]string{path},
fixtureExtensions,
nil, stdout,
Options{
List: true,
},
)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if stdout.String() != expectedOut.String() {
t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
}
}
func TestRunWrite(t *testing.T) {
path, err := renderFixtures("")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(path)
_, stdout := mockIO()
err = Run(
[]string{path},
fixtureExtensions,
nil, stdout,
Options{
Write: true,
},
)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
for _, fixture := range fixtures {
res, err := ioutil.ReadFile(filepath.Join(path, fixture.filename))
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if !bytes.Equal(res, fixture.golden) {
t.Errorf("file %q contents want:\n%s\ngot:\n%s", fixture.filename, fixture.golden, res)
}
}
}
func TestRunDiff(t *testing.T) {
path, err := renderFixtures("")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(path)
var expectedOut bytes.Buffer
for _, fixture := range fixtures {
if len(fixture.diff) > 0 {
expectedOut.WriteString(
regexp.QuoteMeta(
fmt.Sprintf("diff a/%s/%s b/%s/%s\n", path, fixture.filename, path, fixture.filename),
),
)
// Need to use regex to ignore datetimes in diff.
expectedOut.WriteString(`--- .+?\n`)
expectedOut.WriteString(`\+\+\+ .+?\n`)
expectedOut.WriteString(regexp.QuoteMeta(string(fixture.diff)))
}
}
_, stdout := mockIO()
err = Run(
[]string{path},
fixtureExtensions,
nil, stdout,
Options{
Diff: true,
},
)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if !regexp.MustCompile(expectedOut.String()).Match(stdout.Bytes()) {
t.Errorf("stdout want match:\n%s\ngot:\n%q", expectedOut, stdout)
}
}
func mockIO() (stdin, stdout *bytes.Buffer) {
return new(bytes.Buffer), new(bytes.Buffer)
}
type fixture struct {
filename string
input, golden, diff []byte
}
type ByFilename []fixture
func (s ByFilename) Len() int { return len(s) }
func (s ByFilename) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ByFilename) Less(i, j int) bool { return len(s[i].filename) > len(s[j].filename) }
var fixtures = []fixture{
{
"noop.hcl",
[]byte(`resource "aws_security_group" "firewall" {
count = 5
}
`),
[]byte(`resource "aws_security_group" "firewall" {
count = 5
}
`),
[]byte(``),
}, {
"align_equals.hcl",
[]byte(`variable "foo" {
default = "bar"
description = "bar"
}
`),
[]byte(`variable "foo" {
default = "bar"
description = "bar"
}
`),
[]byte(`@@ -1,4 +1,4 @@
variable "foo" {
- default = "bar"
+ default = "bar"
description = "bar"
}
`),
}, {
"indentation.hcl",
[]byte(`provider "aws" {
access_key = "foo"
secret_key = "bar"
}
`),
[]byte(`provider "aws" {
access_key = "foo"
secret_key = "bar"
}
`),
[]byte(`@@ -1,4 +1,4 @@
provider "aws" {
- access_key = "foo"
- secret_key = "bar"
+ access_key = "foo"
+ secret_key = "bar"
}
`),
},
}
// parent can be an empty string, in which case the system's default
// temporary directory will be used.
func renderFixtures(parent string) (path string, err error) {
path, err = ioutil.TempDir(parent, "")
if err != nil {
return "", err
}
for _, fixture := range fixtures {
err = ioutil.WriteFile(filepath.Join(path, fixture.filename), []byte(fixture.input), 0644)
if err != nil {
os.RemoveAll(path)
return "", err
}
}
return path, nil
}

View file

@ -1 +0,0 @@
invalid

View file

@ -1,9 +0,0 @@
package parser
import (
"testing"
)
func TestPosError_impl(t *testing.T) {
var _ error = new(PosError)
}

View file

@ -5,6 +5,7 @@ package parser
import (
"errors"
"fmt"
"strings"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/scanner"
@ -49,7 +50,7 @@ func (p *Parser) Parse() (*ast.File, error) {
scerr = &PosError{Pos: pos, Err: errors.New(msg)}
}
f.Node, err = p.objectList()
f.Node, err = p.objectList(false)
if scerr != nil {
return nil, scerr
}
@ -61,11 +62,23 @@ func (p *Parser) Parse() (*ast.File, error) {
return f, nil
}
func (p *Parser) objectList() (*ast.ObjectList, error) {
// objectList parses a list of items within an object (generally k/v pairs).
// The parameter "obj" tells us whether we are within an object (braces:
// '{', '}') or just at the top level. If we're within an object, we end
// at an RBRACE.
func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
defer un(trace(p, "ParseObjectList"))
node := &ast.ObjectList{}
for {
if obj {
tok := p.scan()
p.unscan()
if tok.Type == token.RBRACE {
break
}
}
n, err := p.objectItem()
if err == errEofToken {
break // we are finished
@ -78,6 +91,13 @@ func (p *Parser) objectList() (*ast.ObjectList, error) {
}
node.Add(n)
// object lists can be optionally comma-delimited, e.g. when a list of maps
// is being expressed, so a comma is allowed here - it's simply consumed
tok := p.scan()
if tok.Type != token.COMMA {
p.unscan()
}
}
return node, nil
}
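The comma consumption above is what lets an inline "list of maps" parse, since the k/v pairs inside each object may now be comma-delimited. A parse-only sketch (input is illustrative):
```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// Commas between the key/value pairs inside each object are
	// consumed instead of rejected.
	src := []byte(`foo = [
  {
    bar = "one", baz = "two"
  },
]`)

	if _, err := parser.Parse(src); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("parsed OK")
}
```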
@ -122,6 +142,24 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
defer un(trace(p, "ParseObjectItem"))
keys, err := p.objectKey()
if len(keys) > 0 && err == errEofToken {
// We ignore eof token here since it is an error if we didn't
// receive a value (but we did receive a key) for the item.
err = nil
}
if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
// This is a strange boolean statement, but what it means is:
// We have keys with no value, and we're likely in an object
// (since RBrace ends an object). For this, we set err to nil so
// we continue and get the error below of having the wrong value
// type.
err = nil
// Reset the token type so we don't think it completed fine. See
// objectType which uses p.tok.Type to check if we're done with
// the object.
p.tok.Type = token.EOF
}
if err != nil {
return nil, err
}
@ -147,6 +185,15 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
if err != nil {
return nil, err
}
default:
keyStr := make([]string, 0, len(keys))
for _, k := range keys {
keyStr = append(keyStr, k.Token.Text)
}
return nil, fmt.Errorf(
"key '%s' expected start of object ('{') or assignment ('=')",
strings.Join(keyStr, " "))
}
// do a look-ahead for line comment
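The new default case above turns a key with no value into a descriptive error instead of a silent misparse. A sketch of how it surfaces through `Parse`:
```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// A bare key with neither '{' nor '=' after it now errors clearly.
	_, err := parser.Parse([]byte(`foo`))
	fmt.Println(err)
	// key 'foo' expected start of object ('{') or assignment ('=')
}
```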
@ -168,7 +215,11 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
tok := p.scan()
switch tok.Type {
case token.EOF:
return nil, errEofToken
// It is very important to also return the keys here as well as
// the error. This is because we need to be able to tell if we
// did parse keys prior to finding the EOF, or if we just found
// a bare EOF.
return keys, errEofToken
case token.ASSIGN:
// assignment or object only, but not nested objects. this is not
// allowed: `foo bar = {}`
@ -188,15 +239,29 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
return keys, nil
case token.LBRACE:
var err error
// If we have no keys, then it is a syntax error, i.e. {{}} is not
// allowed.
if len(keys) == 0 {
err = &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
}
}
// object
return keys, nil
return keys, err
case token.IDENT, token.STRING:
keyCount++
keys = append(keys, &ast.ObjectKey{Token: p.tok})
case token.ILLEGAL:
fmt.Println("illegal")
return keys, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("illegal character"),
}
default:
return nil, &PosError{
return keys, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
}
@ -238,7 +303,7 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
Lbrace: p.tok.Pos,
}
l, err := p.objectList()
l, err := p.objectList(true)
// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
// not a RBRACE, it's a syntax error and we just return it.
@ -246,9 +311,9 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
return nil, err
}
// If there is no error, we should be at a RBRACE to end the object
if p.tok.Type != token.RBRACE {
return nil, fmt.Errorf("object expected closing RBRACE got: %s", p.tok.Type)
// No error, scan and expect the ending to be a brace
if tok := p.scan(); tok.Type != token.RBRACE {
return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type)
}
o.List = l
@ -268,27 +333,38 @@ func (p *Parser) listType() (*ast.ListType, error) {
needComma := false
for {
tok := p.scan()
switch tok.Type {
case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
if needComma {
if needComma {
switch tok.Type {
case token.COMMA, token.RBRACK:
default:
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("unexpected token: %s. Expecting %s", tok.Type, token.COMMA),
Err: fmt.Errorf(
"error parsing list, expected comma or list end, got: %s",
tok.Type),
}
}
}
switch tok.Type {
case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
node, err := p.literalType()
if err != nil {
return nil, err
}
// If there is a lead comment, apply it
if p.leadComment != nil {
node.LeadComment = p.leadComment
p.leadComment = nil
}
l.Add(node)
needComma = true
case token.COMMA:
// get next list item or we are at the end
// do a look-ahead for line comment
p.scan()
if p.lineComment != nil {
if p.lineComment != nil && len(l.List) > 0 {
lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
if ok {
lit.LineComment = p.lineComment
@ -300,12 +376,28 @@ func (p *Parser) listType() (*ast.ListType, error) {
needComma = false
continue
case token.BOOL:
// TODO(arslan) should we support? not supported by HCL yet
case token.LBRACE:
// Looks like a nested object, so parse it out
node, err := p.objectType()
if err != nil {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf(
"error while trying to parse object within list: %s", err),
}
}
l.Add(node)
needComma = true
case token.LBRACK:
// TODO(arslan) should we support nested lists? Even though it's
// written in README of HCL, it's not a part of the grammar
// (not defined in parse.y)
node, err := p.listType()
if err != nil {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf(
"error while trying to parse list within list: %s", err),
}
}
l.Add(node)
case token.RBRACK:
// finished
l.Rbrack = p.tok.Pos

View file

@ -1,330 +0,0 @@
package parser
import (
"fmt"
"io/ioutil"
"path/filepath"
"reflect"
"runtime"
"testing"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/token"
)
func TestType(t *testing.T) {
var literals = []struct {
typ token.Type
src string
}{
{token.STRING, `foo = "foo"`},
{token.NUMBER, `foo = 123`},
{token.NUMBER, `foo = -29`},
{token.FLOAT, `foo = 123.12`},
{token.FLOAT, `foo = -123.12`},
{token.BOOL, `foo = true`},
{token.HEREDOC, "foo = <<EOF\nHello\nWorld\nEOF"},
}
for _, l := range literals {
p := newParser([]byte(l.src))
item, err := p.objectItem()
if err != nil {
t.Error(err)
}
lit, ok := item.Val.(*ast.LiteralType)
if !ok {
t.Errorf("node should be of type LiteralType, got: %T", item.Val)
}
if lit.Token.Type != l.typ {
t.Errorf("want: %s, got: %s", l.typ, lit.Token.Type)
}
}
}
func TestListType(t *testing.T) {
var literals = []struct {
src string
tokens []token.Type
}{
{
`foo = ["123", 123]`,
[]token.Type{token.STRING, token.NUMBER},
},
{
`foo = [123, "123",]`,
[]token.Type{token.NUMBER, token.STRING},
},
{
`foo = []`,
[]token.Type{},
},
{
`foo = ["123", 123]`,
[]token.Type{token.STRING, token.NUMBER},
},
{
`foo = [1,
"string",
<<EOF
heredoc contents
EOF
]`,
[]token.Type{token.NUMBER, token.STRING, token.HEREDOC},
},
}
for _, l := range literals {
p := newParser([]byte(l.src))
item, err := p.objectItem()
if err != nil {
t.Error(err)
}
list, ok := item.Val.(*ast.ListType)
if !ok {
t.Errorf("node should be of type LiteralType, got: %T", item.Val)
}
tokens := []token.Type{}
for _, li := range list.List {
if tp, ok := li.(*ast.LiteralType); ok {
tokens = append(tokens, tp.Token.Type)
}
}
equals(t, l.tokens, tokens)
}
}
func TestObjectType(t *testing.T) {
var literals = []struct {
src string
nodeType []ast.Node
itemLen int
}{
{
`foo = {}`,
nil,
0,
},
{
`foo = {
bar = "fatih"
}`,
[]ast.Node{&ast.LiteralType{}},
1,
},
{
`foo = {
bar = "fatih"
baz = ["arslan"]
}`,
[]ast.Node{
&ast.LiteralType{},
&ast.ListType{},
},
2,
},
{
`foo = {
bar {}
}`,
[]ast.Node{
&ast.ObjectType{},
},
1,
},
{
`foo {
bar {}
foo = true
}`,
[]ast.Node{
&ast.ObjectType{},
&ast.LiteralType{},
},
2,
},
}
for _, l := range literals {
p := newParser([]byte(l.src))
// p.enableTrace = true
item, err := p.objectItem()
if err != nil {
t.Error(err)
continue
}
// we know that the ObjectKey name is foo for all cases, what matters
// is the object
obj, ok := item.Val.(*ast.ObjectType)
if !ok {
t.Errorf("node should be of type LiteralType, got: %T", item.Val)
continue
}
// check if the total length of items are correct
equals(t, l.itemLen, len(obj.List.Items))
// check if the types are correct
for i, item := range obj.List.Items {
equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
}
}
}
func TestObjectKey(t *testing.T) {
keys := []struct {
exp []token.Type
src string
}{
{[]token.Type{token.IDENT}, `foo {}`},
{[]token.Type{token.IDENT}, `foo = {}`},
{[]token.Type{token.IDENT}, `foo = bar`},
{[]token.Type{token.IDENT}, `foo = 123`},
{[]token.Type{token.IDENT}, `foo = "${var.bar}`},
{[]token.Type{token.STRING}, `"foo" {}`},
{[]token.Type{token.STRING}, `"foo" = {}`},
{[]token.Type{token.STRING}, `"foo" = "${var.bar}`},
{[]token.Type{token.IDENT, token.IDENT}, `foo bar {}`},
{[]token.Type{token.IDENT, token.STRING}, `foo "bar" {}`},
{[]token.Type{token.STRING, token.IDENT}, `"foo" bar {}`},
{[]token.Type{token.IDENT, token.IDENT, token.IDENT}, `foo bar baz {}`},
}
for _, k := range keys {
p := newParser([]byte(k.src))
keys, err := p.objectKey()
if err != nil {
t.Fatal(err)
}
tokens := []token.Type{}
for _, o := range keys {
tokens = append(tokens, o.Token.Type)
}
equals(t, k.exp, tokens)
}
errKeys := []struct {
src string
}{
{`foo 12 {}`},
{`foo bar = {}`},
{`foo []`},
{`12 {}`},
}
for _, k := range errKeys {
p := newParser([]byte(k.src))
_, err := p.objectKey()
if err == nil {
t.Errorf("case '%s' should give an error", k.src)
}
}
}
// Official HCL tests
func TestParse(t *testing.T) {
cases := []struct {
Name string
Err bool
}{
{
"assign_colon.hcl",
true,
},
{
"comment.hcl",
false,
},
{
"comment_lastline.hcl",
false,
},
{
"comment_single.hcl",
false,
},
{
"empty.hcl",
false,
},
{
"list_comma.hcl",
false,
},
{
"multiple.hcl",
false,
},
{
"structure.hcl",
false,
},
{
"structure_basic.hcl",
false,
},
{
"structure_empty.hcl",
false,
},
{
"complex.hcl",
false,
},
{
"assign_deep.hcl",
true,
},
{
"types.hcl",
false,
},
{
"array_comment.hcl",
false,
},
{
"array_comment_2.hcl",
true,
},
{
"missing_braces.hcl",
true,
},
{
"unterminated_object.hcl",
true,
},
}
const fixtureDir = "./test-fixtures"
for _, tc := range cases {
d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
if err != nil {
t.Fatalf("err: %s", err)
}
_, err = Parse(d)
if (err != nil) != tc.Err {
t.Fatalf("Input: %s\n\nError: %s", tc.Name, err)
}
}
}
// equals fails the test if exp is not equal to act.
func equals(tb testing.TB, exp, act interface{}) {
if !reflect.DeepEqual(exp, act) {
_, file, line, _ := runtime.Caller(1)
fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
tb.FailNow()
}
}

View file

@ -1,4 +0,0 @@
foo = [
"1",
"2", # comment
]

View file

@ -1,6 +0,0 @@
provisioner "remote-exec" {
scripts = [
"${path.module}/scripts/install-consul.sh" // missing comma
"${path.module}/scripts/install-haproxy.sh"
]
}

View file

@ -1,6 +0,0 @@
resource = [{
"foo": {
"bar": {},
"baz": [1, 2, "foo"],
}
}]

View file

@ -1,5 +0,0 @@
resource = [{
foo = [{
bar = {}
}]
}]

View file

@ -1,15 +0,0 @@
// Foo
/* Bar */
/*
/*
Baz
*/
# Another
# Multiple
# Lines
foo = "bar"

View file

@ -1 +0,0 @@
#foo

View file

@ -1 +0,0 @@
# Hello

View file

@ -1,42 +0,0 @@
variable "foo" {
default = "bar"
description = "bar"
}
variable "groups" { }
provider "aws" {
access_key = "foo"
secret_key = "bar"
}
provider "do" {
api_key = "${var.foo}"
}
resource "aws_security_group" "firewall" {
count = 5
}
resource aws_instance "web" {
ami = "${var.foo}"
security_groups = [
"foo",
"${aws_security_group.firewall.foo}",
"${element(split(\",\", var.groups)}",
]
network_interface = {
device_index = 0
description = "Main network interface"
}
}
resource "aws_instance" "db" {
security_groups = "${aws_security_group.firewall.*.id}"
VPC = "foo"
depends_on = ["aws_instance.web"]
}
output "web_ip" {
value = "${aws_instance.web.private_ip}"
}

View file

@ -1 +0,0 @@
foo.bar = "baz"

View file

@ -1 +0,0 @@
foo = [1, 2, "foo"]

View file

@ -1 +0,0 @@
foo = [1, 2, "foo",]

View file

@ -1,4 +0,0 @@
# should error, but not crash
resource "template_file" "cloud_config" {
template = "$file("${path.module}/some/path")"
}

View file

@ -1,2 +0,0 @@
foo = "bar"
key = 7

View file

@ -1,3 +0,0 @@
default = {
"eu-west-1": "ami-b1cf19c6",
}

View file

@ -1,5 +0,0 @@
// This is a test structure for the lexer
foo bar "baz" {
key = 7
foo = "bar"
}

View file

@ -1,5 +0,0 @@
foo {
value = 7
"value" = 8
"complex::value" = 9
}

View file

@ -1 +0,0 @@
resource "foo" "bar" {}

View file

@ -1,7 +0,0 @@
foo = "bar"
bar = 7
baz = [1,2,3]
foo = -12
bar = 3.14159
foo = true
bar = false

View file

@ -1,2 +0,0 @@
foo "baz" {
bar = "baz"

View file

@ -1,575 +0,0 @@
package printer
import (
"bytes"
"fmt"
"sort"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/token"
)
const (
blank = byte(' ')
newline = byte('\n')
tab = byte('\t')
infinity = 1 << 30 // offset or line
)
var (
unindent = []byte("\uE123") // in the private use space
)
type printer struct {
cfg Config
prev token.Pos
comments []*ast.CommentGroup // may be nil, contains all comments
standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)
enableTrace bool
indentTrace int
}
type ByPosition []*ast.CommentGroup
func (b ByPosition) Len() int { return len(b) }
func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
// collectComments collects all standalone comments which are not lead or
// line comments
func (p *printer) collectComments(node ast.Node) {
// first collect all comments. This is already stored in
// ast.File.(comments)
ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
switch t := nn.(type) {
case *ast.File:
p.comments = t.Comments
return nn, false
}
return nn, true
})
standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0)
for _, c := range p.comments {
standaloneComments[c.Pos()] = c
}
// next remove all lead and line comments from the overall comment map.
// This will give us comments which are standalone, comments which are not
// assigned to any kind of node.
ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
switch t := nn.(type) {
case *ast.LiteralType:
if t.LineComment != nil {
for _, comment := range t.LineComment.List {
if _, ok := standaloneComments[comment.Pos()]; ok {
delete(standaloneComments, comment.Pos())
}
}
}
case *ast.ObjectItem:
if t.LeadComment != nil {
for _, comment := range t.LeadComment.List {
if _, ok := standaloneComments[comment.Pos()]; ok {
delete(standaloneComments, comment.Pos())
}
}
}
if t.LineComment != nil {
for _, comment := range t.LineComment.List {
if _, ok := standaloneComments[comment.Pos()]; ok {
delete(standaloneComments, comment.Pos())
}
}
}
}
return nn, true
})
for _, c := range standaloneComments {
p.standaloneComments = append(p.standaloneComments, c)
}
sort.Sort(ByPosition(p.standaloneComments))
}
// output creates the printable HCL output and returns it.
func (p *printer) output(n interface{}) []byte {
var buf bytes.Buffer
switch t := n.(type) {
case *ast.File:
return p.output(t.Node)
case *ast.ObjectList:
var index int
var nextItem token.Pos
var commented bool
for {
// TODO(arslan): refactor below comment printing, we have the same in objectType
for _, c := range p.standaloneComments {
for _, comment := range c.List {
if index != len(t.Items) {
nextItem = t.Items[index].Pos()
} else {
nextItem = token.Pos{Offset: infinity, Line: infinity}
}
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
// if we hit the end add newlines so we can print the comment
if index == len(t.Items) {
buf.Write([]byte{newline, newline})
}
buf.WriteString(comment.Text)
buf.WriteByte(newline)
if index != len(t.Items) {
buf.WriteByte(newline)
}
}
}
}
if index == len(t.Items) {
break
}
buf.Write(p.output(t.Items[index]))
if !commented && index != len(t.Items)-1 {
buf.Write([]byte{newline, newline})
}
index++
}
case *ast.ObjectKey:
buf.WriteString(t.Token.Text)
case *ast.ObjectItem:
p.prev = t.Pos()
buf.Write(p.objectItem(t))
case *ast.LiteralType:
buf.Write(p.literalType(t))
case *ast.ListType:
buf.Write(p.list(t))
case *ast.ObjectType:
buf.Write(p.objectType(t))
default:
fmt.Printf(" unknown type: %T\n", n)
}
return buf.Bytes()
}
func (p *printer) literalType(lit *ast.LiteralType) []byte {
result := []byte(lit.Token.Text)
if lit.Token.Type == token.HEREDOC {
// Clear the trailing newline from heredocs
if result[len(result)-1] == '\n' {
result = result[:len(result)-1]
}
// Poison lines 2+ so that we don't indent them
result = p.heredocIndent(result)
}
return result
}
// objectItem returns the printable HCL form of an object item. An object item
// starts with one/multiple keys and has a value. The value might be of any
// type.
func (p *printer) objectItem(o *ast.ObjectItem) []byte {
defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
var buf bytes.Buffer
if o.LeadComment != nil {
for _, comment := range o.LeadComment.List {
buf.WriteString(comment.Text)
buf.WriteByte(newline)
}
}
for i, k := range o.Keys {
buf.WriteString(k.Token.Text)
buf.WriteByte(blank)
// reach end of key
if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
buf.WriteString("=")
buf.WriteByte(blank)
}
}
buf.Write(p.output(o.Val))
if o.Val.Pos().Line == o.Keys[0].Pos().Line && o.LineComment != nil {
buf.WriteByte(blank)
for _, comment := range o.LineComment.List {
buf.WriteString(comment.Text)
}
}
return buf.Bytes()
}
// objectType returns the printable HCL form of an object type. An object type
// begins with a brace and ends with a brace.
func (p *printer) objectType(o *ast.ObjectType) []byte {
defer un(trace(p, "ObjectType"))
var buf bytes.Buffer
buf.WriteString("{")
buf.WriteByte(newline)
var index int
var nextItem token.Pos
var commented bool
for {
// Print stand alone comments
for _, c := range p.standaloneComments {
for _, comment := range c.List {
// if we hit the end, last item should be the brace
if index != len(o.List.Items) {
nextItem = o.List.Items[index].Pos()
} else {
nextItem = o.Rbrace
}
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
// add newline if it's between other printed nodes
if index > 0 {
commented = true
buf.WriteByte(newline)
}
buf.Write(p.indent([]byte(comment.Text)))
buf.WriteByte(newline)
if index != len(o.List.Items) {
buf.WriteByte(newline) // do not print on the end
}
}
}
}
if index == len(o.List.Items) {
p.prev = o.Rbrace
break
}
// check if we have adjacent one-liner items. If yes, we're going to align
// the comments.
var aligned []*ast.ObjectItem
for _, item := range o.List.Items[index:] {
// we don't group one line lists
if len(o.List.Items) == 1 {
break
}
// one means a one-liner without any lead comment
// two means a one-liner with a lead comment
// anything else might be something else
cur := lines(string(p.objectItem(item)))
if cur > 2 {
break
}
curPos := item.Pos()
nextPos := token.Pos{}
if index != len(o.List.Items)-1 {
nextPos = o.List.Items[index+1].Pos()
}
prevPos := token.Pos{}
if index != 0 {
prevPos = o.List.Items[index-1].Pos()
}
// fmt.Println("DEBUG ----------------")
// fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
// fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
// fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
if curPos.Line+1 == nextPos.Line {
aligned = append(aligned, item)
index++
continue
}
if curPos.Line-1 == prevPos.Line {
aligned = append(aligned, item)
index++
// finish if we have a new line or comment next. This happens
// if the next item is not adjacent
if curPos.Line+1 != nextPos.Line {
break
}
continue
}
break
}
// put newlines if the items are between other non-aligned items.
// newlines are also added if there is a standalone comment already, so
// check it too
if !commented && index != len(aligned) {
buf.WriteByte(newline)
}
if len(aligned) >= 1 {
p.prev = aligned[len(aligned)-1].Pos()
items := p.alignedItems(aligned)
buf.Write(p.indent(items))
} else {
p.prev = o.List.Items[index].Pos()
buf.Write(p.indent(p.objectItem(o.List.Items[index])))
index++
}
buf.WriteByte(newline)
}
buf.WriteString("}")
return buf.Bytes()
}
func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
var buf bytes.Buffer
// find the longest key and value length, needed for alignment
var longestKeyLen int // longest key length
var longestValLen int // longest value length
for _, item := range items {
key := len(item.Keys[0].Token.Text)
val := len(p.output(item.Val))
if key > longestKeyLen {
longestKeyLen = key
}
if val > longestValLen {
longestValLen = val
}
}
for i, item := range items {
if item.LeadComment != nil {
for _, comment := range item.LeadComment.List {
buf.WriteString(comment.Text)
buf.WriteByte(newline)
}
}
for i, k := range item.Keys {
keyLen := len(k.Token.Text)
buf.WriteString(k.Token.Text)
for i := 0; i < longestKeyLen-keyLen+1; i++ {
buf.WriteByte(blank)
}
// reach end of key
if i == len(item.Keys)-1 && len(item.Keys) == 1 {
buf.WriteString("=")
buf.WriteByte(blank)
}
}
val := p.output(item.Val)
valLen := len(val)
buf.Write(val)
if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
for i := 0; i < longestValLen-valLen+1; i++ {
buf.WriteByte(blank)
}
for _, comment := range item.LineComment.List {
buf.WriteString(comment.Text)
}
}
// do not print for the last item
if i != len(items)-1 {
buf.WriteByte(newline)
}
}
return buf.Bytes()
}
// list returns the printable HCL form of a list type.
func (p *printer) list(l *ast.ListType) []byte {
var buf bytes.Buffer
buf.WriteString("[")
var longestLine int
for _, item := range l.List {
// for now we assume that the list only contains literal types
if lit, ok := item.(*ast.LiteralType); ok {
lineLen := len(lit.Token.Text)
if lineLen > longestLine {
longestLine = lineLen
}
}
}
insertSpaceBeforeItem := false
for i, item := range l.List {
if item.Pos().Line != l.Lbrack.Line {
// multiline list, add newline before we add each item
buf.WriteByte(newline)
insertSpaceBeforeItem = false
// also indent each line
val := p.output(item)
curLen := len(val)
buf.Write(p.indent(val))
buf.WriteString(",")
if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
// if the next item doesn't have any comments, do not align
buf.WriteByte(blank) // align one space
for i := 0; i < longestLine-curLen; i++ {
buf.WriteByte(blank)
}
for _, comment := range lit.LineComment.List {
buf.WriteString(comment.Text)
}
}
if i == len(l.List)-1 {
buf.WriteByte(newline)
}
} else {
if insertSpaceBeforeItem {
buf.WriteByte(blank)
insertSpaceBeforeItem = false
}
buf.Write(p.output(item))
if i != len(l.List)-1 {
buf.WriteString(",")
insertSpaceBeforeItem = true
}
}
}
buf.WriteString("]")
return buf.Bytes()
}
// indent indents the lines of the given buffer for each non-empty line
func (p *printer) indent(buf []byte) []byte {
var prefix []byte
if p.cfg.SpacesWidth != 0 {
for i := 0; i < p.cfg.SpacesWidth; i++ {
prefix = append(prefix, blank)
}
} else {
prefix = []byte{tab}
}
var res []byte
bol := true
for _, c := range buf {
if bol && c != '\n' {
res = append(res, prefix...)
}
res = append(res, c)
bol = c == '\n'
}
return res
}
// unindent removes all the indentation from the tombstoned lines
func (p *printer) unindent(buf []byte) []byte {
var res []byte
for i := 0; i < len(buf); i++ {
skip := len(buf)-i <= len(unindent)
if !skip {
skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
}
if skip {
res = append(res, buf[i])
continue
}
// We have a marker. We have to backtrack here and clean out
// any whitespace ahead of our tombstone, up to a \n
for j := len(res) - 1; j >= 0; j-- {
if res[j] == '\n' {
break
}
res = res[:j]
}
// Skip the entire unindent marker
i += len(unindent) - 1
}
return res
}
// heredocIndent marks the second and subsequent lines as unindentable
func (p *printer) heredocIndent(buf []byte) []byte {
var res []byte
bol := false
for _, c := range buf {
if bol && c != '\n' {
res = append(res, unindent...)
}
res = append(res, c)
bol = c == '\n'
}
return res
}
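// Illustrative note (not part of the upstream source): heredoc bodies must
// keep their original indentation, so heredocIndent prefixes each body line
// with the unindent marker. indent() later prepends the normal prefix to
// every line, and unindent() then strips both the marker and any indentation
// added in front of it before the final output is written.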
func lines(txt string) int {
endline := 1
for i := 0; i < len(txt); i++ {
if txt[i] == '\n' {
endline++
}
}
return endline
}
// ----------------------------------------------------------------------------
// Tracing support
func (p *printer) printTrace(a ...interface{}) {
if !p.enableTrace {
return
}
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
const n = len(dots)
i := 2 * p.indentTrace
for i > n {
fmt.Print(dots)
i -= n
}
// i <= n
fmt.Print(dots[0:i])
fmt.Println(a...)
}
func trace(p *printer, msg string) *printer {
p.printTrace(msg, "(")
p.indentTrace++
return p
}
// Usage pattern: defer un(trace(p, "..."))
func un(p *printer) {
p.indentTrace--
p.printTrace(")")
}
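// Illustrative sketch (not part of the upstream source): with
// p.enableTrace set, a method instrumented as below logs its entry and
// exit, indented by call depth:
//
//	func (p *printer) example(o *ast.ObjectType) []byte {
//		defer un(trace(p, "example"))
//		return p.output(o)
//	}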

View file

@ -1,64 +0,0 @@
// Package printer implements printing of AST nodes to HCL format.
package printer
import (
"bytes"
"io"
"text/tabwriter"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/parser"
)
var DefaultConfig = Config{
SpacesWidth: 2,
}
// A Config node controls the output of Fprint.
type Config struct {
SpacesWidth int // if set, it will use spaces instead of tabs for alignment
}
func (c *Config) Fprint(output io.Writer, node ast.Node) error {
p := &printer{
cfg: *c,
comments: make([]*ast.CommentGroup, 0),
standaloneComments: make([]*ast.CommentGroup, 0),
// enableTrace: true,
}
p.collectComments(node)
if _, err := output.Write(p.unindent(p.output(node))); err != nil {
return err
}
// flush tabwriter, if any
var err error
if tw, _ := output.(*tabwriter.Writer); tw != nil {
err = tw.Flush()
}
return err
}
// Fprint "pretty-prints" an HCL node to output
// It calls Config.Fprint with default settings.
func Fprint(output io.Writer, node ast.Node) error {
return DefaultConfig.Fprint(output, node)
}
// Format formats src HCL and returns the result.
func Format(src []byte) ([]byte, error) {
node, err := parser.Parse(src)
if err != nil {
return nil, err
}
var buf bytes.Buffer
if err := DefaultConfig.Fprint(&buf, node); err != nil {
return nil, err
}
return buf.Bytes(), nil
}

View file

@ -1,143 +0,0 @@
package printer
import (
"bytes"
"errors"
"flag"
"fmt"
"io/ioutil"
"path/filepath"
"testing"
"github.com/hashicorp/hcl/hcl/parser"
)
var update = flag.Bool("update", false, "update golden files")
const (
dataDir = "testdata"
)
type entry struct {
source, golden string
}
// Use go test -update to create/update the respective golden files.
var data = []entry{
{"complexhcl.input", "complexhcl.golden"},
{"list.input", "list.golden"},
{"comment.input", "comment.golden"},
{"comment_aligned.input", "comment_aligned.golden"},
{"comment_standalone.input", "comment_standalone.golden"},
}
func TestFiles(t *testing.T) {
for _, e := range data {
source := filepath.Join(dataDir, e.source)
golden := filepath.Join(dataDir, e.golden)
check(t, source, golden)
}
}
func check(t *testing.T, source, golden string) {
src, err := ioutil.ReadFile(source)
if err != nil {
t.Error(err)
return
}
res, err := format(src)
if err != nil {
t.Error(err)
return
}
// update golden files if necessary
if *update {
if err := ioutil.WriteFile(golden, res, 0644); err != nil {
t.Error(err)
}
return
}
// get golden
gld, err := ioutil.ReadFile(golden)
if err != nil {
t.Error(err)
return
}
// formatted source and golden must be the same
if err := diff(source, golden, res, gld); err != nil {
t.Error(err)
return
}
}
// diff compares a and b.
func diff(aname, bname string, a, b []byte) error {
var buf bytes.Buffer // holding long error message
// compare lengths
if len(a) != len(b) {
fmt.Fprintf(&buf, "\nlength changed: len(%s) = %d, len(%s) = %d", aname, len(a), bname, len(b))
}
// compare contents
line := 1
offs := 1
for i := 0; i < len(a) && i < len(b); i++ {
ch := a[i]
if ch != b[i] {
fmt.Fprintf(&buf, "\n%s:%d:%d: %s", aname, line, i-offs+1, lineAt(a, offs))
fmt.Fprintf(&buf, "\n%s:%d:%d: %s", bname, line, i-offs+1, lineAt(b, offs))
fmt.Fprintf(&buf, "\n\n")
break
}
if ch == '\n' {
line++
offs = i + 1
}
}
if buf.Len() > 0 {
return errors.New(buf.String())
}
return nil
}
// format parses src, prints the corresponding AST, verifies the resulting
// src is syntactically correct, and returns the resulting src or an error
// if any.
func format(src []byte) ([]byte, error) {
// parse src
node, err := parser.Parse(src)
if err != nil {
return nil, fmt.Errorf("parse: %s\n%s", err, src)
}
var buf bytes.Buffer
cfg := &Config{}
if err := cfg.Fprint(&buf, node); err != nil {
return nil, fmt.Errorf("print: %s", err)
}
// make sure formatted output is syntactically correct
res := buf.Bytes()
if _, err := parser.Parse(res); err != nil {
return nil, fmt.Errorf("parse: %s\n%s", err, res)
}
return res, nil
}
// lineAt returns the line in text starting at offset offs.
func lineAt(text []byte, offs int) []byte {
i := offs
for i < len(text) && text[i] != '\n' {
i++
}
return text[offs:i]
}

View file

@ -1,36 +0,0 @@
// A standalone comment is a comment which is not attached to any kind of node
// This comes from Terraform, as a test
variable "foo" {
# Standalone comment should be still here
default = "bar"
description = "bar" # yooo
}
/* This is a multi line standalone
comment*/
// fatih arslan
/* This is a developer test
account and a multiline comment */
developer = ["fatih", "arslan"] // fatih arslan
# One line here
numbers = [1, 2] // another line here
# Another comment
variable = {
description = "bar" # another yooo
foo {
# Nested standalone
bar = "fatih"
}
}
// lead comment
foo {
bar = "fatih" // line comment 2
} // line comment 3

View file

@ -1,37 +0,0 @@
// A standalone comment is a comment which is not attached to any kind of node
// This comes from Terraform, as a test
variable "foo" {
# Standalone comment should be still here
default = "bar"
description = "bar" # yooo
}
/* This is a multi line standalone
comment*/
// fatih arslan
/* This is a developer test
account and a multiline comment */
developer = [ "fatih", "arslan"] // fatih arslan
# One line here
numbers = [1,2] // another line here
# Another comment
variable = {
description = "bar" # another yooo
foo {
# Nested standalone
bar = "fatih"
}
}
// lead comment
foo {
bar = "fatih" // line comment 2
} // line comment 3

View file

@ -1,32 +0,0 @@
aligned {
# We have some aligned items below
foo = "fatih" # yoo1
default = "bar" # yoo2
bar = "bar and foo" # yoo3
default = {
bar = "example"
}
#test arslan
fatih = ["fatih"] # yoo4
#fatih arslan
fatiharslan = ["arslan"] // yoo5
default = {
bar = "example"
}
security_groups = [
"foo", # kenya 1
"${aws_security_group.firewall.foo}", # kenya 2
]
security_groups2 = [
"foo", # kenya 1
"bar", # kenya 1.5
"${aws_security_group.firewall.foo}", # kenya 2
"foobar", # kenya 3
]
}

View file

@ -1,28 +0,0 @@
aligned {
# We have some aligned items below
foo = "fatih" # yoo1
default = "bar" # yoo2
bar = "bar and foo" # yoo3
default = {
bar = "example"
}
#test arslan
fatih = ["fatih"] # yoo4
#fatih arslan
fatiharslan = ["arslan"] // yoo5
default = {
bar = "example"
}
security_groups = [
"foo", # kenya 1
"${aws_security_group.firewall.foo}", # kenya 2
]
security_groups2 = [
"foo", # kenya 1
"bar", # kenya 1.5
"${aws_security_group.firewall.foo}", # kenya 2
"foobar", # kenya 3
]
}

View file

@ -1,16 +0,0 @@
// A standalone comment
aligned {
# Standalone 1
a = "bar" # yoo1
default = "bar" # yoo2
# Standalone 2
}
# Standalone 3
numbers = [1, 2] // another line here
# Standalone 4

View file

@ -1,16 +0,0 @@
// A standalone comment
aligned {
# Standalone 1
a = "bar" # yoo1
default = "bar" # yoo2
# Standalone 2
}
# Standalone 3
numbers = [1,2] // another line here
# Standalone 4

View file

@ -1,54 +0,0 @@
variable "foo" {
default = "bar"
description = "bar"
}
developer = ["fatih", "arslan"]
provider "aws" {
access_key = "foo"
secret_key = "bar"
}
provider "do" {
api_key = "${var.foo}"
}
resource "aws_security_group" "firewall" {
count = 5
}
resource aws_instance "web" {
ami = "${var.foo}"
security_groups = [
"foo",
"${aws_security_group.firewall.foo}",
]
network_interface {
device_index = 0
description = "Main network interface"
}
network_interface = {
device_index = 1
description = <<EOF
ANOTHER NETWORK INTERFACE
EOF
}
}
resource "aws_instance" "db" {
security_groups = "${aws_security_group.firewall.*.id}"
VPC = "foo"
depends_on = ["aws_instance.web"]
}
output "web_ip" {
value = <<EOF
TUBES
EOF
}

View file

@ -1,53 +0,0 @@
variable "foo" {
default = "bar"
description = "bar"
}
developer = [ "fatih", "arslan"]
provider "aws" {
access_key ="foo"
secret_key = "bar"
}
provider "do" {
api_key = "${var.foo}"
}
resource "aws_security_group" "firewall" {
count = 5
}
resource aws_instance "web" {
ami = "${var.foo}"
security_groups = [
"foo",
"${aws_security_group.firewall.foo}"
]
network_interface {
device_index = 0
description = "Main network interface"
}
network_interface = {
device_index = 1
description = <<EOF
ANOTHER NETWORK INTERFACE
EOF
}
}
resource "aws_instance" "db" {
security_groups = "${aws_security_group.firewall.*.id}"
VPC = "foo"
depends_on = ["aws_instance.web"]
}
output "web_ip" {
value=<<EOF
TUBES
EOF
}

View file

@ -1,27 +0,0 @@
foo = ["fatih", "arslan"]
foo = ["bar", "qaz"]
foo = ["zeynep",
"arslan",
]
foo = ["fatih", "zeynep",
"arslan",
]
foo = [
"vim-go",
"golang",
"hcl",
]
foo = []
foo = [1, 2, 3, 4]
foo = [
"kenya",
"ethiopia",
"columbia",
]

View file

@ -1,21 +0,0 @@
foo = ["fatih", "arslan" ]
foo = [ "bar", "qaz", ]
foo = [ "zeynep",
"arslan", ]
foo = ["fatih", "zeynep",
"arslan", ]
foo = [
"vim-go",
"golang", "hcl"]
foo = []
foo = [1, 2,3, 4]
foo = [
"kenya", "ethiopia",
"columbia"]

View file

@ -6,6 +6,7 @@ import (
"bytes"
"fmt"
"os"
"regexp"
"unicode"
"unicode/utf8"
@ -94,6 +95,12 @@ func (s *Scanner) next() rune {
s.srcPos.Column = 0
}
// If we see a null character with data left, then that is an error
if ch == '\x00' && s.buf.Len() > 0 {
s.err("unexpected null character (0x00)")
return eof
}
// debug
// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
return ch
@ -223,6 +230,11 @@ func (s *Scanner) Scan() token.Token {
func (s *Scanner) scanComment(ch rune) {
// single line comments
if ch == '#' || (ch == '/' && s.peek() != '*') {
if ch == '/' && s.peek() != '/' {
s.err("expected '/' for comment")
return
}
ch = s.next()
for ch != '\n' && ch >= 0 && ch != eof {
ch = s.next()
@ -376,7 +388,7 @@ func (s *Scanner) scanExponent(ch rune) rune {
return ch
}
// scanHeredoc scans a heredoc string.
// scanHeredoc scans a heredoc string
func (s *Scanner) scanHeredoc() {
// Scan the second '<' in example: '<<EOF'
if s.next() != '<' {
@ -389,6 +401,12 @@ func (s *Scanner) scanHeredoc() {
// Scan the identifier
ch := s.next()
// Indented heredoc syntax
if ch == '-' {
ch = s.next()
}
for isLetter(ch) || isDigit(ch) {
ch = s.next()
}
@ -414,6 +432,17 @@ func (s *Scanner) scanHeredoc() {
// Read the identifier
identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
if len(identBytes) == 0 {
s.err("zero-length heredoc anchor")
return
}
var identRegexp *regexp.Regexp
if identBytes[0] == '-' {
identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
} else {
identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
}
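// For example (illustrative, not part of the upstream source): for an
// indented heredoc the scanner sees the anchor bytes "-EOF" and builds
// the pattern `[[:space:]]*EOF\z`, so a terminator line such as "    EOF"
// matches even though it is indented.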
// Read the actual string value
lineStart := s.srcPos.Offset
@ -422,12 +451,11 @@ func (s *Scanner) scanHeredoc() {
// Special newline handling.
if ch == '\n' {
// Math is fast, so we first compare the byte counts to
// see if we have a chance of seeing the same identifier. If those
// match, then we compare the string values directly.
// Math is fast, so we first compare the byte counts to see if we have a chance
// of seeing the same identifier - if the length is less than the number of bytes
// in the identifier, this cannot be a valid terminator.
lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
if lineBytesLen == len(identBytes) &&
bytes.Equal(identBytes, s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
break
}
@ -452,7 +480,7 @@ func (s *Scanner) scanString() {
// read character after quote
ch := s.next()
if ch == '\n' || ch < 0 || ch == eof {
if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
s.err("literal not terminated")
return
}
@ -508,16 +536,27 @@ func (s *Scanner) scanEscape() rune {
// scanDigits scans a rune with the given base, up to n times. For example,
// scanning the octal escape \123 results in a call to scanDigits(ch, 8, 3)
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
start := n
for n > 0 && digitVal(ch) < base {
ch = s.next()
if ch == eof {
// If we see an EOF, we halt any more scanning of digits
// immediately.
break
}
n--
}
if n > 0 {
s.err("illegal char escape")
}
// we scanned all digits, put the last non digit char back
s.unread()
if n != start {
// we scanned all digits; put the last non-digit char back,
// but only if we read anything at all
s.unread()
}
return ch
}

View file

@ -1,536 +0,0 @@
package scanner
import (
"bytes"
"fmt"
"testing"
"github.com/hashicorp/hcl/hcl/token"
"strings"
)
var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
type tokenPair struct {
tok token.Type
text string
}
var tokenLists = map[string][]tokenPair{
"comment": []tokenPair{
{token.COMMENT, "//"},
{token.COMMENT, "////"},
{token.COMMENT, "// comment"},
{token.COMMENT, "// /* comment */"},
{token.COMMENT, "// // comment //"},
{token.COMMENT, "//" + f100},
{token.COMMENT, "#"},
{token.COMMENT, "##"},
{token.COMMENT, "# comment"},
{token.COMMENT, "# /* comment */"},
{token.COMMENT, "# # comment #"},
{token.COMMENT, "#" + f100},
{token.COMMENT, "/**/"},
{token.COMMENT, "/***/"},
{token.COMMENT, "/* comment */"},
{token.COMMENT, "/* // comment */"},
{token.COMMENT, "/* /* comment */"},
{token.COMMENT, "/*\n comment\n*/"},
{token.COMMENT, "/*" + f100 + "*/"},
},
"operator": []tokenPair{
{token.LBRACK, "["},
{token.LBRACE, "{"},
{token.COMMA, ","},
{token.PERIOD, "."},
{token.RBRACK, "]"},
{token.RBRACE, "}"},
{token.ASSIGN, "="},
{token.ADD, "+"},
{token.SUB, "-"},
},
"bool": []tokenPair{
{token.BOOL, "true"},
{token.BOOL, "false"},
},
"ident": []tokenPair{
{token.IDENT, "a"},
{token.IDENT, "a0"},
{token.IDENT, "foobar"},
{token.IDENT, "foo-bar"},
{token.IDENT, "abc123"},
{token.IDENT, "LGTM"},
{token.IDENT, "_"},
{token.IDENT, "_abc123"},
{token.IDENT, "abc123_"},
{token.IDENT, "_abc_123_"},
{token.IDENT, "_äöü"},
{token.IDENT, "_本"},
{token.IDENT, "äöü"},
{token.IDENT, "本"},
{token.IDENT, "a۰۱۸"},
{token.IDENT, "foo६४"},
{token.IDENT, "bar"},
},
"heredoc": []tokenPair{
{token.HEREDOC, "<<EOF\nhello\nworld\nEOF"},
{token.HEREDOC, "<<EOF123\nhello\nworld\nEOF123"},
},
"string": []tokenPair{
{token.STRING, `" "`},
{token.STRING, `"a"`},
{token.STRING, `"本"`},
{token.STRING, `"${file("foo")}"`},
{token.STRING, `"${file(\"foo\")}"`},
{token.STRING, `"\a"`},
{token.STRING, `"\b"`},
{token.STRING, `"\f"`},
{token.STRING, `"\n"`},
{token.STRING, `"\r"`},
{token.STRING, `"\t"`},
{token.STRING, `"\v"`},
{token.STRING, `"\""`},
{token.STRING, `"\000"`},
{token.STRING, `"\777"`},
{token.STRING, `"\x00"`},
{token.STRING, `"\xff"`},
{token.STRING, `"\u0000"`},
{token.STRING, `"\ufA16"`},
{token.STRING, `"\U00000000"`},
{token.STRING, `"\U0000ffAB"`},
{token.STRING, `"` + f100 + `"`},
},
"number": []tokenPair{
{token.NUMBER, "0"},
{token.NUMBER, "1"},
{token.NUMBER, "9"},
{token.NUMBER, "42"},
{token.NUMBER, "1234567890"},
{token.NUMBER, "00"},
{token.NUMBER, "01"},
{token.NUMBER, "07"},
{token.NUMBER, "042"},
{token.NUMBER, "01234567"},
{token.NUMBER, "0x0"},
{token.NUMBER, "0x1"},
{token.NUMBER, "0xf"},
{token.NUMBER, "0x42"},
{token.NUMBER, "0x123456789abcDEF"},
{token.NUMBER, "0x" + f100},
{token.NUMBER, "0X0"},
{token.NUMBER, "0X1"},
{token.NUMBER, "0XF"},
{token.NUMBER, "0X42"},
{token.NUMBER, "0X123456789abcDEF"},
{token.NUMBER, "0X" + f100},
{token.NUMBER, "-0"},
{token.NUMBER, "-1"},
{token.NUMBER, "-9"},
{token.NUMBER, "-42"},
{token.NUMBER, "-1234567890"},
{token.NUMBER, "-00"},
{token.NUMBER, "-01"},
{token.NUMBER, "-07"},
{token.NUMBER, "-29"},
{token.NUMBER, "-042"},
{token.NUMBER, "-01234567"},
{token.NUMBER, "-0x0"},
{token.NUMBER, "-0x1"},
{token.NUMBER, "-0xf"},
{token.NUMBER, "-0x42"},
{token.NUMBER, "-0x123456789abcDEF"},
{token.NUMBER, "-0x" + f100},
{token.NUMBER, "-0X0"},
{token.NUMBER, "-0X1"},
{token.NUMBER, "-0XF"},
{token.NUMBER, "-0X42"},
{token.NUMBER, "-0X123456789abcDEF"},
{token.NUMBER, "-0X" + f100},
},
"float": []tokenPair{
{token.FLOAT, "0."},
{token.FLOAT, "1."},
{token.FLOAT, "42."},
{token.FLOAT, "01234567890."},
{token.FLOAT, ".0"},
{token.FLOAT, ".1"},
{token.FLOAT, ".42"},
{token.FLOAT, ".0123456789"},
{token.FLOAT, "0.0"},
{token.FLOAT, "1.0"},
{token.FLOAT, "42.0"},
{token.FLOAT, "01234567890.0"},
{token.FLOAT, "0e0"},
{token.FLOAT, "1e0"},
{token.FLOAT, "42e0"},
{token.FLOAT, "01234567890e0"},
{token.FLOAT, "0E0"},
{token.FLOAT, "1E0"},
{token.FLOAT, "42E0"},
{token.FLOAT, "01234567890E0"},
{token.FLOAT, "0e+10"},
{token.FLOAT, "1e-10"},
{token.FLOAT, "42e+10"},
{token.FLOAT, "01234567890e-10"},
{token.FLOAT, "0E+10"},
{token.FLOAT, "1E-10"},
{token.FLOAT, "42E+10"},
{token.FLOAT, "01234567890E-10"},
{token.FLOAT, "01.8e0"},
{token.FLOAT, "1.4e0"},
{token.FLOAT, "42.2e0"},
{token.FLOAT, "01234567890.12e0"},
{token.FLOAT, "0.E0"},
{token.FLOAT, "1.12E0"},
{token.FLOAT, "42.123E0"},
{token.FLOAT, "01234567890.213E0"},
{token.FLOAT, "0.2e+10"},
{token.FLOAT, "1.2e-10"},
{token.FLOAT, "42.54e+10"},
{token.FLOAT, "01234567890.98e-10"},
{token.FLOAT, "0.1E+10"},
{token.FLOAT, "1.1E-10"},
{token.FLOAT, "42.1E+10"},
{token.FLOAT, "01234567890.1E-10"},
{token.FLOAT, "-0.0"},
{token.FLOAT, "-1.0"},
{token.FLOAT, "-42.0"},
{token.FLOAT, "-01234567890.0"},
{token.FLOAT, "-0e0"},
{token.FLOAT, "-1e0"},
{token.FLOAT, "-42e0"},
{token.FLOAT, "-01234567890e0"},
{token.FLOAT, "-0E0"},
{token.FLOAT, "-1E0"},
{token.FLOAT, "-42E0"},
{token.FLOAT, "-01234567890E0"},
{token.FLOAT, "-0e+10"},
{token.FLOAT, "-1e-10"},
{token.FLOAT, "-42e+10"},
{token.FLOAT, "-01234567890e-10"},
{token.FLOAT, "-0E+10"},
{token.FLOAT, "-1E-10"},
{token.FLOAT, "-42E+10"},
{token.FLOAT, "-01234567890E-10"},
{token.FLOAT, "-01.8e0"},
{token.FLOAT, "-1.4e0"},
{token.FLOAT, "-42.2e0"},
{token.FLOAT, "-01234567890.12e0"},
{token.FLOAT, "-0.E0"},
{token.FLOAT, "-1.12E0"},
{token.FLOAT, "-42.123E0"},
{token.FLOAT, "-01234567890.213E0"},
{token.FLOAT, "-0.2e+10"},
{token.FLOAT, "-1.2e-10"},
{token.FLOAT, "-42.54e+10"},
{token.FLOAT, "-01234567890.98e-10"},
{token.FLOAT, "-0.1E+10"},
{token.FLOAT, "-1.1E-10"},
{token.FLOAT, "-42.1E+10"},
{token.FLOAT, "-01234567890.1E-10"},
},
}
var orderedTokenLists = []string{
"comment",
"operator",
"bool",
"ident",
"heredoc",
"string",
"number",
"float",
}
func TestPosition(t *testing.T) {
// create artificial source code
buf := new(bytes.Buffer)
for _, listName := range orderedTokenLists {
for _, ident := range tokenLists[listName] {
fmt.Fprintf(buf, "\t\t\t\t%s\n", ident.text)
}
}
s := New(buf.Bytes())
pos := token.Pos{"", 4, 1, 5}
s.Scan()
for _, listName := range orderedTokenLists {
for _, k := range tokenLists[listName] {
curPos := s.tokPos
// fmt.Printf("[%q] s = %+v:%+v\n", k.text, curPos.Offset, curPos.Column)
if curPos.Offset != pos.Offset {
t.Fatalf("offset = %d, want %d for %q", curPos.Offset, pos.Offset, k.text)
}
if curPos.Line != pos.Line {
t.Fatalf("line = %d, want %d for %q", curPos.Line, pos.Line, k.text)
}
if curPos.Column != pos.Column {
t.Fatalf("column = %d, want %d for %q", curPos.Column, pos.Column, k.text)
}
pos.Offset += 4 + len(k.text) + 1 // 4 tabs + token bytes + newline
pos.Line += countNewlines(k.text) + 1 // each token is on a new line
s.Scan()
}
}
// make sure there were no token-internal errors reported by scanner
if s.ErrorCount != 0 {
t.Errorf("%d errors", s.ErrorCount)
}
}
func TestComment(t *testing.T) {
testTokenList(t, tokenLists["comment"])
}
func TestOperator(t *testing.T) {
testTokenList(t, tokenLists["operator"])
}
func TestBool(t *testing.T) {
testTokenList(t, tokenLists["bool"])
}
func TestIdent(t *testing.T) {
testTokenList(t, tokenLists["ident"])
}
func TestString(t *testing.T) {
testTokenList(t, tokenLists["string"])
}
func TestNumber(t *testing.T) {
testTokenList(t, tokenLists["number"])
}
func TestFloat(t *testing.T) {
testTokenList(t, tokenLists["float"])
}
func TestWindowsLineEndings(t *testing.T) {
hcl := `// This should have Windows line endings
resource "aws_instance" "foo" {
user_data=<<HEREDOC
test script
HEREDOC
}`
hclWindowsEndings := strings.Replace(hcl, "\n", "\r\n", -1)
literals := []struct {
tokenType token.Type
literal string
}{
{token.COMMENT, "// This should have Windows line endings\r"},
{token.IDENT, `resource`},
{token.STRING, `"aws_instance"`},
{token.STRING, `"foo"`},
{token.LBRACE, `{`},
{token.IDENT, `user_data`},
{token.ASSIGN, `=`},
{token.HEREDOC, "<<HEREDOC\r\n test script\r\nHEREDOC\r\n"},
{token.RBRACE, `}`},
}
s := New([]byte(hclWindowsEndings))
for _, l := range literals {
tok := s.Scan()
if l.tokenType != tok.Type {
t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String())
}
if l.literal != tok.Text {
t.Errorf("got:\n%v\nwant:\n%v\n", []byte(tok.Text), []byte(l.literal))
}
}
}
func TestRealExample(t *testing.T) {
complexHCL := `// This comes from Terraform, as a test
variable "foo" {
default = "bar"
description = "bar"
}
provider "aws" {
access_key = "foo"
secret_key = "bar"
}
resource "aws_security_group" "firewall" {
count = 5
}
resource aws_instance "web" {
ami = "${var.foo}"
security_groups = [
"foo",
"${aws_security_group.firewall.foo}"
]
network_interface {
device_index = 0
description = <<EOF
Main interface
EOF
}
}`
literals := []struct {
tokenType token.Type
literal string
}{
{token.COMMENT, `// This comes from Terraform, as a test`},
{token.IDENT, `variable`},
{token.STRING, `"foo"`},
{token.LBRACE, `{`},
{token.IDENT, `default`},
{token.ASSIGN, `=`},
{token.STRING, `"bar"`},
{token.IDENT, `description`},
{token.ASSIGN, `=`},
{token.STRING, `"bar"`},
{token.RBRACE, `}`},
{token.IDENT, `provider`},
{token.STRING, `"aws"`},
{token.LBRACE, `{`},
{token.IDENT, `access_key`},
{token.ASSIGN, `=`},
{token.STRING, `"foo"`},
{token.IDENT, `secret_key`},
{token.ASSIGN, `=`},
{token.STRING, `"bar"`},
{token.RBRACE, `}`},
{token.IDENT, `resource`},
{token.STRING, `"aws_security_group"`},
{token.STRING, `"firewall"`},
{token.LBRACE, `{`},
{token.IDENT, `count`},
{token.ASSIGN, `=`},
{token.NUMBER, `5`},
{token.RBRACE, `}`},
{token.IDENT, `resource`},
{token.IDENT, `aws_instance`},
{token.STRING, `"web"`},
{token.LBRACE, `{`},
{token.IDENT, `ami`},
{token.ASSIGN, `=`},
{token.STRING, `"${var.foo}"`},
{token.IDENT, `security_groups`},
{token.ASSIGN, `=`},
{token.LBRACK, `[`},
{token.STRING, `"foo"`},
{token.COMMA, `,`},
{token.STRING, `"${aws_security_group.firewall.foo}"`},
{token.RBRACK, `]`},
{token.IDENT, `network_interface`},
{token.LBRACE, `{`},
{token.IDENT, `device_index`},
{token.ASSIGN, `=`},
{token.NUMBER, `0`},
{token.IDENT, `description`},
{token.ASSIGN, `=`},
{token.HEREDOC, "<<EOF\nMain interface\nEOF\n"},
{token.RBRACE, `}`},
{token.RBRACE, `}`},
{token.EOF, ``},
}
s := New([]byte(complexHCL))
for _, l := range literals {
tok := s.Scan()
if l.tokenType != tok.Type {
t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String())
}
if l.literal != tok.Text {
t.Errorf("got: %s want %s\n", tok, l.literal)
}
}
}
func TestError(t *testing.T) {
testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
testError(t, "ab\x80", "1:3", "illegal UTF-8 encoding", token.IDENT)
testError(t, "abc\xff", "1:4", "illegal UTF-8 encoding", token.IDENT)
testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", token.STRING)
testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", token.STRING)
testError(t, `01238`, "1:6", "illegal octal number", token.NUMBER)
testError(t, `01238123`, "1:9", "illegal octal number", token.NUMBER)
testError(t, `0x`, "1:3", "illegal hexadecimal number", token.NUMBER)
testError(t, `0xg`, "1:3", "illegal hexadecimal number", token.NUMBER)
testError(t, `'aa'`, "1:1", "illegal char", token.ILLEGAL)
testError(t, `"`, "1:2", "literal not terminated", token.STRING)
testError(t, `"abc`, "1:5", "literal not terminated", token.STRING)
testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING)
testError(t, `/*/`, "1:4", "comment not terminated", token.COMMENT)
}
func testError(t *testing.T, src, pos, msg string, tok token.Type) {
s := New([]byte(src))
errorCalled := false
s.Error = func(p token.Pos, m string) {
if !errorCalled {
if pos != p.String() {
t.Errorf("pos = %q, want %q for %q", p, pos, src)
}
if m != msg {
t.Errorf("msg = %q, want %q for %q", m, msg, src)
}
errorCalled = true
}
}
tk := s.Scan()
if tk.Type != tok {
t.Errorf("tok = %s, want %s for %q", tk, tok, src)
}
if !errorCalled {
t.Errorf("error handler not called for %q", src)
}
if s.ErrorCount == 0 {
t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src)
}
}
func testTokenList(t *testing.T, tokenList []tokenPair) {
// create artificial source code
buf := new(bytes.Buffer)
for _, ident := range tokenList {
fmt.Fprintf(buf, "%s\n", ident.text)
}
s := New(buf.Bytes())
for _, ident := range tokenList {
tok := s.Scan()
if tok.Type != ident.tok {
t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text)
}
if tok.Text != ident.text {
t.Errorf("text = %q want %q", tok.String(), ident.text)
}
}
}
func countNewlines(s string) int {
n := 0
for _, ch := range s {
if ch == '\n' {
n++
}
}
return n
}

View file

@ -27,7 +27,7 @@ func Unquote(s string) (t string, err error) {
if quote != '"' {
return "", ErrSyntax
}
if contains(s, '\n') {
if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
return "", ErrSyntax
}
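// Illustrative note (not part of the upstream source): a literal newline
// is now only rejected up front when the string cannot contain an
// interpolation (no '$' and no '{'). Strings that do contain '${ ... }'
// may span lines inside the interpolation; a bare newline outside of one
// is still rejected by the per-character check below.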
@ -49,7 +49,7 @@ func Unquote(s string) (t string, err error) {
for len(s) > 0 {
// If we're starting a '${}' then let it through un-unquoted.
// Specifically: we don't unquote any characters within the `${}`
// section, except for escaped quotes, which we handle specifically.
// section.
if s[0] == '$' && len(s) > 1 && s[1] == '{' {
buf = append(buf, '$', '{')
s = s[2:]
@ -64,14 +64,6 @@ func Unquote(s string) (t string, err error) {
s = s[size:]
// We special case escaped double quotes in interpolations, converting
// them to straight double quotes.
if r == '\\' {
if q, _ := utf8.DecodeRuneInString(s); q == '"' {
continue
}
}
n := utf8.EncodeRune(runeTmp[:], r)
buf = append(buf, runeTmp[:n]...)
@ -95,6 +87,10 @@ func Unquote(s string) (t string, err error) {
}
}
if s[0] == '\n' {
return "", ErrSyntax
}
c, multibyte, ss, err := unquoteChar(s, quote)
if err != nil {
return "", err

View file

@ -1,93 +0,0 @@
package strconv
import "testing"
type quoteTest struct {
in string
out string
ascii string
}
var quotetests = []quoteTest{
{"\a\b\f\r\n\t\v", `"\a\b\f\r\n\t\v"`, `"\a\b\f\r\n\t\v"`},
{"\\", `"\\"`, `"\\"`},
{"abc\xffdef", `"abc\xffdef"`, `"abc\xffdef"`},
{"\u263a", `"☺"`, `"\u263a"`},
{"\U0010ffff", `"\U0010ffff"`, `"\U0010ffff"`},
{"\x04", `"\x04"`, `"\x04"`},
}
type unQuoteTest struct {
in string
out string
}
var unquotetests = []unQuoteTest{
{`""`, ""},
{`"a"`, "a"},
{`"abc"`, "abc"},
{`"☺"`, "☺"},
{`"hello world"`, "hello world"},
{`"\xFF"`, "\xFF"},
{`"\377"`, "\377"},
{`"\u1234"`, "\u1234"},
{`"\U00010111"`, "\U00010111"},
{`"\U0001011111"`, "\U0001011111"},
{`"\a\b\f\n\r\t\v\\\""`, "\a\b\f\n\r\t\v\\\""},
{`"'"`, "'"},
{`"${file("foo")}"`, `${file("foo")}`},
{`"${file(\"foo\")}"`, `${file("foo")}`},
{`"echo ${var.region}${element(split(",",var.zones),0)}"`,
`echo ${var.region}${element(split(",",var.zones),0)}`},
}
var misquoted = []string{
``,
`"`,
`"a`,
`"'`,
`b"`,
`"\"`,
`"\9"`,
`"\19"`,
`"\129"`,
`'\'`,
`'\9'`,
`'\19'`,
`'\129'`,
`'ab'`,
`"\x1!"`,
`"\U12345678"`,
`"\z"`,
"`",
"`xxx",
"`\"",
`"\'"`,
`'\"'`,
"\"\n\"",
"\"\\n\n\"",
"'\n'",
`"${"`,
`"${foo{}"`,
}
func TestUnquote(t *testing.T) {
for _, tt := range unquotetests {
if out, err := Unquote(tt.in); err != nil || out != tt.out {
t.Errorf("Unquote(%#q) = %q, %v want %q, nil", tt.in, out, err, tt.out)
}
}
// run the quote tests too, backward
for _, tt := range quotetests {
if in, err := Unquote(tt.out); in != tt.in {
t.Errorf("Unquote(%#q) = %q, %v, want %q, nil", tt.out, in, err, tt.in)
}
}
for _, s := range misquoted {
if out, err := Unquote(s); out != "" || err != ErrSyntax {
t.Errorf("Unquote(%#q) = %q, %v want %q, %v", s, out, err, "", ErrSyntax)
}
}
}

View file

@ -1,4 +0,0 @@
foo = [
"1",
"2", # comment
]

View file

@ -1,6 +0,0 @@
resource = [{
"foo": {
"bar": {},
"baz": [1, 2, "foo"],
}
}]

View file

@ -1,5 +0,0 @@
resource = [{
foo = [{
bar = {}
}]
}]

View file

@ -1,15 +0,0 @@
// Foo
/* Bar */
/*
/*
Baz
*/
# Another
# Multiple
# Lines
foo = "bar"

View file

@ -1 +0,0 @@
# Hello

View file

@ -1,42 +0,0 @@
// This comes from Terraform, as a test
variable "foo" {
default = "bar"
description = "bar"
}
provider "aws" {
access_key = "foo"
secret_key = "bar"
}
provider "do" {
api_key = "${var.foo}"
}
resource "aws_security_group" "firewall" {
count = 5
}
resource aws_instance "web" {
ami = "${var.foo}"
security_groups = [
"foo",
"${aws_security_group.firewall.foo}"
]
network_interface {
device_index = 0
description = "Main network interface"
}
}
resource "aws_instance" "db" {
security_groups = "${aws_security_group.firewall.*.id}"
VPC = "foo"
depends_on = ["aws_instance.web"]
}
output "web_ip" {
value = "${aws_instance.web.private_ip}"
}

View file

@ -1 +0,0 @@
foo.bar = "baz"

View file

@ -1 +0,0 @@
foo = [1, 2, "foo"]

View file

@ -1 +0,0 @@
foo = [1, 2, "foo",]

View file

@ -1,2 +0,0 @@
foo = "bar"
key = 7

View file

@ -1,3 +0,0 @@
default = {
"eu-west-1": "ami-b1cf19c6",
}

View file

@ -1,5 +0,0 @@
// This is a test structure for the lexer
foo bar "baz" {
key = 7
foo = "bar"
}

View file

@ -1,5 +0,0 @@
foo {
value = 7
"value" = 8
"complex::value" = 9
}

View file

@ -1 +0,0 @@
resource "foo" "bar" {}

View file

@ -1,7 +0,0 @@
foo = "bar"
bar = 7
baz = [1,2,3]
foo = -12
bar = 3.14159
foo = true
bar = false

View file

@ -142,13 +142,7 @@ func (t Token) Value() interface{} {
case IDENT:
return t.Text
case HEREDOC:
// We need to find the end of the marker
idx := strings.IndexByte(t.Text, '\n')
if idx == -1 {
panic("heredoc doesn't contain newline")
}
return string(t.Text[idx+1 : len(t.Text)-idx+1])
return unindentHeredoc(t.Text)
case STRING:
// Determine the Unquote method to use. If it came from JSON,
// then we need to use the built-in unquote since we have to
@ -158,6 +152,11 @@ func (t Token) Value() interface{} {
f = strconv.Unquote
}
// This case occurs if json null is used
if t.Text == "" {
return ""
}
v, err := f(t.Text)
if err != nil {
panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
@ -168,3 +167,53 @@ func (t Token) Value() interface{} {
panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
}
}
// unindentHeredoc returns the string content of a HEREDOC when it is started
// with <<. When it is started with <<- and every line is at least as indented
// as the terminating marker, the content is returned with the hanging indent
// removed.
func unindentHeredoc(heredoc string) string {
// We need to find the end of the marker
idx := strings.IndexByte(heredoc, '\n')
if idx == -1 {
panic("heredoc doesn't contain newline")
}
unindent := heredoc[2] == '-'
// We can optimize if the heredoc isn't marked for indentation
if !unindent {
return string(heredoc[idx+1 : len(heredoc)-idx+1])
}
// We need to unindent each line based on the indentation level of the marker
lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
whitespacePrefix := lines[len(lines)-1]
isIndented := true
for _, v := range lines {
if strings.HasPrefix(v, whitespacePrefix) {
continue
}
isIndented = false
break
}
// If not all lines are at least as indented as the terminating marker, return
// the heredoc as-is, but trim the leading whitespace from the marker on the
// final line.
if !isIndented {
return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
}
unindentedLines := make([]string, len(lines))
for k, v := range lines {
if k == len(lines)-1 {
unindentedLines[k] = ""
break
}
unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
}
return strings.Join(unindentedLines, "\n")
}
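// Illustrative sketch (not part of the upstream source): given heredoc
// token text as produced by the scanner (terminating newline included),
// both forms below yield "foo\nbar\n"; the <<- form has its hanging tab
// indentation stripped:
//
//	unindentHeredoc("<<EOF\nfoo\nbar\nEOF\n")        // "foo\nbar\n"
//	unindentHeredoc("<<-EOF\n\tfoo\n\tbar\n\tEOF\n") // "foo\nbar\n"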

View file

@ -1,63 +0,0 @@
package token
import (
"reflect"
"testing"
)
func TestTypeString(t *testing.T) {
var tokens = []struct {
tt Type
str string
}{
{ILLEGAL, "ILLEGAL"},
{EOF, "EOF"},
{COMMENT, "COMMENT"},
{IDENT, "IDENT"},
{NUMBER, "NUMBER"},
{FLOAT, "FLOAT"},
{BOOL, "BOOL"},
{STRING, "STRING"},
{HEREDOC, "HEREDOC"},
{LBRACK, "LBRACK"},
{LBRACE, "LBRACE"},
{COMMA, "COMMA"},
{PERIOD, "PERIOD"},
{RBRACK, "RBRACK"},
{RBRACE, "RBRACE"},
{ASSIGN, "ASSIGN"},
{ADD, "ADD"},
{SUB, "SUB"},
}
for _, token := range tokens {
if token.tt.String() != token.str {
t.Errorf("want: %q got:%q\n", token.str, token.tt)
}
}
}
func TestTokenValue(t *testing.T) {
var tokens = []struct {
tt Token
v interface{}
}{
{Token{Type: BOOL, Text: `true`}, true},
{Token{Type: BOOL, Text: `false`}, false},
{Token{Type: FLOAT, Text: `3.14`}, float64(3.14)},
{Token{Type: NUMBER, Text: `42`}, int64(42)},
{Token{Type: IDENT, Text: `foo`}, "foo"},
{Token{Type: STRING, Text: `"foo"`}, "foo"},
{Token{Type: STRING, Text: `"foo\nbar"`}, "foo\nbar"},
{Token{Type: STRING, Text: `"${file(\"foo\")}"`}, `${file("foo")}`},
{Token{Type: HEREDOC, Text: "<<EOF\nfoo\nbar\nEOF"}, "foo\nbar"},
}
for _, token := range tokens {
if val := token.tt.Value(); !reflect.DeepEqual(val, token.v) {
t.Errorf("want: %v got:%v\n", token.v, val)
}
}
}

View file

@ -1,19 +0,0 @@
package hcl
import (
"io/ioutil"
"path/filepath"
"testing"
)
// This is the directory where our test fixtures are.
const fixtureDir = "./test-fixtures"
func testReadFile(t *testing.T, n string) string {
d, err := ioutil.ReadFile(filepath.Join(fixtureDir, n))
if err != nil {
t.Fatalf("err: %s", err)
}
return string(d)
}

View file

@ -48,6 +48,12 @@ func flattenListType(
item *ast.ObjectItem,
items []*ast.ObjectItem,
frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
// If the list is empty, keep the original list
if len(ot.List) == 0 {
items = append(items, item)
return items, frontier
}
// All the elements of this object must also be objects!
for _, subitem := range ot.List {
if _, ok := subitem.(*ast.ObjectType); !ok {

View file

@ -5,6 +5,7 @@ import (
"fmt"
"github.com/hashicorp/hcl/hcl/ast"
hcltoken "github.com/hashicorp/hcl/hcl/token"
"github.com/hashicorp/hcl/json/scanner"
"github.com/hashicorp/hcl/json/token"
)
@ -85,6 +86,7 @@ func (p *Parser) objectList() (*ast.ObjectList, error) {
break
}
}
return node, nil
}
@ -103,6 +105,14 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
switch p.tok.Type {
case token.COLON:
pos := p.tok.Pos
o.Assign = hcltoken.Pos{
Filename: pos.Filename,
Offset: pos.Offset,
Line: pos.Line,
Column: pos.Column,
}
o.Val, err = p.objectValue()
if err != nil {
return nil, err
@ -128,6 +138,12 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
Token: p.tok.HCLToken(),
})
case token.COLON:
// If we have a zero keycount it means that we never got
// an object key, i.e. `{ :`. This is a syntax error.
if keyCount == 0 {
return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
}
// Done
return keys, nil
case token.ILLEGAL:

View file

@ -1,338 +0,0 @@
package parser
import (
"fmt"
"io/ioutil"
"path/filepath"
"reflect"
"runtime"
"testing"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/token"
)
func TestType(t *testing.T) {
var literals = []struct {
typ token.Type
src string
}{
{token.STRING, `"foo": "bar"`},
{token.NUMBER, `"foo": 123`},
{token.FLOAT, `"foo": 123.12`},
{token.FLOAT, `"foo": -123.12`},
{token.BOOL, `"foo": true`},
{token.STRING, `"foo": null`},
}
for _, l := range literals {
t.Logf("Testing: %s", l.src)
p := newParser([]byte(l.src))
item, err := p.objectItem()
if err != nil {
t.Error(err)
}
lit, ok := item.Val.(*ast.LiteralType)
if !ok {
t.Errorf("node should be of type LiteralType, got: %T", item.Val)
}
if lit.Token.Type != l.typ {
t.Errorf("want: %s, got: %s", l.typ, lit.Token.Type)
}
}
}
func TestListType(t *testing.T) {
var literals = []struct {
src string
tokens []token.Type
}{
{
`"foo": ["123", 123]`,
[]token.Type{token.STRING, token.NUMBER},
},
{
`"foo": [123, "123",]`,
[]token.Type{token.NUMBER, token.STRING},
},
{
`"foo": []`,
[]token.Type{},
},
{
`"foo": ["123", 123]`,
[]token.Type{token.STRING, token.NUMBER},
},
{
`"foo": ["123", {}]`,
[]token.Type{token.STRING, token.LBRACE},
},
}
for _, l := range literals {
t.Logf("Testing: %s", l.src)
p := newParser([]byte(l.src))
item, err := p.objectItem()
if err != nil {
t.Error(err)
}
list, ok := item.Val.(*ast.ListType)
if !ok {
t.Errorf("node should be of type LiteralType, got: %T", item.Val)
}
tokens := []token.Type{}
for _, li := range list.List {
switch v := li.(type) {
case *ast.LiteralType:
tokens = append(tokens, v.Token.Type)
case *ast.ObjectType:
tokens = append(tokens, token.LBRACE)
}
}
equals(t, l.tokens, tokens)
}
}
func TestObjectType(t *testing.T) {
var literals = []struct {
src string
nodeType []ast.Node
itemLen int
}{
{
`"foo": {}`,
nil,
0,
},
{
`"foo": {
"bar": "fatih"
}`,
[]ast.Node{&ast.LiteralType{}},
1,
},
{
`"foo": {
"bar": "fatih",
"baz": ["arslan"]
}`,
[]ast.Node{
&ast.LiteralType{},
&ast.ListType{},
},
2,
},
{
`"foo": {
"bar": {}
}`,
[]ast.Node{
&ast.ObjectType{},
},
1,
},
{
`"foo": {
"bar": {},
"foo": true
}`,
[]ast.Node{
&ast.ObjectType{},
&ast.LiteralType{},
},
2,
},
}
for _, l := range literals {
t.Logf("Testing:\n%s\n", l.src)
p := newParser([]byte(l.src))
// p.enableTrace = true
item, err := p.objectItem()
if err != nil {
t.Error(err)
}
// we know that the ObjectKey name is foo for all cases; what matters
// is the object
obj, ok := item.Val.(*ast.ObjectType)
if !ok {
t.Errorf("node should be of type LiteralType, got: %T", item.Val)
}
// check if the total length of items are correct
equals(t, l.itemLen, len(obj.List.Items))
// check if the types are correct
for i, item := range obj.List.Items {
equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
}
}
}
func TestFlattenObjects(t *testing.T) {
var literals = []struct {
src string
nodeType []ast.Node
itemLen int
}{
{
`{
"foo": [
{
"foo": "svh",
"bar": "fatih"
}
]
}`,
[]ast.Node{
&ast.ObjectType{},
&ast.LiteralType{},
&ast.LiteralType{},
},
3,
},
{
`{
"variable": {
"foo": {}
}
}`,
[]ast.Node{
&ast.ObjectType{},
},
1,
},
}
for _, l := range literals {
t.Logf("Testing:\n%s\n", l.src)
f, err := Parse([]byte(l.src))
if err != nil {
t.Error(err)
}
// the root node is always an ObjectList; assert that here
// so we can use it as such
obj, ok := f.Node.(*ast.ObjectList)
if !ok {
t.Errorf("node should be *ast.ObjectList, got: %T", f.Node)
}
// check if the types are correct
var i int
for _, item := range obj.Items {
equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
i++
if obj, ok := item.Val.(*ast.ObjectType); ok {
for _, item := range obj.List.Items {
equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
i++
}
}
}
// check if the number of items is correct
equals(t, l.itemLen, i)
}
}
func TestObjectKey(t *testing.T) {
keys := []struct {
exp []token.Type
src string
}{
{[]token.Type{token.STRING}, `"foo": {}`},
}
for _, k := range keys {
p := newParser([]byte(k.src))
keys, err := p.objectKey()
if err != nil {
t.Fatal(err)
}
tokens := []token.Type{}
for _, o := range keys {
tokens = append(tokens, o.Token.Type)
}
equals(t, k.exp, tokens)
}
errKeys := []struct {
src string
}{
{`foo 12 {}`},
{`foo bar = {}`},
{`foo []`},
{`12 {}`},
}
for _, k := range errKeys {
p := newParser([]byte(k.src))
_, err := p.objectKey()
if err == nil {
t.Errorf("case '%s' should give an error", k.src)
}
}
}
// Official HCL tests
func TestParse(t *testing.T) {
cases := []struct {
Name string
Err bool
}{
{
"array.json",
false,
},
{
"basic.json",
false,
},
{
"object.json",
false,
},
{
"types.json",
false,
},
}
const fixtureDir = "./test-fixtures"
for _, tc := range cases {
d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
if err != nil {
t.Fatalf("err: %s", err)
}
_, err = Parse(d)
if (err != nil) != tc.Err {
t.Fatalf("Input: %s\n\nError: %s", tc.Name, err)
}
}
}
// equals fails the test if exp is not equal to act.
func equals(tb testing.TB, exp, act interface{}) {
if !reflect.DeepEqual(exp, act) {
_, file, line, _ := runtime.Caller(1)
fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
tb.FailNow()
}
}

View file

@ -1,4 +0,0 @@
{
"foo": [1, 2, "bar"],
"bar": "baz"
}

View file

@ -1,3 +0,0 @@
{
"foo": "bar"
}

View file

@ -1,5 +0,0 @@
{
"foo": {
"bar": [1,2]
}
}

View file

@ -1,10 +0,0 @@
{
"foo": "bar",
"bar": 7,
"baz": [1,2,3],
"foo": -12,
"bar": 3.14159,
"foo": true,
"bar": false,
"foo": null
}

View file

@ -296,7 +296,7 @@ func (s *Scanner) scanString() {
return
}
if ch == '"' && braces == 0 {
if ch == '"' {
break
}

View file

@ -1,363 +0,0 @@
package scanner
import (
"bytes"
"fmt"
"testing"
"github.com/hashicorp/hcl/json/token"
)
var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
type tokenPair struct {
tok token.Type
text string
}
var tokenLists = map[string][]tokenPair{
"operator": []tokenPair{
{token.LBRACK, "["},
{token.LBRACE, "{"},
{token.COMMA, ","},
{token.PERIOD, "."},
{token.RBRACK, "]"},
{token.RBRACE, "}"},
},
"bool": []tokenPair{
{token.BOOL, "true"},
{token.BOOL, "false"},
},
"string": []tokenPair{
{token.STRING, `" "`},
{token.STRING, `"a"`},
{token.STRING, `"本"`},
{token.STRING, `"${file("foo")}"`},
{token.STRING, `"${file(\"foo\")}"`},
{token.STRING, `"\a"`},
{token.STRING, `"\b"`},
{token.STRING, `"\f"`},
{token.STRING, `"\n"`},
{token.STRING, `"\r"`},
{token.STRING, `"\t"`},
{token.STRING, `"\v"`},
{token.STRING, `"\""`},
{token.STRING, `"\000"`},
{token.STRING, `"\777"`},
{token.STRING, `"\x00"`},
{token.STRING, `"\xff"`},
{token.STRING, `"\u0000"`},
{token.STRING, `"\ufA16"`},
{token.STRING, `"\U00000000"`},
{token.STRING, `"\U0000ffAB"`},
{token.STRING, `"` + f100 + `"`},
},
"number": []tokenPair{
{token.NUMBER, "0"},
{token.NUMBER, "1"},
{token.NUMBER, "9"},
{token.NUMBER, "42"},
{token.NUMBER, "1234567890"},
{token.NUMBER, "-0"},
{token.NUMBER, "-1"},
{token.NUMBER, "-9"},
{token.NUMBER, "-42"},
{token.NUMBER, "-1234567890"},
},
"float": []tokenPair{
{token.FLOAT, "0."},
{token.FLOAT, "1."},
{token.FLOAT, "42."},
{token.FLOAT, "01234567890."},
{token.FLOAT, ".0"},
{token.FLOAT, ".1"},
{token.FLOAT, ".42"},
{token.FLOAT, ".0123456789"},
{token.FLOAT, "0.0"},
{token.FLOAT, "1.0"},
{token.FLOAT, "42.0"},
{token.FLOAT, "01234567890.0"},
{token.FLOAT, "0e0"},
{token.FLOAT, "1e0"},
{token.FLOAT, "42e0"},
{token.FLOAT, "01234567890e0"},
{token.FLOAT, "0E0"},
{token.FLOAT, "1E0"},
{token.FLOAT, "42E0"},
{token.FLOAT, "01234567890E0"},
{token.FLOAT, "0e+10"},
{token.FLOAT, "1e-10"},
{token.FLOAT, "42e+10"},
{token.FLOAT, "01234567890e-10"},
{token.FLOAT, "0E+10"},
{token.FLOAT, "1E-10"},
{token.FLOAT, "42E+10"},
{token.FLOAT, "01234567890E-10"},
{token.FLOAT, "01.8e0"},
{token.FLOAT, "1.4e0"},
{token.FLOAT, "42.2e0"},
{token.FLOAT, "01234567890.12e0"},
{token.FLOAT, "0.E0"},
{token.FLOAT, "1.12E0"},
{token.FLOAT, "42.123E0"},
{token.FLOAT, "01234567890.213E0"},
{token.FLOAT, "0.2e+10"},
{token.FLOAT, "1.2e-10"},
{token.FLOAT, "42.54e+10"},
{token.FLOAT, "01234567890.98e-10"},
{token.FLOAT, "0.1E+10"},
{token.FLOAT, "1.1E-10"},
{token.FLOAT, "42.1E+10"},
{token.FLOAT, "01234567890.1E-10"},
{token.FLOAT, "-0.0"},
{token.FLOAT, "-1.0"},
{token.FLOAT, "-42.0"},
{token.FLOAT, "-01234567890.0"},
{token.FLOAT, "-0e0"},
{token.FLOAT, "-1e0"},
{token.FLOAT, "-42e0"},
{token.FLOAT, "-01234567890e0"},
{token.FLOAT, "-0E0"},
{token.FLOAT, "-1E0"},
{token.FLOAT, "-42E0"},
{token.FLOAT, "-01234567890E0"},
{token.FLOAT, "-0e+10"},
{token.FLOAT, "-1e-10"},
{token.FLOAT, "-42e+10"},
{token.FLOAT, "-01234567890e-10"},
{token.FLOAT, "-0E+10"},
{token.FLOAT, "-1E-10"},
{token.FLOAT, "-42E+10"},
{token.FLOAT, "-01234567890E-10"},
{token.FLOAT, "-01.8e0"},
{token.FLOAT, "-1.4e0"},
{token.FLOAT, "-42.2e0"},
{token.FLOAT, "-01234567890.12e0"},
{token.FLOAT, "-0.E0"},
{token.FLOAT, "-1.12E0"},
{token.FLOAT, "-42.123E0"},
{token.FLOAT, "-01234567890.213E0"},
{token.FLOAT, "-0.2e+10"},
{token.FLOAT, "-1.2e-10"},
{token.FLOAT, "-42.54e+10"},
{token.FLOAT, "-01234567890.98e-10"},
{token.FLOAT, "-0.1E+10"},
{token.FLOAT, "-1.1E-10"},
{token.FLOAT, "-42.1E+10"},
{token.FLOAT, "-01234567890.1E-10"},
},
}
var orderedTokenLists = []string{
"comment",
"operator",
"bool",
"string",
"number",
"float",
}
func TestPosition(t *testing.T) {
// create artificial source code
buf := new(bytes.Buffer)
for _, listName := range orderedTokenLists {
for _, ident := range tokenLists[listName] {
fmt.Fprintf(buf, "\t\t\t\t%s\n", ident.text)
}
}
s := New(buf.Bytes())
pos := token.Pos{"", 4, 1, 5}
s.Scan()
for _, listName := range orderedTokenLists {
for _, k := range tokenLists[listName] {
curPos := s.tokPos
// fmt.Printf("[%q] s = %+v:%+v\n", k.text, curPos.Offset, curPos.Column)
if curPos.Offset != pos.Offset {
t.Fatalf("offset = %d, want %d for %q", curPos.Offset, pos.Offset, k.text)
}
if curPos.Line != pos.Line {
t.Fatalf("line = %d, want %d for %q", curPos.Line, pos.Line, k.text)
}
if curPos.Column != pos.Column {
t.Fatalf("column = %d, want %d for %q", curPos.Column, pos.Column, k.text)
}
pos.Offset += 4 + len(k.text) + 1 // 4 tabs + token bytes + newline
pos.Line += countNewlines(k.text) + 1 // each token is on a new line
s.Error = func(pos token.Pos, msg string) {
t.Errorf("error %q for %q", msg, k.text)
}
s.Scan()
}
}
// make sure there were no token-internal errors reported by scanner
if s.ErrorCount != 0 {
t.Errorf("%d errors", s.ErrorCount)
}
}
func TestComment(t *testing.T) {
testTokenList(t, tokenLists["comment"])
}
func TestOperator(t *testing.T) {
testTokenList(t, tokenLists["operator"])
}
func TestBool(t *testing.T) {
testTokenList(t, tokenLists["bool"])
}
func TestIdent(t *testing.T) {
testTokenList(t, tokenLists["ident"])
}
func TestString(t *testing.T) {
testTokenList(t, tokenLists["string"])
}
func TestNumber(t *testing.T) {
testTokenList(t, tokenLists["number"])
}
func TestFloat(t *testing.T) {
testTokenList(t, tokenLists["float"])
}
func TestRealExample(t *testing.T) {
complexReal := `
{
"variable": {
"foo": {
"default": "bar",
"description": "bar",
"depends_on": ["something"]
}
}
}`
literals := []struct {
tokenType token.Type
literal string
}{
{token.LBRACE, `{`},
{token.STRING, `"variable"`},
{token.COLON, `:`},
{token.LBRACE, `{`},
{token.STRING, `"foo"`},
{token.COLON, `:`},
{token.LBRACE, `{`},
{token.STRING, `"default"`},
{token.COLON, `:`},
{token.STRING, `"bar"`},
{token.COMMA, `,`},
{token.STRING, `"description"`},
{token.COLON, `:`},
{token.STRING, `"bar"`},
{token.COMMA, `,`},
{token.STRING, `"depends_on"`},
{token.COLON, `:`},
{token.LBRACK, `[`},
{token.STRING, `"something"`},
{token.RBRACK, `]`},
{token.RBRACE, `}`},
{token.RBRACE, `}`},
{token.RBRACE, `}`},
{token.EOF, ``},
}
s := New([]byte(complexReal))
for _, l := range literals {
tok := s.Scan()
if l.tokenType != tok.Type {
t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String())
}
if l.literal != tok.Text {
t.Errorf("got: %s want %s\n", tok, l.literal)
}
}
}
func TestError(t *testing.T) {
testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", token.STRING)
testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", token.STRING)
testError(t, `01238`, "1:7", "numbers cannot start with 0", token.NUMBER)
testError(t, `01238123`, "1:10", "numbers cannot start with 0", token.NUMBER)
testError(t, `'aa'`, "1:1", "illegal char: '", token.ILLEGAL)
testError(t, `"`, "1:2", "literal not terminated", token.STRING)
testError(t, `"abc`, "1:5", "literal not terminated", token.STRING)
testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING)
}
func testError(t *testing.T, src, pos, msg string, tok token.Type) {
s := New([]byte(src))
errorCalled := false
s.Error = func(p token.Pos, m string) {
if !errorCalled {
if pos != p.String() {
t.Errorf("pos = %q, want %q for %q", p, pos, src)
}
if m != msg {
t.Errorf("msg = %q, want %q for %q", m, msg, src)
}
errorCalled = true
}
}
tk := s.Scan()
if tk.Type != tok {
t.Errorf("tok = %s, want %s for %q", tk, tok, src)
}
if !errorCalled {
t.Errorf("error handler not called for %q", src)
}
if s.ErrorCount == 0 {
t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src)
}
}
func testTokenList(t *testing.T, tokenList []tokenPair) {
// create artificial source code
buf := new(bytes.Buffer)
for _, ident := range tokenList {
fmt.Fprintf(buf, "%s\n", ident.text)
}
s := New(buf.Bytes())
for _, ident := range tokenList {
tok := s.Scan()
if tok.Type != ident.tok {
t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text)
}
if tok.Text != ident.text {
t.Errorf("text = %q want %q", tok.String(), ident.text)
}
}
}
func countNewlines(s string) int {
n := 0
for _, ch := range s {
if ch == '\n' {
n++
}
}
return n
}

View file

@ -1,4 +0,0 @@
{
"foo": [1, 2, "bar"],
"bar": "baz"
}

View file

@ -1,3 +0,0 @@
{
"foo": "bar"
}

View file

@ -1,5 +0,0 @@
{
"foo": {
"bar": [1,2]
}
}

View file

@ -1,10 +0,0 @@
{
"foo": "bar",
"bar": 7,
"baz": [1,2,3],
"foo": -12,
"bar": 3.14159,
"foo": true,
"bar": false,
"foo": null
}

View file

@ -1,34 +0,0 @@
package token
import (
"testing"
)
func TestTypeString(t *testing.T) {
var tokens = []struct {
tt Type
str string
}{
{ILLEGAL, "ILLEGAL"},
{EOF, "EOF"},
{NUMBER, "NUMBER"},
{FLOAT, "FLOAT"},
{BOOL, "BOOL"},
{STRING, "STRING"},
{NULL, "NULL"},
{LBRACK, "LBRACK"},
{LBRACE, "LBRACE"},
{COMMA, "COMMA"},
{PERIOD, "PERIOD"},
{RBRACK, "RBRACK"},
{RBRACE, "RBRACE"},
}
for _, token := range tokens {
if token.tt.String() != token.str {
t.Errorf("want: %q got:%q\n", token.str, token.tt)
}
}
}

View file

@ -2,6 +2,7 @@ package hcl
import (
"unicode"
"unicode/utf8"
)
type lexModeValue byte
@ -14,17 +15,23 @@ const (
// lexMode returns whether we're going to be parsing in JSON
// mode or HCL mode.
func lexMode(v string) lexModeValue {
for _, r := range v {
func lexMode(v []byte) lexModeValue {
var (
r rune
w int
offset int
)
for {
r, w = utf8.DecodeRune(v[offset:])
offset += w
if unicode.IsSpace(r) {
continue
}
if r == '{' {
return lexModeJson
} else {
return lexModeHcl
}
break
}
return lexModeHcl
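// Illustrative sketch (not part of the upstream source): the first
// non-space rune decides the mode, so
//
//	lexMode([]byte(`{"foo": "bar"}`)) // lexModeJson
//	lexMode([]byte(`foo = "bar"`))    // lexModeHcl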

View file

@ -1,37 +0,0 @@
package hcl
import (
"testing"
)
func TestLexMode(t *testing.T) {
cases := []struct {
Input string
Mode lexModeValue
}{
{
"",
lexModeHcl,
},
{
"foo",
lexModeHcl,
},
{
"{}",
lexModeJson,
},
{
" {}",
lexModeJson,
},
}
for i, tc := range cases {
actual := lexMode(tc.Input)
if actual != tc.Mode {
t.Fatalf("%d: %#v", i, actual)
}
}
}

View file

@ -8,16 +8,32 @@ import (
jsonParser "github.com/hashicorp/hcl/json/parser"
)
// Parse parses the given input and returns the root object.
// ParseBytes accepts a byte slice as input and returns the AST tree.
//
// The input format can be either HCL or JSON.
func Parse(input string) (*ast.File, error) {
switch lexMode(input) {
// Input can be either JSON or HCL
func ParseBytes(in []byte) (*ast.File, error) {
return parse(in)
}
// ParseString accepts a string as input and returns the AST tree.
func ParseString(input string) (*ast.File, error) {
return parse([]byte(input))
}
func parse(in []byte) (*ast.File, error) {
switch lexMode(in) {
case lexModeHcl:
return hclParser.Parse([]byte(input))
return hclParser.Parse(in)
case lexModeJson:
return jsonParser.Parse([]byte(input))
return jsonParser.Parse(in)
}
return nil, fmt.Errorf("unknown config format")
}
// Parse parses the given input and returns the root object.
//
// The input format can be either HCL or JSON.
func Parse(input string) (*ast.File, error) {
return parse([]byte(input))
}
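// Illustrative sketch (not part of the upstream source): all three entry
// points accept either HCL or JSON input and return the same AST type.
//
//	f1, err := ParseString(`foo = "bar"`)
//	f2, err := ParseBytes([]byte(`{"foo": "bar"}`))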

View file

@ -1,2 +0,0 @@
foo = "bar"
bar = "${file("bing/bong.txt")}"

View file

@ -1,4 +0,0 @@
{
"foo": "bar",
"bar": "${file(\"bing/bong.txt\")}"
}

View file

@ -1 +0,0 @@
count = "3"

View file

@ -1,3 +0,0 @@
foo="bar"
bar="${file("bing/bong.txt")}"
foo-bar="baz"

View file

@ -1,15 +0,0 @@
key "" {
policy = "read"
}
key "foo/" {
policy = "write"
}
key "foo/bar/" {
policy = "read"
}
key "foo/bar/baz" {
policy = "deny"
}

View file

@ -1,19 +0,0 @@
{
"key": {
"": {
"policy": "read"
},
"foo/": {
"policy": "write"
},
"foo/bar/": {
"policy": "read"
},
"foo/bar/baz": {
"policy": "deny"
}
}
}

View file

@ -1,10 +0,0 @@
variable "foo" {
default = "bar"
description = "bar"
}
variable "amis" {
default = {
east = "foo"
}
}

View file

@ -1,14 +0,0 @@
{
"variable": {
"foo": {
"default": "bar",
"description": "bar"
},
"amis": {
"default": {
"east": "foo"
}
}
}
}

View file

@ -1 +0,0 @@
resource "foo" {}

View file

@ -1 +0,0 @@
foo = "bar\"baz\\n"

View file

@ -1,2 +0,0 @@
foo = "bar"
Key = 7

View file

@ -1 +0,0 @@
a = 1.02

Some files were not shown because too many files have changed in this diff