Switch from Godep to go vendoring
This commit is contained in:
parent
6b37713bc0
commit
cd317761c5
@ -8,13 +8,14 @@ go:
|
|||||||
install:
|
install:
|
||||||
- go get github.com/jteeuwen/go-bindata/...
|
- go get github.com/jteeuwen/go-bindata/...
|
||||||
- cd client
|
- cd client
|
||||||
- nvm install 5.0
|
- nvm install 4.3.1
|
||||||
- nvm use 5.0
|
- nvm use 4.3.1
|
||||||
- npm install -g gulp
|
- npm install -g gulp
|
||||||
- npm install
|
- npm install
|
||||||
|
|
||||||
script:
|
script:
|
||||||
- gulp build
|
- gulp build
|
||||||
- cd ..
|
- cd ..
|
||||||
- go vet ./...
|
- export GO15VENDOREXPERIMENT=1
|
||||||
- go test -v -race ./...
|
- go vet $(go list ./... | grep -v '/vendor/')
|
||||||
|
- go test -v -race $(go list ./... | grep -v '/vendor/')
|
||||||
|
167
Godeps/Godeps.json
generated
167
Godeps/Godeps.json
generated
@ -1,167 +0,0 @@
|
|||||||
{
|
|
||||||
"ImportPath": "github.com/khlieng/dispatch",
|
|
||||||
"GoVersion": "go1.5.3",
|
|
||||||
"Packages": [
|
|
||||||
"./..."
|
|
||||||
],
|
|
||||||
"Deps": [
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/BurntSushi/toml",
|
|
||||||
"Comment": "v0.1.0-21-g056c9bc",
|
|
||||||
"Rev": "056c9bc7be7190eaa7715723883caffa5f8fa3e4"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/blevesearch/bleve",
|
|
||||||
"Rev": "16f538d7b76dd85c935a3104c390307cae5cbf79"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/blevesearch/go-porterstemmer",
|
|
||||||
"Comment": "v1.0.1-9-g23a2c8e",
|
|
||||||
"Rev": "23a2c8e5cf1f380f27722c6d2ae8896431dc7d0e"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/blevesearch/segment",
|
|
||||||
"Rev": "9588637ce3caba8516208ccc17193ddedd741418"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/boltdb/bolt",
|
|
||||||
"Comment": "v1.1.0-61-g6465994",
|
|
||||||
"Rev": "6465994716bf6400605746e79224cf1e7ed68725"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/cznic/b",
|
|
||||||
"Rev": "c4adf3a58579a2d57cd3097f455dcdf75edcdfd8"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/dgrijalva/jwt-go",
|
|
||||||
"Comment": "v2.4.0-4-gafef698",
|
|
||||||
"Rev": "afef698c326bfd906b11659432544e5aae441d44"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/golang/protobuf/proto",
|
|
||||||
"Rev": "655cdfa588ea190e901bc5590e65d5621688847c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/gorilla/websocket",
|
|
||||||
"Rev": "3986be78bf859e01f01af631ad76da5b269d270c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/hashicorp/hcl",
|
|
||||||
"Rev": "197e8d3cf42199cfd53cd775deb37f3637234635"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/jpillora/backoff",
|
|
||||||
"Rev": "2ff7c4694083b5dbd71b21fd7cb7577477a74b31"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/kr/pretty",
|
|
||||||
"Comment": "go.weekly.2011-12-22-27-ge6ac2fc",
|
|
||||||
"Rev": "e6ac2fc51e89a3249e82157fa0bb7a18ef9dd5bb"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/kr/text",
|
|
||||||
"Rev": "e373e137fafd8abd480af49182dea0513914adb4"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/magiconair/properties",
|
|
||||||
"Comment": "v1.5.2",
|
|
||||||
"Rev": "d5929c67198951106f49f7ea425198d0f1a08f7f"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/matryer/resync",
|
|
||||||
"Rev": "3d7f7ed881e1fcb5d89be33f3eb4717ed379e7b1"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/mitchellh/go-homedir",
|
|
||||||
"Rev": "d682a8f0cf139663a984ff12528da460ca963de9"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/mitchellh/mapstructure",
|
|
||||||
"Rev": "f7d28d5aeab42b9b95d2e6d6b956f73a290077fc"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/ryszard/goskiplist/skiplist",
|
|
||||||
"Rev": "2dfbae5fcf46374f166f8969cb07e167f1be6273"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/spf13/cast",
|
|
||||||
"Rev": "ee7b3e0353166ab1f3a605294ac8cd2b77953778"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/spf13/cobra",
|
|
||||||
"Rev": "9c9300901990faada0c5fb3b5730f452585c7c2b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/spf13/jwalterweatherman",
|
|
||||||
"Rev": "3d60171a64319ef63c78bd45bd60e6eab1e75f8b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/spf13/pflag",
|
|
||||||
"Rev": "7f60f83a2c81bc3c3c0d5297f61ddfa68da9d3b7"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/spf13/viper",
|
|
||||||
"Rev": "a212099cbe6fbe8d07476bfda8d2d39b6ff8f325"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/square/go-jose",
|
|
||||||
"Rev": "37934a899dd03635373fd1e143936d32cfe48d31"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/steveyen/gtreap",
|
|
||||||
"Rev": "72cd76f34c91f8d64a031af97b499e4a0b1a6e0c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/stretchr/testify/assert",
|
|
||||||
"Comment": "v1.0-12-g7e4a149",
|
|
||||||
"Rev": "7e4a149930b09fe4c2b134c50ce637457ba6e966"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
|
|
||||||
"Rev": "4875955338b0a434238a31165cb87255ab6e9e4a"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/syndtr/gosnappy/snappy",
|
|
||||||
"Rev": "156a073208e131d7d2e212cb749feae7c339e846"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/willf/bitset",
|
|
||||||
"Comment": "v1.0.0-17-g4b22041",
|
|
||||||
"Rev": "4b220417a489359f934045d0509d941a7a2a1038"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/xenolf/lego/acme",
|
|
||||||
"Comment": "v0.2.0-6-gdb3a956",
|
|
||||||
"Rev": "db3a956d52bf23cc5201fe98bc9c9787d3b32c2d"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/crypto/ocsp",
|
|
||||||
"Rev": "644910e6da851dcd66a424c71d068d971cfacba5"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/crypto/sha3",
|
|
||||||
"Rev": "644910e6da851dcd66a424c71d068d971cfacba5"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/net/http2",
|
|
||||||
"Rev": "f1d3149ecb40ffadf4a28d39a30f9a125fe57bdf"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/text/transform",
|
|
||||||
"Rev": "c92eb3cd6e70951a111680995e651ea4b2c35539"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/text/unicode/norm",
|
|
||||||
"Rev": "c92eb3cd6e70951a111680995e651ea4b2c35539"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "gopkg.in/fsnotify.v1",
|
|
||||||
"Comment": "v1.2.9",
|
|
||||||
"Rev": "8611c35ab31c1c28aa903d33cf8b6e44a399b09e"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "gopkg.in/yaml.v2",
|
|
||||||
"Rev": "49c95bdc21843256fb6c4e0d370a05f24a0bf213"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
5
Godeps/Readme
generated
5
Godeps/Readme
generated
@ -1,5 +0,0 @@
|
|||||||
This directory tree is generated automatically by godep.
|
|
||||||
|
|
||||||
Please do not edit.
|
|
||||||
|
|
||||||
See https://github.com/tools/godep for more information.
|
|
2
Godeps/_workspace/.gitignore
generated
vendored
2
Godeps/_workspace/.gitignore
generated
vendored
@ -1,2 +0,0 @@
|
|||||||
/pkg
|
|
||||||
/bin
|
|
5
Godeps/_workspace/src/github.com/BurntSushi/toml/.gitignore
generated
vendored
5
Godeps/_workspace/src/github.com/BurntSushi/toml/.gitignore
generated
vendored
@ -1,5 +0,0 @@
|
|||||||
TAGS
|
|
||||||
tags
|
|
||||||
.*.swp
|
|
||||||
tomlcheck/tomlcheck
|
|
||||||
toml.test
|
|
12
Godeps/_workspace/src/github.com/BurntSushi/toml/.travis.yml
generated
vendored
12
Godeps/_workspace/src/github.com/BurntSushi/toml/.travis.yml
generated
vendored
@ -1,12 +0,0 @@
|
|||||||
language: go
|
|
||||||
go:
|
|
||||||
- 1.1
|
|
||||||
- 1.2
|
|
||||||
- tip
|
|
||||||
install:
|
|
||||||
- go install ./...
|
|
||||||
- go get github.com/BurntSushi/toml-test
|
|
||||||
script:
|
|
||||||
- export PATH="$PATH:$HOME/gopath/bin"
|
|
||||||
- make test
|
|
||||||
|
|
17
Godeps/_workspace/src/github.com/blevesearch/bleve/.gitignore
generated
vendored
17
Godeps/_workspace/src/github.com/blevesearch/bleve/.gitignore
generated
vendored
@ -1,17 +0,0 @@
|
|||||||
#*
|
|
||||||
*.sublime-*
|
|
||||||
*~
|
|
||||||
.#*
|
|
||||||
.project
|
|
||||||
.settings
|
|
||||||
.DS_Store
|
|
||||||
/analysis/token_filters/cld2/cld2-read-only
|
|
||||||
/analysis/token_filters/cld2/libcld2_full.a
|
|
||||||
/utils/bleve_create/bleve_create
|
|
||||||
/utils/bleve_dump/bleve_dump
|
|
||||||
/utils/bleve_index/bleve_index
|
|
||||||
/utils/bleve_bulkindex/bleve_bulkindex
|
|
||||||
/utils/bleve_index/index.bleve/
|
|
||||||
/utils/bleve_query/bleve_query
|
|
||||||
/utils/bleve_registry/bleve_registry
|
|
||||||
/y.output
|
|
19
Godeps/_workspace/src/github.com/blevesearch/bleve/.travis.yml
generated
vendored
19
Godeps/_workspace/src/github.com/blevesearch/bleve/.travis.yml
generated
vendored
@ -1,19 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.4
|
|
||||||
|
|
||||||
script:
|
|
||||||
- go get golang.org/x/tools/cmd/vet
|
|
||||||
- go get golang.org/x/tools/cmd/cover
|
|
||||||
- go get github.com/mattn/goveralls
|
|
||||||
- go get github.com/kisielk/errcheck
|
|
||||||
- go test -v ./...
|
|
||||||
- go vet ./...
|
|
||||||
- errcheck ./...
|
|
||||||
- docs/project-code-coverage.sh
|
|
||||||
- docs/build_children.sh
|
|
||||||
|
|
||||||
notifications:
|
|
||||||
email:
|
|
||||||
- marty.schoch@gmail.com
|
|
167
Godeps/_workspace/src/github.com/blevesearch/bleve/config.go
generated
vendored
167
Godeps/_workspace/src/github.com/blevesearch/bleve/config.go
generated
vendored
@ -1,167 +0,0 @@
|
|||||||
// Copyright (c) 2014 Couchbase, Inc.
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
|
|
||||||
// except in compliance with the License. You may obtain a copy of the License at
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
// Unless required by applicable law or agreed to in writing, software distributed under the
|
|
||||||
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
|
|
||||||
// either express or implied. See the License for the specific language governing permissions
|
|
||||||
// and limitations under the License.
|
|
||||||
|
|
||||||
package bleve
|
|
||||||
|
|
||||||
import (
|
|
||||||
"expvar"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/analyzers/custom_analyzer"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/analyzers/keyword_analyzer"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/analyzers/simple_analyzer"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/analyzers/standard_analyzer"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/byte_array_converters/ignore" // token filters
|
|
||||||
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/byte_array_converters/json"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/byte_array_converters/string"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/char_filters/html_char_filter"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/char_filters/regexp_char_filter"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/char_filters/zero_width_non_joiner"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/datetime_parsers/datetime_optional" // analyzers
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/datetime_parsers/flexible_go"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/ar" // languages
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/bg"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/ca"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/cjk"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/ckb"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/cs"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/da"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/de"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/el"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/en"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/es"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/eu"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/fa"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/fi"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/fr"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/ga"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/gl"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/hi"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/hu"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/hy"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/id"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/in"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/it"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/nl"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/no"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/pt"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/ro"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/ru"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/sv"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/th"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/language/tr"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/token_filters/apostrophe_filter" // kv stores
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/token_filters/compound"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/token_filters/edge_ngram_filter"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/token_filters/elision_filter"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/token_filters/keyword_marker_filter"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/token_filters/length_filter"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/token_filters/lower_case_filter"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/token_filters/ngram_filter"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/token_filters/shingle"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/token_filters/stop_tokens_filter"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/token_filters/truncate_token_filter"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/token_filters/unicode_normalize"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/token_map" // tokenizers
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/tokenizers/exception" // fragment formatters
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/tokenizers/regexp_tokenizer"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/tokenizers/single_token"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/tokenizers/unicode"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/analysis/tokenizers/whitespace_tokenizer"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/index/store/boltdb" // date time parsers
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/index/store/goleveldb"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/index/store/gtreap"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/index/store/inmem"
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/index/upside_down" // byte array converters
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/registry"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/search/highlight/fragment_formatters/ansi"
|
|
||||||
|
|
||||||
// token maps
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/search/highlight/fragment_formatters/html"
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/search/highlight/fragmenters/simple" // fragmenters
|
|
||||||
_ "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve/search/highlight/highlighters/simple" // highlighters
|
|
||||||
)
|
|
||||||
|
|
||||||
var bleveExpVar = expvar.NewMap("bleve")
|
|
||||||
|
|
||||||
type configuration struct // char filters
|
|
||||||
{
|
|
||||||
Cache *registry.Cache
|
|
||||||
DefaultHighlighter string
|
|
||||||
DefaultKVStore string
|
|
||||||
SlowSearchLogThreshold time.Duration
|
|
||||||
analysisQueue *upside_down.AnalysisQueue
|
|
||||||
}
|
|
||||||
|
|
||||||
func newConfiguration() *configuration {
|
|
||||||
return &configuration{
|
|
||||||
Cache: registry.NewCache(),
|
|
||||||
analysisQueue: upside_down.NewAnalysisQueue(4),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Config contains library level configuration
|
|
||||||
var Config *configuration
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
bootStart := time.Now()
|
|
||||||
|
|
||||||
// build the default configuration
|
|
||||||
Config = newConfiguration()
|
|
||||||
|
|
||||||
_, err := Config.Cache.DefineFragmentFormatter("highlightSpanHTML",
|
|
||||||
map[string]interface{}{
|
|
||||||
"type": "html",
|
|
||||||
"before": `<span class="highlight">`,
|
|
||||||
"after": `</span>`,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = Config.Cache.DefineHighlighter("html",
|
|
||||||
map[string]interface{}{
|
|
||||||
"type": "simple",
|
|
||||||
"fragmenter": "simple",
|
|
||||||
"formatter": "highlightSpanHTML",
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = Config.Cache.DefineHighlighter("ansi",
|
|
||||||
map[string]interface{}{
|
|
||||||
"type": "simple",
|
|
||||||
"fragmenter": "simple",
|
|
||||||
"formatter": "ansi",
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// set the default highlighter
|
|
||||||
Config.DefaultHighlighter = "html"
|
|
||||||
|
|
||||||
// default kv store
|
|
||||||
Config.DefaultKVStore = "boltdb"
|
|
||||||
|
|
||||||
bootDuration := time.Since(bootStart)
|
|
||||||
bleveExpVar.Add("bootDuration", int64(bootDuration))
|
|
||||||
}
|
|
||||||
|
|
||||||
var logger = log.New(ioutil.Discard, "bleve", log.LstdFlags)
|
|
||||||
|
|
||||||
// SetLog sets the logger used for logging
|
|
||||||
// by default log messages are sent to ioutil.Discard
|
|
||||||
func SetLog(l *log.Logger) {
|
|
||||||
logger = l
|
|
||||||
}
|
|
8
Godeps/_workspace/src/github.com/blevesearch/go-porterstemmer/.gitignore
generated
vendored
8
Godeps/_workspace/src/github.com/blevesearch/go-porterstemmer/.gitignore
generated
vendored
@ -1,8 +0,0 @@
|
|||||||
#*
|
|
||||||
*.sublime-*
|
|
||||||
*~
|
|
||||||
.#*
|
|
||||||
.project
|
|
||||||
.settings
|
|
||||||
.DS_Store
|
|
||||||
/testdata
|
|
16
Godeps/_workspace/src/github.com/blevesearch/go-porterstemmer/.travis.yml
generated
vendored
16
Godeps/_workspace/src/github.com/blevesearch/go-porterstemmer/.travis.yml
generated
vendored
@ -1,16 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.4
|
|
||||||
|
|
||||||
script:
|
|
||||||
- go get golang.org/x/tools/cmd/vet
|
|
||||||
- go get golang.org/x/tools/cmd/cover
|
|
||||||
- go get github.com/mattn/goveralls
|
|
||||||
- go test -v -covermode=count -coverprofile=profile.out
|
|
||||||
- go vet
|
|
||||||
- goveralls -service drone.io -coverprofile=profile.out -repotoken $COVERALLS
|
|
||||||
|
|
||||||
notifications:
|
|
||||||
email:
|
|
||||||
- marty.schoch@gmail.com
|
|
9
Godeps/_workspace/src/github.com/blevesearch/segment/.gitignore
generated
vendored
9
Godeps/_workspace/src/github.com/blevesearch/segment/.gitignore
generated
vendored
@ -1,9 +0,0 @@
|
|||||||
#*
|
|
||||||
*.sublime-*
|
|
||||||
*~
|
|
||||||
.#*
|
|
||||||
.project
|
|
||||||
.settings
|
|
||||||
.DS_Store
|
|
||||||
/maketables
|
|
||||||
/maketesttables
|
|
16
Godeps/_workspace/src/github.com/blevesearch/segment/.travis.yml
generated
vendored
16
Godeps/_workspace/src/github.com/blevesearch/segment/.travis.yml
generated
vendored
@ -1,16 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.4
|
|
||||||
|
|
||||||
script:
|
|
||||||
- go get golang.org/x/tools/cmd/vet
|
|
||||||
- go get golang.org/x/tools/cmd/cover
|
|
||||||
- go get github.com/mattn/goveralls
|
|
||||||
- go test -v -covermode=count -coverprofile=profile.out
|
|
||||||
- go vet
|
|
||||||
- goveralls -service drone.io -coverprofile=profile.out -repotoken $COVERALLS
|
|
||||||
|
|
||||||
notifications:
|
|
||||||
email:
|
|
||||||
- marty.schoch@gmail.com
|
|
9
Godeps/_workspace/src/github.com/blevesearch/segment/Makefile
generated
vendored
9
Godeps/_workspace/src/github.com/blevesearch/segment/Makefile
generated
vendored
@ -1,9 +0,0 @@
|
|||||||
maketables: maketables.go maketesttables.go
|
|
||||||
go build maketables.go
|
|
||||||
go build maketesttables.go
|
|
||||||
|
|
||||||
tables: maketables
|
|
||||||
./maketables > tables.go
|
|
||||||
gofmt -w tables.go
|
|
||||||
./maketesttables > tables_test.go
|
|
||||||
gofmt -w tables_test.go
|
|
279
Godeps/_workspace/src/github.com/blevesearch/segment/maketables.go
generated
vendored
279
Godeps/_workspace/src/github.com/blevesearch/segment/maketables.go
generated
vendored
@ -1,279 +0,0 @@
|
|||||||
// Copyright (c) 2014 Couchbase, Inc.
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
|
|
||||||
// except in compliance with the License. You may obtain a copy of the License at
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
// Unless required by applicable law or agreed to in writing, software distributed under the
|
|
||||||
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
|
|
||||||
// either express or implied. See the License for the specific language governing permissions
|
|
||||||
// and limitations under the License.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
var url = flag.String("url",
|
|
||||||
"http://www.unicode.org/Public/"+unicode.Version+"/ucd/auxiliary/",
|
|
||||||
"URL of Unicode database directory")
|
|
||||||
var verbose = flag.Bool("verbose",
|
|
||||||
false,
|
|
||||||
"write data to stdout as it is parsed")
|
|
||||||
var localFiles = flag.Bool("local",
|
|
||||||
false,
|
|
||||||
"data files have been copied to the current directory; for debugging only")
|
|
||||||
var outputFile = flag.String("output",
|
|
||||||
"",
|
|
||||||
"output file for generated tables; default stdout")
|
|
||||||
|
|
||||||
var output *bufio.Writer
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
flag.Parse()
|
|
||||||
setupOutput()
|
|
||||||
|
|
||||||
graphemePropertyRanges := make(map[string]*unicode.RangeTable)
|
|
||||||
loadUnicodeData("GraphemeBreakProperty.txt", graphemePropertyRanges)
|
|
||||||
wordPropertyRanges := make(map[string]*unicode.RangeTable)
|
|
||||||
loadUnicodeData("WordBreakProperty.txt", wordPropertyRanges)
|
|
||||||
sentencePropertyRanges := make(map[string]*unicode.RangeTable)
|
|
||||||
loadUnicodeData("SentenceBreakProperty.txt", sentencePropertyRanges)
|
|
||||||
|
|
||||||
fmt.Fprintf(output, fileHeader, *url)
|
|
||||||
generateTables("Grapheme", graphemePropertyRanges)
|
|
||||||
generateTables("Word", wordPropertyRanges)
|
|
||||||
generateTables("Sentence", sentencePropertyRanges)
|
|
||||||
|
|
||||||
flushOutput()
|
|
||||||
}
|
|
||||||
|
|
||||||
// WordBreakProperty.txt has the form:
|
|
||||||
// 05F0..05F2 ; Hebrew_Letter # Lo [3] HEBREW LIGATURE YIDDISH DOUBLE VAV..HEBREW LIGATURE YIDDISH DOUBLE YOD
|
|
||||||
// FB1D ; Hebrew_Letter # Lo HEBREW LETTER YOD WITH HIRIQ
|
|
||||||
func openReader(file string) (input io.ReadCloser) {
|
|
||||||
if *localFiles {
|
|
||||||
f, err := os.Open(file)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
input = f
|
|
||||||
} else {
|
|
||||||
path := *url + file
|
|
||||||
resp, err := http.Get(path)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
if resp.StatusCode != 200 {
|
|
||||||
log.Fatal("bad GET status for "+file, resp.Status)
|
|
||||||
}
|
|
||||||
input = resp.Body
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadUnicodeData(filename string, propertyRanges map[string]*unicode.RangeTable) {
|
|
||||||
f := openReader(filename)
|
|
||||||
defer f.Close()
|
|
||||||
bufioReader := bufio.NewReader(f)
|
|
||||||
line, err := bufioReader.ReadString('\n')
|
|
||||||
for err == nil {
|
|
||||||
parseLine(line, propertyRanges)
|
|
||||||
line, err = bufioReader.ReadString('\n')
|
|
||||||
}
|
|
||||||
// if the err was EOF still need to process last value
|
|
||||||
if err == io.EOF {
|
|
||||||
parseLine(line, propertyRanges)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const comment = "#"
|
|
||||||
const sep = ";"
|
|
||||||
const rnge = ".."
|
|
||||||
|
|
||||||
func parseLine(line string, propertyRanges map[string]*unicode.RangeTable) {
|
|
||||||
if strings.HasPrefix(line, comment) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
if len(line) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
commentStart := strings.Index(line, comment)
|
|
||||||
if commentStart > 0 {
|
|
||||||
line = line[0:commentStart]
|
|
||||||
}
|
|
||||||
pieces := strings.Split(line, sep)
|
|
||||||
if len(pieces) != 2 {
|
|
||||||
log.Printf("unexpected %d pieces in %s", len(pieces), line)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
propertyName := strings.TrimSpace(pieces[1])
|
|
||||||
|
|
||||||
rangeTable, ok := propertyRanges[propertyName]
|
|
||||||
if !ok {
|
|
||||||
rangeTable = &unicode.RangeTable{
|
|
||||||
LatinOffset: 0,
|
|
||||||
}
|
|
||||||
propertyRanges[propertyName] = rangeTable
|
|
||||||
}
|
|
||||||
|
|
||||||
codepointRange := strings.TrimSpace(pieces[0])
|
|
||||||
rngeIndex := strings.Index(codepointRange, rnge)
|
|
||||||
|
|
||||||
if rngeIndex < 0 {
|
|
||||||
// single codepoint, not range
|
|
||||||
codepointInt, err := strconv.ParseUint(codepointRange, 16, 64)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("error parsing int: %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if codepointInt < 0x10000 {
|
|
||||||
r16 := unicode.Range16{
|
|
||||||
Lo: uint16(codepointInt),
|
|
||||||
Hi: uint16(codepointInt),
|
|
||||||
Stride: 1,
|
|
||||||
}
|
|
||||||
addR16ToTable(rangeTable, r16)
|
|
||||||
} else {
|
|
||||||
r32 := unicode.Range32{
|
|
||||||
Lo: uint32(codepointInt),
|
|
||||||
Hi: uint32(codepointInt),
|
|
||||||
Stride: 1,
|
|
||||||
}
|
|
||||||
addR32ToTable(rangeTable, r32)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
rngeStart := codepointRange[0:rngeIndex]
|
|
||||||
rngeEnd := codepointRange[rngeIndex+2:]
|
|
||||||
rngeStartInt, err := strconv.ParseUint(rngeStart, 16, 64)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("error parsing int: %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
rngeEndInt, err := strconv.ParseUint(rngeEnd, 16, 64)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("error parsing int: %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if rngeStartInt < 0x10000 && rngeEndInt < 0x10000 {
|
|
||||||
r16 := unicode.Range16{
|
|
||||||
Lo: uint16(rngeStartInt),
|
|
||||||
Hi: uint16(rngeEndInt),
|
|
||||||
Stride: 1,
|
|
||||||
}
|
|
||||||
addR16ToTable(rangeTable, r16)
|
|
||||||
} else if rngeStartInt >= 0x10000 && rngeEndInt >= 0x10000 {
|
|
||||||
r32 := unicode.Range32{
|
|
||||||
Lo: uint32(rngeStartInt),
|
|
||||||
Hi: uint32(rngeEndInt),
|
|
||||||
Stride: 1,
|
|
||||||
}
|
|
||||||
addR32ToTable(rangeTable, r32)
|
|
||||||
} else {
|
|
||||||
log.Printf("unexpected range")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func addR16ToTable(r *unicode.RangeTable, r16 unicode.Range16) {
|
|
||||||
if r.R16 == nil {
|
|
||||||
r.R16 = make([]unicode.Range16, 0, 1)
|
|
||||||
}
|
|
||||||
r.R16 = append(r.R16, r16)
|
|
||||||
if r16.Hi <= unicode.MaxLatin1 {
|
|
||||||
r.LatinOffset++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func addR32ToTable(r *unicode.RangeTable, r32 unicode.Range32) {
|
|
||||||
if r.R32 == nil {
|
|
||||||
r.R32 = make([]unicode.Range32, 0, 1)
|
|
||||||
}
|
|
||||||
r.R32 = append(r.R32, r32)
|
|
||||||
}
|
|
||||||
|
|
||||||
func generateTables(prefix string, propertyRanges map[string]*unicode.RangeTable) {
|
|
||||||
for key, rt := range propertyRanges {
|
|
||||||
fmt.Fprintf(output, "var _%s%s = %s\n", prefix, key, generateRangeTable(rt))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func generateRangeTable(rt *unicode.RangeTable) string {
|
|
||||||
rv := "&unicode.RangeTable{\n"
|
|
||||||
if rt.R16 != nil {
|
|
||||||
rv += "\tR16: []unicode.Range16{\n"
|
|
||||||
for _, r16 := range rt.R16 {
|
|
||||||
rv += fmt.Sprintf("\t\t%#v,\n", r16)
|
|
||||||
}
|
|
||||||
rv += "\t},\n"
|
|
||||||
}
|
|
||||||
if rt.R32 != nil {
|
|
||||||
rv += "\tR32: []unicode.Range32{\n"
|
|
||||||
for _, r32 := range rt.R32 {
|
|
||||||
rv += fmt.Sprintf("\t\t%#v,\n", r32)
|
|
||||||
}
|
|
||||||
rv += "\t},\n"
|
|
||||||
}
|
|
||||||
rv += fmt.Sprintf("\t\tLatinOffset: %d,\n", rt.LatinOffset)
|
|
||||||
rv += "}\n"
|
|
||||||
return rv
|
|
||||||
}
|
|
||||||
|
|
||||||
const fileHeader = `// Generated by running
|
|
||||||
// maketables --url=%s
|
|
||||||
// DO NOT EDIT
|
|
||||||
|
|
||||||
package segment
|
|
||||||
|
|
||||||
import(
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
`
|
|
||||||
|
|
||||||
func setupOutput() {
|
|
||||||
output = bufio.NewWriter(startGofmt())
|
|
||||||
}
|
|
||||||
|
|
||||||
// startGofmt connects output to a gofmt process if -output is set.
|
|
||||||
func startGofmt() io.Writer {
|
|
||||||
if *outputFile == "" {
|
|
||||||
return os.Stdout
|
|
||||||
}
|
|
||||||
stdout, err := os.Create(*outputFile)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
// Pipe output to gofmt.
|
|
||||||
gofmt := exec.Command("gofmt")
|
|
||||||
fd, err := gofmt.StdinPipe()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
gofmt.Stdout = stdout
|
|
||||||
gofmt.Stderr = os.Stderr
|
|
||||||
err = gofmt.Start()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
return fd
|
|
||||||
}
|
|
||||||
|
|
||||||
func flushOutput() {
|
|
||||||
err := output.Flush()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
326
Godeps/_workspace/src/github.com/blevesearch/segment/segment_words.go
generated
vendored
326
Godeps/_workspace/src/github.com/blevesearch/segment/segment_words.go
generated
vendored
@ -1,326 +0,0 @@
|
|||||||
// Copyright (c) 2014 Couchbase, Inc.
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
|
|
||||||
// except in compliance with the License. You may obtain a copy of the License at
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
// Unless required by applicable law or agreed to in writing, software distributed under the
|
|
||||||
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
|
|
||||||
// either express or implied. See the License for the specific language governing permissions
|
|
||||||
// and limitations under the License.
|
|
||||||
|
|
||||||
package segment
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewWordSegmenter returns a new Segmenter to read from r.
|
|
||||||
func NewWordSegmenter(r io.Reader) *Segmenter {
|
|
||||||
return NewSegmenter(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWordSegmenterDirect returns a new Segmenter to work directly with buf.
|
|
||||||
func NewWordSegmenterDirect(buf []byte) *Segmenter {
|
|
||||||
return NewSegmenterDirect(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
wordCR = iota
|
|
||||||
wordLF
|
|
||||||
wordNewline
|
|
||||||
wordExtend
|
|
||||||
wordRegional_Indicator
|
|
||||||
wordFormat
|
|
||||||
wordKatakana
|
|
||||||
wordHebrew_Letter
|
|
||||||
wordALetter
|
|
||||||
wordSingle_Quote
|
|
||||||
wordDouble_Quote
|
|
||||||
wordMidNumLet
|
|
||||||
wordMidLetter
|
|
||||||
wordMidNum
|
|
||||||
wordNumeric
|
|
||||||
wordExtendNumLet
|
|
||||||
wordOther
|
|
||||||
)
|
|
||||||
|
|
||||||
// Word Types
|
|
||||||
const (
|
|
||||||
None = iota
|
|
||||||
Number
|
|
||||||
Letter
|
|
||||||
Kana
|
|
||||||
Ideo
|
|
||||||
)
|
|
||||||
|
|
||||||
func SplitWords(data []byte, atEOF bool) (int, []byte, error) {
|
|
||||||
advance, token, _, err := SegmentWords(data, atEOF)
|
|
||||||
return advance, token, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func SegmentWords(data []byte, atEOF bool) (advance int, token []byte, typ int, err error) {
|
|
||||||
prevType := -1
|
|
||||||
prevPrevType := -1
|
|
||||||
nextType := -1
|
|
||||||
immediateNextType := -1
|
|
||||||
start := 0
|
|
||||||
wordType := None
|
|
||||||
currType := -1
|
|
||||||
for width := 0; start < len(data); start += width {
|
|
||||||
width = 1
|
|
||||||
r := rune(data[start])
|
|
||||||
if r >= utf8.RuneSelf {
|
|
||||||
r, width = utf8.DecodeRune(data[start:])
|
|
||||||
}
|
|
||||||
|
|
||||||
if immediateNextType > 0 {
|
|
||||||
currType = immediateNextType
|
|
||||||
} else {
|
|
||||||
currType = wordSegmentProperty(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
hasNext := false
|
|
||||||
next := start + width
|
|
||||||
nextToken := utf8.RuneError
|
|
||||||
for next < len(data) {
|
|
||||||
nextWidth := 1
|
|
||||||
nextToken = rune(data[next])
|
|
||||||
if nextToken >= utf8.RuneSelf {
|
|
||||||
nextToken, nextWidth = utf8.DecodeRune(data[next:])
|
|
||||||
}
|
|
||||||
nextType = wordSegmentProperty(nextToken)
|
|
||||||
if !hasNext {
|
|
||||||
immediateNextType = nextType
|
|
||||||
}
|
|
||||||
hasNext = true
|
|
||||||
if nextType != wordExtend && nextType != wordFormat {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
next = next + nextWidth
|
|
||||||
}
|
|
||||||
|
|
||||||
if start != 0 && in(currType, wordExtend, wordFormat) {
|
|
||||||
// wb4
|
|
||||||
// dont set prevType, prevPrevType
|
|
||||||
// we ignore that these extended are here
|
|
||||||
// so types should be whatever we saw before them
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordALetter, wordHebrew_Letter) &&
|
|
||||||
in(prevType, wordALetter, wordHebrew_Letter) {
|
|
||||||
// wb5
|
|
||||||
wordType = updateWordType(wordType, lookupWordType(currType))
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordMidLetter, wordMidNumLet, wordSingle_Quote) &&
|
|
||||||
in(prevType, wordALetter, wordHebrew_Letter) &&
|
|
||||||
hasNext && in(nextType, wordALetter, wordHebrew_Letter) {
|
|
||||||
// wb6
|
|
||||||
wordType = updateWordType(wordType, Letter)
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordMidLetter, wordMidNumLet, wordSingle_Quote) &&
|
|
||||||
in(prevType, wordALetter, wordHebrew_Letter) &&
|
|
||||||
!hasNext && !atEOF {
|
|
||||||
// possibly wb6, need more data to know
|
|
||||||
return 0, nil, 0, nil
|
|
||||||
} else if in(currType, wordALetter, wordHebrew_Letter) &&
|
|
||||||
in(prevType, wordMidLetter, wordMidNumLet, wordSingle_Quote) &&
|
|
||||||
in(prevPrevType, wordALetter, wordHebrew_Letter) {
|
|
||||||
// wb7
|
|
||||||
wordType = updateWordType(wordType, Letter)
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordSingle_Quote) &&
|
|
||||||
in(prevType, wordHebrew_Letter) {
|
|
||||||
// wb7a
|
|
||||||
wordType = updateWordType(wordType, Letter)
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordDouble_Quote) &&
|
|
||||||
in(prevType, wordHebrew_Letter) &&
|
|
||||||
hasNext && in(nextType, wordHebrew_Letter) {
|
|
||||||
// wb7b
|
|
||||||
wordType = updateWordType(wordType, Letter)
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordDouble_Quote) &&
|
|
||||||
in(prevType, wordHebrew_Letter) &&
|
|
||||||
!hasNext && !atEOF {
|
|
||||||
// possibly wb7b, need more data
|
|
||||||
return 0, nil, 0, nil
|
|
||||||
} else if in(currType, wordHebrew_Letter) &&
|
|
||||||
in(prevType, wordDouble_Quote) && in(prevPrevType, wordHebrew_Letter) {
|
|
||||||
// wb7c
|
|
||||||
wordType = updateWordType(wordType, Letter)
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordNumeric) &&
|
|
||||||
in(prevType, wordNumeric) {
|
|
||||||
// wb8
|
|
||||||
wordType = updateWordType(wordType, Number)
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordNumeric) &&
|
|
||||||
in(prevType, wordALetter, wordHebrew_Letter) {
|
|
||||||
// wb9
|
|
||||||
wordType = updateWordType(wordType, Letter)
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordALetter, wordHebrew_Letter) &&
|
|
||||||
in(prevType, wordNumeric) {
|
|
||||||
// wb10
|
|
||||||
wordType = updateWordType(wordType, Letter)
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordNumeric) &&
|
|
||||||
in(prevType, wordMidNum, wordMidNumLet, wordSingle_Quote) &&
|
|
||||||
in(prevPrevType, wordNumeric) {
|
|
||||||
// wb11
|
|
||||||
wordType = updateWordType(wordType, Number)
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordMidNum, wordMidNumLet, wordSingle_Quote) &&
|
|
||||||
in(prevType, wordNumeric) &&
|
|
||||||
hasNext && in(nextType, wordNumeric) {
|
|
||||||
// wb12
|
|
||||||
wordType = updateWordType(wordType, Number)
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordMidNum, wordMidNumLet, wordSingle_Quote) &&
|
|
||||||
in(prevType, wordNumeric) &&
|
|
||||||
!hasNext && !atEOF {
|
|
||||||
// possibly wb12, need more data
|
|
||||||
return 0, nil, 0, nil
|
|
||||||
} else if in(currType, wordKatakana) &&
|
|
||||||
in(prevType, wordKatakana) {
|
|
||||||
// wb13
|
|
||||||
wordType = updateWordType(wordType, Ideo)
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordExtendNumLet) &&
|
|
||||||
in(prevType, wordALetter, wordHebrew_Letter, wordNumeric, wordKatakana, wordExtendNumLet) {
|
|
||||||
// wb13a
|
|
||||||
wordType = updateWordType(wordType, lookupWordType(currType))
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordALetter, wordHebrew_Letter, wordNumeric, wordKatakana) &&
|
|
||||||
in(prevType, wordExtendNumLet) {
|
|
||||||
// wb13b
|
|
||||||
wordType = updateWordType(wordType, lookupWordType(currType))
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordRegional_Indicator) &&
|
|
||||||
in(prevType, wordRegional_Indicator) {
|
|
||||||
// wb13c
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if start == 0 && in(currType, wordCR) &&
|
|
||||||
hasNext && in(immediateNextType, wordLF) {
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if start == 0 && !in(currType, wordCR, wordLF, wordNewline) {
|
|
||||||
// only first char, keep goin
|
|
||||||
wordType = lookupWordType(currType)
|
|
||||||
if wordType == None {
|
|
||||||
if unicode.In(r, unicode.Katakana, unicode.Hiragana, unicode.Ideographic) {
|
|
||||||
wordType = Ideo
|
|
||||||
}
|
|
||||||
}
|
|
||||||
prevPrevType = prevType
|
|
||||||
prevType = currType
|
|
||||||
continue
|
|
||||||
} else if in(currType, wordLF) && in(prevType, wordCR) {
|
|
||||||
start += width
|
|
||||||
break
|
|
||||||
} else {
|
|
||||||
// wb14
|
|
||||||
if start == 0 {
|
|
||||||
start = width
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if start > 0 && atEOF {
|
|
||||||
return start, data[:start], wordType, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Request more data
|
|
||||||
return 0, nil, 0, nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func wordSegmentProperty(r rune) int {
|
|
||||||
if unicode.Is(_WordALetter, r) {
|
|
||||||
return wordALetter
|
|
||||||
} else if unicode.Is(_WordCR, r) {
|
|
||||||
return wordCR
|
|
||||||
} else if unicode.Is(_WordLF, r) {
|
|
||||||
return wordLF
|
|
||||||
} else if unicode.Is(_WordNewline, r) {
|
|
||||||
return wordNewline
|
|
||||||
} else if unicode.Is(_WordExtend, r) {
|
|
||||||
return wordExtend
|
|
||||||
} else if unicode.Is(_WordRegional_Indicator, r) {
|
|
||||||
return wordRegional_Indicator
|
|
||||||
} else if unicode.Is(_WordFormat, r) {
|
|
||||||
return wordFormat
|
|
||||||
} else if unicode.Is(_WordKatakana, r) {
|
|
||||||
return wordKatakana
|
|
||||||
} else if unicode.Is(_WordHebrew_Letter, r) {
|
|
||||||
return wordHebrew_Letter
|
|
||||||
} else if unicode.Is(_WordSingle_Quote, r) {
|
|
||||||
return wordSingle_Quote
|
|
||||||
} else if unicode.Is(_WordDouble_Quote, r) {
|
|
||||||
return wordDouble_Quote
|
|
||||||
} else if unicode.Is(_WordMidNumLet, r) {
|
|
||||||
return wordMidNumLet
|
|
||||||
} else if unicode.Is(_WordMidLetter, r) {
|
|
||||||
return wordMidLetter
|
|
||||||
} else if unicode.Is(_WordMidNum, r) {
|
|
||||||
return wordMidNum
|
|
||||||
} else if unicode.Is(_WordNumeric, r) {
|
|
||||||
return wordNumeric
|
|
||||||
} else if unicode.Is(_WordExtendNumLet, r) {
|
|
||||||
return wordExtendNumLet
|
|
||||||
} else {
|
|
||||||
return wordOther
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func lookupWordType(tokenType int) int {
|
|
||||||
if tokenType == wordNumeric {
|
|
||||||
return Number
|
|
||||||
} else if tokenType == wordALetter {
|
|
||||||
return Letter
|
|
||||||
} else if tokenType == wordHebrew_Letter {
|
|
||||||
return Letter
|
|
||||||
} else if tokenType == wordKatakana {
|
|
||||||
return Ideo
|
|
||||||
}
|
|
||||||
|
|
||||||
return None
|
|
||||||
}
|
|
||||||
|
|
||||||
func updateWordType(currentWordType, newWordType int) int {
|
|
||||||
if newWordType > currentWordType {
|
|
||||||
return newWordType
|
|
||||||
}
|
|
||||||
return currentWordType
|
|
||||||
}
|
|
5093
Godeps/_workspace/src/github.com/blevesearch/segment/tables.go
generated
vendored
5093
Godeps/_workspace/src/github.com/blevesearch/segment/tables.go
generated
vendored
File diff suppressed because it is too large
Load Diff
9097
Godeps/_workspace/src/github.com/blevesearch/segment/tables_test.go
generated
vendored
9097
Godeps/_workspace/src/github.com/blevesearch/segment/tables_test.go
generated
vendored
File diff suppressed because it is too large
Load Diff
4
Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore
generated
vendored
4
Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore
generated
vendored
@ -1,4 +0,0 @@
|
|||||||
*.prof
|
|
||||||
*.test
|
|
||||||
*.swp
|
|
||||||
/bin/
|
|
4
Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.gitignore
generated
vendored
4
Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.gitignore
generated
vendored
@ -1,4 +0,0 @@
|
|||||||
.DS_Store
|
|
||||||
bin
|
|
||||||
|
|
||||||
|
|
7
Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.travis.yml
generated
vendored
7
Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.travis.yml
generated
vendored
@ -1,7 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.3.3
|
|
||||||
- 1.4.2
|
|
||||||
- 1.5
|
|
||||||
- tip
|
|
153
Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go
generated
vendored
153
Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go
generated
vendored
@ -1,153 +0,0 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/golang/protobuf/proto"
|
|
||||||
pb "github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestGetExtensionsWithMissingExtensions(t *testing.T) {
|
|
||||||
msg := &pb.MyMessage{}
|
|
||||||
ext1 := &pb.Ext{}
|
|
||||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
|
|
||||||
t.Fatalf("Could not set ext1: %s", ext1)
|
|
||||||
}
|
|
||||||
exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{
|
|
||||||
pb.E_Ext_More,
|
|
||||||
pb.E_Ext_Text,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("GetExtensions() failed: %s", err)
|
|
||||||
}
|
|
||||||
if exts[0] != ext1 {
|
|
||||||
t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0])
|
|
||||||
}
|
|
||||||
if exts[1] != nil {
|
|
||||||
t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetExtensionStability(t *testing.T) {
|
|
||||||
check := func(m *pb.MyMessage) bool {
|
|
||||||
ext1, err := proto.GetExtension(m, pb.E_Ext_More)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("GetExtension() failed: %s", err)
|
|
||||||
}
|
|
||||||
ext2, err := proto.GetExtension(m, pb.E_Ext_More)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("GetExtension() failed: %s", err)
|
|
||||||
}
|
|
||||||
return ext1 == ext2
|
|
||||||
}
|
|
||||||
msg := &pb.MyMessage{Count: proto.Int32(4)}
|
|
||||||
ext0 := &pb.Ext{}
|
|
||||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil {
|
|
||||||
t.Fatalf("Could not set ext1: %s", ext0)
|
|
||||||
}
|
|
||||||
if !check(msg) {
|
|
||||||
t.Errorf("GetExtension() not stable before marshaling")
|
|
||||||
}
|
|
||||||
bb, err := proto.Marshal(msg)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Marshal() failed: %s", err)
|
|
||||||
}
|
|
||||||
msg1 := &pb.MyMessage{}
|
|
||||||
err = proto.Unmarshal(bb, msg1)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unmarshal() failed: %s", err)
|
|
||||||
}
|
|
||||||
if !check(msg1) {
|
|
||||||
t.Errorf("GetExtension() not stable after unmarshaling")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExtensionsRoundTrip(t *testing.T) {
|
|
||||||
msg := &pb.MyMessage{}
|
|
||||||
ext1 := &pb.Ext{
|
|
||||||
Data: proto.String("hi"),
|
|
||||||
}
|
|
||||||
ext2 := &pb.Ext{
|
|
||||||
Data: proto.String("there"),
|
|
||||||
}
|
|
||||||
exists := proto.HasExtension(msg, pb.E_Ext_More)
|
|
||||||
if exists {
|
|
||||||
t.Error("Extension More present unexpectedly")
|
|
||||||
}
|
|
||||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
e, err := proto.GetExtension(msg, pb.E_Ext_More)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
x, ok := e.(*pb.Ext)
|
|
||||||
if !ok {
|
|
||||||
t.Errorf("e has type %T, expected testdata.Ext", e)
|
|
||||||
} else if *x.Data != "there" {
|
|
||||||
t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x)
|
|
||||||
}
|
|
||||||
proto.ClearExtension(msg, pb.E_Ext_More)
|
|
||||||
if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension {
|
|
||||||
t.Errorf("got %v, expected ErrMissingExtension", e)
|
|
||||||
}
|
|
||||||
if _, err := proto.GetExtension(msg, pb.E_X215); err == nil {
|
|
||||||
t.Error("expected bad extension error, got nil")
|
|
||||||
}
|
|
||||||
if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil {
|
|
||||||
t.Error("expected extension err")
|
|
||||||
}
|
|
||||||
if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil {
|
|
||||||
t.Error("expected some sort of type mismatch error, got nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNilExtension(t *testing.T) {
|
|
||||||
msg := &pb.MyMessage{
|
|
||||||
Count: proto.Int32(1),
|
|
||||||
}
|
|
||||||
if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil {
|
|
||||||
t.Error("expected SetExtension to fail due to a nil extension")
|
|
||||||
} else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want {
|
|
||||||
t.Errorf("expected error %v, got %v", want, err)
|
|
||||||
}
|
|
||||||
// Note: if the behavior of Marshal is ever changed to ignore nil extensions, update
|
|
||||||
// this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal.
|
|
||||||
}
|
|
2397
Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go
generated
vendored
2397
Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go
generated
vendored
File diff suppressed because it is too large
Load Diff
22
Godeps/_workspace/src/github.com/gorilla/websocket/.gitignore
generated
vendored
22
Godeps/_workspace/src/github.com/gorilla/websocket/.gitignore
generated
vendored
@ -1,22 +0,0 @@
|
|||||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
|
||||||
*.o
|
|
||||||
*.a
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Folders
|
|
||||||
_obj
|
|
||||||
_test
|
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
|
||||||
*.[568vq]
|
|
||||||
[568vq].out
|
|
||||||
|
|
||||||
*.cgo1.go
|
|
||||||
*.cgo2.c
|
|
||||||
_cgo_defun.c
|
|
||||||
_cgo_gotypes.go
|
|
||||||
_cgo_export.*
|
|
||||||
|
|
||||||
_testmain.go
|
|
||||||
|
|
||||||
*.exe
|
|
6
Godeps/_workspace/src/github.com/gorilla/websocket/.travis.yml
generated
vendored
6
Godeps/_workspace/src/github.com/gorilla/websocket/.travis.yml
generated
vendored
@ -1,6 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.1
|
|
||||||
- 1.2
|
|
||||||
- tip
|
|
7
Godeps/_workspace/src/github.com/hashicorp/hcl/.gitignore
generated
vendored
7
Godeps/_workspace/src/github.com/hashicorp/hcl/.gitignore
generated
vendored
@ -1,7 +0,0 @@
|
|||||||
y.output
|
|
||||||
|
|
||||||
# ignore intellij files
|
|
||||||
.idea
|
|
||||||
*.iml
|
|
||||||
*.ipr
|
|
||||||
*.iws
|
|
3
Godeps/_workspace/src/github.com/hashicorp/hcl/.travis.yml
generated
vendored
3
Godeps/_workspace/src/github.com/hashicorp/hcl/.travis.yml
generated
vendored
@ -1,3 +0,0 @@
|
|||||||
sudo: false
|
|
||||||
language: go
|
|
||||||
go: 1.5
|
|
4
Godeps/_workspace/src/github.com/kr/pretty/.gitignore
generated
vendored
4
Godeps/_workspace/src/github.com/kr/pretty/.gitignore
generated
vendored
@ -1,4 +0,0 @@
|
|||||||
[568].out
|
|
||||||
_go*
|
|
||||||
_test*
|
|
||||||
_obj
|
|
2
Godeps/_workspace/src/github.com/magiconair/properties/.gitignore
generated
vendored
2
Godeps/_workspace/src/github.com/magiconair/properties/.gitignore
generated
vendored
@ -1,2 +0,0 @@
|
|||||||
*.sublime-project
|
|
||||||
*.sublime-workspace
|
|
5
Godeps/_workspace/src/github.com/magiconair/properties/.travis.yml
generated
vendored
5
Godeps/_workspace/src/github.com/magiconair/properties/.travis.yml
generated
vendored
@ -1,5 +0,0 @@
|
|||||||
language: go
|
|
||||||
go:
|
|
||||||
- release
|
|
||||||
install: go get gopkg.in/check.v1
|
|
||||||
|
|
117
Godeps/_workspace/src/github.com/magiconair/properties/README.md
generated
vendored
117
Godeps/_workspace/src/github.com/magiconair/properties/README.md
generated
vendored
@ -1,117 +0,0 @@
|
|||||||
Overview [![Build Status](https://travis-ci.org/magiconair/properties.png?branch=master)](https://travis-ci.org/magiconair/properties)
|
|
||||||
========
|
|
||||||
|
|
||||||
properties is a Go library for reading and writing properties files.
|
|
||||||
|
|
||||||
It supports reading from multiple files and Spring style recursive property
|
|
||||||
expansion of expressions like `${key}` to their corresponding value.
|
|
||||||
Value expressions can refer to other keys like in `${key}` or to
|
|
||||||
environment variables like in `${USER}`.
|
|
||||||
Filenames can also contain environment variables like in
|
|
||||||
`/home/${USER}/myapp.properties`.
|
|
||||||
|
|
||||||
Comments and the order of keys are preserved. Comments can be modified
|
|
||||||
and can be written to the output.
|
|
||||||
|
|
||||||
The properties library supports both ISO-8859-1 and UTF-8 encoded data.
|
|
||||||
|
|
||||||
Starting from version 1.3.0 the behavior of the MustXXX() functions is
|
|
||||||
configurable by providing a custom `ErrorHandler` function. The default has
|
|
||||||
changed from `panic` to `log.Fatal` but this is configurable and custom
|
|
||||||
error handling functions can be provided. See the package documentation for
|
|
||||||
details.
|
|
||||||
|
|
||||||
Getting Started
|
|
||||||
---------------
|
|
||||||
|
|
||||||
```go
|
|
||||||
import "github.com/magiconair/properties"
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8)
|
|
||||||
host := p.MustGetString("host")
|
|
||||||
port := p.GetInt("port", 8080)
|
|
||||||
}
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
Read the full documentation on [GoDoc](https://godoc.org/github.com/magiconair/properties) [![GoDoc](https://godoc.org/github.com/magiconair/properties?status.png)](https://godoc.org/github.com/magiconair/properties)
|
|
||||||
|
|
||||||
Installation and Upgrade
|
|
||||||
------------------------
|
|
||||||
|
|
||||||
```
|
|
||||||
$ go get -u github.com/magiconair/properties
|
|
||||||
```
|
|
||||||
|
|
||||||
For testing and debugging you need the [go-check](https://github.com/go-check/check) library
|
|
||||||
|
|
||||||
```
|
|
||||||
$ go get -u gopkg.in/check.v1
|
|
||||||
```
|
|
||||||
|
|
||||||
History
|
|
||||||
-------
|
|
||||||
|
|
||||||
v1.5.2, 10 Apr 2015
|
|
||||||
-------------------
|
|
||||||
* [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty
|
|
||||||
* Add clickable links to README
|
|
||||||
|
|
||||||
v1.5.1, 08 Dec 2014
|
|
||||||
-------------------
|
|
||||||
* Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with
|
|
||||||
[time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration).
|
|
||||||
|
|
||||||
v1.5.0, 18 Nov 2014
|
|
||||||
-------------------
|
|
||||||
* Added support for single and multi-line comments (reading, writing and updating)
|
|
||||||
* The order of keys is now preserved
|
|
||||||
* Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry
|
|
||||||
* Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method
|
|
||||||
* Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1)
|
|
||||||
|
|
||||||
v1.4.2, 15 Nov 2014
|
|
||||||
-------------------
|
|
||||||
* [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one
|
|
||||||
|
|
||||||
v1.4.1, 13 Nov 2014
|
|
||||||
-------------------
|
|
||||||
* [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string
|
|
||||||
|
|
||||||
v1.4.0, 23 Sep 2014
|
|
||||||
-------------------
|
|
||||||
* Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys
|
|
||||||
* Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties
|
|
||||||
|
|
||||||
v1.3.0, 18 Mar 2014
|
|
||||||
-------------------
|
|
||||||
* Added support for time.Duration
|
|
||||||
* Made MustXXX() failure behavior configurable (log.Fatal, panic, custom)
|
|
||||||
* Changed default of MustXXX() failure from panic to log.Fatal
|
|
||||||
|
|
||||||
v1.2.0, 05 Mar 2014
|
|
||||||
-------------------
|
|
||||||
* Added MustGet... functions
|
|
||||||
* Added support for int and uint with range checks on 32 bit platforms
|
|
||||||
|
|
||||||
v1.1.0, 20 Jan 2014
|
|
||||||
-------------------
|
|
||||||
* Renamed from goproperties to properties
|
|
||||||
* Added support for expansion of environment vars in
|
|
||||||
filenames and value expressions
|
|
||||||
* Fixed bug where value expressions were not at the
|
|
||||||
start of the string
|
|
||||||
|
|
||||||
v1.0.0, 7 Jan 2014
|
|
||||||
------------------
|
|
||||||
* Initial release
|
|
||||||
|
|
||||||
License
|
|
||||||
-------
|
|
||||||
|
|
||||||
2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details.
|
|
||||||
|
|
||||||
ToDo
|
|
||||||
----
|
|
||||||
* Dump contents with passwords and secrets obscured
|
|
24
Godeps/_workspace/src/github.com/matryer/resync/.gitignore
generated
vendored
24
Godeps/_workspace/src/github.com/matryer/resync/.gitignore
generated
vendored
@ -1,24 +0,0 @@
|
|||||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
|
||||||
*.o
|
|
||||||
*.a
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Folders
|
|
||||||
_obj
|
|
||||||
_test
|
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
|
||||||
*.[568vq]
|
|
||||||
[568vq].out
|
|
||||||
|
|
||||||
*.cgo1.go
|
|
||||||
*.cgo2.c
|
|
||||||
_cgo_defun.c
|
|
||||||
_cgo_gotypes.go
|
|
||||||
_cgo_export.*
|
|
||||||
|
|
||||||
_testmain.go
|
|
||||||
|
|
||||||
*.exe
|
|
||||||
*.test
|
|
||||||
*.prof
|
|
7
Godeps/_workspace/src/github.com/mitchellh/mapstructure/.travis.yml
generated
vendored
7
Godeps/_workspace/src/github.com/mitchellh/mapstructure/.travis.yml
generated
vendored
@ -1,7 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.4
|
|
||||||
|
|
||||||
script:
|
|
||||||
- go test
|
|
23
Godeps/_workspace/src/github.com/spf13/cast/.gitignore
generated
vendored
23
Godeps/_workspace/src/github.com/spf13/cast/.gitignore
generated
vendored
@ -1,23 +0,0 @@
|
|||||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
|
||||||
*.o
|
|
||||||
*.a
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Folders
|
|
||||||
_obj
|
|
||||||
_test
|
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
|
||||||
*.[568vq]
|
|
||||||
[568vq].out
|
|
||||||
|
|
||||||
*.cgo1.go
|
|
||||||
*.cgo2.c
|
|
||||||
_cgo_defun.c
|
|
||||||
_cgo_gotypes.go
|
|
||||||
_cgo_export.*
|
|
||||||
|
|
||||||
_testmain.go
|
|
||||||
|
|
||||||
*.exe
|
|
||||||
*.test
|
|
24
Godeps/_workspace/src/github.com/spf13/cobra/.gitignore
generated
vendored
24
Godeps/_workspace/src/github.com/spf13/cobra/.gitignore
generated
vendored
@ -1,24 +0,0 @@
|
|||||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
|
||||||
*.o
|
|
||||||
*.a
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Folders
|
|
||||||
_obj
|
|
||||||
_test
|
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
|
||||||
*.[568vq]
|
|
||||||
[568vq].out
|
|
||||||
|
|
||||||
*.cgo1.go
|
|
||||||
*.cgo2.c
|
|
||||||
_cgo_defun.c
|
|
||||||
_cgo_gotypes.go
|
|
||||||
_cgo_export.*
|
|
||||||
|
|
||||||
_testmain.go
|
|
||||||
|
|
||||||
*.exe
|
|
||||||
|
|
||||||
cobra.test
|
|
3
Godeps/_workspace/src/github.com/spf13/cobra/.mailmap
generated
vendored
3
Godeps/_workspace/src/github.com/spf13/cobra/.mailmap
generated
vendored
@ -1,3 +0,0 @@
|
|||||||
Steve Francia <steve.francia@gmail.com>
|
|
||||||
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
|
|
||||||
Fabiano Franz <ffranz@redhat.com> <contact@fabianofranz.com>
|
|
9
Godeps/_workspace/src/github.com/spf13/cobra/.travis.yml
generated
vendored
9
Godeps/_workspace/src/github.com/spf13/cobra/.travis.yml
generated
vendored
@ -1,9 +0,0 @@
|
|||||||
language: go
|
|
||||||
go:
|
|
||||||
- 1.3.3
|
|
||||||
- 1.4.2
|
|
||||||
- 1.5.1
|
|
||||||
- tip
|
|
||||||
script:
|
|
||||||
- go test -v ./...
|
|
||||||
- go build
|
|
22
Godeps/_workspace/src/github.com/spf13/jwalterweatherman/.gitignore
generated
vendored
22
Godeps/_workspace/src/github.com/spf13/jwalterweatherman/.gitignore
generated
vendored
@ -1,22 +0,0 @@
|
|||||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
|
||||||
*.o
|
|
||||||
*.a
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Folders
|
|
||||||
_obj
|
|
||||||
_test
|
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
|
||||||
*.[568vq]
|
|
||||||
[568vq].out
|
|
||||||
|
|
||||||
*.cgo1.go
|
|
||||||
*.cgo2.c
|
|
||||||
_cgo_defun.c
|
|
||||||
_cgo_gotypes.go
|
|
||||||
_cgo_export.*
|
|
||||||
|
|
||||||
_testmain.go
|
|
||||||
|
|
||||||
*.exe
|
|
56
Godeps/_workspace/src/github.com/spf13/jwalterweatherman/jww_test.go
generated
vendored
56
Godeps/_workspace/src/github.com/spf13/jwalterweatherman/jww_test.go
generated
vendored
@ -1,56 +0,0 @@
|
|||||||
// Copyright © 2014 Steve Francia <spf@spf13.com>.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by an MIT-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package jwalterweatherman
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/stretchr/testify/assert"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestLevels(t *testing.T) {
|
|
||||||
SetStdoutThreshold(LevelError)
|
|
||||||
assert.Equal(t, StdoutThreshold(), LevelError)
|
|
||||||
SetLogThreshold(LevelCritical)
|
|
||||||
assert.Equal(t, LogThreshold(), LevelCritical)
|
|
||||||
assert.NotEqual(t, StdoutThreshold(), LevelCritical)
|
|
||||||
SetStdoutThreshold(LevelWarn)
|
|
||||||
assert.Equal(t, StdoutThreshold(), LevelWarn)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDefaultLogging(t *testing.T) {
|
|
||||||
outputBuf := new(bytes.Buffer)
|
|
||||||
logBuf := new(bytes.Buffer)
|
|
||||||
LogHandle = logBuf
|
|
||||||
OutHandle = outputBuf
|
|
||||||
|
|
||||||
SetLogThreshold(LevelWarn)
|
|
||||||
SetStdoutThreshold(LevelError)
|
|
||||||
|
|
||||||
FATAL.Println("fatal err")
|
|
||||||
CRITICAL.Println("critical err")
|
|
||||||
ERROR.Println("an error")
|
|
||||||
WARN.Println("a warning")
|
|
||||||
INFO.Println("information")
|
|
||||||
DEBUG.Println("debugging info")
|
|
||||||
TRACE.Println("trace")
|
|
||||||
|
|
||||||
assert.Contains(t, logBuf.String(), "fatal err")
|
|
||||||
assert.Contains(t, logBuf.String(), "critical err")
|
|
||||||
assert.Contains(t, logBuf.String(), "an error")
|
|
||||||
assert.Contains(t, logBuf.String(), "a warning")
|
|
||||||
assert.NotContains(t, logBuf.String(), "information")
|
|
||||||
assert.NotContains(t, logBuf.String(), "debugging info")
|
|
||||||
assert.NotContains(t, logBuf.String(), "trace")
|
|
||||||
|
|
||||||
assert.Contains(t, outputBuf.String(), "fatal err")
|
|
||||||
assert.Contains(t, outputBuf.String(), "critical err")
|
|
||||||
assert.Contains(t, outputBuf.String(), "an error")
|
|
||||||
assert.NotContains(t, outputBuf.String(), "a warning")
|
|
||||||
assert.NotContains(t, outputBuf.String(), "information")
|
|
||||||
assert.NotContains(t, outputBuf.String(), "debugging info")
|
|
||||||
assert.NotContains(t, outputBuf.String(), "trace")
|
|
||||||
}
|
|
18
Godeps/_workspace/src/github.com/spf13/pflag/.travis.yml
generated
vendored
18
Godeps/_workspace/src/github.com/spf13/pflag/.travis.yml
generated
vendored
@ -1,18 +0,0 @@
|
|||||||
sudo: false
|
|
||||||
|
|
||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.3
|
|
||||||
- 1.4
|
|
||||||
- 1.5
|
|
||||||
- tip
|
|
||||||
|
|
||||||
install:
|
|
||||||
- go get github.com/golang/lint/golint
|
|
||||||
- export PATH=$GOPATH/bin:$PATH
|
|
||||||
- go install ./...
|
|
||||||
|
|
||||||
script:
|
|
||||||
- verify/all.sh -v
|
|
||||||
- go test ./...
|
|
23
Godeps/_workspace/src/github.com/spf13/viper/.gitignore
generated
vendored
23
Godeps/_workspace/src/github.com/spf13/viper/.gitignore
generated
vendored
@ -1,23 +0,0 @@
|
|||||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
|
||||||
*.o
|
|
||||||
*.a
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Folders
|
|
||||||
_obj
|
|
||||||
_test
|
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
|
||||||
*.[568vq]
|
|
||||||
[568vq].out
|
|
||||||
|
|
||||||
*.cgo1.go
|
|
||||||
*.cgo2.c
|
|
||||||
_cgo_defun.c
|
|
||||||
_cgo_gotypes.go
|
|
||||||
_cgo_export.*
|
|
||||||
|
|
||||||
_testmain.go
|
|
||||||
|
|
||||||
*.exe
|
|
||||||
*.test
|
|
9
Godeps/_workspace/src/github.com/spf13/viper/.travis.yml
generated
vendored
9
Godeps/_workspace/src/github.com/spf13/viper/.travis.yml
generated
vendored
@ -1,9 +0,0 @@
|
|||||||
language: go
|
|
||||||
go:
|
|
||||||
- 1.3
|
|
||||||
- release
|
|
||||||
- tip
|
|
||||||
|
|
||||||
script:
|
|
||||||
- go test -v ./...
|
|
||||||
sudo: false
|
|
7
Godeps/_workspace/src/github.com/square/go-jose/.gitignore
generated
vendored
7
Godeps/_workspace/src/github.com/square/go-jose/.gitignore
generated
vendored
@ -1,7 +0,0 @@
|
|||||||
*~
|
|
||||||
.*.swp
|
|
||||||
*.out
|
|
||||||
*.test
|
|
||||||
*.pem
|
|
||||||
*.cov
|
|
||||||
jose-util/jose-util
|
|
36
Godeps/_workspace/src/github.com/square/go-jose/.travis.yml
generated
vendored
36
Godeps/_workspace/src/github.com/square/go-jose/.travis.yml
generated
vendored
@ -1,36 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
sudo: false
|
|
||||||
|
|
||||||
matrix:
|
|
||||||
fast_finish: true
|
|
||||||
allow_failures:
|
|
||||||
- go: tip
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.2
|
|
||||||
- 1.3
|
|
||||||
- 1.4
|
|
||||||
- 1.5
|
|
||||||
- tip
|
|
||||||
|
|
||||||
before_script:
|
|
||||||
- export PATH=$HOME/.local/bin:$PATH
|
|
||||||
|
|
||||||
before_install:
|
|
||||||
- go get github.com/axw/gocov/gocov
|
|
||||||
- go get github.com/mattn/goveralls
|
|
||||||
- go get golang.org/x/tools/cmd/cover || true
|
|
||||||
- go get code.google.com/p/go.tools/cmd/cover || true
|
|
||||||
- pip install cram --user `whoami`
|
|
||||||
|
|
||||||
script:
|
|
||||||
- go test . -v -covermode=count -coverprofile=profile.cov
|
|
||||||
- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov
|
|
||||||
- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t
|
|
||||||
- cd ..
|
|
||||||
|
|
||||||
after_success:
|
|
||||||
- tail -n+2 cipher/profile.cov >> profile.cov
|
|
||||||
- $HOME/gopath/bin/goveralls -coverprofile=profile.cov -service=travis-ci
|
|
||||||
|
|
5
Godeps/_workspace/src/github.com/steveyen/gtreap/.gitignore
generated
vendored
5
Godeps/_workspace/src/github.com/steveyen/gtreap/.gitignore
generated
vendored
@ -1,5 +0,0 @@
|
|||||||
#*
|
|
||||||
*~
|
|
||||||
*.test
|
|
||||||
tmp
|
|
||||||
|
|
154
Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go
generated
vendored
154
Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go
generated
vendored
@ -1,154 +0,0 @@
|
|||||||
// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
|
|
||||||
//
|
|
||||||
// Example Usage
|
|
||||||
//
|
|
||||||
// The following is a complete example using assert in a standard test function:
|
|
||||||
// import (
|
|
||||||
// "testing"
|
|
||||||
// "github.com/stretchr/testify/assert"
|
|
||||||
// )
|
|
||||||
//
|
|
||||||
// func TestSomething(t *testing.T) {
|
|
||||||
//
|
|
||||||
// var a string = "Hello"
|
|
||||||
// var b string = "Hello"
|
|
||||||
//
|
|
||||||
// assert.Equal(t, a, b, "The two words should be the same.")
|
|
||||||
//
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if you assert many times, use the below:
|
|
||||||
//
|
|
||||||
// import (
|
|
||||||
// "testing"
|
|
||||||
// "github.com/stretchr/testify/assert"
|
|
||||||
// )
|
|
||||||
//
|
|
||||||
// func TestSomething(t *testing.T) {
|
|
||||||
// assert := assert.New(t)
|
|
||||||
//
|
|
||||||
// var a string = "Hello"
|
|
||||||
// var b string = "Hello"
|
|
||||||
//
|
|
||||||
// assert.Equal(a, b, "The two words should be the same.")
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Assertions
|
|
||||||
//
|
|
||||||
// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
|
|
||||||
// All assertion functions take, as the first argument, the `*testing.T` object provided by the
|
|
||||||
// testing framework. This allows the assertion funcs to write the failings and other details to
|
|
||||||
// the correct place.
|
|
||||||
//
|
|
||||||
// Every assertion function also takes an optional string message as the final argument,
|
|
||||||
// allowing custom error messages to be appended to the message the assertion method outputs.
|
|
||||||
//
|
|
||||||
// Here is an overview of the assert functions:
|
|
||||||
//
|
|
||||||
// assert.Equal(t, expected, actual [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.EqualValues(t, expected, actual [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.NotEqual(t, notExpected, actual [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.True(t, actualBool [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.False(t, actualBool [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.Nil(t, actualObject [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.NotNil(t, actualObject [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.Empty(t, actualObject [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.NotEmpty(t, actualObject [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.Len(t, actualObject, expectedLength, [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.Error(t, errorObject [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.NoError(t, errorObject [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.EqualError(t, theError, errString [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.Implements(t, (*MyInterface)(nil), new(MyObject) [,message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.IsType(t, expectedObject, actualObject [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.Contains(t, stringOrSlice, substringOrElement [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.NotContains(t, stringOrSlice, substringOrElement [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.Panics(t, func(){
|
|
||||||
//
|
|
||||||
// // call code that should panic
|
|
||||||
//
|
|
||||||
// } [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.NotPanics(t, func(){
|
|
||||||
//
|
|
||||||
// // call code that should not panic
|
|
||||||
//
|
|
||||||
// } [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.WithinDuration(t, timeA, timeB, deltaTime, [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.InDelta(t, numA, numB, delta, [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.InEpsilon(t, numA, numB, epsilon, [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert package contains Assertions object. it has assertion methods.
|
|
||||||
//
|
|
||||||
// Here is an overview of the assert functions:
|
|
||||||
// assert.Equal(expected, actual [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.EqualValues(expected, actual [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.NotEqual(notExpected, actual [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.True(actualBool [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.False(actualBool [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.Nil(actualObject [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.NotNil(actualObject [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.Empty(actualObject [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.NotEmpty(actualObject [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.Len(actualObject, expectedLength, [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.Error(errorObject [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.NoError(errorObject [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.EqualError(theError, errString [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.Implements((*MyInterface)(nil), new(MyObject) [,message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.IsType(expectedObject, actualObject [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.Contains(stringOrSlice, substringOrElement [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.NotContains(stringOrSlice, substringOrElement [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.Panics(func(){
|
|
||||||
//
|
|
||||||
// // call code that should panic
|
|
||||||
//
|
|
||||||
// } [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.NotPanics(func(){
|
|
||||||
//
|
|
||||||
// // call code that should not panic
|
|
||||||
//
|
|
||||||
// } [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.WithinDuration(timeA, timeB, deltaTime, [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.InDelta(numA, numB, delta, [, message [, format-args]])
|
|
||||||
//
|
|
||||||
// assert.InEpsilon(numA, numB, epsilon, [, message [, format-args]])
|
|
||||||
package assert
|
|
265
Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go
generated
vendored
265
Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go
generated
vendored
@ -1,265 +0,0 @@
|
|||||||
package assert
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
// Assertions provides assertion methods around the
|
|
||||||
// TestingT interface.
|
|
||||||
type Assertions struct {
|
|
||||||
t TestingT
|
|
||||||
}
|
|
||||||
|
|
||||||
// New makes a new Assertions object for the specified TestingT.
|
|
||||||
func New(t TestingT) *Assertions {
|
|
||||||
return &Assertions{
|
|
||||||
t: t,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fail reports a failure through
|
|
||||||
func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
|
|
||||||
return Fail(a.t, failureMessage, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Implements asserts that an object is implemented by the specified interface.
|
|
||||||
//
|
|
||||||
// assert.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
|
|
||||||
func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
|
|
||||||
return Implements(a.t, interfaceObject, object, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsType asserts that the specified objects are of the same type.
|
|
||||||
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
|
|
||||||
return IsType(a.t, expectedType, object, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Equal asserts that two objects are equal.
|
|
||||||
//
|
|
||||||
// assert.Equal(123, 123, "123 and 123 should be equal")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) Equal(expected, actual interface{}, msgAndArgs ...interface{}) bool {
|
|
||||||
return Equal(a.t, expected, actual, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// EqualValues asserts that two objects are equal or convertable to the same types
|
|
||||||
// and equal.
|
|
||||||
//
|
|
||||||
// assert.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) EqualValues(expected, actual interface{}, msgAndArgs ...interface{}) bool {
|
|
||||||
return EqualValues(a.t, expected, actual, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exactly asserts that two objects are equal is value and type.
|
|
||||||
//
|
|
||||||
// assert.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) Exactly(expected, actual interface{}, msgAndArgs ...interface{}) bool {
|
|
||||||
return Exactly(a.t, expected, actual, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotNil asserts that the specified object is not nil.
|
|
||||||
//
|
|
||||||
// assert.NotNil(err, "err should be something")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
|
|
||||||
return NotNil(a.t, object, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Nil asserts that the specified object is nil.
|
|
||||||
//
|
|
||||||
// assert.Nil(err, "err should be nothing")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
|
|
||||||
return Nil(a.t, object, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or a
|
|
||||||
// slice with len == 0.
|
|
||||||
//
|
|
||||||
// assert.Empty(obj)
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
|
|
||||||
return Empty(a.t, object, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or a
|
|
||||||
// slice with len == 0.
|
|
||||||
//
|
|
||||||
// if assert.NotEmpty(obj) {
|
|
||||||
// assert.Equal("two", obj[1])
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
|
|
||||||
return NotEmpty(a.t, object, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len asserts that the specified object has specific length.
|
|
||||||
// Len also fails if the object has a type that len() not accept.
|
|
||||||
//
|
|
||||||
// assert.Len(mySlice, 3, "The size of slice is not 3")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
|
|
||||||
return Len(a.t, object, length, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// True asserts that the specified value is true.
|
|
||||||
//
|
|
||||||
// assert.True(myBool, "myBool should be true")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
|
|
||||||
return True(a.t, value, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// False asserts that the specified value is true.
|
|
||||||
//
|
|
||||||
// assert.False(myBool, "myBool should be false")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
|
|
||||||
return False(a.t, value, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotEqual asserts that the specified values are NOT equal.
|
|
||||||
//
|
|
||||||
// assert.NotEqual(obj1, obj2, "two objects shouldn't be equal")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) NotEqual(expected, actual interface{}, msgAndArgs ...interface{}) bool {
|
|
||||||
return NotEqual(a.t, expected, actual, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Contains asserts that the specified string contains the specified substring.
|
|
||||||
//
|
|
||||||
// assert.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) Contains(s, contains interface{}, msgAndArgs ...interface{}) bool {
|
|
||||||
return Contains(a.t, s, contains, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotContains asserts that the specified string does NOT contain the specified substring.
|
|
||||||
//
|
|
||||||
// assert.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) NotContains(s, contains interface{}, msgAndArgs ...interface{}) bool {
|
|
||||||
return NotContains(a.t, s, contains, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Condition uses a Comparison to assert a complex condition.
|
|
||||||
func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
|
|
||||||
return Condition(a.t, comp, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Panics asserts that the code inside the specified PanicTestFunc panics.
|
|
||||||
//
|
|
||||||
// assert.Panics(func(){
|
|
||||||
// GoCrazy()
|
|
||||||
// }, "Calling GoCrazy() should panic")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
|
|
||||||
return Panics(a.t, f, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
|
|
||||||
//
|
|
||||||
// assert.NotPanics(func(){
|
|
||||||
// RemainCalm()
|
|
||||||
// }, "Calling RemainCalm() should NOT panic")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
|
|
||||||
return NotPanics(a.t, f, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithinDuration asserts that the two times are within duration delta of each other.
|
|
||||||
//
|
|
||||||
// assert.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) WithinDuration(expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
|
|
||||||
return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InDelta asserts that the two numerals are within delta of each other.
|
|
||||||
//
|
|
||||||
// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) InDelta(expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
|
|
||||||
return InDelta(a.t, expected, actual, delta, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InEpsilon asserts that expected and actual have a relative error less than epsilon
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) InEpsilon(expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
|
|
||||||
return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NoError asserts that a function returned no error (i.e. `nil`).
|
|
||||||
//
|
|
||||||
// actualObj, err := SomeFunction()
|
|
||||||
// if assert.NoError(err) {
|
|
||||||
// assert.Equal(actualObj, expectedObj)
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) NoError(theError error, msgAndArgs ...interface{}) bool {
|
|
||||||
return NoError(a.t, theError, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error asserts that a function returned an error (i.e. not `nil`).
|
|
||||||
//
|
|
||||||
// actualObj, err := SomeFunction()
|
|
||||||
// if assert.Error(err, "An error was expected") {
|
|
||||||
// assert.Equal(err, expectedError)
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) Error(theError error, msgAndArgs ...interface{}) bool {
|
|
||||||
return Error(a.t, theError, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// EqualError asserts that a function returned an error (i.e. not `nil`)
|
|
||||||
// and that it is equal to the provided error.
|
|
||||||
//
|
|
||||||
// actualObj, err := SomeFunction()
|
|
||||||
// if assert.Error(err, "An error was expected") {
|
|
||||||
// assert.Equal(err, expectedError)
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
|
|
||||||
return EqualError(a.t, theError, errString, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Regexp asserts that a specified regexp matches a string.
|
|
||||||
//
|
|
||||||
// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
|
|
||||||
// assert.Regexp(t, "start...$", "it's not starting")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
|
|
||||||
return Regexp(a.t, rx, str, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotRegexp asserts that a specified regexp does not match a string.
|
|
||||||
//
|
|
||||||
// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
|
|
||||||
// assert.NotRegexp(t, "^start", "it's not starting")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
|
|
||||||
return NotRegexp(a.t, rx, str, msgAndArgs...)
|
|
||||||
}
|
|
157
Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go
generated
vendored
157
Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go
generated
vendored
@ -1,157 +0,0 @@
|
|||||||
package assert
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// httpCode is a helper that returns HTTP code of the response. It returns -1
|
|
||||||
// if building a new request fails.
|
|
||||||
func httpCode(handler http.HandlerFunc, mode, url string, values url.Values) int {
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
req, err := http.NewRequest(mode, url+"?"+values.Encode(), nil)
|
|
||||||
if err != nil {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
handler(w, req)
|
|
||||||
return w.Code
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPSuccess asserts that a specified handler returns a success status code.
|
|
||||||
//
|
|
||||||
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func HTTPSuccess(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool {
|
|
||||||
code := httpCode(handler, mode, url, values)
|
|
||||||
if code == -1 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return code >= http.StatusOK && code <= http.StatusPartialContent
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPRedirect asserts that a specified handler returns a redirect status code.
|
|
||||||
//
|
|
||||||
// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func HTTPRedirect(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool {
|
|
||||||
code := httpCode(handler, mode, url, values)
|
|
||||||
if code == -1 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPError asserts that a specified handler returns an error status code.
|
|
||||||
//
|
|
||||||
// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func HTTPError(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool {
|
|
||||||
code := httpCode(handler, mode, url, values)
|
|
||||||
if code == -1 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return code >= http.StatusBadRequest
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPBody is a helper that returns HTTP body of the response. It returns
|
|
||||||
// empty string if building a new request fails.
|
|
||||||
func HTTPBody(handler http.HandlerFunc, mode, url string, values url.Values) string {
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
req, err := http.NewRequest(mode, url+"?"+values.Encode(), nil)
|
|
||||||
if err != nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
handler(w, req)
|
|
||||||
return w.Body.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPBodyContains asserts that a specified handler returns a
|
|
||||||
// body that contains a string.
|
|
||||||
//
|
|
||||||
// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool {
|
|
||||||
body := HTTPBody(handler, mode, url, values)
|
|
||||||
|
|
||||||
contains := strings.Contains(body, fmt.Sprint(str))
|
|
||||||
if !contains {
|
|
||||||
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
|
|
||||||
}
|
|
||||||
|
|
||||||
return contains
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPBodyNotContains asserts that a specified handler returns a
|
|
||||||
// body that does not contain a string.
|
|
||||||
//
|
|
||||||
// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool {
|
|
||||||
body := HTTPBody(handler, mode, url, values)
|
|
||||||
|
|
||||||
contains := strings.Contains(body, fmt.Sprint(str))
|
|
||||||
if contains {
|
|
||||||
Fail(t, "Expected response body for %s to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)
|
|
||||||
}
|
|
||||||
|
|
||||||
return !contains
|
|
||||||
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// Assertions Wrappers
|
|
||||||
//
|
|
||||||
|
|
||||||
// HTTPSuccess asserts that a specified handler returns a success status code.
|
|
||||||
//
|
|
||||||
// assert.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, mode, url string, values url.Values) bool {
|
|
||||||
return HTTPSuccess(a.t, handler, mode, url, values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPRedirect asserts that a specified handler returns a redirect status code.
|
|
||||||
//
|
|
||||||
// assert.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, mode, url string, values url.Values) bool {
|
|
||||||
return HTTPRedirect(a.t, handler, mode, url, values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPError asserts that a specified handler returns an error status code.
|
|
||||||
//
|
|
||||||
// assert.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) HTTPError(handler http.HandlerFunc, mode, url string, values url.Values) bool {
|
|
||||||
return HTTPError(a.t, handler, mode, url, values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPBodyContains asserts that a specified handler returns a
|
|
||||||
// body that contains a string.
|
|
||||||
//
|
|
||||||
// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool {
|
|
||||||
return HTTPBodyContains(a.t, handler, mode, url, values, str)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPBodyNotContains asserts that a specified handler returns a
|
|
||||||
// body that does not contain a string.
|
|
||||||
//
|
|
||||||
// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
|
|
||||||
//
|
|
||||||
// Returns whether the assertion was successful (true) or not (false).
|
|
||||||
func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool {
|
|
||||||
return HTTPBodyNotContains(a.t, handler, mode, url, values, str)
|
|
||||||
}
|
|
100
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
generated
vendored
100
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
generated
vendored
@ -1,100 +0,0 @@
|
|||||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file.
|
|
||||||
|
|
||||||
package leveldb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reader is the interface that wraps basic Get and NewIterator methods.
|
|
||||||
// This interface implemented by both DB and Snapshot.
|
|
||||||
type Reader interface {
|
|
||||||
Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
|
|
||||||
NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
|
|
||||||
}
|
|
||||||
|
|
||||||
type Sizes []uint64
|
|
||||||
|
|
||||||
// Sum returns sum of the sizes.
|
|
||||||
func (p Sizes) Sum() (n uint64) {
|
|
||||||
for _, s := range p {
|
|
||||||
n += s
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
// Logging.
|
|
||||||
func (db *DB) log(v ...interface{}) { db.s.log(v...) }
|
|
||||||
func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) }
|
|
||||||
|
|
||||||
// Check and clean files.
|
|
||||||
func (db *DB) checkAndCleanFiles() error {
|
|
||||||
v := db.s.version()
|
|
||||||
defer v.release()
|
|
||||||
|
|
||||||
tablesMap := make(map[uint64]bool)
|
|
||||||
for _, tables := range v.tables {
|
|
||||||
for _, t := range tables {
|
|
||||||
tablesMap[t.file.Num()] = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
files, err := db.s.getFiles(storage.TypeAll)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var nTables int
|
|
||||||
var rem []storage.File
|
|
||||||
for _, f := range files {
|
|
||||||
keep := true
|
|
||||||
switch f.Type() {
|
|
||||||
case storage.TypeManifest:
|
|
||||||
keep = f.Num() >= db.s.manifestFile.Num()
|
|
||||||
case storage.TypeJournal:
|
|
||||||
if db.frozenJournalFile != nil {
|
|
||||||
keep = f.Num() >= db.frozenJournalFile.Num()
|
|
||||||
} else {
|
|
||||||
keep = f.Num() >= db.journalFile.Num()
|
|
||||||
}
|
|
||||||
case storage.TypeTable:
|
|
||||||
_, keep = tablesMap[f.Num()]
|
|
||||||
if keep {
|
|
||||||
tablesMap[f.Num()] = true
|
|
||||||
nTables++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !keep {
|
|
||||||
rem = append(rem, f)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if nTables != len(tablesMap) {
|
|
||||||
var missing []*storage.FileInfo
|
|
||||||
for num, present := range tablesMap {
|
|
||||||
if !present {
|
|
||||||
missing = append(missing, &storage.FileInfo{Type: storage.TypeTable, Num: num})
|
|
||||||
db.logf("db@janitor table missing @%d", num)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return errors.NewErrCorrupted(nil, &errors.ErrMissingFiles{Files: missing})
|
|
||||||
}
|
|
||||||
|
|
||||||
db.logf("db@janitor F·%d G·%d", len(files), len(rem))
|
|
||||||
for _, f := range rem {
|
|
||||||
db.logf("db@janitor removing %s-%d", f.Type(), f.Num())
|
|
||||||
if err := f.Remove(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
58
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go
generated
vendored
58
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go
generated
vendored
@ -1,58 +0,0 @@
|
|||||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file.
|
|
||||||
|
|
||||||
package leveldb
|
|
||||||
|
|
||||||
import (
|
|
||||||
. "github.com/onsi/ginkgo"
|
|
||||||
. "github.com/onsi/gomega"
|
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
var _ = testutil.Defer(func() {
|
|
||||||
Describe("Leveldb external", func() {
|
|
||||||
o := &opt.Options{
|
|
||||||
DisableBlockCache: true,
|
|
||||||
BlockRestartInterval: 5,
|
|
||||||
BlockSize: 80,
|
|
||||||
Compression: opt.NoCompression,
|
|
||||||
OpenFilesCacheCapacity: -1,
|
|
||||||
Strict: opt.StrictAll,
|
|
||||||
WriteBuffer: 1000,
|
|
||||||
CompactionTableSize: 2000,
|
|
||||||
}
|
|
||||||
|
|
||||||
Describe("write test", func() {
|
|
||||||
It("should do write correctly", func(done Done) {
|
|
||||||
db := newTestingDB(o, nil, nil)
|
|
||||||
t := testutil.DBTesting{
|
|
||||||
DB: db,
|
|
||||||
Deleted: testutil.KeyValue_Generate(nil, 500, 1, 50, 5, 5).Clone(),
|
|
||||||
}
|
|
||||||
testutil.DoDBTesting(&t)
|
|
||||||
db.TestClose()
|
|
||||||
done <- true
|
|
||||||
}, 20.0)
|
|
||||||
})
|
|
||||||
|
|
||||||
Describe("read test", func() {
|
|
||||||
testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB {
|
|
||||||
// Building the DB.
|
|
||||||
db := newTestingDB(o, nil, nil)
|
|
||||||
kv.IterateShuffled(nil, func(i int, key, value []byte) {
|
|
||||||
err := db.TestPut(key, value)
|
|
||||||
Expect(err).NotTo(HaveOccurred())
|
|
||||||
})
|
|
||||||
|
|
||||||
return db
|
|
||||||
}, func(db testutil.DB) {
|
|
||||||
db.(*testingDB).TestClose()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
142
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
generated
vendored
142
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
generated
vendored
@ -1,142 +0,0 @@
|
|||||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file.
|
|
||||||
|
|
||||||
package leveldb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ErrIkeyCorrupted struct {
|
|
||||||
Ikey []byte
|
|
||||||
Reason string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *ErrIkeyCorrupted) Error() string {
|
|
||||||
return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newErrIkeyCorrupted(ikey []byte, reason string) error {
|
|
||||||
return errors.NewErrCorrupted(nil, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason})
|
|
||||||
}
|
|
||||||
|
|
||||||
type kType int
|
|
||||||
|
|
||||||
func (kt kType) String() string {
|
|
||||||
switch kt {
|
|
||||||
case ktDel:
|
|
||||||
return "d"
|
|
||||||
case ktVal:
|
|
||||||
return "v"
|
|
||||||
}
|
|
||||||
return "x"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value types encoded as the last component of internal keys.
|
|
||||||
// Don't modify; this value are saved to disk.
|
|
||||||
const (
|
|
||||||
ktDel kType = iota
|
|
||||||
ktVal
|
|
||||||
)
|
|
||||||
|
|
||||||
// ktSeek defines the kType that should be passed when constructing an
|
|
||||||
// internal key for seeking to a particular sequence number (since we
|
|
||||||
// sort sequence numbers in decreasing order and the value type is
|
|
||||||
// embedded as the low 8 bits in the sequence number in internal keys,
|
|
||||||
// we need to use the highest-numbered ValueType, not the lowest).
|
|
||||||
const ktSeek = ktVal
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Maximum value possible for sequence number; the 8-bits are
|
|
||||||
// used by value type, so its can packed together in single
|
|
||||||
// 64-bit integer.
|
|
||||||
kMaxSeq uint64 = (uint64(1) << 56) - 1
|
|
||||||
// Maximum value possible for packed sequence number and type.
|
|
||||||
kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek)
|
|
||||||
)
|
|
||||||
|
|
||||||
// Maximum number encoded in bytes.
|
|
||||||
var kMaxNumBytes = make([]byte, 8)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum)
|
|
||||||
}
|
|
||||||
|
|
||||||
type iKey []byte
|
|
||||||
|
|
||||||
func newIkey(ukey []byte, seq uint64, kt kType) iKey {
|
|
||||||
if seq > kMaxSeq {
|
|
||||||
panic("leveldb: invalid sequence number")
|
|
||||||
} else if kt > ktVal {
|
|
||||||
panic("leveldb: invalid type")
|
|
||||||
}
|
|
||||||
|
|
||||||
ik := make(iKey, len(ukey)+8)
|
|
||||||
copy(ik, ukey)
|
|
||||||
binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(kt))
|
|
||||||
return ik
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) {
|
|
||||||
if len(ik) < 8 {
|
|
||||||
return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length")
|
|
||||||
}
|
|
||||||
num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
|
|
||||||
seq, kt = uint64(num>>8), kType(num&0xff)
|
|
||||||
if kt > ktVal {
|
|
||||||
return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type")
|
|
||||||
}
|
|
||||||
ukey = ik[:len(ik)-8]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func validIkey(ik []byte) bool {
|
|
||||||
_, _, _, err := parseIkey(ik)
|
|
||||||
return err == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ik iKey) assert() {
|
|
||||||
if ik == nil {
|
|
||||||
panic("leveldb: nil iKey")
|
|
||||||
}
|
|
||||||
if len(ik) < 8 {
|
|
||||||
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ik iKey) ukey() []byte {
|
|
||||||
ik.assert()
|
|
||||||
return ik[:len(ik)-8]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ik iKey) num() uint64 {
|
|
||||||
ik.assert()
|
|
||||||
return binary.LittleEndian.Uint64(ik[len(ik)-8:])
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ik iKey) parseNum() (seq uint64, kt kType) {
|
|
||||||
num := ik.num()
|
|
||||||
seq, kt = uint64(num>>8), kType(num&0xff)
|
|
||||||
if kt > ktVal {
|
|
||||||
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ik iKey) String() string {
|
|
||||||
if ik == nil {
|
|
||||||
return "<nil>"
|
|
||||||
}
|
|
||||||
|
|
||||||
if ukey, seq, kt, err := parseIkey(ik); err == nil {
|
|
||||||
return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
|
|
||||||
} else {
|
|
||||||
return "<invalid>"
|
|
||||||
}
|
|
||||||
}
|
|
455
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
generated
vendored
455
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
generated
vendored
@ -1,455 +0,0 @@
|
|||||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file.
|
|
||||||
|
|
||||||
package leveldb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal"
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ErrManifestCorrupted struct {
|
|
||||||
Field string
|
|
||||||
Reason string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *ErrManifestCorrupted) Error() string {
|
|
||||||
return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newErrManifestCorrupted(f storage.File, field, reason string) error {
|
|
||||||
return errors.NewErrCorrupted(f, &ErrManifestCorrupted{field, reason})
|
|
||||||
}
|
|
||||||
|
|
||||||
// session represent a persistent database session.
|
|
||||||
type session struct {
|
|
||||||
// Need 64-bit alignment.
|
|
||||||
stNextFileNum uint64 // current unused file number
|
|
||||||
stJournalNum uint64 // current journal file number; need external synchronization
|
|
||||||
stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb
|
|
||||||
stSeqNum uint64 // last mem compacted seq; need external synchronization
|
|
||||||
stTempFileNum uint64
|
|
||||||
|
|
||||||
stor storage.Storage
|
|
||||||
storLock util.Releaser
|
|
||||||
o *cachedOptions
|
|
||||||
icmp *iComparer
|
|
||||||
tops *tOps
|
|
||||||
|
|
||||||
manifest *journal.Writer
|
|
||||||
manifestWriter storage.Writer
|
|
||||||
manifestFile storage.File
|
|
||||||
|
|
||||||
stCompPtrs []iKey // compaction pointers; need external synchronization
|
|
||||||
stVersion *version // current version
|
|
||||||
vmu sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// Creates new initialized session instance.
|
|
||||||
func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
|
|
||||||
if stor == nil {
|
|
||||||
return nil, os.ErrInvalid
|
|
||||||
}
|
|
||||||
storLock, err := stor.Lock()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s = &session{
|
|
||||||
stor: stor,
|
|
||||||
storLock: storLock,
|
|
||||||
stCompPtrs: make([]iKey, o.GetNumLevel()),
|
|
||||||
}
|
|
||||||
s.setOptions(o)
|
|
||||||
s.tops = newTableOps(s)
|
|
||||||
s.setVersion(newVersion(s))
|
|
||||||
s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close session.
|
|
||||||
func (s *session) close() {
|
|
||||||
s.tops.close()
|
|
||||||
if s.manifest != nil {
|
|
||||||
s.manifest.Close()
|
|
||||||
}
|
|
||||||
if s.manifestWriter != nil {
|
|
||||||
s.manifestWriter.Close()
|
|
||||||
}
|
|
||||||
s.manifest = nil
|
|
||||||
s.manifestWriter = nil
|
|
||||||
s.manifestFile = nil
|
|
||||||
s.stVersion = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Release session lock.
|
|
||||||
func (s *session) release() {
|
|
||||||
s.storLock.Release()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a new database session; need external synchronization.
|
|
||||||
func (s *session) create() error {
|
|
||||||
// create manifest
|
|
||||||
return s.newManifest(nil, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Recover a database session; need external synchronization.
|
|
||||||
func (s *session) recover() (err error) {
|
|
||||||
defer func() {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
// Don't return os.ErrNotExist if the underlying storage contains
|
|
||||||
// other files that belong to LevelDB. So the DB won't get trashed.
|
|
||||||
if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 {
|
|
||||||
err = &errors.ErrCorrupted{File: &storage.FileInfo{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
m, err := s.stor.GetManifest()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
reader, err := m.Open()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer reader.Close()
|
|
||||||
strict := s.o.GetStrict(opt.StrictManifest)
|
|
||||||
jr := journal.NewReader(reader, dropper{s, m}, strict, true)
|
|
||||||
|
|
||||||
staging := s.stVersion.newStaging()
|
|
||||||
rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
|
|
||||||
for {
|
|
||||||
var r io.Reader
|
|
||||||
r, err = jr.Next()
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
err = nil
|
|
||||||
break
|
|
||||||
}
|
|
||||||
return errors.SetFile(err, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = rec.decode(r)
|
|
||||||
if err == nil {
|
|
||||||
// save compact pointers
|
|
||||||
for _, r := range rec.compPtrs {
|
|
||||||
s.stCompPtrs[r.level] = iKey(r.ikey)
|
|
||||||
}
|
|
||||||
// commit record to version staging
|
|
||||||
staging.commit(rec)
|
|
||||||
} else {
|
|
||||||
err = errors.SetFile(err, m)
|
|
||||||
if strict || !errors.IsCorrupted(err) {
|
|
||||||
return
|
|
||||||
} else {
|
|
||||||
s.logf("manifest error: %v (skipped)", errors.SetFile(err, m))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
rec.resetCompPtrs()
|
|
||||||
rec.resetAddedTables()
|
|
||||||
rec.resetDeletedTables()
|
|
||||||
}
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case !rec.has(recComparer):
|
|
||||||
return newErrManifestCorrupted(m, "comparer", "missing")
|
|
||||||
case rec.comparer != s.icmp.uName():
|
|
||||||
return newErrManifestCorrupted(m, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer))
|
|
||||||
case !rec.has(recNextFileNum):
|
|
||||||
return newErrManifestCorrupted(m, "next-file-num", "missing")
|
|
||||||
case !rec.has(recJournalNum):
|
|
||||||
return newErrManifestCorrupted(m, "journal-file-num", "missing")
|
|
||||||
case !rec.has(recSeqNum):
|
|
||||||
return newErrManifestCorrupted(m, "seq-num", "missing")
|
|
||||||
}
|
|
||||||
|
|
||||||
s.manifestFile = m
|
|
||||||
s.setVersion(staging.finish())
|
|
||||||
s.setNextFileNum(rec.nextFileNum)
|
|
||||||
s.recordCommited(rec)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Commit session; need external synchronization.
|
|
||||||
func (s *session) commit(r *sessionRecord) (err error) {
|
|
||||||
v := s.version()
|
|
||||||
defer v.release()
|
|
||||||
|
|
||||||
// spawn new version based on current version
|
|
||||||
nv := v.spawn(r)
|
|
||||||
|
|
||||||
if s.manifest == nil {
|
|
||||||
// manifest journal writer not yet created, create one
|
|
||||||
err = s.newManifest(r, nv)
|
|
||||||
} else {
|
|
||||||
err = s.flushManifest(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// finally, apply new version if no error rise
|
|
||||||
if err == nil {
|
|
||||||
s.setVersion(nv)
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pick a compaction based on current state; need external synchronization.
|
|
||||||
func (s *session) pickCompaction() *compaction {
|
|
||||||
v := s.version()
|
|
||||||
|
|
||||||
var level int
|
|
||||||
var t0 tFiles
|
|
||||||
if v.cScore >= 1 {
|
|
||||||
level = v.cLevel
|
|
||||||
cptr := s.stCompPtrs[level]
|
|
||||||
tables := v.tables[level]
|
|
||||||
for _, t := range tables {
|
|
||||||
if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
|
|
||||||
t0 = append(t0, t)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(t0) == 0 {
|
|
||||||
t0 = append(t0, tables[0])
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if p := atomic.LoadPointer(&v.cSeek); p != nil {
|
|
||||||
ts := (*tSet)(p)
|
|
||||||
level = ts.level
|
|
||||||
t0 = append(t0, ts.table)
|
|
||||||
} else {
|
|
||||||
v.release()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return newCompaction(s, v, level, t0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create compaction from given level and range; need external synchronization.
|
|
||||||
func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
|
|
||||||
v := s.version()
|
|
||||||
|
|
||||||
t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
|
|
||||||
if len(t0) == 0 {
|
|
||||||
v.release()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Avoid compacting too much in one shot in case the range is large.
|
|
||||||
// But we cannot do this for level-0 since level-0 files can overlap
|
|
||||||
// and we must not pick one file and drop another older file if the
|
|
||||||
// two files overlap.
|
|
||||||
if level > 0 {
|
|
||||||
limit := uint64(v.s.o.GetCompactionSourceLimit(level))
|
|
||||||
total := uint64(0)
|
|
||||||
for i, t := range t0 {
|
|
||||||
total += t.size
|
|
||||||
if total >= limit {
|
|
||||||
s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
|
|
||||||
t0 = t0[:i+1]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return newCompaction(s, v, level, t0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
|
|
||||||
c := &compaction{
|
|
||||||
s: s,
|
|
||||||
v: v,
|
|
||||||
level: level,
|
|
||||||
tables: [2]tFiles{t0, nil},
|
|
||||||
maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
|
|
||||||
tPtrs: make([]int, s.o.GetNumLevel()),
|
|
||||||
}
|
|
||||||
c.expand()
|
|
||||||
c.save()
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// compaction represent a compaction state.
|
|
||||||
type compaction struct {
|
|
||||||
s *session
|
|
||||||
v *version
|
|
||||||
|
|
||||||
level int
|
|
||||||
tables [2]tFiles
|
|
||||||
maxGPOverlaps uint64
|
|
||||||
|
|
||||||
gp tFiles
|
|
||||||
gpi int
|
|
||||||
seenKey bool
|
|
||||||
gpOverlappedBytes uint64
|
|
||||||
imin, imax iKey
|
|
||||||
tPtrs []int
|
|
||||||
released bool
|
|
||||||
|
|
||||||
snapGPI int
|
|
||||||
snapSeenKey bool
|
|
||||||
snapGPOverlappedBytes uint64
|
|
||||||
snapTPtrs []int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *compaction) save() {
|
|
||||||
c.snapGPI = c.gpi
|
|
||||||
c.snapSeenKey = c.seenKey
|
|
||||||
c.snapGPOverlappedBytes = c.gpOverlappedBytes
|
|
||||||
c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *compaction) restore() {
|
|
||||||
c.gpi = c.snapGPI
|
|
||||||
c.seenKey = c.snapSeenKey
|
|
||||||
c.gpOverlappedBytes = c.snapGPOverlappedBytes
|
|
||||||
c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *compaction) release() {
|
|
||||||
if !c.released {
|
|
||||||
c.released = true
|
|
||||||
c.v.release()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Expand compacted tables; need external synchronization.
|
|
||||||
func (c *compaction) expand() {
|
|
||||||
limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
|
|
||||||
vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]
|
|
||||||
|
|
||||||
t0, t1 := c.tables[0], c.tables[1]
|
|
||||||
imin, imax := t0.getRange(c.s.icmp)
|
|
||||||
// We expand t0 here just incase ukey hop across tables.
|
|
||||||
t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
|
|
||||||
if len(t0) != len(c.tables[0]) {
|
|
||||||
imin, imax = t0.getRange(c.s.icmp)
|
|
||||||
}
|
|
||||||
t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
|
|
||||||
// Get entire range covered by compaction.
|
|
||||||
amin, amax := append(t0, t1...).getRange(c.s.icmp)
|
|
||||||
|
|
||||||
// See if we can grow the number of inputs in "level" without
|
|
||||||
// changing the number of "level+1" files we pick up.
|
|
||||||
if len(t1) > 0 {
|
|
||||||
exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
|
|
||||||
if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
|
|
||||||
xmin, xmax := exp0.getRange(c.s.icmp)
|
|
||||||
exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
|
|
||||||
if len(exp1) == len(t1) {
|
|
||||||
c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
|
|
||||||
c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
|
|
||||||
len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
|
|
||||||
imin, imax = xmin, xmax
|
|
||||||
t0, t1 = exp0, exp1
|
|
||||||
amin, amax = append(t0, t1...).getRange(c.s.icmp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute the set of grandparent files that overlap this compaction
|
|
||||||
// (parent == level+1; grandparent == level+2)
|
|
||||||
if c.level+2 < c.s.o.GetNumLevel() {
|
|
||||||
c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
|
|
||||||
}
|
|
||||||
|
|
||||||
c.tables[0], c.tables[1] = t0, t1
|
|
||||||
c.imin, c.imax = imin, imax
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check whether compaction is trivial.
|
|
||||||
func (c *compaction) trivial() bool {
|
|
||||||
return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *compaction) baseLevelForKey(ukey []byte) bool {
|
|
||||||
for level, tables := range c.v.tables[c.level+2:] {
|
|
||||||
for c.tPtrs[level] < len(tables) {
|
|
||||||
t := tables[c.tPtrs[level]]
|
|
||||||
if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
|
|
||||||
// We've advanced far enough.
|
|
||||||
if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
|
|
||||||
// Key falls in this file's range, so definitely not base level.
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
c.tPtrs[level]++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *compaction) shouldStopBefore(ikey iKey) bool {
|
|
||||||
for ; c.gpi < len(c.gp); c.gpi++ {
|
|
||||||
gp := c.gp[c.gpi]
|
|
||||||
if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if c.seenKey {
|
|
||||||
c.gpOverlappedBytes += gp.size
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.seenKey = true
|
|
||||||
|
|
||||||
if c.gpOverlappedBytes > c.maxGPOverlaps {
|
|
||||||
// Too much overlap for current output; start new output.
|
|
||||||
c.gpOverlappedBytes = 0
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Creates an iterator.
|
|
||||||
func (c *compaction) newIterator() iterator.Iterator {
|
|
||||||
// Creates iterator slice.
|
|
||||||
icap := len(c.tables)
|
|
||||||
if c.level == 0 {
|
|
||||||
// Special case for level-0
|
|
||||||
icap = len(c.tables[0]) + 1
|
|
||||||
}
|
|
||||||
its := make([]iterator.Iterator, 0, icap)
|
|
||||||
|
|
||||||
// Options.
|
|
||||||
ro := &opt.ReadOptions{
|
|
||||||
DontFillCache: true,
|
|
||||||
Strict: opt.StrictOverride,
|
|
||||||
}
|
|
||||||
strict := c.s.o.GetStrict(opt.StrictCompaction)
|
|
||||||
if strict {
|
|
||||||
ro.Strict |= opt.StrictReader
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, tables := range c.tables {
|
|
||||||
if len(tables) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Level-0 is not sorted and may overlaps each other.
|
|
||||||
if c.level+i == 0 {
|
|
||||||
for _, t := range tables {
|
|
||||||
its = append(its, c.s.tops.newIterator(t, nil, ro))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
|
|
||||||
its = append(its, it)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return iterator.NewMergedIterator(its, c.s.icmp, strict)
|
|
||||||
}
|
|
534
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
generated
vendored
534
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
generated
vendored
@ -1,534 +0,0 @@
|
|||||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
|
||||||
// All rights reservefs.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
var errFileOpen = errors.New("leveldb/storage: file still open")
|
|
||||||
|
|
||||||
type fileLock interface {
|
|
||||||
release() error
|
|
||||||
}
|
|
||||||
|
|
||||||
type fileStorageLock struct {
|
|
||||||
fs *fileStorage
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lock *fileStorageLock) Release() {
|
|
||||||
fs := lock.fs
|
|
||||||
fs.mu.Lock()
|
|
||||||
defer fs.mu.Unlock()
|
|
||||||
if fs.slock == lock {
|
|
||||||
fs.slock = nil
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// fileStorage is a file-system backed storage.
|
|
||||||
type fileStorage struct {
|
|
||||||
path string
|
|
||||||
|
|
||||||
mu sync.Mutex
|
|
||||||
flock fileLock
|
|
||||||
slock *fileStorageLock
|
|
||||||
logw *os.File
|
|
||||||
buf []byte
|
|
||||||
// Opened file counter; if open < 0 means closed.
|
|
||||||
open int
|
|
||||||
day int
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpenFile returns a new filesytem-backed storage implementation with the given
|
|
||||||
// path. This also hold a file lock, so any subsequent attempt to open the same
|
|
||||||
// path will fail.
|
|
||||||
//
|
|
||||||
// The storage must be closed after use, by calling Close method.
|
|
||||||
func OpenFile(path string) (Storage, error) {
|
|
||||||
if err := os.MkdirAll(path, 0755); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
flock, err := newFileLock(filepath.Join(path, "LOCK"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
flock.release()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
rename(filepath.Join(path, "LOG"), filepath.Join(path, "LOG.old"))
|
|
||||||
logw, err := os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
fs := &fileStorage{path: path, flock: flock, logw: logw}
|
|
||||||
runtime.SetFinalizer(fs, (*fileStorage).Close)
|
|
||||||
return fs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fs *fileStorage) Lock() (util.Releaser, error) {
|
|
||||||
fs.mu.Lock()
|
|
||||||
defer fs.mu.Unlock()
|
|
||||||
if fs.open < 0 {
|
|
||||||
return nil, ErrClosed
|
|
||||||
}
|
|
||||||
if fs.slock != nil {
|
|
||||||
return nil, ErrLocked
|
|
||||||
}
|
|
||||||
fs.slock = &fileStorageLock{fs: fs}
|
|
||||||
return fs.slock, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func itoa(buf []byte, i int, wid int) []byte {
|
|
||||||
var u uint = uint(i)
|
|
||||||
if u == 0 && wid <= 1 {
|
|
||||||
return append(buf, '0')
|
|
||||||
}
|
|
||||||
|
|
||||||
// Assemble decimal in reverse order.
|
|
||||||
var b [32]byte
|
|
||||||
bp := len(b)
|
|
||||||
for ; u > 0 || wid > 0; u /= 10 {
|
|
||||||
bp--
|
|
||||||
wid--
|
|
||||||
b[bp] = byte(u%10) + '0'
|
|
||||||
}
|
|
||||||
return append(buf, b[bp:]...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fs *fileStorage) printDay(t time.Time) {
|
|
||||||
if fs.day == t.Day() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
fs.day = t.Day()
|
|
||||||
fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fs *fileStorage) doLog(t time.Time, str string) {
|
|
||||||
fs.printDay(t)
|
|
||||||
hour, min, sec := t.Clock()
|
|
||||||
msec := t.Nanosecond() / 1e3
|
|
||||||
// time
|
|
||||||
fs.buf = itoa(fs.buf[:0], hour, 2)
|
|
||||||
fs.buf = append(fs.buf, ':')
|
|
||||||
fs.buf = itoa(fs.buf, min, 2)
|
|
||||||
fs.buf = append(fs.buf, ':')
|
|
||||||
fs.buf = itoa(fs.buf, sec, 2)
|
|
||||||
fs.buf = append(fs.buf, '.')
|
|
||||||
fs.buf = itoa(fs.buf, msec, 6)
|
|
||||||
fs.buf = append(fs.buf, ' ')
|
|
||||||
// write
|
|
||||||
fs.buf = append(fs.buf, []byte(str)...)
|
|
||||||
fs.buf = append(fs.buf, '\n')
|
|
||||||
fs.logw.Write(fs.buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fs *fileStorage) Log(str string) {
|
|
||||||
t := time.Now()
|
|
||||||
fs.mu.Lock()
|
|
||||||
defer fs.mu.Unlock()
|
|
||||||
if fs.open < 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
fs.doLog(t, str)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fs *fileStorage) log(str string) {
|
|
||||||
fs.doLog(time.Now(), str)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fs *fileStorage) GetFile(num uint64, t FileType) File {
|
|
||||||
return &file{fs: fs, num: num, t: t}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fs *fileStorage) GetFiles(t FileType) (ff []File, err error) {
|
|
||||||
fs.mu.Lock()
|
|
||||||
defer fs.mu.Unlock()
|
|
||||||
if fs.open < 0 {
|
|
||||||
return nil, ErrClosed
|
|
||||||
}
|
|
||||||
dir, err := os.Open(fs.path)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
fnn, err := dir.Readdirnames(0)
|
|
||||||
// Close the dir first before checking for Readdirnames error.
|
|
||||||
if err := dir.Close(); err != nil {
|
|
||||||
fs.log(fmt.Sprintf("close dir: %v", err))
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
f := &file{fs: fs}
|
|
||||||
for _, fn := range fnn {
|
|
||||||
if f.parse(fn) && (f.t&t) != 0 {
|
|
||||||
ff = append(ff, f)
|
|
||||||
f = &file{fs: fs}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fs *fileStorage) GetManifest() (f File, err error) {
|
|
||||||
fs.mu.Lock()
|
|
||||||
defer fs.mu.Unlock()
|
|
||||||
if fs.open < 0 {
|
|
||||||
return nil, ErrClosed
|
|
||||||
}
|
|
||||||
dir, err := os.Open(fs.path)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
fnn, err := dir.Readdirnames(0)
|
|
||||||
// Close the dir first before checking for Readdirnames error.
|
|
||||||
if err := dir.Close(); err != nil {
|
|
||||||
fs.log(fmt.Sprintf("close dir: %v", err))
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Find latest CURRENT file.
|
|
||||||
var rem []string
|
|
||||||
var pend bool
|
|
||||||
var cerr error
|
|
||||||
for _, fn := range fnn {
|
|
||||||
if strings.HasPrefix(fn, "CURRENT") {
|
|
||||||
pend1 := len(fn) > 7
|
|
||||||
// Make sure it is valid name for a CURRENT file, otherwise skip it.
|
|
||||||
if pend1 {
|
|
||||||
if fn[7] != '.' || len(fn) < 9 {
|
|
||||||
fs.log(fmt.Sprintf("skipping %s: invalid file name", fn))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil {
|
|
||||||
fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
path := filepath.Join(fs.path, fn)
|
|
||||||
r, e1 := os.OpenFile(path, os.O_RDONLY, 0)
|
|
||||||
if e1 != nil {
|
|
||||||
return nil, e1
|
|
||||||
}
|
|
||||||
b, e1 := ioutil.ReadAll(r)
|
|
||||||
if e1 != nil {
|
|
||||||
r.Close()
|
|
||||||
return nil, e1
|
|
||||||
}
|
|
||||||
f1 := &file{fs: fs}
|
|
||||||
if len(b) < 1 || b[len(b)-1] != '\n' || !f1.parse(string(b[:len(b)-1])) {
|
|
||||||
fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", fn))
|
|
||||||
if pend1 {
|
|
||||||
rem = append(rem, fn)
|
|
||||||
}
|
|
||||||
if !pend1 || cerr == nil {
|
|
||||||
cerr = fmt.Errorf("leveldb/storage: corrupted or incomplete %s file", fn)
|
|
||||||
}
|
|
||||||
} else if f != nil && f1.Num() < f.Num() {
|
|
||||||
fs.log(fmt.Sprintf("skipping %s: obsolete", fn))
|
|
||||||
if pend1 {
|
|
||||||
rem = append(rem, fn)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
f = f1
|
|
||||||
pend = pend1
|
|
||||||
}
|
|
||||||
if err := r.Close(); err != nil {
|
|
||||||
fs.log(fmt.Sprintf("close %s: %v", fn, err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Don't remove any files if there is no valid CURRENT file.
|
|
||||||
if f == nil {
|
|
||||||
if cerr != nil {
|
|
||||||
err = cerr
|
|
||||||
} else {
|
|
||||||
err = os.ErrNotExist
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Rename pending CURRENT file to an effective CURRENT.
|
|
||||||
if pend {
|
|
||||||
path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f.Num())
|
|
||||||
if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil {
|
|
||||||
fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", f.Num(), err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Remove obsolete or incomplete pending CURRENT files.
|
|
||||||
for _, fn := range rem {
|
|
||||||
path := filepath.Join(fs.path, fn)
|
|
||||||
if err := os.Remove(path); err != nil {
|
|
||||||
fs.log(fmt.Sprintf("remove %s: %v", fn, err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fs *fileStorage) SetManifest(f File) (err error) {
|
|
||||||
fs.mu.Lock()
|
|
||||||
defer fs.mu.Unlock()
|
|
||||||
if fs.open < 0 {
|
|
||||||
return ErrClosed
|
|
||||||
}
|
|
||||||
f2, ok := f.(*file)
|
|
||||||
if !ok || f2.t != TypeManifest {
|
|
||||||
return ErrInvalidFile
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
fs.log(fmt.Sprintf("CURRENT: %v", err))
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f2.Num())
|
|
||||||
w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = fmt.Fprintln(w, f2.name())
|
|
||||||
// Close the file first.
|
|
||||||
if err := w.Close(); err != nil {
|
|
||||||
fs.log(fmt.Sprintf("close CURRENT.%d: %v", f2.num, err))
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return rename(path, filepath.Join(fs.path, "CURRENT"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fs *fileStorage) Close() error {
|
|
||||||
fs.mu.Lock()
|
|
||||||
defer fs.mu.Unlock()
|
|
||||||
if fs.open < 0 {
|
|
||||||
return ErrClosed
|
|
||||||
}
|
|
||||||
// Clear the finalizer.
|
|
||||||
runtime.SetFinalizer(fs, nil)
|
|
||||||
|
|
||||||
if fs.open > 0 {
|
|
||||||
fs.log(fmt.Sprintf("refuse to close, %d files still open", fs.open))
|
|
||||||
return fmt.Errorf("leveldb/storage: cannot close, %d files still open", fs.open)
|
|
||||||
}
|
|
||||||
fs.open = -1
|
|
||||||
e1 := fs.logw.Close()
|
|
||||||
err := fs.flock.release()
|
|
||||||
if err == nil {
|
|
||||||
err = e1
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
type fileWrap struct {
|
|
||||||
*os.File
|
|
||||||
f *file
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fw fileWrap) Sync() error {
|
|
||||||
if err := fw.File.Sync(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if fw.f.Type() == TypeManifest {
|
|
||||||
// Also sync parent directory if file type is manifest.
|
|
||||||
// See: https://code.google.com/p/leveldb/issues/detail?id=190.
|
|
||||||
if err := syncDir(fw.f.fs.path); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fw fileWrap) Close() error {
|
|
||||||
f := fw.f
|
|
||||||
f.fs.mu.Lock()
|
|
||||||
defer f.fs.mu.Unlock()
|
|
||||||
if !f.open {
|
|
||||||
return ErrClosed
|
|
||||||
}
|
|
||||||
f.open = false
|
|
||||||
f.fs.open--
|
|
||||||
err := fw.File.Close()
|
|
||||||
if err != nil {
|
|
||||||
f.fs.log(fmt.Sprintf("close %s.%d: %v", f.Type(), f.Num(), err))
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
type file struct {
|
|
||||||
fs *fileStorage
|
|
||||||
num uint64
|
|
||||||
t FileType
|
|
||||||
open bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *file) Open() (Reader, error) {
|
|
||||||
f.fs.mu.Lock()
|
|
||||||
defer f.fs.mu.Unlock()
|
|
||||||
if f.fs.open < 0 {
|
|
||||||
return nil, ErrClosed
|
|
||||||
}
|
|
||||||
if f.open {
|
|
||||||
return nil, errFileOpen
|
|
||||||
}
|
|
||||||
of, err := os.OpenFile(f.path(), os.O_RDONLY, 0)
|
|
||||||
if err != nil {
|
|
||||||
if f.hasOldName() && os.IsNotExist(err) {
|
|
||||||
of, err = os.OpenFile(f.oldPath(), os.O_RDONLY, 0)
|
|
||||||
if err == nil {
|
|
||||||
goto ok
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
ok:
|
|
||||||
f.open = true
|
|
||||||
f.fs.open++
|
|
||||||
return fileWrap{of, f}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *file) Create() (Writer, error) {
|
|
||||||
f.fs.mu.Lock()
|
|
||||||
defer f.fs.mu.Unlock()
|
|
||||||
if f.fs.open < 0 {
|
|
||||||
return nil, ErrClosed
|
|
||||||
}
|
|
||||||
if f.open {
|
|
||||||
return nil, errFileOpen
|
|
||||||
}
|
|
||||||
of, err := os.OpenFile(f.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
f.open = true
|
|
||||||
f.fs.open++
|
|
||||||
return fileWrap{of, f}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *file) Replace(newfile File) error {
|
|
||||||
f.fs.mu.Lock()
|
|
||||||
defer f.fs.mu.Unlock()
|
|
||||||
if f.fs.open < 0 {
|
|
||||||
return ErrClosed
|
|
||||||
}
|
|
||||||
newfile2, ok := newfile.(*file)
|
|
||||||
if !ok {
|
|
||||||
return ErrInvalidFile
|
|
||||||
}
|
|
||||||
if f.open || newfile2.open {
|
|
||||||
return errFileOpen
|
|
||||||
}
|
|
||||||
return rename(newfile2.path(), f.path())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *file) Type() FileType {
|
|
||||||
return f.t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *file) Num() uint64 {
|
|
||||||
return f.num
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *file) Remove() error {
|
|
||||||
f.fs.mu.Lock()
|
|
||||||
defer f.fs.mu.Unlock()
|
|
||||||
if f.fs.open < 0 {
|
|
||||||
return ErrClosed
|
|
||||||
}
|
|
||||||
if f.open {
|
|
||||||
return errFileOpen
|
|
||||||
}
|
|
||||||
err := os.Remove(f.path())
|
|
||||||
if err != nil {
|
|
||||||
f.fs.log(fmt.Sprintf("remove %s.%d: %v", f.Type(), f.Num(), err))
|
|
||||||
}
|
|
||||||
// Also try remove file with old name, just in case.
|
|
||||||
if f.hasOldName() {
|
|
||||||
if e1 := os.Remove(f.oldPath()); !os.IsNotExist(e1) {
|
|
||||||
f.fs.log(fmt.Sprintf("remove %s.%d: %v (old name)", f.Type(), f.Num(), err))
|
|
||||||
err = e1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *file) hasOldName() bool {
|
|
||||||
return f.t == TypeTable
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *file) oldName() string {
|
|
||||||
switch f.t {
|
|
||||||
case TypeTable:
|
|
||||||
return fmt.Sprintf("%06d.sst", f.num)
|
|
||||||
}
|
|
||||||
return f.name()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *file) oldPath() string {
|
|
||||||
return filepath.Join(f.fs.path, f.oldName())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *file) name() string {
|
|
||||||
switch f.t {
|
|
||||||
case TypeManifest:
|
|
||||||
return fmt.Sprintf("MANIFEST-%06d", f.num)
|
|
||||||
case TypeJournal:
|
|
||||||
return fmt.Sprintf("%06d.log", f.num)
|
|
||||||
case TypeTable:
|
|
||||||
return fmt.Sprintf("%06d.ldb", f.num)
|
|
||||||
case TypeTemp:
|
|
||||||
return fmt.Sprintf("%06d.tmp", f.num)
|
|
||||||
default:
|
|
||||||
panic("invalid file type")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *file) path() string {
|
|
||||||
return filepath.Join(f.fs.path, f.name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *file) parse(name string) bool {
|
|
||||||
var num uint64
|
|
||||||
var tail string
|
|
||||||
_, err := fmt.Sscanf(name, "%d.%s", &num, &tail)
|
|
||||||
if err == nil {
|
|
||||||
switch tail {
|
|
||||||
case "log":
|
|
||||||
f.t = TypeJournal
|
|
||||||
case "ldb", "sst":
|
|
||||||
f.t = TypeTable
|
|
||||||
case "tmp":
|
|
||||||
f.t = TypeTemp
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
f.num = num
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &num, &tail)
|
|
||||||
if n == 1 {
|
|
||||||
f.t = TypeManifest
|
|
||||||
f.num = num
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
203
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
generated
vendored
203
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
generated
vendored
@ -1,203 +0,0 @@
|
|||||||
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
const typeShift = 3
|
|
||||||
|
|
||||||
type memStorageLock struct {
|
|
||||||
ms *memStorage
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lock *memStorageLock) Release() {
|
|
||||||
ms := lock.ms
|
|
||||||
ms.mu.Lock()
|
|
||||||
defer ms.mu.Unlock()
|
|
||||||
if ms.slock == lock {
|
|
||||||
ms.slock = nil
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// memStorage is a memory-backed storage.
|
|
||||||
type memStorage struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
slock *memStorageLock
|
|
||||||
files map[uint64]*memFile
|
|
||||||
manifest *memFilePtr
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMemStorage returns a new memory-backed storage implementation.
|
|
||||||
func NewMemStorage() Storage {
|
|
||||||
return &memStorage{
|
|
||||||
files: make(map[uint64]*memFile),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *memStorage) Lock() (util.Releaser, error) {
|
|
||||||
ms.mu.Lock()
|
|
||||||
defer ms.mu.Unlock()
|
|
||||||
if ms.slock != nil {
|
|
||||||
return nil, ErrLocked
|
|
||||||
}
|
|
||||||
ms.slock = &memStorageLock{ms: ms}
|
|
||||||
return ms.slock, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*memStorage) Log(str string) {}
|
|
||||||
|
|
||||||
func (ms *memStorage) GetFile(num uint64, t FileType) File {
|
|
||||||
return &memFilePtr{ms: ms, num: num, t: t}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *memStorage) GetFiles(t FileType) ([]File, error) {
|
|
||||||
ms.mu.Lock()
|
|
||||||
var ff []File
|
|
||||||
for x, _ := range ms.files {
|
|
||||||
num, mt := x>>typeShift, FileType(x)&TypeAll
|
|
||||||
if mt&t == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
ff = append(ff, &memFilePtr{ms: ms, num: num, t: mt})
|
|
||||||
}
|
|
||||||
ms.mu.Unlock()
|
|
||||||
return ff, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *memStorage) GetManifest() (File, error) {
|
|
||||||
ms.mu.Lock()
|
|
||||||
defer ms.mu.Unlock()
|
|
||||||
if ms.manifest == nil {
|
|
||||||
return nil, os.ErrNotExist
|
|
||||||
}
|
|
||||||
return ms.manifest, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *memStorage) SetManifest(f File) error {
|
|
||||||
fm, ok := f.(*memFilePtr)
|
|
||||||
if !ok || fm.t != TypeManifest {
|
|
||||||
return ErrInvalidFile
|
|
||||||
}
|
|
||||||
ms.mu.Lock()
|
|
||||||
ms.manifest = fm
|
|
||||||
ms.mu.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*memStorage) Close() error { return nil }
|
|
||||||
|
|
||||||
type memReader struct {
|
|
||||||
*bytes.Reader
|
|
||||||
m *memFile
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mr *memReader) Close() error {
|
|
||||||
return mr.m.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
type memFile struct {
|
|
||||||
bytes.Buffer
|
|
||||||
ms *memStorage
|
|
||||||
open bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*memFile) Sync() error { return nil }
|
|
||||||
func (m *memFile) Close() error {
|
|
||||||
m.ms.mu.Lock()
|
|
||||||
m.open = false
|
|
||||||
m.ms.mu.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type memFilePtr struct {
|
|
||||||
ms *memStorage
|
|
||||||
num uint64
|
|
||||||
t FileType
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *memFilePtr) x() uint64 {
|
|
||||||
return p.Num()<<typeShift | uint64(p.Type())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *memFilePtr) Open() (Reader, error) {
|
|
||||||
ms := p.ms
|
|
||||||
ms.mu.Lock()
|
|
||||||
defer ms.mu.Unlock()
|
|
||||||
if m, exist := ms.files[p.x()]; exist {
|
|
||||||
if m.open {
|
|
||||||
return nil, errFileOpen
|
|
||||||
}
|
|
||||||
m.open = true
|
|
||||||
return &memReader{Reader: bytes.NewReader(m.Bytes()), m: m}, nil
|
|
||||||
}
|
|
||||||
return nil, os.ErrNotExist
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *memFilePtr) Create() (Writer, error) {
|
|
||||||
ms := p.ms
|
|
||||||
ms.mu.Lock()
|
|
||||||
defer ms.mu.Unlock()
|
|
||||||
m, exist := ms.files[p.x()]
|
|
||||||
if exist {
|
|
||||||
if m.open {
|
|
||||||
return nil, errFileOpen
|
|
||||||
}
|
|
||||||
m.Reset()
|
|
||||||
} else {
|
|
||||||
m = &memFile{ms: ms}
|
|
||||||
ms.files[p.x()] = m
|
|
||||||
}
|
|
||||||
m.open = true
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *memFilePtr) Replace(newfile File) error {
|
|
||||||
p1, ok := newfile.(*memFilePtr)
|
|
||||||
if !ok {
|
|
||||||
return ErrInvalidFile
|
|
||||||
}
|
|
||||||
ms := p.ms
|
|
||||||
ms.mu.Lock()
|
|
||||||
defer ms.mu.Unlock()
|
|
||||||
m1, exist := ms.files[p1.x()]
|
|
||||||
if !exist {
|
|
||||||
return os.ErrNotExist
|
|
||||||
}
|
|
||||||
m0, exist := ms.files[p.x()]
|
|
||||||
if (exist && m0.open) || m1.open {
|
|
||||||
return errFileOpen
|
|
||||||
}
|
|
||||||
delete(ms.files, p1.x())
|
|
||||||
ms.files[p.x()] = m1
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *memFilePtr) Type() FileType {
|
|
||||||
return p.t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *memFilePtr) Num() uint64 {
|
|
||||||
return p.num
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *memFilePtr) Remove() error {
|
|
||||||
ms := p.ms
|
|
||||||
ms.mu.Lock()
|
|
||||||
defer ms.mu.Unlock()
|
|
||||||
if _, exist := ms.files[p.x()]; exist {
|
|
||||||
delete(ms.files, p.x())
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return os.ErrNotExist
|
|
||||||
}
|
|
157
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
generated
vendored
157
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
generated
vendored
@ -1,157 +0,0 @@
|
|||||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package storage provides storage abstraction for LevelDB.
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
type FileType uint32
|
|
||||||
|
|
||||||
const (
|
|
||||||
TypeManifest FileType = 1 << iota
|
|
||||||
TypeJournal
|
|
||||||
TypeTable
|
|
||||||
TypeTemp
|
|
||||||
|
|
||||||
TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp
|
|
||||||
)
|
|
||||||
|
|
||||||
func (t FileType) String() string {
|
|
||||||
switch t {
|
|
||||||
case TypeManifest:
|
|
||||||
return "manifest"
|
|
||||||
case TypeJournal:
|
|
||||||
return "journal"
|
|
||||||
case TypeTable:
|
|
||||||
return "table"
|
|
||||||
case TypeTemp:
|
|
||||||
return "temp"
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("<unknown:%d>", t)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument")
|
|
||||||
ErrLocked = errors.New("leveldb/storage: already locked")
|
|
||||||
ErrClosed = errors.New("leveldb/storage: closed")
|
|
||||||
)
|
|
||||||
|
|
||||||
// Syncer is the interface that wraps basic Sync method.
|
|
||||||
type Syncer interface {
|
|
||||||
// Sync commits the current contents of the file to stable storage.
|
|
||||||
Sync() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reader is the interface that groups the basic Read, Seek, ReadAt and Close
|
|
||||||
// methods.
|
|
||||||
type Reader interface {
|
|
||||||
io.ReadSeeker
|
|
||||||
io.ReaderAt
|
|
||||||
io.Closer
|
|
||||||
}
|
|
||||||
|
|
||||||
// Writer is the interface that groups the basic Write, Sync and Close
|
|
||||||
// methods.
|
|
||||||
type Writer interface {
|
|
||||||
io.WriteCloser
|
|
||||||
Syncer
|
|
||||||
}
|
|
||||||
|
|
||||||
// File is the file. A file instance must be goroutine-safe.
|
|
||||||
type File interface {
|
|
||||||
// Open opens the file for read. Returns os.ErrNotExist error
|
|
||||||
// if the file does not exist.
|
|
||||||
// Returns ErrClosed if the underlying storage is closed.
|
|
||||||
Open() (r Reader, err error)
|
|
||||||
|
|
||||||
// Create creates the file for writting. Truncate the file if
|
|
||||||
// already exist.
|
|
||||||
// Returns ErrClosed if the underlying storage is closed.
|
|
||||||
Create() (w Writer, err error)
|
|
||||||
|
|
||||||
// Replace replaces file with newfile.
|
|
||||||
// Returns ErrClosed if the underlying storage is closed.
|
|
||||||
Replace(newfile File) error
|
|
||||||
|
|
||||||
// Type returns the file type
|
|
||||||
Type() FileType
|
|
||||||
|
|
||||||
// Num returns the file number.
|
|
||||||
Num() uint64
|
|
||||||
|
|
||||||
// Remove removes the file.
|
|
||||||
// Returns ErrClosed if the underlying storage is closed.
|
|
||||||
Remove() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Storage is the storage. A storage instance must be goroutine-safe.
|
|
||||||
type Storage interface {
|
|
||||||
// Lock locks the storage. Any subsequent attempt to call Lock will fail
|
|
||||||
// until the last lock released.
|
|
||||||
// After use the caller should call the Release method.
|
|
||||||
Lock() (l util.Releaser, err error)
|
|
||||||
|
|
||||||
// Log logs a string. This is used for logging. An implementation
|
|
||||||
// may write to a file, stdout or simply do nothing.
|
|
||||||
Log(str string)
|
|
||||||
|
|
||||||
// GetFile returns a file for the given number and type. GetFile will never
|
|
||||||
// returns nil, even if the underlying storage is closed.
|
|
||||||
GetFile(num uint64, t FileType) File
|
|
||||||
|
|
||||||
// GetFiles returns a slice of files that match the given file types.
|
|
||||||
// The file types may be OR'ed together.
|
|
||||||
GetFiles(t FileType) ([]File, error)
|
|
||||||
|
|
||||||
// GetManifest returns a manifest file. Returns os.ErrNotExist if manifest
|
|
||||||
// file does not exist.
|
|
||||||
GetManifest() (File, error)
|
|
||||||
|
|
||||||
// SetManifest sets the given file as manifest file. The given file should
|
|
||||||
// be a manifest file type or error will be returned.
|
|
||||||
SetManifest(f File) error
|
|
||||||
|
|
||||||
// Close closes the storage. It is valid to call Close multiple times.
|
|
||||||
// Other methods should not be called after the storage has been closed.
|
|
||||||
Close() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileInfo wraps basic file info.
|
|
||||||
type FileInfo struct {
|
|
||||||
Type FileType
|
|
||||||
Num uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fi FileInfo) String() string {
|
|
||||||
switch fi.Type {
|
|
||||||
case TypeManifest:
|
|
||||||
return fmt.Sprintf("MANIFEST-%06d", fi.Num)
|
|
||||||
case TypeJournal:
|
|
||||||
return fmt.Sprintf("%06d.log", fi.Num)
|
|
||||||
case TypeTable:
|
|
||||||
return fmt.Sprintf("%06d.ldb", fi.Num)
|
|
||||||
case TypeTemp:
|
|
||||||
return fmt.Sprintf("%06d.tmp", fi.Num)
|
|
||||||
default:
|
|
||||||
return fmt.Sprintf("%#x-%d", fi.Type, fi.Num)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFileInfo creates new FileInfo from the given File. It will returns nil
|
|
||||||
// if File is nil.
|
|
||||||
func NewFileInfo(f File) *FileInfo {
|
|
||||||
if f == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &FileInfo{f.Type(), f.Num()}
|
|
||||||
}
|
|
539
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
generated
vendored
539
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
generated
vendored
@ -1,539 +0,0 @@
|
|||||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENE file.
|
|
||||||
|
|
||||||
package leveldb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"math/rand"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sync"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
const typeShift = 4
|
|
||||||
|
|
||||||
var (
|
|
||||||
tsErrInvalidFile = errors.New("leveldb.testStorage: invalid file for argument")
|
|
||||||
tsErrFileOpen = errors.New("leveldb.testStorage: file still open")
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
tsFSEnv = os.Getenv("GOLEVELDB_USEFS")
|
|
||||||
tsTempdir = os.Getenv("GOLEVELDB_TEMPDIR")
|
|
||||||
tsKeepFS = tsFSEnv == "2"
|
|
||||||
tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1"
|
|
||||||
tsMU = &sync.Mutex{}
|
|
||||||
tsNum = 0
|
|
||||||
)
|
|
||||||
|
|
||||||
type tsOp uint
|
|
||||||
|
|
||||||
const (
|
|
||||||
tsOpOpen tsOp = iota
|
|
||||||
tsOpCreate
|
|
||||||
tsOpRead
|
|
||||||
tsOpReadAt
|
|
||||||
tsOpWrite
|
|
||||||
tsOpSync
|
|
||||||
|
|
||||||
tsOpNum
|
|
||||||
)
|
|
||||||
|
|
||||||
type tsLock struct {
|
|
||||||
ts *testStorage
|
|
||||||
r util.Releaser
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l tsLock) Release() {
|
|
||||||
l.r.Release()
|
|
||||||
l.ts.t.Log("I: storage lock released")
|
|
||||||
}
|
|
||||||
|
|
||||||
type tsReader struct {
|
|
||||||
tf tsFile
|
|
||||||
storage.Reader
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tr tsReader) Read(b []byte) (n int, err error) {
|
|
||||||
ts := tr.tf.ts
|
|
||||||
ts.countRead(tr.tf.Type())
|
|
||||||
if tr.tf.shouldErrLocked(tsOpRead) {
|
|
||||||
return 0, errors.New("leveldb.testStorage: emulated read error")
|
|
||||||
}
|
|
||||||
n, err = tr.Reader.Read(b)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
ts.t.Errorf("E: read error, num=%d type=%v n=%d: %v", tr.tf.Num(), tr.tf.Type(), n, err)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tr tsReader) ReadAt(b []byte, off int64) (n int, err error) {
|
|
||||||
ts := tr.tf.ts
|
|
||||||
ts.countRead(tr.tf.Type())
|
|
||||||
if tr.tf.shouldErrLocked(tsOpReadAt) {
|
|
||||||
return 0, errors.New("leveldb.testStorage: emulated readAt error")
|
|
||||||
}
|
|
||||||
n, err = tr.Reader.ReadAt(b, off)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
ts.t.Errorf("E: readAt error, num=%d type=%v off=%d n=%d: %v", tr.tf.Num(), tr.tf.Type(), off, n, err)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tr tsReader) Close() (err error) {
|
|
||||||
err = tr.Reader.Close()
|
|
||||||
tr.tf.close("reader", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
type tsWriter struct {
|
|
||||||
tf tsFile
|
|
||||||
storage.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tw tsWriter) Write(b []byte) (n int, err error) {
|
|
||||||
if tw.tf.shouldErrLocked(tsOpWrite) {
|
|
||||||
return 0, errors.New("leveldb.testStorage: emulated write error")
|
|
||||||
}
|
|
||||||
n, err = tw.Writer.Write(b)
|
|
||||||
if err != nil {
|
|
||||||
tw.tf.ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sync blocks while syncs for this file type are delayed (see DelaySync /
// ReleaseSync), optionally injects an emulated sync error, then syncs the
// underlying writer.
func (tw tsWriter) Sync() (err error) {
	ts := tw.tf.ts
	ts.mu.Lock()
	// Wait until ReleaseSync clears the delay bit for this file type;
	// cond.Wait releases ts.mu while blocked.
	for ts.emuDelaySync&tw.tf.Type() != 0 {
		ts.cond.Wait()
	}
	ts.mu.Unlock()
	if tw.tf.shouldErrLocked(tsOpSync) {
		return errors.New("leveldb.testStorage: emulated sync error")
	}
	err = tw.Writer.Sync()
	if err != nil {
		tw.tf.ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err)
	}
	return
}
|
|
||||||
|
|
||||||
func (tw tsWriter) Close() (err error) {
|
|
||||||
err = tw.Writer.Close()
|
|
||||||
tw.tf.close("writer", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// tsFile wraps a storage.File, routing open/create/replace/remove
// through the owning testStorage's bookkeeping and error emulation.
type tsFile struct {
	ts *testStorage
	storage.File
}
|
|
||||||
|
|
||||||
// x packs the file number and type into a single key used to index
// the testStorage.opens map.
func (tf tsFile) x() uint64 {
	return tf.Num()<<typeShift | uint64(tf.Type())
}
|
|
||||||
|
|
||||||
// shouldErr reports whether an emulated error should be injected for op
// on this file. Callers are expected to hold ts.mu (see shouldErrLocked).
func (tf tsFile) shouldErr(op tsOp) bool {
	return tf.ts.shouldErr(tf, op)
}
|
|
||||||
|
|
||||||
func (tf tsFile) shouldErrLocked(op tsOp) bool {
|
|
||||||
tf.ts.mu.Lock()
|
|
||||||
defer tf.ts.mu.Unlock()
|
|
||||||
return tf.shouldErr(op)
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkOpen returns tsErrFileOpen (and reports a test error naming the
// attempted operation m) if this file still has an open reader or writer.
// Callers hold ts.mu.
func (tf tsFile) checkOpen(m string) error {
	ts := tf.ts
	if writer, ok := ts.opens[tf.x()]; ok {
		// opens value: true=writer, false=reader.
		if writer {
			ts.t.Errorf("E: cannot %s file, num=%d type=%v: a writer still open", m, tf.Num(), tf.Type())
		} else {
			ts.t.Errorf("E: cannot %s file, num=%d type=%v: a reader still open", m, tf.Num(), tf.Type())
		}
		return tsErrFileOpen
	}
	return nil
}
|
|
||||||
|
|
||||||
func (tf tsFile) close(m string, err error) {
|
|
||||||
ts := tf.ts
|
|
||||||
ts.mu.Lock()
|
|
||||||
defer ts.mu.Unlock()
|
|
||||||
if _, ok := ts.opens[tf.x()]; !ok {
|
|
||||||
ts.t.Errorf("E: %s: redudant file closing, num=%d type=%v", m, tf.Num(), tf.Type())
|
|
||||||
} else if err == nil {
|
|
||||||
ts.t.Logf("I: %s: file closed, num=%d type=%v", m, tf.Num(), tf.Type())
|
|
||||||
}
|
|
||||||
delete(ts.opens, tf.x())
|
|
||||||
if err != nil {
|
|
||||||
ts.t.Errorf("E: %s: cannot close file, num=%d type=%v: %v", m, tf.Num(), tf.Type(), err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open opens the underlying file for reading, enforcing single-open
// semantics and optional emulated open errors, and wraps the result in
// a tsReader for further instrumentation.
func (tf tsFile) Open() (r storage.Reader, err error) {
	ts := tf.ts
	ts.mu.Lock()
	defer ts.mu.Unlock()
	err = tf.checkOpen("open")
	if err != nil {
		return
	}
	if tf.shouldErr(tsOpOpen) {
		err = errors.New("leveldb.testStorage: emulated open error")
		return
	}
	r, err = tf.File.Open()
	if err != nil {
		// Open failures for types in ignoreOpenErr are logged but not
		// treated as test failures.
		if ts.ignoreOpenErr&tf.Type() != 0 {
			ts.t.Logf("I: cannot open file, num=%d type=%v: %v (ignored)", tf.Num(), tf.Type(), err)
		} else {
			ts.t.Errorf("E: cannot open file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
		}
	} else {
		ts.t.Logf("I: file opened, num=%d type=%v", tf.Num(), tf.Type())
		ts.opens[tf.x()] = false // false = opened by a reader
		r = tsReader{tf, r}
	}
	return
}
|
|
||||||
|
|
||||||
// Create creates the underlying file for writing, enforcing single-open
// semantics and optional emulated create errors, and wraps the result in
// a tsWriter for further instrumentation.
func (tf tsFile) Create() (w storage.Writer, err error) {
	ts := tf.ts
	ts.mu.Lock()
	defer ts.mu.Unlock()
	err = tf.checkOpen("create")
	if err != nil {
		return
	}
	if tf.shouldErr(tsOpCreate) {
		err = errors.New("leveldb.testStorage: emulated create error")
		return
	}
	w, err = tf.File.Create()
	if err != nil {
		ts.t.Errorf("E: cannot create file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
	} else {
		ts.t.Logf("I: file created, num=%d type=%v", tf.Num(), tf.Type())
		ts.opens[tf.x()] = true // true = opened by a writer
		w = tsWriter{tf, w}
	}
	return
}
|
|
||||||
|
|
||||||
func (tf tsFile) Replace(newfile storage.File) (err error) {
|
|
||||||
ts := tf.ts
|
|
||||||
ts.mu.Lock()
|
|
||||||
defer ts.mu.Unlock()
|
|
||||||
err = tf.checkOpen("replace")
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
err = tf.File.Replace(newfile.(tsFile).File)
|
|
||||||
if err != nil {
|
|
||||||
ts.t.Errorf("E: cannot replace file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
|
|
||||||
} else {
|
|
||||||
ts.t.Logf("I: file replace, num=%d type=%v", tf.Num(), tf.Type())
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove removes the underlying file. The file must not have an open
// reader or writer.
func (tf tsFile) Remove() (err error) {
	ts := tf.ts
	ts.mu.Lock()
	defer ts.mu.Unlock()
	err = tf.checkOpen("remove")
	if err != nil {
		return
	}
	err = tf.File.Remove()
	if err != nil {
		ts.t.Errorf("E: cannot remove file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
	} else {
		ts.t.Logf("I: file removed, num=%d type=%v", tf.Num(), tf.Type())
	}
	return
}
|
|
||||||
|
|
||||||
// testStorage wraps a storage.Storage for tests, adding open-file
// tracking, read counting, sync delaying, and emulated error injection
// (permanent, one-shot, and random).
type testStorage struct {
	t *testing.T
	storage.Storage
	closeFn func() error // optional extra cleanup run by Close

	mu   sync.Mutex
	cond sync.Cond // signaled when emuDelaySync changes; cond.L is &mu
	// Open files, true=writer, false=reader
	opens         map[uint64]bool
	emuDelaySync  storage.FileType // file types whose Sync is currently blocked
	ignoreOpenErr storage.FileType // file types whose open errors are tolerated
	readCnt       uint64
	readCntEn     storage.FileType // file types whose reads are counted

	emuErr         [tsOpNum]storage.FileType // per-op permanent error mask
	emuErrOnce     [tsOpNum]storage.FileType // per-op one-shot error mask
	emuRandErr     [tsOpNum]storage.FileType // per-op random error mask
	emuRandErrProb int                       // 1-in-N chance for random errors
	emuErrOnceMap  map[uint64]uint           // per-file bitmask of already-fired one-shot ops
	emuRandRand    *rand.Rand
}
|
|
||||||
|
|
||||||
// shouldErr decides whether an emulated error fires for op on tf.
// Permanent errors (emuErr) always fire; one-shot and random errors fire
// at most once per (file, op), tracked in emuErrOnceMap.
// Callers are expected to hold ts.mu (see tsFile.shouldErrLocked), since
// this mutates emuErrOnceMap.
func (ts *testStorage) shouldErr(tf tsFile, op tsOp) bool {
	if ts.emuErr[op]&tf.Type() != 0 {
		return true
	} else if ts.emuRandErr[op]&tf.Type() != 0 || ts.emuErrOnce[op]&tf.Type() != 0 {
		sop := uint(1) << op
		eop := ts.emuErrOnceMap[tf.x()]
		// Fire if this (file, op) hasn't fired before AND either the
		// 1-in-emuRandErrProb random roll hits or a one-shot is armed.
		if eop&sop == 0 && (ts.emuRandRand.Int()%ts.emuRandErrProb == 0 || ts.emuErrOnce[op]&tf.Type() != 0) {
			ts.emuErrOnceMap[tf.x()] = eop | sop
			ts.t.Logf("I: emulated error: file=%d type=%v op=%v", tf.Num(), tf.Type(), op)
			return true
		}
	}
	return false
}
|
|
||||||
|
|
||||||
func (ts *testStorage) SetEmuErr(t storage.FileType, ops ...tsOp) {
|
|
||||||
ts.mu.Lock()
|
|
||||||
for _, op := range ops {
|
|
||||||
ts.emuErr[op] = t
|
|
||||||
}
|
|
||||||
ts.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *testStorage) SetEmuErrOnce(t storage.FileType, ops ...tsOp) {
|
|
||||||
ts.mu.Lock()
|
|
||||||
for _, op := range ops {
|
|
||||||
ts.emuErrOnce[op] = t
|
|
||||||
}
|
|
||||||
ts.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *testStorage) SetEmuRandErr(t storage.FileType, ops ...tsOp) {
|
|
||||||
ts.mu.Lock()
|
|
||||||
for _, op := range ops {
|
|
||||||
ts.emuRandErr[op] = t
|
|
||||||
}
|
|
||||||
ts.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *testStorage) SetEmuRandErrProb(prob int) {
|
|
||||||
ts.mu.Lock()
|
|
||||||
ts.emuRandErrProb = prob
|
|
||||||
ts.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *testStorage) DelaySync(t storage.FileType) {
|
|
||||||
ts.mu.Lock()
|
|
||||||
ts.emuDelaySync |= t
|
|
||||||
ts.cond.Broadcast()
|
|
||||||
ts.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *testStorage) ReleaseSync(t storage.FileType) {
|
|
||||||
ts.mu.Lock()
|
|
||||||
ts.emuDelaySync &= ^t
|
|
||||||
ts.cond.Broadcast()
|
|
||||||
ts.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *testStorage) ReadCounter() uint64 {
|
|
||||||
ts.mu.Lock()
|
|
||||||
defer ts.mu.Unlock()
|
|
||||||
return ts.readCnt
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *testStorage) ResetReadCounter() {
|
|
||||||
ts.mu.Lock()
|
|
||||||
ts.readCnt = 0
|
|
||||||
ts.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *testStorage) SetReadCounter(t storage.FileType) {
|
|
||||||
ts.mu.Lock()
|
|
||||||
ts.readCntEn = t
|
|
||||||
ts.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *testStorage) countRead(t storage.FileType) {
|
|
||||||
ts.mu.Lock()
|
|
||||||
if ts.readCntEn&t != 0 {
|
|
||||||
ts.readCnt++
|
|
||||||
}
|
|
||||||
ts.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *testStorage) SetIgnoreOpenErr(t storage.FileType) {
|
|
||||||
ts.ignoreOpenErr = t
|
|
||||||
}
|
|
||||||
|
|
||||||
// Lock acquires the underlying storage lock, logging the outcome and
// wrapping the releaser so the release is logged too.
func (ts *testStorage) Lock() (r util.Releaser, err error) {
	r, err = ts.Storage.Lock()
	if err != nil {
		ts.t.Logf("W: storage locking failed: %v", err)
	} else {
		ts.t.Log("I: storage locked")
		r = tsLock{ts, r}
	}
	return
}
|
|
||||||
|
|
||||||
// Log mirrors a storage log line into the test log, then forwards it
// to the underlying storage.
func (ts *testStorage) Log(str string) {
	ts.t.Log("L: " + str)
	ts.Storage.Log(str)
}
|
|
||||||
|
|
||||||
func (ts *testStorage) GetFile(num uint64, t storage.FileType) storage.File {
|
|
||||||
return tsFile{ts, ts.Storage.GetFile(num, t)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetFiles lists the files of type t from the underlying storage, each
// wrapped in a tsFile; a listing failure is reported as a test error.
func (ts *testStorage) GetFiles(t storage.FileType) (ff []storage.File, err error) {
	ff0, err := ts.Storage.GetFiles(t)
	if err != nil {
		ts.t.Errorf("E: get files failed: %v", err)
		return
	}
	ff = make([]storage.File, len(ff0))
	for i, f := range ff0 {
		ff[i] = tsFile{ts, f}
	}
	ts.t.Logf("I: get files, type=0x%x count=%d", int(t), len(ff))
	return
}
|
|
||||||
|
|
||||||
// GetManifest returns the current manifest file wrapped in a tsFile.
// A missing manifest (os.IsNotExist) is returned without a test error;
// any other failure is reported.
func (ts *testStorage) GetManifest() (f storage.File, err error) {
	f0, err := ts.Storage.GetManifest()
	if err != nil {
		if !os.IsNotExist(err) {
			ts.t.Errorf("E: get manifest failed: %v", err)
		}
		return
	}
	f = tsFile{ts, f0}
	ts.t.Logf("I: get manifest, num=%d", f.Num())
	return
}
|
|
||||||
|
|
||||||
// SetManifest installs f as the current manifest. f must be a tsFile of
// type storage.TypeManifest; anything else is a test error and returns
// tsErrInvalidFile.
func (ts *testStorage) SetManifest(f storage.File) error {
	tf, ok := f.(tsFile)
	if !ok {
		ts.t.Error("E: set manifest failed: type assertion failed")
		return tsErrInvalidFile
	} else if tf.Type() != storage.TypeManifest {
		ts.t.Errorf("E: set manifest failed: invalid file type: %s", tf.Type())
		return tsErrInvalidFile
	}
	err := ts.Storage.SetManifest(tf.File)
	if err != nil {
		ts.t.Errorf("E: set manifest failed: %v", err)
	} else {
		ts.t.Logf("I: set manifest, num=%d", tf.Num())
	}
	return err
}
|
|
||||||
|
|
||||||
// Close verifies all files are closed, closes the underlying storage,
// and runs the optional closeFn cleanup. The returned error is the
// storage close error; a closeFn failure is only reported to the test.
func (ts *testStorage) Close() error {
	ts.CloseCheck()
	err := ts.Storage.Close()
	if err != nil {
		ts.t.Errorf("E: closing storage failed: %v", err)
	} else {
		ts.t.Log("I: storage closed")
	}
	if ts.closeFn != nil {
		if err := ts.closeFn(); err != nil {
			ts.t.Errorf("E: close function: %v", err)
		}
	}
	return err
}
|
|
||||||
|
|
||||||
// CloseCheck reports a test error for every file that still has an open
// reader or writer, listing each offender.
func (ts *testStorage) CloseCheck() {
	ts.mu.Lock()
	if len(ts.opens) == 0 {
		ts.t.Log("I: all files are closed")
	} else {
		ts.t.Errorf("E: %d files still open", len(ts.opens))
		for x, writer := range ts.opens {
			// Unpack the tsFile.x() key back into number and type.
			num, tt := x>>typeShift, storage.FileType(x)&storage.TypeAll
			ts.t.Errorf("E: * num=%d type=%v writer=%v", num, tt, writer)
		}
	}
	ts.mu.Unlock()
}
|
|
||||||
|
|
||||||
// newTestStorage creates a testStorage backed either by a unique
// filesystem directory (when tsFS is set) or by an in-memory storage.
// For filesystem storage, closeFn dumps the LOG files into the test log
// and removes the directory unless the test failed or tsKeepFS is set.
func newTestStorage(t *testing.T) *testStorage {
	var stor storage.Storage
	var closeFn func() error
	if tsFS {
		// Probe candidate paths until we find one that doesn't exist yet.
		for {
			tsMU.Lock()
			num := tsNum
			tsNum++
			tsMU.Unlock()
			tempdir := tsTempdir
			if tempdir == "" {
				tempdir = os.TempDir()
			}
			path := filepath.Join(tempdir, fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
			if _, err := os.Stat(path); err != nil {
				stor, err = storage.OpenFile(path)
				if err != nil {
					t.Fatalf("F: cannot create storage: %v", err)
				}
				t.Logf("I: storage created: %s", path)
				closeFn = func() error {
					// Dump leveldb's own LOG files into the test log.
					for _, name := range []string{"LOG.old", "LOG"} {
						f, err := os.Open(filepath.Join(path, name))
						if err != nil {
							continue
						}
						if log, err := ioutil.ReadAll(f); err != nil {
							t.Logf("---------------------- %s ----------------------", name)
							t.Logf("cannot read log: %v", err)
							t.Logf("---------------------- %s ----------------------", name)
						} else if len(log) > 0 {
							t.Logf("---------------------- %s ----------------------\n%s", name, string(log))
							t.Logf("---------------------- %s ----------------------", name)
						}
						f.Close()
					}
					// Preserve the DB directory on failure or when asked.
					if t.Failed() {
						t.Logf("testing failed, test DB preserved at %s", path)
						return nil
					}
					if tsKeepFS {
						return nil
					}
					return os.RemoveAll(path)
				}

				break
			}
		}
	} else {
		stor = storage.NewMemStorage()
	}
	ts := &testStorage{
		t:              t,
		Storage:        stor,
		closeFn:        closeFn,
		opens:          make(map[uint64]bool),
		emuErrOnceMap:  make(map[uint64]uint),
		emuRandErrProb: 0x999, // default ~1-in-0x999 random error chance
		emuRandRand:    rand.New(rand.NewSource(0xfacedead)),
	}
	ts.cond.L = &ts.mu
	return ts
}
|
|
187
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go
generated
vendored
187
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go
generated
vendored
@ -1,187 +0,0 @@
|
|||||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file.
|
|
||||||
|
|
||||||
package testutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"math/rand"
|
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo"
|
|
||||||
. "github.com/onsi/gomega"
|
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
// KeyValueTesting registers a ginkgo test suite exercising Find, Get,
// Has and iterator behavior of p against the expected contents kv.
// If p is nil, setup/teardown are wired into BeforeEach/AfterEach to
// (re)create the DB per spec. Capability checks (type assertions to
// Find/Get/Has/NewIterator) make each spec a no-op for DBs that don't
// implement the corresponding test interface.
func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) {
	if rnd == nil {
		rnd = NewRand()
	}

	if p == nil {
		BeforeEach(func() {
			p = setup(kv)
		})
		if teardown != nil {
			AfterEach(func() {
				teardown(p)
			})
		}
	}

	It("Should find all keys with Find", func() {
		if db, ok := p.(Find); ok {
			ShuffledIndex(nil, kv.Len(), 1, func(i int) {
				key_, key, value := kv.IndexInexact(i)

				// Using exact key.
				rkey, rvalue, err := db.TestFind(key)
				Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
				Expect(rkey).Should(Equal(key), "Key")
				Expect(rvalue).Should(Equal(value), "Value for key %q", key)

				// Using inexact key.
				rkey, rvalue, err = db.TestFind(key_)
				Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key)
				Expect(rkey).Should(Equal(key))
				Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key)
			})
		}
	})

	It("Should return error if the key is not present", func() {
		if db, ok := p.(Find); ok {
			// Probe just past the largest present key.
			var key []byte
			if kv.Len() > 0 {
				key_, _ := kv.Index(kv.Len() - 1)
				key = BytesAfter(key_)
			}
			rkey, _, err := db.TestFind(key)
			Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey)
			Expect(err).Should(Equal(errors.ErrNotFound))
		}
	})

	It("Should only find exact key with Get", func() {
		if db, ok := p.(Get); ok {
			ShuffledIndex(nil, kv.Len(), 1, func(i int) {
				key_, key, value := kv.IndexInexact(i)

				// Using exact key.
				rvalue, err := db.TestGet(key)
				Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
				Expect(rvalue).Should(Equal(value), "Value for key %q", key)

				// Using inexact key.
				if len(key_) > 0 {
					_, err = db.TestGet(key_)
					Expect(err).Should(HaveOccurred(), "Error for key %q", key_)
					Expect(err).Should(Equal(errors.ErrNotFound))
				}
			})
		}
	})

	It("Should only find present key with Has", func() {
		if db, ok := p.(Has); ok {
			ShuffledIndex(nil, kv.Len(), 1, func(i int) {
				key_, key, _ := kv.IndexInexact(i)

				// Using exact key.
				ret, err := db.TestHas(key)
				Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
				Expect(ret).Should(BeTrue(), "False for key %q", key)

				// Using inexact key.
				if len(key_) > 0 {
					ret, err = db.TestHas(key_)
					Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_)
					Expect(ret).ShouldNot(BeTrue(), "True for key %q", key)
				}
			})
		}
	})

	// TestIter runs the full iterator test battery against the range r,
	// expecting the contents _kv.
	TestIter := func(r *util.Range, _kv KeyValue) {
		if db, ok := p.(NewIterator); ok {
			iter := db.TestNewIterator(r)
			Expect(iter.Error()).ShouldNot(HaveOccurred())

			t := IteratorTesting{
				KeyValue: _kv,
				Iter:     iter,
			}

			DoIteratorTesting(&t)
			iter.Release()
		}
	}

	It("Should iterates and seeks correctly", func(done Done) {
		TestIter(nil, kv.Clone())
		done <- true
	}, 3.0)

	// Half-open slices anchored at random inexact keys.
	RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) {
		type slice struct {
			r            *util.Range
			start, limit int
		}

		key_, _, _ := kv.IndexInexact(i)
		for _, x := range []slice{
			{&util.Range{Start: key_, Limit: nil}, i, kv.Len()},
			{&util.Range{Start: nil, Limit: key_}, 0, i},
		} {
			It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", x.start, x.limit), func(done Done) {
				TestIter(x.r, kv.Slice(x.start, x.limit))
				done <- true
			}, 3.0)
		}
	})

	// Fully bounded random sub-ranges.
	RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) {
		It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", start, limit), func(done Done) {
			r := kv.Range(start, limit)
			TestIter(&r, kv.Slice(start, limit))
			done <- true
		}, 3.0)
	})
}
|
|
||||||
|
|
||||||
// AllKeyValueTesting runs KeyValueTesting over a standard battery of
// key/value fixtures (empty, edge-case and generated sets), wiring the
// optional setup/teardown through Defer and feeding the resulting DB to
// each Describe block.
func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) {
	Test := func(kv *KeyValue) func() {
		return func() {
			var p DB
			if setup != nil {
				Defer("setup", func() {
					p = setup(*kv)
				})
			}
			if teardown != nil {
				Defer("teardown", func() {
					teardown(p)
				})
			}
			if body != nil {
				p = body(*kv)
			}
			// The DB is created lazily above, so hand KeyValueTesting a
			// setup that just returns it.
			KeyValueTesting(rnd, *kv, p, func(KeyValue) DB {
				return p
			}, nil)
		}
	}

	Describe("with no key/value (empty)", Test(&KeyValue{}))
	Describe("with empty key", Test(KeyValue_EmptyKey()))
	Describe("with empty value", Test(KeyValue_EmptyValue()))
	Describe("with one key/value", Test(KeyValue_OneKeyValue()))
	Describe("with big value", Test(KeyValue_BigValue()))
	Describe("with special key", Test(KeyValue_SpecialKey()))
	Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue()))
	Describe("with generated key/value", Test(KeyValue_Generate(nil, 120, 1, 50, 10, 120)))
}
|
|
586
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go
generated
vendored
586
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go
generated
vendored
@ -1,586 +0,0 @@
|
|||||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file.
|
|
||||||
|
|
||||||
package testutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
. "github.com/onsi/gomega"
|
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Package-level knobs controlling how test Storages are backed.
var (
	storageMu     sync.Mutex
	storageUseFS  bool = true  // back storage with the real filesystem
	storageKeepFS bool = false // keep on-disk storage after the test
	storageNum    int          // presumably a per-process sequence number for unique dirs — confirm against the constructor
)
|
|
||||||
|
|
||||||
// StorageMode is a bit set of storage operations that can be counted,
// stalled or error-emulated.
type StorageMode int

const (
	ModeOpen StorageMode = 1 << iota
	ModeCreate
	ModeRemove
	ModeRead
	ModeWrite
	ModeSync
	ModeClose
)

// Dense indices corresponding to the StorageMode bits above.
const (
	modeOpen = iota
	modeCreate
	modeRemove
	modeRead
	modeWrite
	modeSync
	modeClose

	modeCount
)

// Dense indices corresponding to storage file types.
const (
	typeManifest = iota
	typeJournal
	typeTable
	typeTemp

	typeCount
)

// flattenCount is the size of the flattened (mode, type) index space.
const flattenCount = modeCount * typeCount
|
|
||||||
|
|
||||||
// flattenType maps a single StorageMode bit and a single FileType bit to
// a dense index into the per-(mode, type) counter arrays. Panics when m
// or t is not exactly one recognized bit.
func flattenType(m StorageMode, t storage.FileType) int {
	var x int
	switch m {
	case ModeOpen:
		x = modeOpen
	case ModeCreate:
		x = modeCreate
	case ModeRemove:
		x = modeRemove
	case ModeRead:
		x = modeRead
	case ModeWrite:
		x = modeWrite
	case ModeSync:
		x = modeSync
	case ModeClose:
		x = modeClose
	default:
		panic("invalid storage mode")
	}
	x *= typeCount
	switch t {
	case storage.TypeManifest:
		return x + typeManifest
	case storage.TypeJournal:
		return x + typeJournal
	case storage.TypeTable:
		return x + typeTable
	case storage.TypeTemp:
		return x + typeTemp
	default:
		panic("invalid file type")
	}
}
|
|
||||||
|
|
||||||
func listFlattenType(m StorageMode, t storage.FileType) []int {
|
|
||||||
ret := make([]int, 0, flattenCount)
|
|
||||||
add := func(x int) {
|
|
||||||
x *= typeCount
|
|
||||||
switch {
|
|
||||||
case t&storage.TypeManifest != 0:
|
|
||||||
ret = append(ret, x+typeManifest)
|
|
||||||
case t&storage.TypeJournal != 0:
|
|
||||||
ret = append(ret, x+typeJournal)
|
|
||||||
case t&storage.TypeTable != 0:
|
|
||||||
ret = append(ret, x+typeTable)
|
|
||||||
case t&storage.TypeTemp != 0:
|
|
||||||
ret = append(ret, x+typeTemp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
switch {
|
|
||||||
case m&ModeOpen != 0:
|
|
||||||
add(modeOpen)
|
|
||||||
case m&ModeCreate != 0:
|
|
||||||
add(modeCreate)
|
|
||||||
case m&ModeRemove != 0:
|
|
||||||
add(modeRemove)
|
|
||||||
case m&ModeRead != 0:
|
|
||||||
add(modeRead)
|
|
||||||
case m&ModeWrite != 0:
|
|
||||||
add(modeWrite)
|
|
||||||
case m&ModeSync != 0:
|
|
||||||
add(modeSync)
|
|
||||||
case m&ModeClose != 0:
|
|
||||||
add(modeClose)
|
|
||||||
}
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
// packFile packs a file number and type bit into one uint64 key,
// panicking if the number would overflow the available bits.
func packFile(num uint64, t storage.FileType) uint64 {
	if num>>(64-typeCount) != 0 {
		panic("overflow")
	}
	return num<<typeCount | uint64(t)
}
|
|
||||||
|
|
||||||
// unpackFile reverses packFile, yielding the file number and type.
func unpackFile(x uint64) (uint64, storage.FileType) {
	return x >> typeCount, storage.FileType(x) & storage.TypeAll
}
|
|
||||||
|
|
||||||
// emulatedError marks an injected storage error so it can be told apart
// from real failures.
type emulatedError struct {
	err error
}

// Error implements the error interface.
func (err emulatedError) Error() string {
	return fmt.Sprintf("emulated storage error: %v", err.err)
}
|
|
||||||
|
|
||||||
// storageLock wraps the storage lock releaser so the release is logged.
type storageLock struct {
	s *Storage
	r util.Releaser
}

// Release releases the wrapped lock and logs the release.
func (l storageLock) Release() {
	l.r.Release()
	l.s.logI("storage lock released")
}
|
|
||||||
|
|
||||||
// reader wraps a storage.Reader with error emulation, stalling,
// counting and logging.
type reader struct {
	f *file
	storage.Reader
}
|
|
||||||
|
|
||||||
// Read applies emulated-error and stall policies for ModeRead before
// delegating, counts the bytes read, and logs non-EOF failures.
func (r *reader) Read(p []byte) (n int, err error) {
	err = r.f.s.emulateError(ModeRead, r.f.Type())
	if err == nil {
		r.f.s.stall(ModeRead, r.f.Type())
		n, err = r.Reader.Read(p)
	}
	r.f.s.count(ModeRead, r.f.Type(), n)
	if err != nil && err != io.EOF {
		r.f.s.logI("read error, num=%d type=%v n=%d err=%v", r.f.Num(), r.f.Type(), n, err)
	}
	return
}
|
|
||||||
|
|
||||||
// ReadAt is Read with an explicit offset; same emulation, stalling,
// counting and logging behavior.
func (r *reader) ReadAt(p []byte, off int64) (n int, err error) {
	err = r.f.s.emulateError(ModeRead, r.f.Type())
	if err == nil {
		r.f.s.stall(ModeRead, r.f.Type())
		n, err = r.Reader.ReadAt(p, off)
	}
	r.f.s.count(ModeRead, r.f.Type(), n)
	if err != nil && err != io.EOF {
		r.f.s.logI("readAt error, num=%d type=%v offset=%d n=%d err=%v", r.f.Num(), r.f.Type(), off, n, err)
	}
	return
}
|
|
||||||
|
|
||||||
// Close delegates to the shared close bookkeeping on the owning file.
func (r *reader) Close() (err error) {
	return r.f.doClose(r.Reader)
}
|
|
||||||
|
|
||||||
// writer wraps a storage.Writer with error emulation, stalling,
// counting and logging.
type writer struct {
	f *file
	storage.Writer
}
|
|
||||||
|
|
||||||
// Write applies emulated-error and stall policies for ModeWrite before
// delegating, counts the bytes written, and logs failures.
func (w *writer) Write(p []byte) (n int, err error) {
	err = w.f.s.emulateError(ModeWrite, w.f.Type())
	if err == nil {
		w.f.s.stall(ModeWrite, w.f.Type())
		n, err = w.Writer.Write(p)
	}
	w.f.s.count(ModeWrite, w.f.Type(), n)
	// NOTE(review): the io.EOF exclusion mirrors the read path even
	// though writes don't normally return io.EOF.
	if err != nil && err != io.EOF {
		w.f.s.logI("write error, num=%d type=%v n=%d err=%v", w.f.Num(), w.f.Type(), n, err)
	}
	return
}
|
|
||||||
|
|
||||||
// Sync applies emulated-error and stall policies for ModeSync before
// delegating, counts the operation, and logs failures.
func (w *writer) Sync() (err error) {
	err = w.f.s.emulateError(ModeSync, w.f.Type())
	if err == nil {
		w.f.s.stall(ModeSync, w.f.Type())
		err = w.Writer.Sync()
	}
	w.f.s.count(ModeSync, w.f.Type(), 0)
	if err != nil {
		w.f.s.logI("sync error, num=%d type=%v err=%v", w.f.Num(), w.f.Type(), err)
	}
	return
}
|
|
||||||
|
|
||||||
// Close delegates to the shared close bookkeeping on the owning file.
func (w *writer) Close() (err error) {
	return w.f.doClose(w.Writer)
}
|
|
||||||
|
|
||||||
// file wraps a storage.File, routing operations through the owning
// Storage's emulation, counting and open-file tracking.
type file struct {
	s *Storage
	storage.File
}

// pack returns the packed (num, type) key used in the opens map.
func (f *file) pack() uint64 {
	return packFile(f.Num(), f.Type())
}

// assertOpen fails the gomega expectation if the file already has an
// open reader or writer.
func (f *file) assertOpen() {
	ExpectWithOffset(2, f.s.opens).NotTo(HaveKey(f.pack()), "File open, num=%d type=%v writer=%v", f.Num(), f.Type(), f.s.opens[f.pack()])
}
|
|
||||||
|
|
||||||
// doClose performs the shared close path for readers and writers:
// emulated error, stall, open-map assertion, actual close, counting and
// logging. The open-map entry is only removed on a successful close.
func (f *file) doClose(closer io.Closer) (err error) {
	err = f.s.emulateError(ModeClose, f.Type())
	if err == nil {
		// Stall before taking the mutex so other operations can proceed.
		f.s.stall(ModeClose, f.Type())
	}
	f.s.mu.Lock()
	defer f.s.mu.Unlock()
	if err == nil {
		ExpectWithOffset(2, f.s.opens).To(HaveKey(f.pack()), "File closed, num=%d type=%v", f.Num(), f.Type())
		err = closer.Close()
	}
	f.s.countNB(ModeClose, f.Type(), 0)
	writer := f.s.opens[f.pack()]
	if err != nil {
		f.s.logISkip(1, "file close failed, num=%d type=%v writer=%v err=%v", f.Num(), f.Type(), writer, err)
	} else {
		f.s.logISkip(1, "file closed, num=%d type=%v writer=%v", f.Num(), f.Type(), writer)
		delete(f.s.opens, f.pack())
	}
	return
}
|
|
||||||
|
|
||||||
// Open opens the underlying file for reading with ModeOpen emulation and
// stalling, asserting it is not already open, and registers the reader
// in the opens map on success.
func (f *file) Open() (r storage.Reader, err error) {
	err = f.s.emulateError(ModeOpen, f.Type())
	if err == nil {
		f.s.stall(ModeOpen, f.Type())
	}
	f.s.mu.Lock()
	defer f.s.mu.Unlock()
	if err == nil {
		f.assertOpen()
		f.s.countNB(ModeOpen, f.Type(), 0)
		r, err = f.File.Open()
	}
	if err != nil {
		f.s.logI("file open failed, num=%d type=%v err=%v", f.Num(), f.Type(), err)
	} else {
		f.s.logI("file opened, num=%d type=%v", f.Num(), f.Type())
		f.s.opens[f.pack()] = false // false = reader
		r = &reader{f, r}
	}
	return
}
|
|
||||||
|
|
||||||
// Create creates the underlying file for writing with ModeCreate
// emulation and stalling, asserting it is not already open, and
// registers the writer in the opens map on success.
func (f *file) Create() (w storage.Writer, err error) {
	err = f.s.emulateError(ModeCreate, f.Type())
	if err == nil {
		f.s.stall(ModeCreate, f.Type())
	}
	f.s.mu.Lock()
	defer f.s.mu.Unlock()
	if err == nil {
		f.assertOpen()
		f.s.countNB(ModeCreate, f.Type(), 0)
		w, err = f.File.Create()
	}
	if err != nil {
		f.s.logI("file create failed, num=%d type=%v err=%v", f.Num(), f.Type(), err)
	} else {
		f.s.logI("file created, num=%d type=%v", f.Num(), f.Type())
		f.s.opens[f.pack()] = true // true = writer
		w = &writer{f, w}
	}
	return
}
|
|
||||||
|
|
||||||
// Remove removes the underlying file with ModeRemove emulation and
// stalling, asserting it is not open, and logs the outcome.
func (f *file) Remove() (err error) {
	err = f.s.emulateError(ModeRemove, f.Type())
	if err == nil {
		f.s.stall(ModeRemove, f.Type())
	}
	f.s.mu.Lock()
	defer f.s.mu.Unlock()
	if err == nil {
		f.assertOpen()
		f.s.countNB(ModeRemove, f.Type(), 0)
		err = f.File.Remove()
	}
	if err != nil {
		f.s.logI("file remove failed, num=%d type=%v err=%v", f.Num(), f.Type(), err)
	} else {
		f.s.logI("file removed, num=%d type=%v", f.Num(), f.Type())
	}
	return
}
|
|
||||||
|
|
||||||
// Storage wraps a storage.Storage for tests, adding per-(mode, type)
// operation and byte counters, emulated errors, stall points, open-file
// tracking and a buffered log.
type Storage struct {
	storage.Storage
	closeFn func() error // optional extra cleanup run on close

	lmu sync.Mutex   // guards lb
	lb  bytes.Buffer // accumulated log output

	mu sync.Mutex
	// Open files, true=writer, false=reader
	opens         map[uint64]bool
	counters      [flattenCount]int
	bytesCounter  [flattenCount]int64
	emulatedError [flattenCount]error
	stallCond     sync.Cond
	stalled       [flattenCount]bool
}
|
|
||||||
|
|
||||||
func (s *Storage) log(skip int, str string) {
|
|
||||||
s.lmu.Lock()
|
|
||||||
defer s.lmu.Unlock()
|
|
||||||
_, file, line, ok := runtime.Caller(skip + 2)
|
|
||||||
if ok {
|
|
||||||
// Truncate file name at last file name separator.
|
|
||||||
if index := strings.LastIndex(file, "/"); index >= 0 {
|
|
||||||
file = file[index+1:]
|
|
||||||
} else if index = strings.LastIndex(file, "\\"); index >= 0 {
|
|
||||||
file = file[index+1:]
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
file = "???"
|
|
||||||
line = 1
|
|
||||||
}
|
|
||||||
fmt.Fprintf(&s.lb, "%s:%d: ", file, line)
|
|
||||||
lines := strings.Split(str, "\n")
|
|
||||||
if l := len(lines); l > 1 && lines[l-1] == "" {
|
|
||||||
lines = lines[:l-1]
|
|
||||||
}
|
|
||||||
for i, line := range lines {
|
|
||||||
if i > 0 {
|
|
||||||
s.lb.WriteString("\n\t")
|
|
||||||
}
|
|
||||||
s.lb.WriteString(line)
|
|
||||||
}
|
|
||||||
s.lb.WriteByte('\n')
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) logISkip(skip int, format string, args ...interface{}) {
|
|
||||||
pc, _, _, ok := runtime.Caller(skip + 1)
|
|
||||||
if ok {
|
|
||||||
if f := runtime.FuncForPC(pc); f != nil {
|
|
||||||
fname := f.Name()
|
|
||||||
if index := strings.LastIndex(fname, "."); index >= 0 {
|
|
||||||
fname = fname[index+1:]
|
|
||||||
}
|
|
||||||
format = fname + ": " + format
|
|
||||||
}
|
|
||||||
}
|
|
||||||
s.log(skip+1, fmt.Sprintf(format, args...))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) logI(format string, args ...interface{}) {
|
|
||||||
s.logISkip(1, format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) Log(str string) {
|
|
||||||
s.log(1, "Log: "+str)
|
|
||||||
s.Storage.Log(str)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) Lock() (r util.Releaser, err error) {
|
|
||||||
r, err = s.Storage.Lock()
|
|
||||||
if err != nil {
|
|
||||||
s.logI("storage locking failed, err=%v", err)
|
|
||||||
} else {
|
|
||||||
s.logI("storage locked")
|
|
||||||
r = storageLock{s, r}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) GetFile(num uint64, t storage.FileType) storage.File {
|
|
||||||
return &file{s, s.Storage.GetFile(num, t)}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) GetFiles(t storage.FileType) (files []storage.File, err error) {
|
|
||||||
rfiles, err := s.Storage.GetFiles(t)
|
|
||||||
if err != nil {
|
|
||||||
s.logI("get files failed, err=%v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
files = make([]storage.File, len(rfiles))
|
|
||||||
for i, f := range rfiles {
|
|
||||||
files[i] = &file{s, f}
|
|
||||||
}
|
|
||||||
s.logI("get files, type=0x%x count=%d", int(t), len(files))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) GetManifest() (f storage.File, err error) {
|
|
||||||
manifest, err := s.Storage.GetManifest()
|
|
||||||
if err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
s.logI("get manifest failed, err=%v", err)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s.logI("get manifest, num=%d", manifest.Num())
|
|
||||||
return &file{s, manifest}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) SetManifest(f storage.File) error {
|
|
||||||
f_, ok := f.(*file)
|
|
||||||
ExpectWithOffset(1, ok).To(BeTrue())
|
|
||||||
ExpectWithOffset(1, f_.Type()).To(Equal(storage.TypeManifest))
|
|
||||||
err := s.Storage.SetManifest(f_.File)
|
|
||||||
if err != nil {
|
|
||||||
s.logI("set manifest failed, err=%v", err)
|
|
||||||
} else {
|
|
||||||
s.logI("set manifest, num=%d", f_.Num())
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) openFiles() string {
|
|
||||||
out := "Open files:"
|
|
||||||
for x, writer := range s.opens {
|
|
||||||
num, t := unpackFile(x)
|
|
||||||
out += fmt.Sprintf("\n · num=%d type=%v writer=%v", num, t, writer)
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) Close() error {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles())
|
|
||||||
err := s.Storage.Close()
|
|
||||||
if err != nil {
|
|
||||||
s.logI("storage closing failed, err=%v", err)
|
|
||||||
} else {
|
|
||||||
s.logI("storage closed")
|
|
||||||
}
|
|
||||||
if s.closeFn != nil {
|
|
||||||
if err1 := s.closeFn(); err1 != nil {
|
|
||||||
s.logI("close func error, err=%v", err1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) countNB(m StorageMode, t storage.FileType, n int) {
|
|
||||||
s.counters[flattenType(m, t)]++
|
|
||||||
s.bytesCounter[flattenType(m, t)] += int64(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) count(m StorageMode, t storage.FileType, n int) {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
s.countNB(m, t, n)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) ResetCounter(m StorageMode, t storage.FileType) {
|
|
||||||
for _, x := range listFlattenType(m, t) {
|
|
||||||
s.counters[x] = 0
|
|
||||||
s.bytesCounter[x] = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) Counter(m StorageMode, t storage.FileType) (count int, bytes int64) {
|
|
||||||
for _, x := range listFlattenType(m, t) {
|
|
||||||
count += s.counters[x]
|
|
||||||
bytes += s.bytesCounter[x]
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) emulateError(m StorageMode, t storage.FileType) error {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
err := s.emulatedError[flattenType(m, t)]
|
|
||||||
if err != nil {
|
|
||||||
return emulatedError{err}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) EmulateError(m StorageMode, t storage.FileType, err error) {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
for _, x := range listFlattenType(m, t) {
|
|
||||||
s.emulatedError[x] = err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) stall(m StorageMode, t storage.FileType) {
|
|
||||||
x := flattenType(m, t)
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
for s.stalled[x] {
|
|
||||||
s.stallCond.Wait()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) Stall(m StorageMode, t storage.FileType) {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
for _, x := range listFlattenType(m, t) {
|
|
||||||
s.stalled[x] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) Release(m StorageMode, t storage.FileType) {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
for _, x := range listFlattenType(m, t) {
|
|
||||||
s.stalled[x] = false
|
|
||||||
}
|
|
||||||
s.stallCond.Broadcast()
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewStorage() *Storage {
|
|
||||||
var stor storage.Storage
|
|
||||||
var closeFn func() error
|
|
||||||
if storageUseFS {
|
|
||||||
for {
|
|
||||||
storageMu.Lock()
|
|
||||||
num := storageNum
|
|
||||||
storageNum++
|
|
||||||
storageMu.Unlock()
|
|
||||||
path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
|
|
||||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
|
||||||
stor, err = storage.OpenFile(path)
|
|
||||||
ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path)
|
|
||||||
closeFn = func() error {
|
|
||||||
if storageKeepFS {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return os.RemoveAll(path)
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
stor = storage.NewMemStorage()
|
|
||||||
}
|
|
||||||
s := &Storage{
|
|
||||||
Storage: stor,
|
|
||||||
closeFn: closeFn,
|
|
||||||
opens: make(map[uint64]bool),
|
|
||||||
}
|
|
||||||
s.stallCond.L = &s.mu
|
|
||||||
return s
|
|
||||||
}
|
|
258
Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go
generated
vendored
258
Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go
generated
vendored
@ -1,258 +0,0 @@
|
|||||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package snappy
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// We limit how far copy back-references can go, the same as the C++ code.
|
|
||||||
const maxOffset = 1 << 15
|
|
||||||
|
|
||||||
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
|
||||||
func emitLiteral(dst, lit []byte) int {
|
|
||||||
i, n := 0, uint(len(lit)-1)
|
|
||||||
switch {
|
|
||||||
case n < 60:
|
|
||||||
dst[0] = uint8(n)<<2 | tagLiteral
|
|
||||||
i = 1
|
|
||||||
case n < 1<<8:
|
|
||||||
dst[0] = 60<<2 | tagLiteral
|
|
||||||
dst[1] = uint8(n)
|
|
||||||
i = 2
|
|
||||||
case n < 1<<16:
|
|
||||||
dst[0] = 61<<2 | tagLiteral
|
|
||||||
dst[1] = uint8(n)
|
|
||||||
dst[2] = uint8(n >> 8)
|
|
||||||
i = 3
|
|
||||||
case n < 1<<24:
|
|
||||||
dst[0] = 62<<2 | tagLiteral
|
|
||||||
dst[1] = uint8(n)
|
|
||||||
dst[2] = uint8(n >> 8)
|
|
||||||
dst[3] = uint8(n >> 16)
|
|
||||||
i = 4
|
|
||||||
case int64(n) < 1<<32:
|
|
||||||
dst[0] = 63<<2 | tagLiteral
|
|
||||||
dst[1] = uint8(n)
|
|
||||||
dst[2] = uint8(n >> 8)
|
|
||||||
dst[3] = uint8(n >> 16)
|
|
||||||
dst[4] = uint8(n >> 24)
|
|
||||||
i = 5
|
|
||||||
default:
|
|
||||||
panic("snappy: source buffer is too long")
|
|
||||||
}
|
|
||||||
if copy(dst[i:], lit) != len(lit) {
|
|
||||||
panic("snappy: destination buffer is too short")
|
|
||||||
}
|
|
||||||
return i + len(lit)
|
|
||||||
}
|
|
||||||
|
|
||||||
// emitCopy writes a copy chunk and returns the number of bytes written.
|
|
||||||
func emitCopy(dst []byte, offset, length int) int {
|
|
||||||
i := 0
|
|
||||||
for length > 0 {
|
|
||||||
x := length - 4
|
|
||||||
if 0 <= x && x < 1<<3 && offset < 1<<11 {
|
|
||||||
dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
|
|
||||||
dst[i+1] = uint8(offset)
|
|
||||||
i += 2
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
x = length
|
|
||||||
if x > 1<<6 {
|
|
||||||
x = 1 << 6
|
|
||||||
}
|
|
||||||
dst[i+0] = uint8(x-1)<<2 | tagCopy2
|
|
||||||
dst[i+1] = uint8(offset)
|
|
||||||
dst[i+2] = uint8(offset >> 8)
|
|
||||||
i += 3
|
|
||||||
length -= x
|
|
||||||
}
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode returns the encoded form of src. The returned slice may be a sub-
|
|
||||||
// slice of dst if dst was large enough to hold the entire encoded block.
|
|
||||||
// Otherwise, a newly allocated slice will be returned.
|
|
||||||
// It is valid to pass a nil dst.
|
|
||||||
func Encode(dst, src []byte) ([]byte, error) {
|
|
||||||
if n := MaxEncodedLen(len(src)); len(dst) < n {
|
|
||||||
dst = make([]byte, n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The block starts with the varint-encoded length of the decompressed bytes.
|
|
||||||
d := binary.PutUvarint(dst, uint64(len(src)))
|
|
||||||
|
|
||||||
// Return early if src is short.
|
|
||||||
if len(src) <= 4 {
|
|
||||||
if len(src) != 0 {
|
|
||||||
d += emitLiteral(dst[d:], src)
|
|
||||||
}
|
|
||||||
return dst[:d], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
|
|
||||||
const maxTableSize = 1 << 14
|
|
||||||
shift, tableSize := uint(32-8), 1<<8
|
|
||||||
for tableSize < maxTableSize && tableSize < len(src) {
|
|
||||||
shift--
|
|
||||||
tableSize *= 2
|
|
||||||
}
|
|
||||||
var table [maxTableSize]int
|
|
||||||
|
|
||||||
// Iterate over the source bytes.
|
|
||||||
var (
|
|
||||||
s int // The iterator position.
|
|
||||||
t int // The last position with the same hash as s.
|
|
||||||
lit int // The start position of any pending literal bytes.
|
|
||||||
)
|
|
||||||
for s+3 < len(src) {
|
|
||||||
// Update the hash table.
|
|
||||||
b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
|
|
||||||
h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
|
|
||||||
p := &table[(h*0x1e35a7bd)>>shift]
|
|
||||||
// We need to to store values in [-1, inf) in table. To save
|
|
||||||
// some initialization time, (re)use the table's zero value
|
|
||||||
// and shift the values against this zero: add 1 on writes,
|
|
||||||
// subtract 1 on reads.
|
|
||||||
t, *p = *p-1, s+1
|
|
||||||
// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
|
|
||||||
if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
|
|
||||||
s++
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Otherwise, we have a match. First, emit any pending literal bytes.
|
|
||||||
if lit != s {
|
|
||||||
d += emitLiteral(dst[d:], src[lit:s])
|
|
||||||
}
|
|
||||||
// Extend the match to be as long as possible.
|
|
||||||
s0 := s
|
|
||||||
s, t = s+4, t+4
|
|
||||||
for s < len(src) && src[s] == src[t] {
|
|
||||||
s++
|
|
||||||
t++
|
|
||||||
}
|
|
||||||
// Emit the copied bytes.
|
|
||||||
d += emitCopy(dst[d:], s-t, s-s0)
|
|
||||||
lit = s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Emit any final pending literal bytes and return.
|
|
||||||
if lit != len(src) {
|
|
||||||
d += emitLiteral(dst[d:], src[lit:])
|
|
||||||
}
|
|
||||||
return dst[:d], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MaxEncodedLen returns the maximum length of a snappy block, given its
|
|
||||||
// uncompressed length.
|
|
||||||
func MaxEncodedLen(srcLen int) int {
|
|
||||||
// Compressed data can be defined as:
|
|
||||||
// compressed := item* literal*
|
|
||||||
// item := literal* copy
|
|
||||||
//
|
|
||||||
// The trailing literal sequence has a space blowup of at most 62/60
|
|
||||||
// since a literal of length 60 needs one tag byte + one extra byte
|
|
||||||
// for length information.
|
|
||||||
//
|
|
||||||
// Item blowup is trickier to measure. Suppose the "copy" op copies
|
|
||||||
// 4 bytes of data. Because of a special check in the encoding code,
|
|
||||||
// we produce a 4-byte copy only if the offset is < 65536. Therefore
|
|
||||||
// the copy op takes 3 bytes to encode, and this type of item leads
|
|
||||||
// to at most the 62/60 blowup for representing literals.
|
|
||||||
//
|
|
||||||
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
|
|
||||||
// enough, it will take 5 bytes to encode the copy op. Therefore the
|
|
||||||
// worst case here is a one-byte literal followed by a five-byte copy.
|
|
||||||
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
|
|
||||||
//
|
|
||||||
// This last factor dominates the blowup, so the final estimate is:
|
|
||||||
return 32 + srcLen + srcLen/6
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriter returns a new Writer that compresses to w, using the framing
|
|
||||||
// format described at
|
|
||||||
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
|
|
||||||
func NewWriter(w io.Writer) *Writer {
|
|
||||||
return &Writer{
|
|
||||||
w: w,
|
|
||||||
enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Writer is an io.Writer than can write Snappy-compressed bytes.
|
|
||||||
type Writer struct {
|
|
||||||
w io.Writer
|
|
||||||
err error
|
|
||||||
enc []byte
|
|
||||||
buf [checksumSize + chunkHeaderSize]byte
|
|
||||||
wroteHeader bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset discards the writer's state and switches the Snappy writer to write to
|
|
||||||
// w. This permits reusing a Writer rather than allocating a new one.
|
|
||||||
func (w *Writer) Reset(writer io.Writer) {
|
|
||||||
w.w = writer
|
|
||||||
w.err = nil
|
|
||||||
w.wroteHeader = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write satisfies the io.Writer interface.
|
|
||||||
func (w *Writer) Write(p []byte) (n int, errRet error) {
|
|
||||||
if w.err != nil {
|
|
||||||
return 0, w.err
|
|
||||||
}
|
|
||||||
if !w.wroteHeader {
|
|
||||||
copy(w.enc, magicChunk)
|
|
||||||
if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
|
|
||||||
w.err = err
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
w.wroteHeader = true
|
|
||||||
}
|
|
||||||
for len(p) > 0 {
|
|
||||||
var uncompressed []byte
|
|
||||||
if len(p) > maxUncompressedChunkLen {
|
|
||||||
uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
|
|
||||||
} else {
|
|
||||||
uncompressed, p = p, nil
|
|
||||||
}
|
|
||||||
checksum := crc(uncompressed)
|
|
||||||
|
|
||||||
// Compress the buffer, discarding the result if the improvement
|
|
||||||
// isn't at least 12.5%.
|
|
||||||
chunkType := uint8(chunkTypeCompressedData)
|
|
||||||
chunkBody, err := Encode(w.enc, uncompressed)
|
|
||||||
if err != nil {
|
|
||||||
w.err = err
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
|
|
||||||
chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
|
|
||||||
}
|
|
||||||
|
|
||||||
chunkLen := 4 + len(chunkBody)
|
|
||||||
w.buf[0] = chunkType
|
|
||||||
w.buf[1] = uint8(chunkLen >> 0)
|
|
||||||
w.buf[2] = uint8(chunkLen >> 8)
|
|
||||||
w.buf[3] = uint8(chunkLen >> 16)
|
|
||||||
w.buf[4] = uint8(checksum >> 0)
|
|
||||||
w.buf[5] = uint8(checksum >> 8)
|
|
||||||
w.buf[6] = uint8(checksum >> 16)
|
|
||||||
w.buf[7] = uint8(checksum >> 24)
|
|
||||||
if _, err = w.w.Write(w.buf[:]); err != nil {
|
|
||||||
w.err = err
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
if _, err = w.w.Write(chunkBody); err != nil {
|
|
||||||
w.err = err
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
n += len(uncompressed)
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
364
Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go
generated
vendored
364
Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go
generated
vendored
@ -1,364 +0,0 @@
|
|||||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package snappy
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"math/rand"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
|
|
||||||
testdata = flag.String("testdata", "testdata", "Directory containing the test data")
|
|
||||||
)
|
|
||||||
|
|
||||||
func roundtrip(b, ebuf, dbuf []byte) error {
|
|
||||||
e, err := Encode(ebuf, b)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("encoding error: %v", err)
|
|
||||||
}
|
|
||||||
d, err := Decode(dbuf, e)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("decoding error: %v", err)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(b, d) {
|
|
||||||
return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEmpty(t *testing.T) {
|
|
||||||
if err := roundtrip(nil, nil, nil); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSmallCopy(t *testing.T) {
|
|
||||||
for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
|
|
||||||
for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
|
|
||||||
for i := 0; i < 32; i++ {
|
|
||||||
s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb"
|
|
||||||
if err := roundtrip([]byte(s), ebuf, dbuf); err != nil {
|
|
||||||
t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSmallRand(t *testing.T) {
|
|
||||||
rng := rand.New(rand.NewSource(27354294))
|
|
||||||
for n := 1; n < 20000; n += 23 {
|
|
||||||
b := make([]byte, n)
|
|
||||||
for i := range b {
|
|
||||||
b[i] = uint8(rng.Uint32())
|
|
||||||
}
|
|
||||||
if err := roundtrip(b, nil, nil); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSmallRegular(t *testing.T) {
|
|
||||||
for n := 1; n < 20000; n += 23 {
|
|
||||||
b := make([]byte, n)
|
|
||||||
for i := range b {
|
|
||||||
b[i] = uint8(i%10 + 'a')
|
|
||||||
}
|
|
||||||
if err := roundtrip(b, nil, nil); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func cmp(a, b []byte) error {
|
|
||||||
if len(a) != len(b) {
|
|
||||||
return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
|
|
||||||
}
|
|
||||||
for i := range a {
|
|
||||||
if a[i] != b[i] {
|
|
||||||
return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFramingFormat(t *testing.T) {
|
|
||||||
// src is comprised of alternating 1e5-sized sequences of random
|
|
||||||
// (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
|
|
||||||
// because it is larger than maxUncompressedChunkLen (64k).
|
|
||||||
src := make([]byte, 1e6)
|
|
||||||
rng := rand.New(rand.NewSource(1))
|
|
||||||
for i := 0; i < 10; i++ {
|
|
||||||
if i%2 == 0 {
|
|
||||||
for j := 0; j < 1e5; j++ {
|
|
||||||
src[1e5*i+j] = uint8(rng.Intn(256))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for j := 0; j < 1e5; j++ {
|
|
||||||
src[1e5*i+j] = uint8(i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
if _, err := NewWriter(buf).Write(src); err != nil {
|
|
||||||
t.Fatalf("Write: encoding: %v", err)
|
|
||||||
}
|
|
||||||
dst, err := ioutil.ReadAll(NewReader(buf))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("ReadAll: decoding: %v", err)
|
|
||||||
}
|
|
||||||
if err := cmp(dst, src); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReaderReset(t *testing.T) {
|
|
||||||
gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
if _, err := NewWriter(buf).Write(gold); err != nil {
|
|
||||||
t.Fatalf("Write: %v", err)
|
|
||||||
}
|
|
||||||
encoded, invalid, partial := buf.String(), "invalid", "partial"
|
|
||||||
r := NewReader(nil)
|
|
||||||
for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
|
|
||||||
if s == partial {
|
|
||||||
r.Reset(strings.NewReader(encoded))
|
|
||||||
if _, err := r.Read(make([]byte, 101)); err != nil {
|
|
||||||
t.Errorf("#%d: %v", i, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
r.Reset(strings.NewReader(s))
|
|
||||||
got, err := ioutil.ReadAll(r)
|
|
||||||
switch s {
|
|
||||||
case encoded:
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("#%d: %v", i, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := cmp(got, gold); err != nil {
|
|
||||||
t.Errorf("#%d: %v", i, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
case invalid:
|
|
||||||
if err == nil {
|
|
||||||
t.Errorf("#%d: got nil error, want non-nil", i)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWriterReset(t *testing.T) {
|
|
||||||
gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
|
|
||||||
var gots, wants [][]byte
|
|
||||||
const n = 20
|
|
||||||
w, failed := NewWriter(nil), false
|
|
||||||
for i := 0; i <= n; i++ {
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
w.Reset(buf)
|
|
||||||
want := gold[:len(gold)*i/n]
|
|
||||||
if _, err := w.Write(want); err != nil {
|
|
||||||
t.Errorf("#%d: Write: %v", i, err)
|
|
||||||
failed = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
got, err := ioutil.ReadAll(NewReader(buf))
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("#%d: ReadAll: %v", i, err)
|
|
||||||
failed = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
gots = append(gots, got)
|
|
||||||
wants = append(wants, want)
|
|
||||||
}
|
|
||||||
if failed {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for i := range gots {
|
|
||||||
if err := cmp(gots[i], wants[i]); err != nil {
|
|
||||||
t.Errorf("#%d: %v", i, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func benchDecode(b *testing.B, src []byte) {
|
|
||||||
encoded, err := Encode(nil, src)
|
|
||||||
if err != nil {
|
|
||||||
b.Fatal(err)
|
|
||||||
}
|
|
||||||
// Bandwidth is in amount of uncompressed data.
|
|
||||||
b.SetBytes(int64(len(src)))
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
Decode(src, encoded)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func benchEncode(b *testing.B, src []byte) {
|
|
||||||
// Bandwidth is in amount of uncompressed data.
|
|
||||||
b.SetBytes(int64(len(src)))
|
|
||||||
dst := make([]byte, MaxEncodedLen(len(src)))
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
Encode(dst, src)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func readFile(b testing.TB, filename string) []byte {
|
|
||||||
src, err := ioutil.ReadFile(filename)
|
|
||||||
if err != nil {
|
|
||||||
b.Fatalf("failed reading %s: %s", filename, err)
|
|
||||||
}
|
|
||||||
if len(src) == 0 {
|
|
||||||
b.Fatalf("%s has zero length", filename)
|
|
||||||
}
|
|
||||||
return src
|
|
||||||
}
|
|
||||||
|
|
||||||
// expand returns a slice of length n containing repeated copies of src.
|
|
||||||
func expand(src []byte, n int) []byte {
|
|
||||||
dst := make([]byte, n)
|
|
||||||
for x := dst; len(x) > 0; {
|
|
||||||
i := copy(x, src)
|
|
||||||
x = x[i:]
|
|
||||||
}
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
||||||
func benchWords(b *testing.B, n int, decode bool) {
|
|
||||||
// Note: the file is OS-language dependent so the resulting values are not
|
|
||||||
// directly comparable for non-US-English OS installations.
|
|
||||||
data := expand(readFile(b, "/usr/share/dict/words"), n)
|
|
||||||
if decode {
|
|
||||||
benchDecode(b, data)
|
|
||||||
} else {
|
|
||||||
benchEncode(b, data)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) }
|
|
||||||
func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) }
|
|
||||||
func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) }
|
|
||||||
func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) }
|
|
||||||
func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) }
|
|
||||||
func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) }
|
|
||||||
func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
|
|
||||||
func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
|
|
||||||
|
|
||||||
// testFiles' values are copied directly from
|
|
||||||
// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
|
|
||||||
// The label field is unused in snappy-go.
|
|
||||||
var testFiles = []struct {
|
|
||||||
label string
|
|
||||||
filename string
|
|
||||||
}{
|
|
||||||
{"html", "html"},
|
|
||||||
{"urls", "urls.10K"},
|
|
||||||
{"jpg", "fireworks.jpeg"},
|
|
||||||
{"jpg_200", "fireworks.jpeg"},
|
|
||||||
{"pdf", "paper-100k.pdf"},
|
|
||||||
{"html4", "html_x_4"},
|
|
||||||
{"txt1", "alice29.txt"},
|
|
||||||
{"txt2", "asyoulik.txt"},
|
|
||||||
{"txt3", "lcet10.txt"},
|
|
||||||
{"txt4", "plrabn12.txt"},
|
|
||||||
{"pb", "geo.protodata"},
|
|
||||||
{"gaviota", "kppkn.gtb"},
|
|
||||||
}
|
|
||||||
|
|
||||||
// The test data files are present at this canonical URL.
|
|
||||||
const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
|
|
||||||
|
|
||||||
func downloadTestdata(basename string) (errRet error) {
|
|
||||||
filename := filepath.Join(*testdata, basename)
|
|
||||||
if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if !*download {
|
|
||||||
return fmt.Errorf("test data not found; skipping benchmark without the -download flag")
|
|
||||||
}
|
|
||||||
// Download the official snappy C++ implementation reference test data
|
|
||||||
// files for benchmarking.
|
|
||||||
if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) {
|
|
||||||
return fmt.Errorf("failed to create testdata: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := os.Create(filename)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create %s: %s", filename, err)
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
defer func() {
|
|
||||||
if errRet != nil {
|
|
||||||
os.Remove(filename)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
url := baseURL + basename
|
|
||||||
resp, err := http.Get(url)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to download %s: %s", url, err)
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
if s := resp.StatusCode; s != http.StatusOK {
|
|
||||||
return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
|
|
||||||
}
|
|
||||||
_, err = io.Copy(f, resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func benchFile(b *testing.B, n int, decode bool) {
|
|
||||||
if err := downloadTestdata(testFiles[n].filename); err != nil {
|
|
||||||
b.Fatalf("failed to download testdata: %s", err)
|
|
||||||
}
|
|
||||||
data := readFile(b, filepath.Join(*testdata, testFiles[n].filename))
|
|
||||||
if decode {
|
|
||||||
benchDecode(b, data)
|
|
||||||
} else {
|
|
||||||
benchEncode(b, data)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Naming convention is kept similar to what snappy's C++ implementation uses.
|
|
||||||
func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) }
|
|
||||||
func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) }
|
|
||||||
func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) }
|
|
||||||
func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) }
|
|
||||||
func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) }
|
|
||||||
func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) }
|
|
||||||
func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) }
|
|
||||||
func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) }
|
|
||||||
func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
|
|
||||||
func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
|
|
||||||
func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
|
|
||||||
func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
|
|
||||||
func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
|
|
||||||
func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
|
|
||||||
func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
|
|
||||||
func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) }
|
|
||||||
func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) }
|
|
||||||
func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) }
|
|
||||||
func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) }
|
|
||||||
func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) }
|
|
||||||
func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
|
|
||||||
func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
|
|
||||||
func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
|
|
||||||
func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
|
|
22
Godeps/_workspace/src/github.com/willf/bitset/.gitignore
generated
vendored
22
Godeps/_workspace/src/github.com/willf/bitset/.gitignore
generated
vendored
@ -1,22 +0,0 @@
|
|||||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
|
||||||
*.o
|
|
||||||
*.a
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Folders
|
|
||||||
_obj
|
|
||||||
_test
|
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
|
||||||
*.[568vq]
|
|
||||||
[568vq].out
|
|
||||||
|
|
||||||
*.cgo1.go
|
|
||||||
*.cgo2.c
|
|
||||||
_cgo_defun.c
|
|
||||||
_cgo_gotypes.go
|
|
||||||
_cgo_export.*
|
|
||||||
|
|
||||||
_testmain.go
|
|
||||||
|
|
||||||
*.exe
|
|
10
Godeps/_workspace/src/github.com/willf/bitset/CHANGELOG
generated
vendored
10
Godeps/_workspace/src/github.com/willf/bitset/CHANGELOG
generated
vendored
@ -1,10 +0,0 @@
|
|||||||
2014-02-14 Version 1.0.0 💗
|
|
||||||
|
|
||||||
- First 'released' version
|
|
||||||
- includes various fixes by:
|
|
||||||
- Daniel Lemire
|
|
||||||
- Todd Vierling
|
|
||||||
- Zellyn Hunter
|
|
||||||
- Seyi Ogunyemi
|
|
||||||
- Simon Menke
|
|
||||||
- Cenk Altı
|
|
44
Godeps/_workspace/src/github.com/willf/bitset/README.md
generated
vendored
44
Godeps/_workspace/src/github.com/willf/bitset/README.md
generated
vendored
@ -1,44 +0,0 @@
|
|||||||
Package bitset implements bitsets, a mapping
|
|
||||||
between non-negative integers and boolean values. It should be more
|
|
||||||
efficient than map[uint] bool.
|
|
||||||
|
|
||||||
It provides methods for setting, clearing, flipping, and testing
|
|
||||||
individual integers.
|
|
||||||
|
|
||||||
But it also provides set intersection, union, difference,
|
|
||||||
complement, and symmetric operations, as well as tests to
|
|
||||||
check whether any, all, or no bits are set, and querying a
|
|
||||||
bitset's current length and number of postive bits.
|
|
||||||
|
|
||||||
BitSets are expanded to the size of the largest set bit; the
|
|
||||||
memory allocation is approximately Max bits, where Max is
|
|
||||||
the largest set bit. BitSets are never shrunk. On creation,
|
|
||||||
a hint can be given for the number of bits that will be used.
|
|
||||||
|
|
||||||
Many of the methods, including Set, Clear, and Flip, return
|
|
||||||
a BitSet pointer, which allows for chaining.
|
|
||||||
|
|
||||||
Example use:
|
|
||||||
|
|
||||||
import "bitset"
|
|
||||||
var b BitSet
|
|
||||||
b.Set(10).Set(11)
|
|
||||||
if b.Test(1000) {
|
|
||||||
b.Clear(1000)
|
|
||||||
}
|
|
||||||
for i,e := v.NextSet(0); e; i,e = v.NextSet(i + 1) {
|
|
||||||
frmt.Println("The following bit is set:",i);
|
|
||||||
}
|
|
||||||
if B.Intersection(bitset.New(100).Set(10)).Count() > 1 {
|
|
||||||
fmt.Println("Intersection works.")
|
|
||||||
}
|
|
||||||
|
|
||||||
As an alternative to BitSets, one should check out the 'big' package,
|
|
||||||
which provides a (less set-theoretical) view of bitsets.
|
|
||||||
|
|
||||||
Discussions golang-nuts Google Group:
|
|
||||||
|
|
||||||
* [Revised BitSet](https://groups.google.com/forum/#!topic/golang-nuts/5i3l0CXDiBg)
|
|
||||||
* [simple bitset?](https://groups.google.com/d/topic/golang-nuts/7n1VkRTlBf4/discussion)
|
|
||||||
|
|
||||||
Godoc documentation is at: https://godoc.org/github.com/willf/bitset
|
|
2
Godeps/_workspace/src/golang.org/x/net/http2/.gitignore
generated
vendored
2
Godeps/_workspace/src/golang.org/x/net/http2/.gitignore
generated
vendored
@ -1,2 +0,0 @@
|
|||||||
*~
|
|
||||||
h2i/h2i
|
|
5
Godeps/_workspace/src/golang.org/x/net/http2/h2demo/.gitignore
generated
vendored
5
Godeps/_workspace/src/golang.org/x/net/http2/h2demo/.gitignore
generated
vendored
@ -1,5 +0,0 @@
|
|||||||
h2demo
|
|
||||||
h2demo.linux
|
|
||||||
client-id.dat
|
|
||||||
client-secret.dat
|
|
||||||
token.dat
|
|
23
Godeps/_workspace/src/golang.org/x/text/unicode/norm/Makefile
generated
vendored
23
Godeps/_workspace/src/golang.org/x/text/unicode/norm/Makefile
generated
vendored
@ -1,23 +0,0 @@
|
|||||||
# Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
# Use of this source code is governed by a BSD-style
|
|
||||||
# license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
maketables: maketables.go triegen.go
|
|
||||||
go build $^
|
|
||||||
|
|
||||||
normregtest: normregtest.go
|
|
||||||
go build $^
|
|
||||||
|
|
||||||
tables: maketables
|
|
||||||
./maketables > tables.go
|
|
||||||
gofmt -w tables.go
|
|
||||||
|
|
||||||
# Downloads from www.unicode.org, so not part
|
|
||||||
# of standard test scripts.
|
|
||||||
test: testtables regtest
|
|
||||||
|
|
||||||
testtables: maketables
|
|
||||||
./maketables -test > data_test.go && go test -tags=test
|
|
||||||
|
|
||||||
regtest: normregtest
|
|
||||||
./normregtest
|
|
7549
Godeps/_workspace/src/golang.org/x/text/unicode/norm/tables.go
generated
vendored
7549
Godeps/_workspace/src/golang.org/x/text/unicode/norm/tables.go
generated
vendored
File diff suppressed because it is too large
Load Diff
6
Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore
generated
vendored
6
Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore
generated
vendored
@ -1,6 +0,0 @@
|
|||||||
# Setup a Global .gitignore for OS and editor generated files:
|
|
||||||
# https://help.github.com/articles/ignoring-files
|
|
||||||
# git config --global core.excludesfile ~/.gitignore_global
|
|
||||||
|
|
||||||
.vagrant
|
|
||||||
*.sublime-project
|
|
23
Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml
generated
vendored
23
Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml
generated
vendored
@ -1,23 +0,0 @@
|
|||||||
sudo: false
|
|
||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.5.2
|
|
||||||
|
|
||||||
before_script:
|
|
||||||
- go get -u github.com/golang/lint/golint
|
|
||||||
|
|
||||||
script:
|
|
||||||
- go test -v --race ./...
|
|
||||||
|
|
||||||
after_script:
|
|
||||||
- test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
|
|
||||||
- test -z "$(golint ./... | tee /dev/stderr)"
|
|
||||||
- go vet ./...
|
|
||||||
|
|
||||||
os:
|
|
||||||
- linux
|
|
||||||
- osx
|
|
||||||
|
|
||||||
notifications:
|
|
||||||
email: false
|
|
@ -4,7 +4,7 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/storage"
|
"github.com/khlieng/dispatch/storage"
|
||||||
)
|
)
|
||||||
|
@ -5,7 +5,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/storage"
|
"github.com/khlieng/dispatch/storage"
|
||||||
)
|
)
|
||||||
|
@ -6,8 +6,8 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/assets"
|
"github.com/khlieng/dispatch/assets"
|
||||||
"github.com/khlieng/dispatch/server"
|
"github.com/khlieng/dispatch/server"
|
||||||
|
@ -7,8 +7,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/jpillora/backoff"
|
"github.com/jpillora/backoff"
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/matryer/resync"
|
"github.com/matryer/resync"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Client struct {
|
type Client struct {
|
||||||
|
@ -4,7 +4,7 @@ import (
|
|||||||
"net"
|
"net"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func testClient() *Client {
|
func testClient() *Client {
|
||||||
|
@ -9,7 +9,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
var ircd *mockIrcd
|
var ircd *mockIrcd
|
||||||
|
@ -3,7 +3,7 @@ package irc
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestParseMessage(t *testing.T) {
|
func TestParseMessage(t *testing.T) {
|
||||||
|
@ -8,7 +8,7 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/xenolf/lego/acme"
|
"github.com/xenolf/lego/acme"
|
||||||
)
|
)
|
||||||
|
|
||||||
const URL = "https://acme-v01.api.letsencrypt.org/directory"
|
const URL = "https://acme-v01.api.letsencrypt.org/directory"
|
||||||
|
@ -9,7 +9,7 @@ import (
|
|||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/xenolf/lego/acme"
|
"github.com/xenolf/lego/acme"
|
||||||
)
|
)
|
||||||
|
|
||||||
const defaultUser = "default"
|
const defaultUser = "default"
|
||||||
|
@ -4,8 +4,8 @@ import (
|
|||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/xenolf/lego/acme"
|
"github.com/xenolf/lego/acme"
|
||||||
)
|
)
|
||||||
|
|
||||||
func tempdir() string {
|
func tempdir() string {
|
||||||
|
@ -8,7 +8,7 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/dgrijalva/jwt-go"
|
"github.com/dgrijalva/jwt-go"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/storage"
|
"github.com/khlieng/dispatch/storage"
|
||||||
)
|
)
|
||||||
|
@ -4,7 +4,7 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/storage"
|
"github.com/khlieng/dispatch/storage"
|
||||||
)
|
)
|
||||||
|
@ -4,7 +4,7 @@ import (
|
|||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"net"
|
"net"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/irc"
|
"github.com/khlieng/dispatch/irc"
|
||||||
"github.com/khlieng/dispatch/storage"
|
"github.com/khlieng/dispatch/storage"
|
||||||
|
@ -6,7 +6,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/irc"
|
"github.com/khlieng/dispatch/irc"
|
||||||
"github.com/khlieng/dispatch/storage"
|
"github.com/khlieng/dispatch/storage"
|
||||||
|
@ -14,7 +14,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/assets"
|
"github.com/khlieng/dispatch/assets"
|
||||||
)
|
)
|
||||||
|
@ -8,9 +8,9 @@ import (
|
|||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/gorilla/websocket"
|
"github.com/gorilla/websocket"
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/golang.org/x/net/http2"
|
"golang.org/x/net/http2"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/letsencrypt"
|
"github.com/khlieng/dispatch/letsencrypt"
|
||||||
"github.com/khlieng/dispatch/storage"
|
"github.com/khlieng/dispatch/storage"
|
||||||
|
@ -7,7 +7,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
)
|
)
|
||||||
|
|
||||||
func listenAndServeTLS(srv *http.Server) error {
|
func listenAndServeTLS(srv *http.Server) error {
|
||||||
|
@ -3,7 +3,7 @@ package server
|
|||||||
import (
|
import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/gorilla/websocket"
|
"github.com/gorilla/websocket"
|
||||||
)
|
)
|
||||||
|
|
||||||
type wsConn struct {
|
type wsConn struct {
|
||||||
|
@ -6,8 +6,8 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
"net"
|
"net"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/gorilla/websocket"
|
"github.com/gorilla/websocket"
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/irc"
|
"github.com/khlieng/dispatch/irc"
|
||||||
"github.com/khlieng/dispatch/storage"
|
"github.com/khlieng/dispatch/storage"
|
||||||
|
@ -3,7 +3,7 @@ package storage
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestGetSetUsers(t *testing.T) {
|
func TestGetSetUsers(t *testing.T) {
|
||||||
|
@ -3,7 +3,7 @@ package storage
|
|||||||
import (
|
import (
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/mitchellh/go-homedir"
|
"github.com/mitchellh/go-homedir"
|
||||||
)
|
)
|
||||||
|
|
||||||
func DefaultDirectory() string {
|
func DefaultDirectory() string {
|
||||||
|
@ -4,7 +4,7 @@ import (
|
|||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"log"
|
"log"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/boltdb/bolt"
|
"github.com/boltdb/bolt"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -7,8 +7,8 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve"
|
"github.com/blevesearch/bleve"
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/boltdb/bolt"
|
"github.com/boltdb/bolt"
|
||||||
)
|
)
|
||||||
|
|
||||||
type User struct {
|
type User struct {
|
||||||
|
@ -6,8 +6,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve"
|
"github.com/blevesearch/bleve"
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/boltdb/bolt"
|
"github.com/boltdb/bolt"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Message struct {
|
type Message struct {
|
||||||
|
@ -6,7 +6,7 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func tempdir() string {
|
func tempdir() string {
|
||||||
|
61
vendor/github.com/BurntSushi/toml/_examples/example.go
generated
vendored
Normal file
61
vendor/github.com/BurntSushi/toml/_examples/example.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/BurntSushi/toml"
|
||||||
|
)
|
||||||
|
|
||||||
|
type tomlConfig struct {
|
||||||
|
Title string
|
||||||
|
Owner ownerInfo
|
||||||
|
DB database `toml:"database"`
|
||||||
|
Servers map[string]server
|
||||||
|
Clients clients
|
||||||
|
}
|
||||||
|
|
||||||
|
type ownerInfo struct {
|
||||||
|
Name string
|
||||||
|
Org string `toml:"organization"`
|
||||||
|
Bio string
|
||||||
|
DOB time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
type database struct {
|
||||||
|
Server string
|
||||||
|
Ports []int
|
||||||
|
ConnMax int `toml:"connection_max"`
|
||||||
|
Enabled bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type server struct {
|
||||||
|
IP string
|
||||||
|
DC string
|
||||||
|
}
|
||||||
|
|
||||||
|
type clients struct {
|
||||||
|
Data [][]interface{}
|
||||||
|
Hosts []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
var config tomlConfig
|
||||||
|
if _, err := toml.DecodeFile("example.toml", &config); err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Title: %s\n", config.Title)
|
||||||
|
fmt.Printf("Owner: %s (%s, %s), Born: %s\n",
|
||||||
|
config.Owner.Name, config.Owner.Org, config.Owner.Bio,
|
||||||
|
config.Owner.DOB)
|
||||||
|
fmt.Printf("Database: %s %v (Max conn. %d), Enabled? %v\n",
|
||||||
|
config.DB.Server, config.DB.Ports, config.DB.ConnMax,
|
||||||
|
config.DB.Enabled)
|
||||||
|
for serverName, server := range config.Servers {
|
||||||
|
fmt.Printf("Server: %s (%s, %s)\n", serverName, server.IP, server.DC)
|
||||||
|
}
|
||||||
|
fmt.Printf("Client data: %v\n", config.Clients.Data)
|
||||||
|
fmt.Printf("Client hosts: %v\n", config.Clients.Hosts)
|
||||||
|
}
|
35
vendor/github.com/BurntSushi/toml/_examples/example.toml
generated
vendored
Normal file
35
vendor/github.com/BurntSushi/toml/_examples/example.toml
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
# This is a TOML document. Boom.
|
||||||
|
|
||||||
|
title = "TOML Example"
|
||||||
|
|
||||||
|
[owner]
|
||||||
|
name = "Tom Preston-Werner"
|
||||||
|
organization = "GitHub"
|
||||||
|
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
||||||
|
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
||||||
|
|
||||||
|
[database]
|
||||||
|
server = "192.168.1.1"
|
||||||
|
ports = [ 8001, 8001, 8002 ]
|
||||||
|
connection_max = 5000
|
||||||
|
enabled = true
|
||||||
|
|
||||||
|
[servers]
|
||||||
|
|
||||||
|
# You can indent as you please. Tabs or spaces. TOML don't care.
|
||||||
|
[servers.alpha]
|
||||||
|
ip = "10.0.0.1"
|
||||||
|
dc = "eqdc10"
|
||||||
|
|
||||||
|
[servers.beta]
|
||||||
|
ip = "10.0.0.2"
|
||||||
|
dc = "eqdc10"
|
||||||
|
|
||||||
|
[clients]
|
||||||
|
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
||||||
|
|
||||||
|
# Line breaks are OK when inside arrays
|
||||||
|
hosts = [
|
||||||
|
"alpha",
|
||||||
|
"omega"
|
||||||
|
]
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user