Update all dependencies

Ken-Håvard Lieng 2018-08-31 03:57:19 +02:00
parent 628dc66685
commit 47bc78b80a
365 changed files with 37935 additions and 16866 deletions

File diff suppressed because one or more lines are too long

View File

@ -9,22 +9,22 @@
"not op_mini all"
],
"devDependencies": {
"@babel/core": "^7.0.0-rc.2",
"@babel/plugin-proposal-class-properties": "^7.0.0-rc.2",
"@babel/plugin-proposal-export-default-from": "^7.0.0-rc.2",
"@babel/plugin-proposal-export-namespace-from": "^7.0.0-rc.2",
"@babel/plugin-transform-react-constant-elements": "^7.0.0-rc.2",
"@babel/plugin-transform-react-inline-elements": "^7.0.0-rc.2",
"@babel/preset-env": "^7.0.0-rc.2",
"@babel/preset-react": "^7.0.0-rc.2",
"autoprefixer": "^9.1.2",
"@babel/core": "^7.0.0",
"@babel/plugin-proposal-class-properties": "^7.0.0",
"@babel/plugin-proposal-export-default-from": "^7.0.0",
"@babel/plugin-proposal-export-namespace-from": "^7.0.0",
"@babel/plugin-transform-react-constant-elements": "^7.0.0",
"@babel/plugin-transform-react-inline-elements": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-react": "^7.0.0",
"autoprefixer": "^9.1.3",
"babel-core": "^7.0.0-0",
"babel-eslint": "^8.2.6",
"babel-eslint": "^9.0.0",
"babel-jest": "^23.4.2",
"babel-loader": "^8.0.0-beta",
"babel-loader": "^8.0.0",
"brotli": "^1.3.1",
"css-loader": "^1.0.0",
"cssnano": "^4.0.5",
"cssnano": "^4.1.0",
"del": "^3.0.0",
"eslint": "^5.4.0",
"eslint-config-airbnb": "^16.1.0",
@ -44,10 +44,10 @@
"postcss-flexbugs-fixes": "^4.1.0",
"postcss-loader": "^3.0.0",
"prettier": "1.14.2",
"style-loader": "^0.22.1",
"style-loader": "^0.23.0",
"through2": "^2.0.3",
"webpack": "^4.17.0",
"webpack-dev-middleware": "^3.0.1",
"webpack": "^4.17.1",
"webpack-dev-middleware": "^3.2.0",
"webpack-hot-middleware": "^2.22.3"
},
"dependencies": {
@ -57,14 +57,14 @@
"classnames": "^2.2.6",
"es6-promise": "^4.2.4",
"fontfaceobserver": "^2.0.9",
"formik": "1.0.3",
"formik": "1.1.1",
"history": "4.5.1",
"immer": "^1.5.0",
"js-cookie": "^2.1.4",
"lodash": "^4.17.10",
"react": "^16.4.2",
"react-dom": "^16.4.2",
"react-hot-loader": "^4.3.4",
"react-hot-loader": "^4.3.5",
"react-redux": "^5.0.2",
"react-virtualized": "^9.20.1",
"redux": "^4.0.0",

File diff suppressed because it is too large

go.mod (75 lines changed)

@ -1,48 +1,71 @@
module github.com/khlieng/dispatch
require (
github.com/RoaringBitmap/roaring v0.0.0-20180423145726-e4aafc3cbc16 // indirect
github.com/BurntSushi/toml v0.3.0 // indirect
github.com/RoaringBitmap/roaring v0.4.16 // indirect
github.com/blevesearch/bleve v0.0.0-20180525174403-1d6d47ed3ad9
github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3 // indirect
github.com/blevesearch/go-porterstemmer v0.0.0-20141230013033-23a2c8e5cf1f // indirect
github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f // indirect
github.com/boltdb/bolt v0.0.0-20180302180052-fd01fc79c553
github.com/couchbase/vellum v0.0.0-20180427141700-eb6ae3743b3f // indirect
github.com/davecgh/go-spew v1.1.0 // indirect
github.com/dsnet/compress v0.0.0-20160704023633-b9aab3c6a04e
github.com/couchbase/vellum v0.0.0-20180822133609-0ceea4a37442 // indirect
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07 // indirect
github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369 // indirect
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186 // indirect
github.com/dsnet/compress v0.0.0-20171208185109-cc9eb1d7ad76
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 // indirect
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect
github.com/fsnotify/fsnotify v1.4.7
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd // indirect
github.com/golang/protobuf v1.1.0 // indirect
github.com/golang/snappy v0.0.0-20170215233205-553a64147049 // indirect
github.com/gorilla/websocket v0.0.0-20180420171612-21ab95fa12b9
github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce // indirect
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 // indirect
github.com/golang/protobuf v1.2.0 // indirect
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c // indirect
github.com/gorilla/websocket v1.3.0
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hpcloud/tail v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jpillora/backoff v0.0.0-20170222002228-06c7a16c845d
github.com/kjk/betterguid v0.0.0-20150214055115-c70aca50d858
github.com/magiconair/properties v0.0.0-20180217134545-2c9e95027885 // indirect
github.com/mailru/easyjson v0.0.0-20180323154445-8b799c424f57
github.com/miekg/dns v0.0.0-20180406150955-01d59357d468 // indirect
github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747
github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238 // indirect
github.com/jmhodges/levigo v0.0.0-20161115193449-c42d9e0ca023 // indirect
github.com/jpillora/backoff v0.0.0-20170918002102-8eab2debe79d
github.com/jtolds/gls v4.2.1+incompatible // indirect
github.com/kjk/betterguid v0.0.0-20170621091430-c442874ba63a
github.com/kr/pretty v0.1.0 // indirect
github.com/magiconair/properties v1.8.0 // indirect
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329
github.com/miekg/dns v1.0.8 // indirect
github.com/mitchellh/go-homedir v1.0.0
github.com/mitchellh/mapstructure v1.0.0 // indirect
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae // indirect
github.com/pelletier/go-toml v0.0.0-20180323185243-66540cf1fcd2 // indirect
github.com/onsi/ginkgo v1.6.0 // indirect
github.com/onsi/gomega v1.4.1 // indirect
github.com/pelletier/go-toml v1.2.0 // indirect
github.com/philhofer/fwd v1.0.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/spf13/afero v1.1.0 // indirect
github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf // indirect
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a // indirect
github.com/spf13/afero v1.1.1 // indirect
github.com/spf13/cast v1.2.0
github.com/spf13/cobra v0.0.3
github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec // indirect
github.com/spf13/pflag v1.0.1 // indirect
github.com/spf13/viper v0.0.0-20180404183325-8dc2790b029d
github.com/spf13/jwalterweatherman v0.0.0-20180814060501-14d3d4c51834 // indirect
github.com/spf13/pflag v1.0.2 // indirect
github.com/spf13/viper v1.1.0
github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 // indirect
github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1
github.com/stretchr/testify v1.2.2
github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d // indirect
github.com/tecbot/gorocksdb v0.0.0-20180730101644-8b1d981498e1 // indirect
github.com/tinylib/msgp v0.0.0-20180215042507-3b5c87ab5fb0 // indirect
github.com/willf/bitset v1.1.9 // indirect
github.com/xenolf/lego v0.0.0-20180425151241-8e9c5ac3e6bf
golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307 // indirect
golang.org/x/net v0.0.0-20180420171651-5f9ae10d9af5
golang.org/x/sys v0.0.0-20180501092740-78d5f264b493 // indirect
github.com/xenolf/lego v1.0.1
golang.org/x/crypto v0.0.0-20180830192347-182538f80094 // indirect
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 // indirect
golang.org/x/text v0.0.0-20180410181320-7922cc490dd5 // indirect
gopkg.in/square/go-jose.v1 v1.1.0 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/square/go-jose.v2 v2.1.8 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.2.1 // indirect
)

go.sum (156 lines changed)

@ -1,89 +1,141 @@
github.com/RoaringBitmap/roaring v0.0.0-20180423145726-e4aafc3cbc16 h1:MThrqRbdw6sk/f9gxw+FWpFCK6iSWDWHiAb/0TvlYRs=
github.com/RoaringBitmap/roaring v0.0.0-20180423145726-e4aafc3cbc16/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
github.com/BurntSushi/toml v0.3.0 h1:e1/Ivsx3Z0FVTV0NSOv/aVgbUWyQuzj7DDnFblkRvsY=
github.com/BurntSushi/toml v0.3.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/RoaringBitmap/roaring v0.4.16 h1:NholfewybRLOwACgfqfzn/N5xa6keKNs4fP00t0cwLo=
github.com/RoaringBitmap/roaring v0.4.16/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
github.com/blevesearch/bleve v0.0.0-20180525174403-1d6d47ed3ad9 h1:q25+axgzH1KX+j63v3yrkY1VHc6PkyTfpnzOmtAH154=
github.com/blevesearch/bleve v0.0.0-20180525174403-1d6d47ed3ad9/go.mod h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw=
github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3 h1:U6vnxZrTfItfiUiYx0lf/LgHjRSfaKK5QHSom3lEbnA=
github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ=
github.com/blevesearch/go-porterstemmer v0.0.0-20141230013033-23a2c8e5cf1f h1:J9ZVHbB2X6JNxbKw/f3Y4E9Xq+Ro+zPiivzgmi3RTvg=
github.com/blevesearch/go-porterstemmer v0.0.0-20141230013033-23a2c8e5cf1f/go.mod h1:haWQqFT3RdOGz7PJuM3or/pWNJS1pKkoZJWCkWu0DVA=
github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f h1:kqbi9lqXLLs+zfWlgo1PIiRQ86n33K1JKotjj4rSYOg=
github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f/go.mod h1:IInt5XRvpiGE09KOk9mmCMLjHhydIhNPKPPFLFBB7L8=
github.com/boltdb/bolt v0.0.0-20180302180052-fd01fc79c553 h1:yvSJ8qbaWLeS7COhu2KJ0epn4mmc+aGeBP7Dpg7xQTY=
github.com/boltdb/bolt v0.0.0-20180302180052-fd01fc79c553/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/couchbase/vellum v0.0.0-20180427141700-eb6ae3743b3f h1:Jjz4kKvBsSuO5uHrFU3cP3QV9Q9rY+8gjax+pQhVrck=
github.com/couchbase/vellum v0.0.0-20180427141700-eb6ae3743b3f/go.mod h1:prYTC8EgTu3gwbqJihkud9zRXISvyulAplQ6exdCo1g=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dsnet/compress v0.0.0-20160704023633-b9aab3c6a04e h1:lvxyV8HsE4ye/qPwX+yHMypdTbwA7lpbrZ8s+n+W/MY=
github.com/dsnet/compress v0.0.0-20160704023633-b9aab3c6a04e/go.mod h1:KjxHHirfLaw19iGT70HvVjHQsL1vq1SRQB4yOsAfy2s=
github.com/couchbase/vellum v0.0.0-20180822133609-0ceea4a37442 h1:gCXKsqWDJ/vxUXbtowgP0PfXm3etTapmDP19yu0lHSg=
github.com/couchbase/vellum v0.0.0-20180822133609-0ceea4a37442/go.mod h1:prYTC8EgTu3gwbqJihkud9zRXISvyulAplQ6exdCo1g=
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07 h1:UHFGPvSxX4C4YBApSPvmUfL8tTvWLj2ryqvT9K4Jcuk=
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369 h1:XNT/Zf5l++1Pyg08/HV04ppB0gKxAqtZQBRYiYrUuYk=
github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186 h1:0rkFMAbn5KBKNpJyHQ6Prb95vIKanmAe62KxsrN+sqA=
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dsnet/compress v0.0.0-20171208185109-cc9eb1d7ad76 h1:eX+pdPPlD279OWgdx7f6KqIRSONuK7egk+jDx7OM3Ac=
github.com/dsnet/compress v0.0.0-20171208185109-cc9eb1d7ad76/go.mod h1:KjxHHirfLaw19iGT70HvVjHQsL1vq1SRQB4yOsAfy2s=
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 h1:aaQcKT9WumO6JEJcRyTqFVq4XUZiUcKR2/GI31TOcz8=
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y=
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd h1:r04MMPyLHj/QwZuMJ5+7tJcBr1AQjpiAK/rZWRrQT7o=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049 h1:K9KHZbXKpGydfDN0aZrsoHpLJlZsBrGMFWbgLDGnPZk=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gorilla/websocket v0.0.0-20180420171612-21ab95fa12b9 h1:XM0qkx9Su0WV9s1e5A7xr9ZX0NUYxp7L0cbmJGjKZio=
github.com/gorilla/websocket v0.0.0-20180420171612-21ab95fa12b9/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce h1:xdsDDbiBDQTKASoGEZ+pEmF1OnWuu8AQ9I8iNbHNeno=
github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 h1:OTanQnFt0bi5iLFSdbEVA/idR6Q2WhCm+deb7ir2CcM=
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c h1:16eHWuMGvCjSfgRJKqIzapE78onvvTbdi1rMkU00lZw=
github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.3.0 h1:r/LXc0VJIMd0rCMsc6DxgczaQtoCwCLatnfXmSYcXx8=
github.com/gorilla/websocket v1.3.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jpillora/backoff v0.0.0-20170222002228-06c7a16c845d h1:ETeT81zgLgSNc4BWdDO2Fg9ekVItYErbNtE8mKD2pJA=
github.com/jpillora/backoff v0.0.0-20170222002228-06c7a16c845d/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
github.com/kjk/betterguid v0.0.0-20150214055115-c70aca50d858 h1:E7Jdj4zTHbmE4UpJ6oW1zjkDsFN15Ga+aa1K46lO54A=
github.com/kjk/betterguid v0.0.0-20150214055115-c70aca50d858/go.mod h1:uxRAhHE1nl34DpWgfe0CYbNYbCnYplaB6rZH9ReWtUk=
github.com/magiconair/properties v0.0.0-20180217134545-2c9e95027885 h1:HWxJJvF+QceKcql4r9PC93NtMEgEBfBxlQrZPvbcQvs=
github.com/magiconair/properties v0.0.0-20180217134545-2c9e95027885/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20180323154445-8b799c424f57 h1:qhv1ir3dIyOFmFU+5KqG4dF3zSQTA4nn1DFhu2NQC44=
github.com/mailru/easyjson v0.0.0-20180323154445-8b799c424f57/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/miekg/dns v0.0.0-20180406150955-01d59357d468 h1:XExh3YL9jT+Iv7Kn5SxUBimacGRPt7TEaJ78ibq/3Ng=
github.com/miekg/dns v0.0.0-20180406150955-01d59357d468/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747 h1:eQox4Rh4ewJF+mqYPxCkmBAirRnPaHEB26UkNuPyjlk=
github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238 h1:+MZW2uvHgN8kYvksEN3f7eFL2wpzk0GxmlFsMybWc7E=
github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/jmhodges/levigo v0.0.0-20161115193449-c42d9e0ca023 h1:y5P5G9cANJZt3MXlMrgELo5mNLZPXH8aGFFFG7IzPU0=
github.com/jmhodges/levigo v0.0.0-20161115193449-c42d9e0ca023/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ=
github.com/jpillora/backoff v0.0.0-20170918002102-8eab2debe79d h1:ix3WmphUvN0GDd0DO9MH0v6/5xTv+Xm1bPN+1UJn58k=
github.com/jpillora/backoff v0.0.0-20170918002102-8eab2debe79d/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kjk/betterguid v0.0.0-20170621091430-c442874ba63a h1:b+Gt8sQs//Sl5Dcem5zP9Qc2FgEUAygREa2AAa2Vmcw=
github.com/kjk/betterguid v0.0.0-20170621091430-c442874ba63a/go.mod h1:uxRAhHE1nl34DpWgfe0CYbNYbCnYplaB6rZH9ReWtUk=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/miekg/dns v1.0.8 h1:Zi8HNpze3NeRWH1PQV6O71YcvJRQ6j0lORO6DAEmAAI=
github.com/miekg/dns v1.0.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.0.0 h1:vVpGvMXJPqSDh2VYHF7gsfQj8Ncx+Xw5Y1KHeTRY+7I=
github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/pelletier/go-toml v0.0.0-20180323185243-66540cf1fcd2 h1:BR4UJUSGxC9crpVRG7k28Mq2HRB7lO2A3/ghfWl0R+M=
github.com/pelletier/go-toml v0.0.0-20180323185243-66540cf1fcd2/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.1 h1:PZSj/UFNaVp3KxrzHOcS7oyuWA7LoOY/77yCTEFu21U=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/spf13/afero v1.1.0 h1:bopulORc2JeYaxfHLvJa5NzxviA9PoWhpiiJkru7Ji4=
github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf h1:6V1qxN6Usn4jy8unvggSJz/NC790tefw8Zdy6OZS5co=
github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a h1:JSvGDIbmil4Ui/dDdFBExb7/cmkNjyX5F97oglmvCDo=
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/spf13/afero v1.1.1 h1:Lt3ihYMlE+lreX1GS4Qw4ZsNpYQLxIXKBTEOXm3nt6I=
github.com/spf13/afero v1.1.1/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.2.0 h1:HHl1DSRbEQN2i8tJmtS6ViPyHx35+p51amrdsiTCrkg=
github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec h1:2ZXvIUGghLpdTVHR1UfvfrzoVlZaE/yOWC5LueIHZig=
github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v0.0.0-20180404183325-8dc2790b029d h1:L8nQzf8HvZkwRTWjJb7O5Fww1fk6WtNaDr07G6tyJ/E=
github.com/spf13/viper v0.0.0-20180404183325-8dc2790b029d/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/spf13/jwalterweatherman v0.0.0-20180814060501-14d3d4c51834 h1:kJI9pPzfsULT/72wy7mxkRQZPtKWgFdCA2RTGZ4v8/E=
github.com/spf13/jwalterweatherman v0.0.0-20180814060501-14d3d4c51834/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc=
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.1.0 h1:V7OZpY8i3C1x/pDmU0zNNlfVoDz112fSYvtWMjjS3f4=
github.com/spf13/viper v1.1.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 h1:JNEGSiWg6D3lcBCMCBqN3ELniXujt+0QNHLhNnO0w3s=
github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2/go.mod h1:mjqs7N0Q6m5HpR7QfXVBZXZWSqTjQLeTujjA/xUp2uw=
github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1 h1:Zx8Rp9ozC4FPFxfEKRSUu8+Ay3sZxEUZ7JrCWMbGgvE=
github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d h1:4J9HCZVpvDmj2tiKGSTUnb3Ok/9CEQb9oqu9LHKQQpc=
github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
github.com/tecbot/gorocksdb v0.0.0-20180730101644-8b1d981498e1 h1:RHZn+N/7zUNd1vhTmoy4OUcizIHrhskE7kR3v2HEMTg=
github.com/tecbot/gorocksdb v0.0.0-20180730101644-8b1d981498e1/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8=
github.com/tinylib/msgp v0.0.0-20180215042507-3b5c87ab5fb0 h1:uAwzi+JwkDdOtQZVqPYljFvJr7i43ZgUYXKypk9Eibk=
github.com/tinylib/msgp v0.0.0-20180215042507-3b5c87ab5fb0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/willf/bitset v1.1.9 h1:GBtFynGY9ZWZmEC9sWuu41/7VBXPFCOAbCbqTflOg9c=
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xenolf/lego v0.0.0-20180425151241-8e9c5ac3e6bf h1:SVIcFbJClninU2W9ktlKgSKtowYzxQcPIo5UhRZ7zy0=
github.com/xenolf/lego v0.0.0-20180425151241-8e9c5ac3e6bf/go.mod h1:fwiGnfsIjG7OHPfOvgK7Y/Qo6+2Ox0iozjNTkZICKbY=
golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307 h1:O5C+XK++apFo5B+Vq4ujc/LkLwHxg9fDdgjgoIikBdA=
golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/net v0.0.0-20180420171651-5f9ae10d9af5 h1:ylIG3jIeS45kB0W95N19kS62fwermjMYLIyybf8xh9M=
golang.org/x/net v0.0.0-20180420171651-5f9ae10d9af5/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sys v0.0.0-20180501092740-78d5f264b493 h1:IdoM71H+6PmWKfe808vA3uKntXqEtc9yMa7A8TxcVVg=
golang.org/x/sys v0.0.0-20180501092740-78d5f264b493/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
github.com/xenolf/lego v1.0.1 h1:Rr9iqO8MoNxY6OvqdIZTnNZ8bwt0RNz00nGXfoTq4Bc=
github.com/xenolf/lego v1.0.1/go.mod h1:fwiGnfsIjG7OHPfOvgK7Y/Qo6+2Ox0iozjNTkZICKbY=
golang.org/x/crypto v0.0.0-20180830192347-182538f80094 h1:rVTAlhYa4+lCfNxmAIEOGQRoD23UqP72M3+rSWVGDTg=
golang.org/x/crypto v0.0.0-20180830192347-182538f80094/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.0.0-20180410181320-7922cc490dd5 h1:JuX0hiV/F4xp4w8qqbN49Nsatovt4Kckp6J/5EbNi9Y=
golang.org/x/text v0.0.0-20180410181320-7922cc490dd5/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/square/go-jose.v1 v1.1.0 h1:T/KcERvxOFKL2QzwvOsP0l5xRvvhTlwcTxw5qad61pQ=
gopkg.in/square/go-jose.v1 v1.1.0/go.mod h1:QpYS+a4WhS+DTlyQIi6Ka7MS3SuR9a055rgXNEe6EiA=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/square/go-jose.v2 v2.1.8 h1:yECBkTX7ypNaRFILw4trAAYXRLvcGxTeHCBKj/fc8gU=
gopkg.in/square/go-jose.v2 v2.1.8/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@ -11,7 +11,7 @@ import (
"github.com/xenolf/lego/acme"
)
const URL = "https://acme-v01.api.letsencrypt.org/directory"
const URL = "https://acme-v02.api.letsencrypt.org/directory"
const KeySize = 2048
var directory Directory
@ -28,16 +28,10 @@ func Run(dir, domain, email, port string) (*state, error) {
if err != nil {
return nil, err
}
client.ExcludeChallenges([]acme.Challenge{acme.TLSSNI01})
client.SetHTTPAddress(port)
if user.Registration == nil {
user.Registration, err = client.Register()
if err != nil {
return nil, err
}
err = client.AgreeToTOS()
user.Registration, err = client.Register(true)
if err != nil {
return nil, err
}
@ -97,7 +91,7 @@ func (s *state) getCertPEM() []byte {
return certPEM
}
func (s *state) setCert(meta acme.CertificateResource) {
func (s *state) setCert(meta *acme.CertificateResource) {
cert, err := tls.X509KeyPair(meta.Certificate, meta.PrivateKey)
if err == nil {
s.lock.Lock()
@ -126,23 +120,15 @@ func (s *state) setOCSP(ocsp []byte) {
}
func (s *state) obtain() error {
cert, errors := s.client.ObtainCertificate([]string{s.domain}, true, nil, false)
if err := errors[s.domain]; err != nil {
if _, ok := err.(acme.TOSError); ok {
err := s.client.AgreeToTOS()
if err != nil {
return err
}
return s.obtain()
}
cert, err := s.client.ObtainCertificate([]string{s.domain}, true, nil, false)
if err != nil {
return err
}
s.setCert(cert)
s.refreshOCSP()
err := saveCert(cert)
err = saveCert(cert)
if err != nil {
return err
}
@ -182,16 +168,8 @@ func (s *state) renew() bool {
meta.Certificate = cert
meta.PrivateKey = key
Renew:
newMeta, err := s.client.RenewCertificate(meta, true, false)
if err != nil {
if _, ok := err.(acme.TOSError); ok {
err := s.client.AgreeToTOS()
if err != nil {
return false
}
goto Renew
}
return false
}
@ -240,7 +218,7 @@ func (s *state) loadCert() error {
return err
}
s.setCert(acme.CertificateResource{
s.setCert(&acme.CertificateResource{
Certificate: cert,
PrivateKey: key,
})
@ -258,7 +236,7 @@ func certExists(domain string) bool {
return true
}
func saveCert(cert acme.CertificateResource) error {
func saveCert(cert *acme.CertificateResource) error {
err := os.MkdirAll(directory.Domain(cert.Domain), 0700)
if err != nil {
return err
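
Taken together, the hunks above move this file from lego's old registration flow (separate AgreeToTOS calls, acme.TOSError retries, and a per-domain error map) to the v1 API visible in the added lines. A minimal standalone sketch of that new flow, assuming the lego v1.x `acme` package and a hypothetical `myUser` type (not dispatch's own code):

```go
package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"

	"github.com/xenolf/lego/acme"
)

// myUser is an illustrative acme.User implementation; the field names are assumptions.
type myUser struct {
	email string
	reg   *acme.RegistrationResource
	key   crypto.PrivateKey
}

func (u *myUser) GetEmail() string                            { return u.email }
func (u *myUser) GetRegistration() *acme.RegistrationResource { return u.reg }
func (u *myUser) GetPrivateKey() crypto.PrivateKey            { return u.key }

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	user := &myUser{email: "admin@example.com", key: key}

	client, err := acme.NewClient("https://acme-v02.api.letsencrypt.org/directory", user, acme.RSA2048)
	if err != nil {
		log.Fatal(err)
	}
	client.SetHTTPAddress(":80")

	// Registration now takes the ToS agreement as an argument; the old
	// AgreeToTOS / acme.TOSError retry loop is gone.
	user.reg, err = client.Register(true)
	if err != nil {
		log.Fatal(err)
	}

	// ObtainCertificate now returns (*acme.CertificateResource, error)
	// instead of a value plus a map of per-domain errors.
	cert, err := client.ObtainCertificate([]string{"example.com"}, true, nil, false)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("obtained certificate for %s", cert.Domain)
}
```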

View File

@ -8,10 +8,10 @@ install:
notifications:
email: false
go:
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- "1.7.x"
- "1.8.x"
- "1.9.x"
- "1.10.x"
- tip
# whitelist

View File

@ -7,4 +7,5 @@ Bob Potter (@bpot),
Tyson Maly (@tvmaly),
Will Glynn (@willglynn),
Brent Pedersen (@brentp)
Maciej Biłas (@maciej)
Maciej Biłas (@maciej),
Joe Nall (@joenall)

View File

@ -9,4 +9,6 @@ Will Glynn (@willglynn),
Brent Pedersen (@brentp),
Jason E. Aten (@glycerine),
Vali Malinoiu (@0x4139),
Forud Ghafouri (@fzerorubigd)
Forud Ghafouri (@fzerorubigd),
Joe Nall (@joenall),
(@fredim)

View File

@ -1,4 +1,4 @@
.PHONY: help all test format fmtcheck vet lint qa deps clean nuke rle backrle ser fetch-real-roaring-datasets
.PHONY: help all test format fmtcheck vet lint qa deps clean nuke ser fetch-real-roaring-datasets
@ -97,18 +97,8 @@ nuke:
rm -rf ./target
GOPATH=$(GOPATH) go clean -i ./...
rle:
cp rle.go rle16.go
perl -pi -e 's/32/16/g' rle16.go
cp rle_test.go rle16_test.go
perl -pi -e 's/32/16/g' rle16_test.go
backrle:
cp rle16.go rle.go
perl -pi -e 's/16/32/g' rle.go
perl -pi -e 's/2032/2016/g' rle.go
ser: rle
ser:
go generate
cover:

View File

@ -133,6 +133,7 @@ func main() {
if rb1.Equals(newrb) {
fmt.Println("I wrote the content to a byte stream and read it back.")
}
// you can iterate over bitmaps using ReverseIterator(), Iterator, ManyIterator()
}
```
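
The comment added to the README example above points at the bitmap-level iteration APIs; a short sketch of how those are typically used, assuming the roaring v0.4.x API referenced by this vendored copy:

```go
package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	rb := roaring.BitmapOf(1, 2, 3, 1000)

	// Forward iteration over set values.
	it := rb.Iterator()
	for it.HasNext() {
		fmt.Println(it.Next())
	}

	// Reverse iteration, backed by the getReverseIterator() methods
	// added to the containers in this update.
	rit := rb.ReverseIterator()
	for rit.HasNext() {
		fmt.Println(rit.Next())
	}

	// Batched iteration: NextMany fills a buffer and reports how many
	// values it wrote, returning 0 once the bitmap is exhausted.
	buf := make([]uint32, 2)
	mit := rb.ManyIterator()
	for n := mit.NextMany(buf); n > 0; n = mit.NextMany(buf) {
		fmt.Println(buf[:n])
	}
}
```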

View File

@ -28,6 +28,10 @@ func (ac *arrayContainer) getShortIterator() shortIterable {
return &shortIterator{ac.content, 0}
}
func (ac *arrayContainer) getReverseIterator() shortIterable {
return &reverseIterator{ac.content, len(ac.content) - 1}
}
func (ac *arrayContainer) getManyIterator() manyIterable {
return &manyIterator{ac.content, 0}
}
@ -115,7 +119,6 @@ func (ac *arrayContainer) iremoveRange(firstOfRange, endx int) container {
// flip the values in the range [firstOfRange,endx)
func (ac *arrayContainer) not(firstOfRange, endx int) container {
if firstOfRange >= endx {
//p("arrayContainer.not(): exiting early with ac.clone()")
return ac.clone()
}
return ac.notClose(firstOfRange, endx-1) // remove everything in [firstOfRange,endx-1]
@ -124,18 +127,15 @@ func (ac *arrayContainer) not(firstOfRange, endx int) container {
// flip the values in the range [firstOfRange,lastOfRange]
func (ac *arrayContainer) notClose(firstOfRange, lastOfRange int) container {
if firstOfRange > lastOfRange { // unlike add and remove, not uses an inclusive range [firstOfRange,lastOfRange]
//p("arrayContainer.notClose(): exiting early with ac.clone()")
return ac.clone()
}
// determine the span of array indices to be affected
startIndex := binarySearch(ac.content, uint16(firstOfRange))
//p("startIndex=%v", startIndex)
if startIndex < 0 {
startIndex = -startIndex - 1
}
lastIndex := binarySearch(ac.content, uint16(lastOfRange))
//p("lastIndex=%v", lastIndex)
if lastIndex < 0 {
lastIndex = -lastIndex - 2
}
@ -144,9 +144,7 @@ func (ac *arrayContainer) notClose(firstOfRange, lastOfRange int) container {
newValuesInRange := spanToBeFlipped - currentValuesInRange
cardinalityChange := newValuesInRange - currentValuesInRange
newCardinality := len(ac.content) + cardinalityChange
//p("new card is %v", newCardinality)
if newCardinality > arrayDefaultMaxSize {
//p("new card over arrayDefaultMaxSize, so returning bitmap")
return ac.toBitmapContainer().not(firstOfRange, lastOfRange+1)
}
answer := newArrayContainer()
@ -503,7 +501,6 @@ func (ac *arrayContainer) lazyorArray(value2 *arrayContainer) container {
}
func (ac *arrayContainer) and(a container) container {
//p("ac.and() called")
switch x := a.(type) {
case *arrayContainer:
return ac.andArray(x)
@ -550,7 +547,7 @@ func (ac *arrayContainer) iand(a container) container {
return ac.iandBitmap(x)
case *runContainer16:
if x.isFull() {
return ac.clone()
return ac
}
return x.andArray(ac)
}
@ -722,7 +719,6 @@ func (ac *arrayContainer) inot(firstOfRange, endx int) container {
// flip the values in the range [firstOfRange,lastOfRange]
func (ac *arrayContainer) inotClose(firstOfRange, lastOfRange int) container {
//p("ac.inotClose() starting")
if firstOfRange > lastOfRange { // unlike add and remove, not uses an inclusive range [firstOfRange,lastOfRange]
return ac
}
@ -745,7 +741,6 @@ func (ac *arrayContainer) inotClose(firstOfRange, lastOfRange int) container {
if cardinalityChange > 0 {
if newCardinality > len(ac.content) {
if newCardinality > arrayDefaultMaxSize {
//p("ac.inotClose() converting to bitmap and doing inot there")
bcRet := ac.toBitmapContainer()
bcRet.inot(firstOfRange, lastOfRange+1)
*ac = *bcRet.toArrayContainer()
@ -766,7 +761,6 @@ func (ac *arrayContainer) inotClose(firstOfRange, lastOfRange int) container {
}
}
ac.content = ac.content[:newCardinality]
//p("bottom of ac.inotClose(): returning ac")
return ac
}
@ -958,3 +952,17 @@ func (ac *arrayContainer) toEfficientContainer() container {
func (ac *arrayContainer) containerType() contype {
return arrayContype
}
func (ac *arrayContainer) addOffset(x uint16) []container {
low := &arrayContainer{}
high := &arrayContainer{}
for _, val := range ac.content {
y := uint32(val) + uint32(x)
if highbits(y) > 0 {
high.content = append(high.content, lowbits(y))
} else {
low.content = append(low.content, lowbits(y))
}
}
return []container{low, high}
}
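
The split into `low` and `high` above is driven purely by whether the offset addition overflows 16 bits; a tiny illustration of that arithmetic in plain Go (standing in for the package's unexported `highbits`/`lowbits` helpers):

```go
package main

import "fmt"

func main() {
	// Offsetting the 16-bit value 65530 by x = 10 overflows the low word.
	y := uint32(65530) + uint32(10) // 65540
	high := uint16(y >> 16)         // 1: non-zero, so the value lands in the "high" container
	low := uint16(y & 0xFFFF)       // 4: what lowbits(y) stores there
	fmt.Println(high, low)          // prints: 1 4
}
```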

View File

@ -6,7 +6,7 @@ package roaring
import "github.com/tinylib/msgp/msgp"
// DecodeMsg implements msgp.Decodable
// Deprecated: DecodeMsg implements msgp.Decodable
func (z *arrayContainer) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
@ -49,7 +49,7 @@ func (z *arrayContainer) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
// EncodeMsg implements msgp.Encodable
// Deprecated: EncodeMsg implements msgp.Encodable
func (z *arrayContainer) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "content"
@ -70,7 +70,7 @@ func (z *arrayContainer) EncodeMsg(en *msgp.Writer) (err error) {
return
}
// MarshalMsg implements msgp.Marshaler
// Deprecated: MarshalMsg implements msgp.Marshaler
func (z *arrayContainer) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
@ -83,7 +83,7 @@ func (z *arrayContainer) MarshalMsg(b []byte) (o []byte, err error) {
return
}
// UnmarshalMsg implements msgp.Unmarshaler
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
func (z *arrayContainer) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
@ -127,7 +127,7 @@ func (z *arrayContainer) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *arrayContainer) Msgsize() (s int) {
s = 1 + 8 + msgp.ArrayHeaderSize + (len(z.content) * (msgp.Uint16Size))
return

View File

@ -118,6 +118,36 @@ func (bc *bitmapContainer) getShortIterator() shortIterable {
return newBitmapContainerShortIterator(bc)
}
type reverseBitmapContainerShortIterator struct {
ptr *bitmapContainer
i int
}
func (bcsi *reverseBitmapContainerShortIterator) next() uint16 {
if bcsi.i == -1 {
panic("reverseBitmapContainerShortIterator.next() going beyond what is available")
}
j := bcsi.i
bcsi.i = bcsi.ptr.PrevSetBit(bcsi.i - 1)
return uint16(j)
}
func (bcsi *reverseBitmapContainerShortIterator) hasNext() bool {
return bcsi.i >= 0
}
func newReverseBitmapContainerShortIterator(a *bitmapContainer) *reverseBitmapContainerShortIterator {
if a.cardinality == 0 {
return &reverseBitmapContainerShortIterator{a, -1}
}
return &reverseBitmapContainerShortIterator{a, int(a.maximum())}
}
func (bc *bitmapContainer) getReverseIterator() shortIterable {
return newReverseBitmapContainerShortIterator(bc)
}
type bitmapContainerManyIterator struct {
ptr *bitmapContainer
base int
@ -131,7 +161,7 @@ func (bcmi *bitmapContainerManyIterator) nextMany(hs uint32, buf []uint32) int {
for n < len(buf) {
if bitset == 0 {
base += 1
base++
if base >= len(bcmi.ptr.bitmap) {
bcmi.base = base
bcmi.bitset = bitset
@ -177,16 +207,13 @@ func bitmapContainerSizeInBytes() int {
func bitmapEquals(a, b []uint64) bool {
if len(a) != len(b) {
//p("bitmaps differ on length. len(a)=%v; len(b)=%v", len(a), len(b))
return false
}
for i, v := range a {
if v != b[i] {
//p("bitmaps differ on element i=%v", i)
return false
}
}
//p("bitmapEquals returning true")
return true
}
@ -209,9 +236,7 @@ func (bc *bitmapContainer) fillLeastSignificant16bits(x []uint32, i int, mask ui
func (bc *bitmapContainer) equals(o container) bool {
srb, ok := o.(*bitmapContainer)
if ok {
//p("bitmapContainers.equals: both are bitmapContainers")
if srb.cardinality != bc.cardinality {
//p("bitmapContainers.equals: card differs: %v vs %v", srb.cardinality, bc.cardinality)
return false
}
return bitmapEquals(bc.bitmap, srb.bitmap)
@ -261,12 +286,6 @@ func (bc *bitmapContainer) iremoveReturnMinimized(i uint16) container {
// iremove returns true if i was found.
func (bc *bitmapContainer) iremove(i uint16) bool {
/* branchless code
w := bc.bitmap[i>>6]
mask := uint64(1) << (i % 64)
neww := w &^ mask
bc.cardinality -= int((w ^ neww) >> (i % 64))
bc.bitmap[i>>6] = neww */
if bc.contains(i) {
bc.cardinality--
bc.bitmap[i/64] &^= (uint64(1) << (i % 64))
@ -306,14 +325,10 @@ func (bc *bitmapContainer) iremoveRange(firstOfRange, lastOfRange int) container
// flip all values in range [firstOfRange,endx)
func (bc *bitmapContainer) inot(firstOfRange, endx int) container {
p("bc.inot() called with [%v, %v)", firstOfRange, endx)
if endx-firstOfRange == maxCapacity {
//p("endx-firstOfRange == maxCapacity")
flipBitmapRange(bc.bitmap, firstOfRange, endx)
bc.cardinality = maxCapacity - bc.cardinality
//p("bc.cardinality is now %v", bc.cardinality)
} else if endx-firstOfRange > maxCapacity/2 {
//p("endx-firstOfRange > maxCapacity/2")
flipBitmapRange(bc.bitmap, firstOfRange, endx)
bc.computeCardinality()
} else {
@ -712,11 +727,11 @@ func (bc *bitmapContainer) getCardinalityInRange(start, end uint) int {
endword := (end - 1) / 64
const allones = ^uint64(0)
if firstword == endword {
return int(popcount(bc.bitmap[firstword] & ((allones << (start % 64)) & (allones >> (64 - (end % 64))))))
return int(popcount(bc.bitmap[firstword] & ((allones << (start % 64)) & (allones >> ((64 - end) & 63)))))
}
answer := popcount(bc.bitmap[firstword] & (allones << (start % 64)))
answer += popcntSlice(bc.bitmap[firstword+1 : endword])
answer += popcount(bc.bitmap[endword] & (allones >> (64 - (end % 64))))
answer += popcount(bc.bitmap[endword] & (allones >> ((64 - end) & 63)))
return int(answer)
}
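
The revised mask expression above matters when `end` lands exactly on a 64-bit word boundary: in Go, right-shifting a uint64 by 64 yields 0, so the old form zeroed the mask for the final word. A standalone illustration (plain Go, outside the package):

```go
package main

import "fmt"

func main() {
	const allones = ^uint64(0)
	end := uint(128) // range ends exactly on a word boundary

	oldMask := allones >> (64 - (end % 64)) // shift by 64 -> 0, drops the whole word
	newMask := allones >> ((64 - end) & 63) // shift by 0  -> keeps all 64 bits

	fmt.Printf("%016x %016x\n", oldMask, newMask) // 0000000000000000 ffffffffffffffff
}
```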
@ -789,8 +804,6 @@ func (bc *bitmapContainer) andNotRun16(rc *runContainer16) container {
}
func (bc *bitmapContainer) iandNot(a container) container {
//p("bitmapContainer.iandNot() starting")
switch x := a.(type) {
case *arrayContainer:
return bc.iandNotArray(x)
@ -844,12 +857,15 @@ func (bc *bitmapContainer) andNotBitmap(value2 *bitmapContainer) container {
return ac
}
func (bc *bitmapContainer) iandNotBitmapSurely(value2 *bitmapContainer) *bitmapContainer {
func (bc *bitmapContainer) iandNotBitmapSurely(value2 *bitmapContainer) container {
newCardinality := int(popcntMaskSlice(bc.bitmap, value2.bitmap))
for k := 0; k < len(bc.bitmap); k++ {
bc.bitmap[k] = bc.bitmap[k] &^ value2.bitmap[k]
}
bc.cardinality = newCardinality
if bc.getCardinality() <= arrayDefaultMaxSize {
return bc.toArrayContainer()
}
return bc
}
@ -917,6 +933,32 @@ func (bc *bitmapContainer) NextSetBit(i int) int {
return -1
}
func (bc *bitmapContainer) PrevSetBit(i int) int {
if i < 0 {
return -1
}
x := i / 64
if x >= len(bc.bitmap) {
return -1
}
w := bc.bitmap[x]
b := i % 64
w = w << uint(63-b)
if w != 0 {
return b - countLeadingZeros(w)
}
x--
for ; x >= 0; x-- {
if bc.bitmap[x] != 0 {
return (x * 64) + 63 - countLeadingZeros(bc.bitmap[x])
}
}
return -1
}
// reference the java implementation
// https://github.com/RoaringBitmap/RoaringBitmap/blob/master/src/main/java/org/roaringbitmap/BitmapContainer.java#L875-L892
//
@ -980,3 +1022,35 @@ func newBitmapContainerFromRun(rc *runContainer16) *bitmapContainer {
func (bc *bitmapContainer) containerType() contype {
return bitmapContype
}
func (bc *bitmapContainer) addOffset(x uint16) []container {
low := newBitmapContainer()
high := newBitmapContainer()
b := uint32(x) >> 6
i := uint32(x) % 64
end := uint32(1024) - b
if i == 0 {
copy(low.bitmap[b:], bc.bitmap[:end])
copy(high.bitmap[:b], bc.bitmap[end:])
} else {
low.bitmap[b] = bc.bitmap[0] << i
for k := uint32(1); k < end; k++ {
newval := bc.bitmap[k] << i
if newval == 0 {
newval = bc.bitmap[k-1] >> (64 - i)
}
low.bitmap[b+k] = newval
}
for k := end; k < 1024; k++ {
newval := bc.bitmap[k] << i
if newval == 0 {
newval = bc.bitmap[k-1] >> (64 - i)
}
high.bitmap[k-end] = newval
}
high.bitmap[b] = bc.bitmap[1023] >> (64 - i)
}
low.computeCardinality()
high.computeCardinality()
return []container{low, high}
}

View File

@ -6,7 +6,7 @@ package roaring
import "github.com/tinylib/msgp/msgp"
// DecodeMsg implements msgp.Decodable
// Deprecated: DecodeMsg implements msgp.Decodable
func (z *bitmapContainer) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
@ -54,7 +54,7 @@ func (z *bitmapContainer) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
// EncodeMsg implements msgp.Encodable
// Deprecated: EncodeMsg implements msgp.Encodable
func (z *bitmapContainer) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "cardinality"
@ -84,7 +84,7 @@ func (z *bitmapContainer) EncodeMsg(en *msgp.Writer) (err error) {
return
}
// MarshalMsg implements msgp.Marshaler
// Deprecated: MarshalMsg implements msgp.Marshaler
func (z *bitmapContainer) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
@ -100,7 +100,7 @@ func (z *bitmapContainer) MarshalMsg(b []byte) (o []byte, err error) {
return
}
// UnmarshalMsg implements msgp.Unmarshaler
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
func (z *bitmapContainer) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
@ -149,13 +149,13 @@ func (z *bitmapContainer) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *bitmapContainer) Msgsize() (s int) {
s = 1 + 12 + msgp.IntSize + 7 + msgp.ArrayHeaderSize + (len(z.bitmap) * (msgp.Uint64Size))
return
}
// DecodeMsg implements msgp.Decodable
// Deprecated: DecodeMsg implements msgp.Decodable
func (z *bitmapContainerShortIterator) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
@ -239,7 +239,7 @@ func (z *bitmapContainerShortIterator) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
// EncodeMsg implements msgp.Encodable
// Deprecated: EncodeMsg implements msgp.Encodable
func (z *bitmapContainerShortIterator) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "ptr"
@ -291,7 +291,7 @@ func (z *bitmapContainerShortIterator) EncodeMsg(en *msgp.Writer) (err error) {
return
}
// MarshalMsg implements msgp.Marshaler
// Deprecated: MarshalMsg implements msgp.Marshaler
func (z *bitmapContainerShortIterator) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
@ -317,7 +317,7 @@ func (z *bitmapContainerShortIterator) MarshalMsg(b []byte) (o []byte, err error
return
}
// UnmarshalMsg implements msgp.Unmarshaler
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
func (z *bitmapContainerShortIterator) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
@ -402,7 +402,7 @@ func (z *bitmapContainerShortIterator) UnmarshalMsg(bts []byte) (o []byte, err e
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *bitmapContainerShortIterator) Msgsize() (s int) {
s = 1 + 4
if z.ptr == nil {

vendor/github.com/RoaringBitmap/roaring/clz.go (new file, 11 lines, generated, vendored)

@ -0,0 +1,11 @@
// +build go1.9
// "go1.9", from Go version 1.9 onward
// See https://golang.org/pkg/go/build/#hdr-Build_Constraints
package roaring
import "math/bits"
func countLeadingZeros(x uint64) int {
return bits.LeadingZeros64(x)
}

vendor/github.com/RoaringBitmap/roaring/clz_compat.go (new file, 36 lines, generated, vendored)

@ -0,0 +1,36 @@
// +build !go1.9
package roaring
// LeadingZeroBits returns the number of consecutive most significant zero
// bits of x.
func countLeadingZeros(i uint64) int {
if i == 0 {
return 64
}
n := 1
x := uint32(i >> 32)
if x == 0 {
n += 32
x = uint32(i)
}
if (x >> 16) == 0 {
n += 16
x <<= 16
}
if (x >> 24) == 0 {
n += 8
x <<= 8
}
if x>>28 == 0 {
n += 4
x <<= 4
}
if x>>30 == 0 {
n += 2
x <<= 2
}
n -= int(x >> 31)
return n
}
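
As a quick, illustrative sanity check (not part of the package), the go1.9 `math/bits` path and this pre-1.9 fallback should agree on values like these:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	for _, x := range []uint64{0, 1, 1 << 31, 1 << 63} {
		// Expected leading-zero counts: 64, 63, 32, 0.
		fmt.Println(x, bits.LeadingZeros64(x))
	}
}
```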

View File

@ -143,8 +143,8 @@ func toBitmapContainer(c container) container {
func appenderRoutine(bitmapChan chan<- *Bitmap, resultChan <-chan keyedContainer, expectedKeysChan <-chan int) {
expectedKeys := -1
appendedKeys := 0
keys := make([]uint16, 0)
containers := make([]container, 0)
var keys []uint16
var containers []container
for appendedKeys != expectedKeys {
select {
case item := <-resultChan:
@ -337,7 +337,7 @@ func ParAnd(parallelism int, bitmaps ...*Bitmap) *Bitmap {
// (if it is set to 0, a default number of workers is chosen)
func ParOr(parallelism int, bitmaps ...*Bitmap) *Bitmap {
var lKey uint16 = MaxUint16
var hKey uint16 = 0
var hKey uint16
bitmapsFiltered := bitmaps[:0]
for _, b := range bitmaps {

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,163 +0,0 @@
package roaring
import (
"fmt"
)
// common to rle32.go and rle16.go
// rleVerbose controls whether p() prints show up.
// The testing package sets this based on
// testing.Verbose().
var rleVerbose bool
// p is a shorthand for fmt.Printf with beginning and
// trailing newlines. p() makes it easy
// to add diagnostic print statements.
func p(format string, args ...interface{}) {
if rleVerbose {
fmt.Printf("\n"+format+"\n", args...)
}
}
// MaxUint32 is the largest uint32 value.
const MaxUint32 = 4294967295
// MaxUint16 is the largest 16 bit unsigned int.
// This is the largest value an interval16 can store.
const MaxUint16 = 65535
// searchOptions allows us to accelerate runContainer32.search with
// prior knowledge of (mostly lower) bounds. This is used by Union
// and Intersect.
type searchOptions struct {
// start here instead of at 0
startIndex int64
// upper bound instead of len(rc.iv);
// endxIndex == 0 means ignore the bound and use
// endxIndex == n ==len(rc.iv) which is also
// naturally the default for search()
// when opt = nil.
endxIndex int64
}
// And finds the intersection of rc and b.
func (rc *runContainer32) And(b *Bitmap) *Bitmap {
out := NewBitmap()
for _, p := range rc.iv {
for i := p.start; i <= p.last; i++ {
if b.Contains(i) {
out.Add(i)
}
}
}
return out
}
// Xor returns the exclusive-or of rc and b.
func (rc *runContainer32) Xor(b *Bitmap) *Bitmap {
out := b.Clone()
for _, p := range rc.iv {
for v := p.start; v <= p.last; v++ {
if out.Contains(v) {
out.RemoveRange(uint64(v), uint64(v+1))
} else {
out.Add(v)
}
}
}
return out
}
// Or returns the union of rc and b.
func (rc *runContainer32) Or(b *Bitmap) *Bitmap {
out := b.Clone()
for _, p := range rc.iv {
for v := p.start; v <= p.last; v++ {
out.Add(v)
}
}
return out
}
// trial is used in the randomized testing of runContainers
type trial struct {
n int
percentFill float64
ntrial int
// only in the union test
// only subtract test
percentDelete float64
// only in 067 randomized operations
// we do this + 1 passes
numRandomOpsPass int
// allow sampling range control
// only recent tests respect this.
srang *interval16
}
// And finds the intersection of rc and b.
func (rc *runContainer16) And(b *Bitmap) *Bitmap {
out := NewBitmap()
for _, p := range rc.iv {
plast := p.last()
for i := p.start; i <= plast; i++ {
if b.Contains(uint32(i)) {
out.Add(uint32(i))
}
}
}
return out
}
// Xor returns the exclusive-or of rc and b.
func (rc *runContainer16) Xor(b *Bitmap) *Bitmap {
out := b.Clone()
for _, p := range rc.iv {
plast := p.last()
for v := p.start; v <= plast; v++ {
w := uint32(v)
if out.Contains(w) {
out.RemoveRange(uint64(w), uint64(w+1))
} else {
out.Add(w)
}
}
}
return out
}
// Or returns the union of rc and b.
func (rc *runContainer16) Or(b *Bitmap) *Bitmap {
out := b.Clone()
for _, p := range rc.iv {
plast := p.last()
for v := p.start; v <= plast; v++ {
out.Add(uint32(v))
}
}
return out
}
//func (rc *runContainer32) and(container) container {
// panic("TODO. not yet implemented")
//}
// serializedSizeInBytes returns the number of bytes of memory
// required by this runContainer16. This is for the
// Roaring format, as specified https://github.com/RoaringBitmap/RoaringFormatSpec/
func (rc *runContainer16) serializedSizeInBytes() int {
// number of runs in one uint16, then each run
// needs two more uint16
return 2 + len(rc.iv)*4
}
// serializedSizeInBytes returns the number of bytes of memory
// required by this runContainer32.
func (rc *runContainer32) serializedSizeInBytes() int {
return 4 + len(rc.iv)*8
}

View File

@ -1,695 +0,0 @@
package roaring
///////////////////////////////////////////////////
//
// container interface methods for runContainer16
//
///////////////////////////////////////////////////
import (
"fmt"
)
// compile time verify we meet interface requirements
var _ container = &runContainer16{}
func (rc *runContainer16) clone() container {
return newRunContainer16CopyIv(rc.iv)
}
func (rc *runContainer16) minimum() uint16 {
return rc.iv[0].start // assume not empty
}
func (rc *runContainer16) maximum() uint16 {
return rc.iv[len(rc.iv)-1].last() // assume not empty
}
func (rc *runContainer16) isFull() bool {
return (len(rc.iv) == 1) && ((rc.iv[0].start == 0) && (rc.iv[0].last() == MaxUint16))
}
func (rc *runContainer16) and(a container) container {
if rc.isFull() {
return a.clone()
}
switch c := a.(type) {
case *runContainer16:
return rc.intersect(c)
case *arrayContainer:
return rc.andArray(c)
case *bitmapContainer:
return rc.andBitmapContainer(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) andCardinality(a container) int {
switch c := a.(type) {
case *runContainer16:
return int(rc.intersectCardinality(c))
case *arrayContainer:
return rc.andArrayCardinality(c)
case *bitmapContainer:
return rc.andBitmapContainerCardinality(c)
}
panic("unsupported container type")
}
// andBitmapContainer finds the intersection of rc and b.
func (rc *runContainer16) andBitmapContainer(bc *bitmapContainer) container {
bc2 := newBitmapContainerFromRun(rc)
return bc2.andBitmap(bc)
}
func (rc *runContainer16) andArrayCardinality(ac *arrayContainer) int {
pos := 0
answer := 0
maxpos := ac.getCardinality()
if maxpos == 0 {
return 0 // won't happen in actual code
}
v := ac.content[pos]
mainloop:
for _, p := range rc.iv {
for v < p.start {
pos++
if pos == maxpos {
break mainloop
}
v = ac.content[pos]
}
for v <= p.last() {
answer++
pos++
if pos == maxpos {
break mainloop
}
v = ac.content[pos]
}
}
return answer
}
func (rc *runContainer16) iand(a container) container {
if rc.isFull() {
return a.clone()
}
switch c := a.(type) {
case *runContainer16:
return rc.inplaceIntersect(c)
case *arrayContainer:
return rc.andArray(c)
case *bitmapContainer:
return rc.iandBitmapContainer(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) inplaceIntersect(rc2 *runContainer16) container {
// TODO: optimize by doing less allocation, possibly?
// sect will be new
sect := rc.intersect(rc2)
*rc = *sect
return rc
}
func (rc *runContainer16) iandBitmapContainer(bc *bitmapContainer) container {
isect := rc.andBitmapContainer(bc)
*rc = *newRunContainer16FromContainer(isect)
return rc
}
func (rc *runContainer16) andArray(ac *arrayContainer) container {
if len(rc.iv) == 0 {
return newArrayContainer()
}
acCardinality := ac.getCardinality()
c := newArrayContainerCapacity(acCardinality)
for rlePos, arrayPos := 0, 0; arrayPos < acCardinality; {
iv := rc.iv[rlePos]
arrayVal := ac.content[arrayPos]
for iv.last() < arrayVal {
rlePos++
if rlePos == len(rc.iv) {
return c
}
iv = rc.iv[rlePos]
}
if iv.start > arrayVal {
arrayPos = advanceUntil(ac.content, arrayPos, len(ac.content), iv.start)
} else {
c.content = append(c.content, arrayVal)
arrayPos++
}
}
return c
}
func (rc *runContainer16) andNot(a container) container {
switch c := a.(type) {
case *arrayContainer:
return rc.andNotArray(c)
case *bitmapContainer:
return rc.andNotBitmap(c)
case *runContainer16:
return rc.andNotRunContainer16(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) fillLeastSignificant16bits(x []uint32, i int, mask uint32) {
k := 0
var val int64
for _, p := range rc.iv {
n := p.runlen()
for j := int64(0); j < n; j++ {
val = int64(p.start) + j
x[k+i] = uint32(val) | mask
k++
}
}
}
func (rc *runContainer16) getShortIterator() shortIterable {
return rc.newRunIterator16()
}
func (rc *runContainer16) getManyIterator() manyIterable {
return rc.newManyRunIterator16()
}
// add the values in the range [firstOfRange, endx). endx
// is still abe to express 2^16 because it is an int not an uint16.
func (rc *runContainer16) iaddRange(firstOfRange, endx int) container {
if firstOfRange >= endx {
panic(fmt.Sprintf("invalid %v = endx >= firstOfRange", endx))
}
addme := newRunContainer16TakeOwnership([]interval16{
{
start: uint16(firstOfRange),
length: uint16(endx - 1 - firstOfRange),
},
})
*rc = *rc.union(addme)
return rc
}
// remove the values in the range [firstOfRange,endx)
func (rc *runContainer16) iremoveRange(firstOfRange, endx int) container {
if firstOfRange >= endx {
panic(fmt.Sprintf("request to iremove empty set [%v, %v),"+
" nothing to do.", firstOfRange, endx))
//return rc
}
x := newInterval16Range(uint16(firstOfRange), uint16(endx-1))
rc.isubtract(x)
return rc
}
// not flip the values in the range [firstOfRange,endx)
func (rc *runContainer16) not(firstOfRange, endx int) container {
if firstOfRange >= endx {
panic(fmt.Sprintf("invalid %v = endx >= firstOfRange = %v", endx, firstOfRange))
}
return rc.Not(firstOfRange, endx)
}
// Not flips the values in the range [firstOfRange,endx).
// This is not inplace. Only the returned value has the flipped bits.
//
// Currently implemented as (!A intersect B) union (A minus B),
// where A is rc, and B is the supplied [firstOfRange, endx) interval.
//
// TODO(time optimization): convert this to a single pass
// algorithm by copying AndNotRunContainer16() and modifying it.
// Current routine is correct but
// makes 2 more passes through the arrays than should be
// strictly necessary. Measure both ways though--this may not matter.
//
func (rc *runContainer16) Not(firstOfRange, endx int) *runContainer16 {
if firstOfRange >= endx {
panic(fmt.Sprintf("invalid %v = endx >= firstOfRange == %v", endx, firstOfRange))
}
if firstOfRange >= endx {
return rc.Clone()
}
a := rc
// algo:
// (!A intersect B) union (A minus B)
nota := a.invert()
bs := []interval16{newInterval16Range(uint16(firstOfRange), uint16(endx-1))}
b := newRunContainer16TakeOwnership(bs)
notAintersectB := nota.intersect(b)
aMinusB := a.AndNotRunContainer16(b)
rc2 := notAintersectB.union(aMinusB)
return rc2
}
// equals is now logical equals; it does not require the
// same underlying container type.
func (rc *runContainer16) equals(o container) bool {
srb, ok := o.(*runContainer16)
if !ok {
// maybe value instead of pointer
val, valok := o.(*runContainer16)
if valok {
srb = val
ok = true
}
}
if ok {
// Check if the containers are the same object.
if rc == srb {
return true
}
if len(srb.iv) != len(rc.iv) {
return false
}
for i, v := range rc.iv {
if v != srb.iv[i] {
return false
}
}
return true
}
// use generic comparison
if o.getCardinality() != rc.getCardinality() {
return false
}
rit := rc.getShortIterator()
bit := o.getShortIterator()
//k := 0
for rit.hasNext() {
if bit.next() != rit.next() {
return false
}
//k++
}
return true
}
func (rc *runContainer16) iaddReturnMinimized(x uint16) container {
rc.Add(x)
return rc
}
func (rc *runContainer16) iadd(x uint16) (wasNew bool) {
return rc.Add(x)
}
func (rc *runContainer16) iremoveReturnMinimized(x uint16) container {
rc.removeKey(x)
return rc
}
func (rc *runContainer16) iremove(x uint16) bool {
return rc.removeKey(x)
}
func (rc *runContainer16) or(a container) container {
if rc.isFull() {
return rc.clone()
}
switch c := a.(type) {
case *runContainer16:
return rc.union(c)
case *arrayContainer:
return rc.orArray(c)
case *bitmapContainer:
return rc.orBitmapContainer(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) orCardinality(a container) int {
switch c := a.(type) {
case *runContainer16:
return int(rc.unionCardinality(c))
case *arrayContainer:
return rc.orArrayCardinality(c)
case *bitmapContainer:
return rc.orBitmapContainerCardinality(c)
}
panic("unsupported container type")
}
// orBitmapContainer finds the union of rc and bc.
func (rc *runContainer16) orBitmapContainer(bc *bitmapContainer) container {
bc2 := newBitmapContainerFromRun(rc)
return bc2.iorBitmap(bc)
}
func (rc *runContainer16) andBitmapContainerCardinality(bc *bitmapContainer) int {
answer := 0
for i := range rc.iv {
answer += bc.getCardinalityInRange(uint(rc.iv[i].start), uint(rc.iv[i].last())+1)
}
//bc.computeCardinality()
return answer
}
func (rc *runContainer16) orBitmapContainerCardinality(bc *bitmapContainer) int {
return rc.getCardinality() + bc.getCardinality() - rc.andBitmapContainerCardinality(bc)
}
// orArray finds the union of rc and ac.
func (rc *runContainer16) orArray(ac *arrayContainer) container {
bc1 := newBitmapContainerFromRun(rc)
bc2 := ac.toBitmapContainer()
return bc1.orBitmap(bc2)
}
// orArray finds the union of rc and ac.
func (rc *runContainer16) orArrayCardinality(ac *arrayContainer) int {
return ac.getCardinality() + rc.getCardinality() - rc.andArrayCardinality(ac)
}
func (rc *runContainer16) ior(a container) container {
if rc.isFull() {
return rc
}
switch c := a.(type) {
case *runContainer16:
return rc.inplaceUnion(c)
case *arrayContainer:
return rc.iorArray(c)
case *bitmapContainer:
return rc.iorBitmapContainer(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) inplaceUnion(rc2 *runContainer16) container {
p("rc.inplaceUnion with len(rc2.iv)=%v", len(rc2.iv))
for _, p := range rc2.iv {
last := int64(p.last())
for i := int64(p.start); i <= last; i++ {
rc.Add(uint16(i))
}
}
return rc
}
func (rc *runContainer16) iorBitmapContainer(bc *bitmapContainer) container {
it := bc.getShortIterator()
for it.hasNext() {
rc.Add(it.next())
}
return rc
}
func (rc *runContainer16) iorArray(ac *arrayContainer) container {
it := ac.getShortIterator()
for it.hasNext() {
rc.Add(it.next())
}
return rc
}
// lazyIOR is described (not yet implemented) in
// this nice note from @lemire on
// https://github.com/RoaringBitmap/roaring/pull/70#issuecomment-263613737
//
// Description of lazyOR and lazyIOR from @lemire:
//
// Lazy functions are optional and can be simply
// wrapper around non-lazy functions.
//
// The idea of "laziness" is as follows. It is
// inspired by the concept of lazy evaluation
// you might be familiar with (functional programming
// and all that). So a roaring bitmap is
// such that all its containers are, in some
// sense, chosen to use as little memory as
// possible. This is nice. Also, all bitsets
// are "cardinality aware" so that you can do
// fast rank/select queries, or query the
// cardinality of the whole bitmap... very fast,
// without latency.
//
// However, imagine that you are aggregating 100
// bitmaps together. So you OR the first two, then OR
// that with the third one and so forth. Clearly,
// intermediate bitmaps don't need to be as
// compressed as possible, right? They can be
// in a "dirty state". You only need the end
// result to be in a nice state... which you
// can achieve by calling repairAfterLazy at the end.
//
// The Java/C code does something special for
// the in-place lazy OR runs. The idea is that
// instead of taking two run containers and
// generating a new one, we actually try to
// do the computation in-place through a
// technique invented by @gssiyankai (pinging him!).
// What you do is you check whether the host
// run container has lots of extra capacity.
// If it does, you move its data at the end of
// the backing array, and then you write
// the answer at the beginning. What this
// trick does is minimize memory allocations.
//
func (rc *runContainer16) lazyIOR(a container) container {
// not lazy at the moment
// TODO: make it lazy
return rc.ior(a)
/*
switch c := a.(type) {
case *arrayContainer:
return rc.lazyIorArray(c)
case *bitmapContainer:
return rc.lazyIorBitmap(c)
case *runContainer16:
return rc.lazyIorRun16(c)
}
panic("unsupported container type")
*/
}
// lazyOR is described above in lazyIOR.
func (rc *runContainer16) lazyOR(a container) container {
// not lazy at the moment
// TODO: make it lazy
return rc.or(a)
/*
switch c := a.(type) {
case *arrayContainer:
return rc.lazyOrArray(c)
case *bitmapContainer:
return rc.lazyOrBitmap(c)
case *runContainer16:
return rc.lazyOrRunContainer16(c)
}
panic("unsupported container type")
*/
}
func (rc *runContainer16) intersects(a container) bool {
// TODO: optimize by doing inplace/less allocation, possibly?
isect := rc.and(a)
return isect.getCardinality() > 0
}
func (rc *runContainer16) xor(a container) container {
switch c := a.(type) {
case *arrayContainer:
return rc.xorArray(c)
case *bitmapContainer:
return rc.xorBitmap(c)
case *runContainer16:
return rc.xorRunContainer16(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) iandNot(a container) container {
switch c := a.(type) {
case *arrayContainer:
return rc.iandNotArray(c)
case *bitmapContainer:
return rc.iandNotBitmap(c)
case *runContainer16:
return rc.iandNotRunContainer16(c)
}
panic("unsupported container type")
}
// flip the values in the range [firstOfRange,endx)
func (rc *runContainer16) inot(firstOfRange, endx int) container {
if firstOfRange >= endx {
panic(fmt.Sprintf("invalid %v = endx >= firstOfRange = %v", endx, firstOfRange))
}
// TODO: minimize copies, do it all inplace; not() makes a copy.
rc = rc.Not(firstOfRange, endx)
return rc
}
func (rc *runContainer16) getCardinality() int {
return int(rc.cardinality())
}
func (rc *runContainer16) rank(x uint16) int {
n := int64(len(rc.iv))
xx := int64(x)
w, already, _ := rc.search(xx, nil)
if w < 0 {
return 0
}
if !already && w == n-1 {
return rc.getCardinality()
}
var rnk int64
if !already {
for i := int64(0); i <= w; i++ {
rnk += rc.iv[i].runlen()
}
return int(rnk)
}
for i := int64(0); i < w; i++ {
rnk += rc.iv[i].runlen()
}
rnk += int64(x-rc.iv[w].start) + 1
return int(rnk)
}
func (rc *runContainer16) selectInt(x uint16) int {
return rc.selectInt16(x)
}
func (rc *runContainer16) andNotRunContainer16(b *runContainer16) container {
return rc.AndNotRunContainer16(b)
}
func (rc *runContainer16) andNotArray(ac *arrayContainer) container {
rcb := rc.toBitmapContainer()
acb := ac.toBitmapContainer()
return rcb.andNotBitmap(acb)
}
func (rc *runContainer16) andNotBitmap(bc *bitmapContainer) container {
rcb := rc.toBitmapContainer()
return rcb.andNotBitmap(bc)
}
func (rc *runContainer16) toBitmapContainer() *bitmapContainer {
p("run16 toBitmap starting; rc has %v ranges", len(rc.iv))
bc := newBitmapContainer()
for i := range rc.iv {
bc.iaddRange(int(rc.iv[i].start), int(rc.iv[i].last())+1)
}
bc.computeCardinality()
return bc
}
func (rc *runContainer16) iandNotRunContainer16(x2 *runContainer16) container {
rcb := rc.toBitmapContainer()
x2b := x2.toBitmapContainer()
rcb.iandNotBitmapSurely(x2b)
// TODO: check size and optimize the return value
// TODO: is inplace modification really required? If not, elide the copy.
rc2 := newRunContainer16FromBitmapContainer(rcb)
*rc = *rc2
return rc
}
func (rc *runContainer16) iandNotArray(ac *arrayContainer) container {
rcb := rc.toBitmapContainer()
acb := ac.toBitmapContainer()
rcb.iandNotBitmapSurely(acb)
// TODO: check size and optimize the return value
// TODO: is inplace modification really required? If not, elide the copy.
rc2 := newRunContainer16FromBitmapContainer(rcb)
*rc = *rc2
return rc
}
func (rc *runContainer16) iandNotBitmap(bc *bitmapContainer) container {
rcb := rc.toBitmapContainer()
rcb.iandNotBitmapSurely(bc)
// TODO: check size and optimize the return value
// TODO: is inplace modification really required? If not, elide the copy.
rc2 := newRunContainer16FromBitmapContainer(rcb)
*rc = *rc2
return rc
}
func (rc *runContainer16) xorRunContainer16(x2 *runContainer16) container {
rcb := rc.toBitmapContainer()
x2b := x2.toBitmapContainer()
return rcb.xorBitmap(x2b)
}
func (rc *runContainer16) xorArray(ac *arrayContainer) container {
rcb := rc.toBitmapContainer()
acb := ac.toBitmapContainer()
return rcb.xorBitmap(acb)
}
func (rc *runContainer16) xorBitmap(bc *bitmapContainer) container {
rcb := rc.toBitmapContainer()
return rcb.xorBitmap(bc)
}
// convert to bitmap or array *if needed*
func (rc *runContainer16) toEfficientContainer() container {
// runContainer16SerializedSizeInBytes(numRuns)
sizeAsRunContainer := rc.getSizeInBytes()
sizeAsBitmapContainer := bitmapContainerSizeInBytes()
card := int(rc.cardinality())
sizeAsArrayContainer := arrayContainerSizeInBytes(card)
if sizeAsRunContainer <= minOfInt(sizeAsBitmapContainer, sizeAsArrayContainer) {
return rc
}
if card <= arrayDefaultMaxSize {
return rc.toArrayContainer()
}
bc := newBitmapContainerFromRun(rc)
return bc
}
func (rc *runContainer16) toArrayContainer() *arrayContainer {
ac := newArrayContainer()
for i := range rc.iv {
ac.iaddRange(int(rc.iv[i].start), int(rc.iv[i].last())+1)
}
return ac
}
func newRunContainer16FromContainer(c container) *runContainer16 {
switch x := c.(type) {
case *runContainer16:
return x.Clone()
case *arrayContainer:
return newRunContainer16FromArray(x)
case *bitmapContainer:
return newRunContainer16FromBitmapContainer(x)
}
panic("unsupported container type")
}

View File

@ -52,7 +52,7 @@ func (rb *Bitmap) ToBytes() ([]byte, error) {
return rb.highlowcontainer.toBytes()
}
// WriteToMsgpack writes a msgpack2/snappy-streaming compressed serialized
// Deprecated: WriteToMsgpack writes a msgpack2/snappy-streaming compressed serialized
// version of this bitmap to stream. The format is not
// compatible with the WriteTo() format, and is
// experimental: it may produce smaller on disk
@ -101,7 +101,7 @@ func (rb *Bitmap) HasRunCompression() bool {
return rb.highlowcontainer.hasRunCompression()
}
// ReadFromMsgpack reads a msgpack2/snappy-streaming serialized
// Deprecated: ReadFromMsgpack reads a msgpack2/snappy-streaming serialized
// version of this bitmap from stream. The format is
// expected is that written by the WriteToMsgpack()
// call; see additional notes there.
@ -252,6 +252,45 @@ func newIntIterator(a *Bitmap) *intIterator {
return p
}
type intReverseIterator struct {
pos int
hs uint32
iter shortIterable
highlowcontainer *roaringArray
}
// HasNext returns true if there are more integers to iterate over
func (ii *intReverseIterator) HasNext() bool {
return ii.pos >= 0
}
func (ii *intReverseIterator) init() {
if ii.pos >= 0 {
ii.iter = ii.highlowcontainer.getContainerAtIndex(ii.pos).getReverseIterator()
ii.hs = uint32(ii.highlowcontainer.getKeyAtIndex(ii.pos)) << 16
} else {
ii.iter = nil
}
}
// Next returns the next integer
func (ii *intReverseIterator) Next() uint32 {
x := uint32(ii.iter.next()) | ii.hs
if !ii.iter.hasNext() {
ii.pos = ii.pos - 1
ii.init()
}
return x
}
func newIntReverseIterator(a *Bitmap) *intReverseIterator {
p := new(intReverseIterator)
p.highlowcontainer = &a.highlowcontainer
p.pos = a.highlowcontainer.size() - 1
p.init()
return p
}
// ManyIntIterable allows you to iterate over the values in a Bitmap
type ManyIntIterable interface {
// pass in a buffer to fill up with values, returns how many values were returned
@ -330,7 +369,12 @@ func (rb *Bitmap) Iterator() IntIterable {
return newIntIterator(rb)
}
// Iterator creates a new ManyIntIterable to iterate over the integers contained in the bitmap, in sorted order
// ReverseIterator creates a new IntIterable to iterate over the integers contained in the bitmap, in reverse sorted order
func (rb *Bitmap) ReverseIterator() IntIterable {
return newIntReverseIterator(rb)
}
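// A minimal usage sketch (not part of upstream): collecting a bitmap's values
// in descending order with the ReverseIterator defined above. BitmapOf is
// assumed to be the package's existing variadic constructor.
func reverseIterationSketch() []uint32 {
	rb := BitmapOf(1, 2, 1000, 100000)
	var out []uint32
	it := rb.ReverseIterator()
	for it.HasNext() {
		out = append(out, it.Next()) // 100000, 1000, 2, 1
	}
	return out
}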
// ManyIterator creates a new ManyIntIterable to iterate over the integers contained in the bitmap, in sorted order
func (rb *Bitmap) ManyIterator() ManyIntIterable {
return newManyIntIterator(rb)
}
@ -374,6 +418,45 @@ func (rb *Bitmap) Equals(o interface{}) bool {
return false
}
func AddOffset(x *Bitmap, offset uint32) (answer *Bitmap) {
containerOffset := highbits(offset)
inOffset := lowbits(offset)
if inOffset == 0 {
answer = x.Clone()
for pos := 0; pos < answer.highlowcontainer.size(); pos++ {
key := answer.highlowcontainer.getKeyAtIndex(pos)
key += containerOffset
answer.highlowcontainer.keys[pos] = key
}
} else {
answer = New()
for pos := 0; pos < x.highlowcontainer.size(); pos++ {
key := x.highlowcontainer.getKeyAtIndex(pos)
key += containerOffset
c := x.highlowcontainer.getContainerAtIndex(pos)
offsetted := c.addOffset(inOffset)
if offsetted[0].getCardinality() > 0 {
curSize := answer.highlowcontainer.size()
lastkey := uint16(0)
if curSize > 0 {
lastkey = answer.highlowcontainer.getKeyAtIndex(curSize - 1)
}
if curSize > 0 && lastkey == key {
prev := answer.highlowcontainer.getContainerAtIndex(curSize - 1)
orResult := prev.ior(offsetted[0])
answer.highlowcontainer.setContainerAtIndex(curSize-1, orResult)
} else {
answer.highlowcontainer.appendContainer(key, offsetted[0], false)
}
}
if offsetted[1].getCardinality() > 0 {
answer.highlowcontainer.appendContainer(key+1, offsetted[1], false)
}
}
}
return answer
}
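// A minimal usage sketch (not part of upstream): AddOffset above returns a new
// bitmap whose members are those of x shifted up by offset, leaving x intact.
// BitmapOf and Contains are assumed to be the package's existing helpers.
func addOffsetUsageSketch() bool {
	shifted := AddOffset(BitmapOf(1, 2, 3), 100000)
	return shifted.Contains(100001) && shifted.Contains(100003) && !shifted.Contains(1)
}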
// Add the integer x to the bitmap
func (rb *Bitmap) Add(x uint32) {
hb := highbits(x)
@ -794,11 +877,6 @@ main:
}
}
/*func (rb *Bitmap) Or(x2 *Bitmap) {
results := Or(rb, x2) // Todo: could be computed in-place for reduced memory usage
rb.highlowcontainer = results.highlowcontainer
}*/
// AndNot computes the difference between two bitmaps and stores the result in the current bitmap
func (rb *Bitmap) AndNot(x2 *Bitmap) {
pos1 := 0
@ -1086,10 +1164,10 @@ func (rb *Bitmap) Flip(rangeStart, rangeEnd uint64) {
return
}
hbStart := highbits(uint32(rangeStart))
lbStart := lowbits(uint32(rangeStart))
hbLast := highbits(uint32(rangeEnd - 1))
lbLast := lowbits(uint32(rangeEnd - 1))
hbStart := uint32(highbits(uint32(rangeStart)))
lbStart := uint32(lowbits(uint32(rangeStart)))
hbLast := uint32(highbits(uint32(rangeEnd - 1)))
lbLast := uint32(lowbits(uint32(rangeEnd - 1)))
var max uint32 = maxLowBit
for hb := hbStart; hb <= hbLast; hb++ {
@ -1102,7 +1180,7 @@ func (rb *Bitmap) Flip(rangeStart, rangeEnd uint64) {
containerLast = uint32(lbLast)
}
i := rb.highlowcontainer.getIndex(hb)
i := rb.highlowcontainer.getIndex(uint16(hb))
if i >= 0 {
c := rb.highlowcontainer.getWritableContainerAtIndex(i).inot(int(containerStart), int(containerLast)+1)
@ -1113,7 +1191,7 @@ func (rb *Bitmap) Flip(rangeStart, rangeEnd uint64) {
}
} else { // *think* the range of ones must never be
// empty.
rb.highlowcontainer.insertNewKeyValueAt(-i-1, hb, rangeOfOnes(int(containerStart), int(containerLast)))
rb.highlowcontainer.insertNewKeyValueAt(-i-1, uint16(hb), rangeOfOnes(int(containerStart), int(containerLast)))
}
}
}
@ -1139,24 +1217,24 @@ func (rb *Bitmap) AddRange(rangeStart, rangeEnd uint64) {
lbLast := uint32(lowbits(uint32(rangeEnd - 1)))
var max uint32 = maxLowBit
for hb := uint16(hbStart); hb <= uint16(hbLast); hb++ {
for hb := hbStart; hb <= hbLast; hb++ {
containerStart := uint32(0)
if hb == uint16(hbStart) {
if hb == hbStart {
containerStart = lbStart
}
containerLast := max
if hb == uint16(hbLast) {
if hb == hbLast {
containerLast = lbLast
}
i := rb.highlowcontainer.getIndex(hb)
i := rb.highlowcontainer.getIndex(uint16(hb))
if i >= 0 {
c := rb.highlowcontainer.getWritableContainerAtIndex(i).iaddRange(int(containerStart), int(containerLast)+1)
rb.highlowcontainer.setContainerAtIndex(i, c)
} else { // *think* the range of ones must never be
// empty.
rb.highlowcontainer.insertNewKeyValueAt(-i-1, hb, rangeOfOnes(int(containerStart), int(containerLast)))
rb.highlowcontainer.insertNewKeyValueAt(-i-1, uint16(hb), rangeOfOnes(int(containerStart), int(containerLast)))
}
}
}
@ -1243,13 +1321,13 @@ func Flip(bm *Bitmap, rangeStart, rangeEnd uint64) *Bitmap {
}
answer := NewBitmap()
hbStart := highbits(uint32(rangeStart))
lbStart := lowbits(uint32(rangeStart))
hbLast := highbits(uint32(rangeEnd - 1))
lbLast := lowbits(uint32(rangeEnd - 1))
hbStart := uint32(highbits(uint32(rangeStart)))
lbStart := uint32(lowbits(uint32(rangeStart)))
hbLast := uint32(highbits(uint32(rangeEnd - 1)))
lbLast := uint32(lowbits(uint32(rangeEnd - 1)))
// copy the containers before the active area
answer.highlowcontainer.appendCopiesUntil(bm.highlowcontainer, hbStart)
answer.highlowcontainer.appendCopiesUntil(bm.highlowcontainer, uint16(hbStart))
var max uint32 = maxLowBit
for hb := hbStart; hb <= hbLast; hb++ {
@ -1262,23 +1340,23 @@ func Flip(bm *Bitmap, rangeStart, rangeEnd uint64) *Bitmap {
containerLast = uint32(lbLast)
}
i := bm.highlowcontainer.getIndex(hb)
j := answer.highlowcontainer.getIndex(hb)
i := bm.highlowcontainer.getIndex(uint16(hb))
j := answer.highlowcontainer.getIndex(uint16(hb))
if i >= 0 {
c := bm.highlowcontainer.getContainerAtIndex(i).not(int(containerStart), int(containerLast)+1)
if c.getCardinality() > 0 {
answer.highlowcontainer.insertNewKeyValueAt(-j-1, hb, c)
answer.highlowcontainer.insertNewKeyValueAt(-j-1, uint16(hb), c)
}
} else { // *think* the range of ones must never be
// empty.
answer.highlowcontainer.insertNewKeyValueAt(-j-1, hb,
answer.highlowcontainer.insertNewKeyValueAt(-j-1, uint16(hb),
rangeOfOnes(int(containerStart), int(containerLast)))
}
}
// copy the containers after the active area.
answer.highlowcontainer.appendCopiesAfter(bm.highlowcontainer, hbLast)
answer.highlowcontainer.appendCopiesAfter(bm.highlowcontainer, uint16(hbLast))
return answer
}

View File

@ -14,6 +14,8 @@ import (
//go:generate msgp -unexported
type container interface {
addOffset(uint16) []container
clone() container
and(container) container
andCardinality(container) int
@ -38,6 +40,7 @@ type container interface {
inot(firstOfRange, endx int) container // i stands for inplace, range is [firstOfRange,endx)
xor(r container) container
getShortIterator() shortIterable
getReverseIterator() shortIterable
getManyIterator() manyIterable
contains(i uint16) bool
maximum() uint16
@ -456,8 +459,7 @@ func (ra *roaringArray) serializedSizeInBytes() uint64 {
//
// spec: https://github.com/RoaringBitmap/RoaringFormatSpec
//
func (ra *roaringArray) toBytes() ([]byte, error) {
stream := &bytes.Buffer{}
func (ra *roaringArray) writeTo(w io.Writer) (n int64, err error) {
hasRun := ra.hasRunCompression()
isRunSizeInBytes := 0
cookieSize := 8
@ -522,33 +524,29 @@ func (ra *roaringArray) toBytes() ([]byte, error) {
}
}
_, err := stream.Write(buf[:nw])
written, err := w.Write(buf[:nw])
if err != nil {
return nil, err
return n, err
}
for i, c := range ra.containers {
_ = i
_, err := c.writeTo(stream)
n += int64(written)
for _, c := range ra.containers {
written, err := c.writeTo(w)
if err != nil {
return nil, err
return n, err
}
n += int64(written)
}
return stream.Bytes(), nil
return n, nil
}
//
// spec: https://github.com/RoaringBitmap/RoaringFormatSpec
//
func (ra *roaringArray) writeTo(out io.Writer) (int64, error) {
by, err := ra.toBytes()
if err != nil {
return 0, err
}
n, err := out.Write(by)
if err == nil && n < len(by) {
err = io.ErrShortWrite
}
return int64(n), err
func (ra *roaringArray) toBytes() ([]byte, error) {
var buf bytes.Buffer
_, err := ra.writeTo(&buf)
return buf.Bytes(), err
}
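// A round-trip sketch (not part of upstream) of the streaming path above at the
// Bitmap level, assuming the package's existing Bitmap.WriteTo and
// Bitmap.ReadFrom wrappers around this roaringArray serialization.
func serializationRoundTripSketch() (*Bitmap, error) {
	var buf bytes.Buffer
	if _, err := BitmapOf(1, 2, 3, 1000000).WriteTo(&buf); err != nil {
		return nil, err
	}
	rb := NewBitmap()
	if _, err := rb.ReadFrom(&buf); err != nil {
		return nil, err
	}
	return rb, nil // rb now holds the same values as the original
}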
func (ra *roaringArray) fromBuffer(buf []byte) (int64, error) {

View File

@ -8,7 +8,7 @@ import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
// Deprecated: DecodeMsg implements msgp.Decodable
func (z *containerSerz) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
@ -48,7 +48,7 @@ func (z *containerSerz) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
// EncodeMsg implements msgp.Encodable
// Deprecated: EncodeMsg implements msgp.Encodable
func (z *containerSerz) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "t"
@ -72,7 +72,7 @@ func (z *containerSerz) EncodeMsg(en *msgp.Writer) (err error) {
return
}
// MarshalMsg implements msgp.Marshaler
// Deprecated: MarshalMsg implements msgp.Marshaler
func (z *containerSerz) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
@ -88,7 +88,7 @@ func (z *containerSerz) MarshalMsg(b []byte) (o []byte, err error) {
return
}
// UnmarshalMsg implements msgp.Unmarshaler
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
func (z *containerSerz) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
@ -129,13 +129,13 @@ func (z *containerSerz) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *containerSerz) Msgsize() (s int) {
s = 1 + 2 + msgp.Uint8Size + 2 + z.r.Msgsize()
return
}
// DecodeMsg implements msgp.Decodable
// Deprecated: DecodeMsg implements msgp.Decodable
func (z *contype) DecodeMsg(dc *msgp.Reader) (err error) {
{
var zajw uint8
@ -148,7 +148,7 @@ func (z *contype) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
// EncodeMsg implements msgp.Encodable
// Deprecated: EncodeMsg implements msgp.Encodable
func (z contype) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteUint8(uint8(z))
if err != nil {
@ -157,14 +157,14 @@ func (z contype) EncodeMsg(en *msgp.Writer) (err error) {
return
}
// MarshalMsg implements msgp.Marshaler
// Deprecated: MarshalMsg implements msgp.Marshaler
func (z contype) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendUint8(o, uint8(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
func (z *contype) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zwht uint8
@ -178,13 +178,13 @@ func (z *contype) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z contype) Msgsize() (s int) {
s = msgp.Uint8Size
return
}
// DecodeMsg implements msgp.Decodable
// Deprecated: DecodeMsg implements msgp.Decodable
func (z *roaringArray) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
@ -295,7 +295,7 @@ func (z *roaringArray) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
// EncodeMsg implements msgp.Encodable
// Deprecated: EncodeMsg implements msgp.Encodable
func (z *roaringArray) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 4
// write "keys"
@ -370,7 +370,7 @@ func (z *roaringArray) EncodeMsg(en *msgp.Writer) (err error) {
return
}
// MarshalMsg implements msgp.Marshaler
// Deprecated: MarshalMsg implements msgp.Marshaler
func (z *roaringArray) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 4
@ -407,7 +407,7 @@ func (z *roaringArray) MarshalMsg(b []byte) (o []byte, err error) {
return
}
// UnmarshalMsg implements msgp.Unmarshaler
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
func (z *roaringArray) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
@ -519,7 +519,7 @@ func (z *roaringArray) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *roaringArray) Msgsize() (s int) {
s = 1 + 5 + msgp.ArrayHeaderSize + (len(z.keys) * (msgp.Uint16Size)) + 16 + msgp.ArrayHeaderSize + (len(z.needCopyOnWrite) * (msgp.BoolSize)) + 12 + msgp.BoolSize + 8 + msgp.ArrayHeaderSize
for zxhx := range z.conserz {

View File

@ -853,6 +853,21 @@ func (rc *runContainer16) numIntervals() int {
return len(rc.iv)
}
// searchOptions allows us to accelerate search with
// prior knowledge of (mostly lower) bounds. This is used by Union
// and Intersect.
type searchOptions struct {
// start here instead of at 0
startIndex int64
// upper bound instead of len(rc.iv);
// endxIndex == 0 means ignore the bound and use
// endxIndex == n == len(rc.iv), which is also
// the natural default for search() when opt is nil.
endxIndex int64
}
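// A sketch (not part of upstream) of how a caller with prior knowledge can use
// searchOptions to restrict search to a suffix of rc.iv; the third return value
// of search is ignored here, as it is elsewhere in this file.
func (rc *runContainer16) searchFromSketch(key, from int64) (int64, bool) {
	opts := &searchOptions{startIndex: from, endxIndex: int64(len(rc.iv))}
	w, already, _ := rc.search(key, opts)
	return w, already
}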
// search returns alreadyPresent to indicate if the
// key is already in one of our interval16s.
//
@ -1204,6 +1219,78 @@ func (ri *runIterator16) remove() uint16 {
return cur
}
// runReverseIterator16 advice: you must call next() at least once
// before calling cur(); and you should call hasNext()
// before calling next() to ensure there are values remaining.
type runReverseIterator16 struct {
rc *runContainer16
curIndex int64 // index into rc.iv
curPosInIndex uint16 // offset in rc.iv[curIndex]
curSeq int64 // 0->cardinality, performance optimization in hasNext()
}
// newRunReverseIterator16 returns a new reverse run iterator over rc; it is not positioned until next() is called.
func (rc *runContainer16) newRunReverseIterator16() *runReverseIterator16 {
return &runReverseIterator16{rc: rc, curIndex: -2}
}
// hasNext returns false if calling next will panic. It
// returns true when there is at least one more value
// available in the iteration sequence.
func (ri *runReverseIterator16) hasNext() bool {
if len(ri.rc.iv) == 0 {
return false
}
if ri.curIndex == -2 {
return true
}
return ri.rc.cardinality()-ri.curSeq > 1
}
// cur returns the current value pointed to by the iterator.
func (ri *runReverseIterator16) cur() uint16 {
return ri.rc.iv[ri.curIndex].start + ri.curPosInIndex
}
// next returns the next value in the iteration sequence.
func (ri *runReverseIterator16) next() uint16 {
if !ri.hasNext() {
panic("no next available")
}
if ri.curIndex == -1 {
panic("runReverseIterator.next() going beyond what is available")
}
if ri.curIndex == -2 {
// first time is special
ri.curIndex = int64(len(ri.rc.iv)) - 1
ri.curPosInIndex = ri.rc.iv[ri.curIndex].length
} else {
if ri.curPosInIndex > 0 {
ri.curPosInIndex--
} else {
ri.curIndex--
ri.curPosInIndex = ri.rc.iv[ri.curIndex].length
}
ri.curSeq++
}
return ri.cur()
}
// remove removes the element that the iterator
// is on from the run container. You can use
// cur if you want to double check what is about
// to be deleted.
func (ri *runReverseIterator16) remove() uint16 {
n := ri.rc.cardinality()
if n == 0 {
panic("runReverseIterator.Remove called on empty runContainer16")
}
cur := ri.cur()
ri.rc.deleteAt(&ri.curIndex, &ri.curPosInIndex, &ri.curSeq)
return cur
}
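// A usage sketch (not part of upstream): draining a runContainer16 in
// descending order with the reverse iterator, following the advice above to
// check hasNext() before every next().
func (rc *runContainer16) reverseValuesSketch() []uint16 {
	out := make([]uint16, 0, rc.getCardinality())
	it := rc.newRunReverseIterator16()
	for it.hasNext() {
		out = append(out, it.next())
	}
	return out
}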
type manyRunIterator16 struct {
rc *runContainer16
curIndex int64
@ -1241,9 +1328,9 @@ func (ri *manyRunIterator16) nextMany(hs uint32, buf []uint32) int {
}
buf[n] = uint32(ri.rc.iv[ri.curIndex].start) | hs
if ri.curIndex != 0 {
ri.curSeq += 1
ri.curSeq++
}
n += 1
n++
// not strictly necessarily due to len(buf)-n min check, but saves some work
continue
}
@ -1724,24 +1811,750 @@ func (rc *runContainer16) containerType() contype {
}
func (rc *runContainer16) equals16(srb *runContainer16) bool {
//p("both rc16")
// Check if the containers are the same object.
if rc == srb {
//p("same object")
return true
}
if len(srb.iv) != len(rc.iv) {
//p("iv len differ")
return false
}
for i, v := range rc.iv {
if v != srb.iv[i] {
//p("differ at iv i=%v, srb.iv[i]=%v, rc.iv[i]=%v", i, srb.iv[i], rc.iv[i])
return false
}
}
//p("all intervals same, returning true")
return true
}
// compile time verify we meet interface requirements
var _ container = &runContainer16{}
func (rc *runContainer16) clone() container {
return newRunContainer16CopyIv(rc.iv)
}
func (rc *runContainer16) minimum() uint16 {
return rc.iv[0].start // assume not empty
}
func (rc *runContainer16) maximum() uint16 {
return rc.iv[len(rc.iv)-1].last() // assume not empty
}
func (rc *runContainer16) isFull() bool {
return (len(rc.iv) == 1) && ((rc.iv[0].start == 0) && (rc.iv[0].last() == MaxUint16))
}
func (rc *runContainer16) and(a container) container {
if rc.isFull() {
return a.clone()
}
switch c := a.(type) {
case *runContainer16:
return rc.intersect(c)
case *arrayContainer:
return rc.andArray(c)
case *bitmapContainer:
return rc.andBitmapContainer(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) andCardinality(a container) int {
switch c := a.(type) {
case *runContainer16:
return int(rc.intersectCardinality(c))
case *arrayContainer:
return rc.andArrayCardinality(c)
case *bitmapContainer:
return rc.andBitmapContainerCardinality(c)
}
panic("unsupported container type")
}
// andBitmapContainer finds the intersection of rc and b.
func (rc *runContainer16) andBitmapContainer(bc *bitmapContainer) container {
bc2 := newBitmapContainerFromRun(rc)
return bc2.andBitmap(bc)
}
func (rc *runContainer16) andArrayCardinality(ac *arrayContainer) int {
pos := 0
answer := 0
maxpos := ac.getCardinality()
if maxpos == 0 {
return 0 // won't happen in actual code
}
v := ac.content[pos]
mainloop:
for _, p := range rc.iv {
for v < p.start {
pos++
if pos == maxpos {
break mainloop
}
v = ac.content[pos]
}
for v <= p.last() {
answer++
pos++
if pos == maxpos {
break mainloop
}
v = ac.content[pos]
}
}
return answer
}
func (rc *runContainer16) iand(a container) container {
if rc.isFull() {
return a.clone()
}
switch c := a.(type) {
case *runContainer16:
return rc.inplaceIntersect(c)
case *arrayContainer:
return rc.andArray(c)
case *bitmapContainer:
return rc.iandBitmapContainer(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) inplaceIntersect(rc2 *runContainer16) container {
// TODO: optimize by doing less allocation, possibly?
// sect will be new
sect := rc.intersect(rc2)
*rc = *sect
return rc
}
func (rc *runContainer16) iandBitmapContainer(bc *bitmapContainer) container {
isect := rc.andBitmapContainer(bc)
*rc = *newRunContainer16FromContainer(isect)
return rc
}
func (rc *runContainer16) andArray(ac *arrayContainer) container {
if len(rc.iv) == 0 {
return newArrayContainer()
}
acCardinality := ac.getCardinality()
c := newArrayContainerCapacity(acCardinality)
for rlePos, arrayPos := 0, 0; arrayPos < acCardinality; {
iv := rc.iv[rlePos]
arrayVal := ac.content[arrayPos]
for iv.last() < arrayVal {
rlePos++
if rlePos == len(rc.iv) {
return c
}
iv = rc.iv[rlePos]
}
if iv.start > arrayVal {
arrayPos = advanceUntil(ac.content, arrayPos, len(ac.content), iv.start)
} else {
c.content = append(c.content, arrayVal)
arrayPos++
}
}
return c
}
func (rc *runContainer16) andNot(a container) container {
switch c := a.(type) {
case *arrayContainer:
return rc.andNotArray(c)
case *bitmapContainer:
return rc.andNotBitmap(c)
case *runContainer16:
return rc.andNotRunContainer16(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) fillLeastSignificant16bits(x []uint32, i int, mask uint32) {
k := 0
var val int64
for _, p := range rc.iv {
n := p.runlen()
for j := int64(0); j < n; j++ {
val = int64(p.start) + j
x[k+i] = uint32(val) | mask
k++
}
}
}
func (rc *runContainer16) getShortIterator() shortIterable {
return rc.newRunIterator16()
}
func (rc *runContainer16) getReverseIterator() shortIterable {
return rc.newRunReverseIterator16()
}
func (rc *runContainer16) getManyIterator() manyIterable {
return rc.newManyRunIterator16()
}
// add the values in the range [firstOfRange, endx). endx
// is still able to express 2^16 because it is an int, not a uint16.
func (rc *runContainer16) iaddRange(firstOfRange, endx int) container {
if firstOfRange >= endx {
panic(fmt.Sprintf("invalid %v = endx >= firstOfRange", endx))
}
addme := newRunContainer16TakeOwnership([]interval16{
{
start: uint16(firstOfRange),
length: uint16(endx - 1 - firstOfRange),
},
})
*rc = *rc.union(addme)
return rc
}
// remove the values in the range [firstOfRange,endx)
func (rc *runContainer16) iremoveRange(firstOfRange, endx int) container {
if firstOfRange >= endx {
panic(fmt.Sprintf("request to iremove empty set [%v, %v),"+
" nothing to do.", firstOfRange, endx))
//return rc
}
x := newInterval16Range(uint16(firstOfRange), uint16(endx-1))
rc.isubtract(x)
return rc
}
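// A sketch (not part of upstream) of the half-open range convention used by
// iaddRange and iremoveRange above: adding [10,20) and then removing [15,20)
// leaves the five values 10..14.
func rangeEditSketch() int {
	rc := newRunContainer16()
	rc.iaddRange(10, 20)
	rc.iremoveRange(15, 20)
	return rc.getCardinality() // 5
}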
// not returns a new container with the values in the range [firstOfRange,endx) flipped
func (rc *runContainer16) not(firstOfRange, endx int) container {
if firstOfRange >= endx {
panic(fmt.Sprintf("invalid %v = endx >= firstOfRange = %v", endx, firstOfRange))
}
return rc.Not(firstOfRange, endx)
}
// Not flips the values in the range [firstOfRange,endx).
// This is not inplace. Only the returned value has the flipped bits.
//
// Currently implemented as (!A intersect B) union (A minus B),
// where A is rc, and B is the supplied [firstOfRange, endx) interval.
//
// TODO(time optimization): convert this to a single pass
// algorithm by copying AndNotRunContainer16() and modifying it.
// Current routine is correct but
// makes 2 more passes through the arrays than should be
// strictly necessary. Measure both ways though--this may not matter.
//
func (rc *runContainer16) Not(firstOfRange, endx int) *runContainer16 {
if firstOfRange >= endx {
panic(fmt.Sprintf("invalid %v = endx >= firstOfRange == %v", endx, firstOfRange))
}
if firstOfRange >= endx {
return rc.Clone()
}
a := rc
// algo:
// (!A intersect B) union (A minus B)
nota := a.invert()
bs := []interval16{newInterval16Range(uint16(firstOfRange), uint16(endx-1))}
b := newRunContainer16TakeOwnership(bs)
notAintersectB := nota.intersect(b)
aMinusB := a.AndNotRunContainer16(b)
rc2 := notAintersectB.union(aMinusB)
return rc2
}
// equals is now logical equals; it does not require the
// same underlying container type.
func (rc *runContainer16) equals(o container) bool {
srb, ok := o.(*runContainer16)
if !ok {
// maybe value instead of pointer
val, valok := o.(*runContainer16)
if valok {
srb = val
ok = true
}
}
if ok {
// Check if the containers are the same object.
if rc == srb {
return true
}
if len(srb.iv) != len(rc.iv) {
return false
}
for i, v := range rc.iv {
if v != srb.iv[i] {
return false
}
}
return true
}
// use generic comparison
if o.getCardinality() != rc.getCardinality() {
return false
}
rit := rc.getShortIterator()
bit := o.getShortIterator()
//k := 0
for rit.hasNext() {
if bit.next() != rit.next() {
return false
}
//k++
}
return true
}
func (rc *runContainer16) iaddReturnMinimized(x uint16) container {
rc.Add(x)
return rc
}
func (rc *runContainer16) iadd(x uint16) (wasNew bool) {
return rc.Add(x)
}
func (rc *runContainer16) iremoveReturnMinimized(x uint16) container {
rc.removeKey(x)
return rc
}
func (rc *runContainer16) iremove(x uint16) bool {
return rc.removeKey(x)
}
func (rc *runContainer16) or(a container) container {
if rc.isFull() {
return rc.clone()
}
switch c := a.(type) {
case *runContainer16:
return rc.union(c)
case *arrayContainer:
return rc.orArray(c)
case *bitmapContainer:
return rc.orBitmapContainer(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) orCardinality(a container) int {
switch c := a.(type) {
case *runContainer16:
return int(rc.unionCardinality(c))
case *arrayContainer:
return rc.orArrayCardinality(c)
case *bitmapContainer:
return rc.orBitmapContainerCardinality(c)
}
panic("unsupported container type")
}
// orBitmapContainer finds the union of rc and bc.
func (rc *runContainer16) orBitmapContainer(bc *bitmapContainer) container {
bc2 := newBitmapContainerFromRun(rc)
return bc2.iorBitmap(bc)
}
func (rc *runContainer16) andBitmapContainerCardinality(bc *bitmapContainer) int {
answer := 0
for i := range rc.iv {
answer += bc.getCardinalityInRange(uint(rc.iv[i].start), uint(rc.iv[i].last())+1)
}
//bc.computeCardinality()
return answer
}
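// orBitmapContainerCardinality computes |rc ∪ bc| by inclusion-exclusion:
// |rc| + |bc| - |rc ∩ bc|; for example, cardinalities 10 and 7 with an
// overlap of 4 give 10 + 7 - 4 = 13.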
func (rc *runContainer16) orBitmapContainerCardinality(bc *bitmapContainer) int {
return rc.getCardinality() + bc.getCardinality() - rc.andBitmapContainerCardinality(bc)
}
// orArray finds the union of rc and ac.
func (rc *runContainer16) orArray(ac *arrayContainer) container {
bc1 := newBitmapContainerFromRun(rc)
bc2 := ac.toBitmapContainer()
return bc1.orBitmap(bc2)
}
// orArray finds the union of rc and ac.
func (rc *runContainer16) orArrayCardinality(ac *arrayContainer) int {
return ac.getCardinality() + rc.getCardinality() - rc.andArrayCardinality(ac)
}
func (rc *runContainer16) ior(a container) container {
if rc.isFull() {
return rc
}
switch c := a.(type) {
case *runContainer16:
return rc.inplaceUnion(c)
case *arrayContainer:
return rc.iorArray(c)
case *bitmapContainer:
return rc.iorBitmapContainer(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) inplaceUnion(rc2 *runContainer16) container {
for _, p := range rc2.iv {
last := int64(p.last())
for i := int64(p.start); i <= last; i++ {
rc.Add(uint16(i))
}
}
return rc
}
func (rc *runContainer16) iorBitmapContainer(bc *bitmapContainer) container {
it := bc.getShortIterator()
for it.hasNext() {
rc.Add(it.next())
}
return rc
}
func (rc *runContainer16) iorArray(ac *arrayContainer) container {
it := ac.getShortIterator()
for it.hasNext() {
rc.Add(it.next())
}
return rc
}
// lazyIOR is described (not yet implemented) in
// this nice note from @lemire on
// https://github.com/RoaringBitmap/roaring/pull/70#issuecomment-263613737
//
// Description of lazyOR and lazyIOR from @lemire:
//
// Lazy functions are optional and can be simply
// wrapper around non-lazy functions.
//
// The idea of "laziness" is as follows. It is
// inspired by the concept of lazy evaluation
// you might be familiar with (functional programming
// and all that). So a roaring bitmap is
// such that all its containers are, in some
// sense, chosen to use as little memory as
// possible. This is nice. Also, all bitsets
// are "cardinality aware" so that you can do
// fast rank/select queries, or query the
// cardinality of the whole bitmap... very fast,
// without latency.
//
// However, imagine that you are aggregating 100
// bitmaps together. So you OR the first two, then OR
// that with the third one and so forth. Clearly,
// intermediate bitmaps don't need to be as
// compressed as possible, right? They can be
// in a "dirty state". You only need the end
// result to be in a nice state... which you
// can achieve by calling repairAfterLazy at the end.
//
// The Java/C code does something special for
// the in-place lazy OR runs. The idea is that
// instead of taking two run containers and
// generating a new one, we actually try to
// do the computation in-place through a
// technique invented by @gssiyankai (pinging him!).
// What you do is you check whether the host
// run container has lots of extra capacity.
// If it does, you move its data at the end of
// the backing array, and then you write
// the answer at the beginning. What this
// trick does is minimize memory allocations.
//
func (rc *runContainer16) lazyIOR(a container) container {
// not lazy at the moment
return rc.ior(a)
}
// lazyOR is described above in lazyIOR.
func (rc *runContainer16) lazyOR(a container) container {
// not lazy at the moment
return rc.or(a)
}
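// A sketch (not part of upstream) of the aggregation pattern described in the
// note above, assuming lazyIOR and clone are part of the container interface
// as the uniform signatures in this file suggest. Intermediate results may be
// "dirty"; toEfficientContainer (defined later in this file) stands in for the
// final repair step.
func lazyAggregateSketch(first container, rest []container) container {
	acc := first.clone()
	for _, c := range rest {
		acc = acc.lazyIOR(c) // cheap, possibly non-minimal intermediates
	}
	if rc, ok := acc.(*runContainer16); ok {
		return rc.toEfficientContainer() // compact the final result
	}
	return acc
}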
func (rc *runContainer16) intersects(a container) bool {
// TODO: optimize by doing inplace/less allocation, possibly?
isect := rc.and(a)
return isect.getCardinality() > 0
}
func (rc *runContainer16) xor(a container) container {
switch c := a.(type) {
case *arrayContainer:
return rc.xorArray(c)
case *bitmapContainer:
return rc.xorBitmap(c)
case *runContainer16:
return rc.xorRunContainer16(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) iandNot(a container) container {
switch c := a.(type) {
case *arrayContainer:
return rc.iandNotArray(c)
case *bitmapContainer:
return rc.iandNotBitmap(c)
case *runContainer16:
return rc.iandNotRunContainer16(c)
}
panic("unsupported container type")
}
// flip the values in the range [firstOfRange,endx)
func (rc *runContainer16) inot(firstOfRange, endx int) container {
if firstOfRange >= endx {
panic(fmt.Sprintf("invalid %v = endx >= firstOfRange = %v", endx, firstOfRange))
}
// TODO: minimize copies, do it all inplace; not() makes a copy.
rc = rc.Not(firstOfRange, endx)
return rc
}
func (rc *runContainer16) getCardinality() int {
return int(rc.cardinality())
}
func (rc *runContainer16) rank(x uint16) int {
n := int64(len(rc.iv))
xx := int64(x)
w, already, _ := rc.search(xx, nil)
if w < 0 {
return 0
}
if !already && w == n-1 {
return rc.getCardinality()
}
var rnk int64
if !already {
for i := int64(0); i <= w; i++ {
rnk += rc.iv[i].runlen()
}
return int(rnk)
}
for i := int64(0); i < w; i++ {
rnk += rc.iv[i].runlen()
}
rnk += int64(x-rc.iv[w].start) + 1
return int(rnk)
}
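// rankExampleSketch is a worked example (not part of upstream) for rank above:
// with runs covering [3,6] and [10,12], rank(10) counts 3, 4, 5, 6 and 10 and
// returns 5; the first run contributes its full runlen of 4, the containing
// run adds (10-10)+1.
func rankExampleSketch() int {
	rc := newRunContainer16TakeOwnership([]interval16{
		newInterval16Range(3, 6),
		newInterval16Range(10, 12),
	})
	return rc.rank(10) // 5
}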
func (rc *runContainer16) selectInt(x uint16) int {
return rc.selectInt16(x)
}
func (rc *runContainer16) andNotRunContainer16(b *runContainer16) container {
return rc.AndNotRunContainer16(b)
}
func (rc *runContainer16) andNotArray(ac *arrayContainer) container {
rcb := rc.toBitmapContainer()
acb := ac.toBitmapContainer()
return rcb.andNotBitmap(acb)
}
func (rc *runContainer16) andNotBitmap(bc *bitmapContainer) container {
rcb := rc.toBitmapContainer()
return rcb.andNotBitmap(bc)
}
func (rc *runContainer16) toBitmapContainer() *bitmapContainer {
bc := newBitmapContainer()
for i := range rc.iv {
bc.iaddRange(int(rc.iv[i].start), int(rc.iv[i].last())+1)
}
bc.computeCardinality()
return bc
}
func (rc *runContainer16) iandNotRunContainer16(x2 *runContainer16) container {
rcb := rc.toBitmapContainer()
x2b := x2.toBitmapContainer()
rcb.iandNotBitmapSurely(x2b)
// TODO: check size and optimize the return value
// TODO: is inplace modification really required? If not, elide the copy.
rc2 := newRunContainer16FromBitmapContainer(rcb)
*rc = *rc2
return rc
}
func (rc *runContainer16) iandNotArray(ac *arrayContainer) container {
rcb := rc.toBitmapContainer()
acb := ac.toBitmapContainer()
rcb.iandNotBitmapSurely(acb)
// TODO: check size and optimize the return value
// TODO: is inplace modification really required? If not, elide the copy.
rc2 := newRunContainer16FromBitmapContainer(rcb)
*rc = *rc2
return rc
}
func (rc *runContainer16) iandNotBitmap(bc *bitmapContainer) container {
rcb := rc.toBitmapContainer()
rcb.iandNotBitmapSurely(bc)
// TODO: check size and optimize the return value
// TODO: is inplace modification really required? If not, elide the copy.
rc2 := newRunContainer16FromBitmapContainer(rcb)
*rc = *rc2
return rc
}
func (rc *runContainer16) xorRunContainer16(x2 *runContainer16) container {
rcb := rc.toBitmapContainer()
x2b := x2.toBitmapContainer()
return rcb.xorBitmap(x2b)
}
func (rc *runContainer16) xorArray(ac *arrayContainer) container {
rcb := rc.toBitmapContainer()
acb := ac.toBitmapContainer()
return rcb.xorBitmap(acb)
}
func (rc *runContainer16) xorBitmap(bc *bitmapContainer) container {
rcb := rc.toBitmapContainer()
return rcb.xorBitmap(bc)
}
// convert to bitmap or array *if needed*
func (rc *runContainer16) toEfficientContainer() container {
// runContainer16SerializedSizeInBytes(numRuns)
sizeAsRunContainer := rc.getSizeInBytes()
sizeAsBitmapContainer := bitmapContainerSizeInBytes()
card := int(rc.cardinality())
sizeAsArrayContainer := arrayContainerSizeInBytes(card)
if sizeAsRunContainer <= minOfInt(sizeAsBitmapContainer, sizeAsArrayContainer) {
return rc
}
if card <= arrayDefaultMaxSize {
return rc.toArrayContainer()
}
bc := newBitmapContainerFromRun(rc)
return bc
}
func (rc *runContainer16) toArrayContainer() *arrayContainer {
ac := newArrayContainer()
for i := range rc.iv {
ac.iaddRange(int(rc.iv[i].start), int(rc.iv[i].last())+1)
}
return ac
}
func newRunContainer16FromContainer(c container) *runContainer16 {
switch x := c.(type) {
case *runContainer16:
return x.Clone()
case *arrayContainer:
return newRunContainer16FromArray(x)
case *bitmapContainer:
return newRunContainer16FromBitmapContainer(x)
}
panic("unsupported container type")
}
// And finds the intersection of rc and b.
func (rc *runContainer16) And(b *Bitmap) *Bitmap {
out := NewBitmap()
for _, p := range rc.iv {
plast := p.last()
for i := p.start; i <= plast; i++ {
if b.Contains(uint32(i)) {
out.Add(uint32(i))
}
}
}
return out
}
// Xor returns the exclusive-or of rc and b.
func (rc *runContainer16) Xor(b *Bitmap) *Bitmap {
out := b.Clone()
for _, p := range rc.iv {
plast := p.last()
for v := p.start; v <= plast; v++ {
w := uint32(v)
if out.Contains(w) {
out.RemoveRange(uint64(w), uint64(w+1))
} else {
out.Add(w)
}
}
}
return out
}
// Or returns the union of rc and b.
func (rc *runContainer16) Or(b *Bitmap) *Bitmap {
out := b.Clone()
for _, p := range rc.iv {
plast := p.last()
for v := p.start; v <= plast; v++ {
out.Add(uint32(v))
}
}
return out
}
// serializedSizeInBytes returns the number of bytes of memory
// required by this runContainer16. This is for the
// Roaring format, as specified https://github.com/RoaringBitmap/RoaringFormatSpec/
func (rc *runContainer16) serializedSizeInBytes() int {
// the number of runs takes one uint16, then each run
// needs two more uint16s
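// e.g. a container with 3 runs serializes to 2 + 3*4 = 14 bytes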
return 2 + len(rc.iv)*4
}
func (rc *runContainer16) addOffset(x uint16) []container {
low := newRunContainer16()
high := newRunContainer16()
for _, iv := range rc.iv {
val := int(iv.start) + int(x)
finalVal := int(val) + int(iv.length)
if val <= 0xffff {
if finalVal <= 0xffff {
low.iv = append(low.iv, interval16{uint16(val), iv.length})
} else {
low.iv = append(low.iv, interval16{uint16(val), uint16(0xffff - val)})
high.iv = append(high.iv, interval16{uint16(0), uint16(finalVal & 0xffff)})
}
} else {
high.iv = append(high.iv, interval16{uint16(val & 0xffff), iv.length})
}
}
return []container{low, high}
}
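// A worked sketch (not part of upstream) of the boundary split above: shifting
// the single run [0,50] by 65500 yields low = [65500,65535] and high = [0,14],
// i.e. the original 51 values 65500..65550 split across two container keys.
func addOffsetSplitSketch() (low, high container) {
	rc := newRunContainer16TakeOwnership([]interval16{newInterval16Range(0, 50)})
	parts := rc.addOffset(65500)
	return parts[0], parts[1]
}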

View File

@ -6,7 +6,7 @@ package roaring
import "github.com/tinylib/msgp/msgp"
// DecodeMsg implements msgp.Decodable
// Deprecated: DecodeMsg implements msgp.Decodable
func (z *addHelper16) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
@ -169,7 +169,7 @@ func (z *addHelper16) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
// EncodeMsg implements msgp.Encodable
// Deprecated: EncodeMsg implements msgp.Encodable
func (z *addHelper16) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 5
// write "runstart"
@ -284,7 +284,7 @@ func (z *addHelper16) EncodeMsg(en *msgp.Writer) (err error) {
return
}
// MarshalMsg implements msgp.Marshaler
// Deprecated: MarshalMsg implements msgp.Marshaler
func (z *addHelper16) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 5
@ -334,7 +334,7 @@ func (z *addHelper16) MarshalMsg(b []byte) (o []byte, err error) {
return
}
// UnmarshalMsg implements msgp.Unmarshaler
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
func (z *addHelper16) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
@ -498,7 +498,7 @@ func (z *addHelper16) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *addHelper16) Msgsize() (s int) {
s = 1 + 9 + msgp.Uint16Size + 7 + msgp.Uint16Size + 14 + msgp.Uint16Size + 2 + msgp.ArrayHeaderSize + (len(z.m) * (12 + msgp.Uint16Size + msgp.Uint16Size)) + 3
if z.rc == nil {
@ -509,7 +509,7 @@ func (z *addHelper16) Msgsize() (s int) {
return
}
// DecodeMsg implements msgp.Decodable
// Deprecated: DecodeMsg implements msgp.Decodable
func (z *interval16) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
@ -546,7 +546,7 @@ func (z *interval16) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
// EncodeMsg implements msgp.Encodable
// Deprecated: EncodeMsg implements msgp.Encodable
func (z interval16) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "start"
@ -570,7 +570,7 @@ func (z interval16) EncodeMsg(en *msgp.Writer) (err error) {
return
}
// MarshalMsg implements msgp.Marshaler
// Deprecated: MarshalMsg implements msgp.Marshaler
func (z interval16) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
@ -583,7 +583,7 @@ func (z interval16) MarshalMsg(b []byte) (o []byte, err error) {
return
}
// UnmarshalMsg implements msgp.Unmarshaler
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
func (z *interval16) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
@ -621,13 +621,13 @@ func (z *interval16) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z interval16) Msgsize() (s int) {
s = 1 + 6 + msgp.Uint16Size + 5 + msgp.Uint16Size
return
}
// DecodeMsg implements msgp.Decodable
// Deprecated: DecodeMsg implements msgp.Decodable
func (z *runContainer16) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
@ -701,7 +701,7 @@ func (z *runContainer16) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
// EncodeMsg implements msgp.Encodable
// Deprecated: EncodeMsg implements msgp.Encodable
func (z *runContainer16) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "iv"
@ -746,7 +746,7 @@ func (z *runContainer16) EncodeMsg(en *msgp.Writer) (err error) {
return
}
// MarshalMsg implements msgp.Marshaler
// Deprecated: MarshalMsg implements msgp.Marshaler
func (z *runContainer16) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
@ -768,7 +768,7 @@ func (z *runContainer16) MarshalMsg(b []byte) (o []byte, err error) {
return
}
// UnmarshalMsg implements msgp.Unmarshaler
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
func (z *runContainer16) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
@ -843,13 +843,13 @@ func (z *runContainer16) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *runContainer16) Msgsize() (s int) {
s = 1 + 3 + msgp.ArrayHeaderSize + (len(z.iv) * (12 + msgp.Uint16Size + msgp.Uint16Size)) + 5 + msgp.Int64Size
return
}
// DecodeMsg implements msgp.Decodable
// Deprecated: DecodeMsg implements msgp.Decodable
func (z *runIterator16) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
@ -906,7 +906,7 @@ func (z *runIterator16) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
// EncodeMsg implements msgp.Encodable
// Deprecated: EncodeMsg implements msgp.Encodable
func (z *runIterator16) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 4
// write "rc"
@ -955,7 +955,7 @@ func (z *runIterator16) EncodeMsg(en *msgp.Writer) (err error) {
return
}
// MarshalMsg implements msgp.Marshaler
// Deprecated: MarshalMsg implements msgp.Marshaler
func (z *runIterator16) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 4
@ -981,7 +981,7 @@ func (z *runIterator16) MarshalMsg(b []byte) (o []byte, err error) {
return
}
// UnmarshalMsg implements msgp.Unmarshaler
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
func (z *runIterator16) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
@ -1039,7 +1039,7 @@ func (z *runIterator16) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *runIterator16) Msgsize() (s int) {
s = 1 + 3
if z.rc == nil {
@ -1051,7 +1051,7 @@ func (z *runIterator16) Msgsize() (s int) {
return
}
// DecodeMsg implements msgp.Decodable
// Deprecated: DecodeMsg implements msgp.Decodable
func (z *uint16Slice) DecodeMsg(dc *msgp.Reader) (err error) {
var zjpj uint32
zjpj, err = dc.ReadArrayHeader()
@ -1072,7 +1072,7 @@ func (z *uint16Slice) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
// EncodeMsg implements msgp.Encodable
// Deprecated: EncodeMsg implements msgp.Encodable
func (z uint16Slice) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteArrayHeader(uint32(len(z)))
if err != nil {
@ -1087,7 +1087,7 @@ func (z uint16Slice) EncodeMsg(en *msgp.Writer) (err error) {
return
}
// MarshalMsg implements msgp.Marshaler
// Deprecated: MarshalMsg implements msgp.Marshaler
func (z uint16Slice) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendArrayHeader(o, uint32(len(z)))
@ -1097,7 +1097,7 @@ func (z uint16Slice) MarshalMsg(b []byte) (o []byte, err error) {
return
}
// UnmarshalMsg implements msgp.Unmarshaler
// Deprecated: UnmarshalMsg implements msgp.Unmarshaler
func (z *uint16Slice) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zgmo uint32
zgmo, bts, err = msgp.ReadArrayHeaderBytes(bts)
@ -1119,7 +1119,7 @@ func (z *uint16Slice) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z uint16Slice) Msgsize() (s int) {
s = msgp.ArrayHeaderSize + (len(z) * (msgp.Uint16Size))
return

View File

@ -22,14 +22,6 @@ func (b *runContainer16) writeTo(stream io.Writer) (int, error) {
return stream.Write(buf)
}
func (b *runContainer32) writeToMsgpack(stream io.Writer) (int, error) {
bts, err := b.MarshalMsg(nil)
if err != nil {
return 0, err
}
return stream.Write(bts)
}
func (b *runContainer16) writeToMsgpack(stream io.Writer) (int, error) {
bts, err := b.MarshalMsg(nil)
if err != nil {
@ -38,11 +30,6 @@ func (b *runContainer16) writeToMsgpack(stream io.Writer) (int, error) {
return stream.Write(bts)
}
func (b *runContainer32) readFromMsgpack(stream io.Reader) (int, error) {
err := msgp.Decode(stream, b)
return 0, err
}
func (b *runContainer16) readFromMsgpack(stream io.Reader) (int, error) {
err := msgp.Decode(stream, b)
return 0, err

View File

@ -4,6 +4,7 @@ package roaring
import (
"encoding/binary"
"errors"
"io"
)
@ -26,6 +27,10 @@ func (b *arrayContainer) readFrom(stream io.Reader) (int, error) {
}
func (b *bitmapContainer) writeTo(stream io.Writer) (int, error) {
if b.cardinality <= arrayDefaultMaxSize {
return 0, errors.New("refusing to write bitmap container with cardinality of array container")
}
// Write set
buf := make([]byte, 8*len(b.bitmap))
for i, v := range b.bitmap {

View File

@ -3,6 +3,7 @@
package roaring
import (
"errors"
"io"
"reflect"
"unsafe"
@ -14,6 +15,9 @@ func (ac *arrayContainer) writeTo(stream io.Writer) (int, error) {
}
func (bc *bitmapContainer) writeTo(stream io.Writer) (int, error) {
if bc.cardinality <= arrayDefaultMaxSize {
return 0, errors.New("refusing to write bitmap container with cardinality of array container")
}
buf := uint64SliceAsByteSlice(bc.bitmap)
return stream.Write(buf)
}

View File

@ -19,3 +19,18 @@ func (si *shortIterator) next() uint16 {
si.loc++
return a
}
type reverseIterator struct {
slice []uint16
loc int
}
func (si *reverseIterator) hasNext() bool {
return si.loc >= 0
}
func (si *reverseIterator) next() uint16 {
a := si.slice[si.loc]
si.loc--
return a
}

View File

@ -14,6 +14,17 @@ const (
serialCookie = 12347 // runs, arrays, and bitmaps
noOffsetThreshold = 4
// MaxUint32 is the largest uint32 value.
MaxUint32 = 4294967295
// MaxRange is one more than the maximum allowed bitmap bit index. For use as an upper
// bound for ranges.
MaxRange uint64 = MaxUint32 + 1
// MaxUint16 is the largest 16 bit unsigned int.
// This is the largest value an interval16 can store.
MaxUint16 = 65535
// Compute wordSizeInBytes, the size of a word in bytes.
_m = ^uint64(0)
_logS = _m>>8&1 + _m>>16&1 + _m>>32&1
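// Note: _m = ^uint64(0) has every bit set, so each of the three
// shift-and-mask terms above evaluates to 1, _logS is 3, and a word is
// therefore 1<<_logS = 8 bytes, matching the 64-bit words used by the
// bitmap containers.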
@ -114,7 +125,6 @@ func flipBitmapRange(bitmap []uint64, start int, end int) {
endword := (end - 1) / 64
bitmap[firstword] ^= ^(^uint64(0) << uint(start%64))
for i := firstword; i < endword; i++ {
//p("flipBitmapRange on i=%v", i)
bitmap[i] = ^bitmap[i]
}
bitmap[endword] ^= ^uint64(0) >> (uint(-end) % 64)

View File

@ -157,6 +157,21 @@ Yes, however this implementation is only aware of the byte representation you ch
In my work on the [Bleve](https://github.com/blevesearch/bleve) project I became aware of the power of the FST for many search-related tasks. The obvious starting point for such a thing in Go was the [mafsa](https://github.com/smartystreets/mafsa) project. While working with mafsa I encountered some issues. First, it did not stream data to disk while building. Second, it chose to use a rune as the fundamental unit of transition in the FST, but I felt using a byte would be more powerful in the end. My hope is that higher-level encoding-aware traversals will be possible when necessary. Finally, as I reported bugs and submitted PRs I learned that the mafsa project was mainly a research project and no longer being maintained. I wanted to build something that could be used in production. As the project advanced more and more techniques from the [BurntSushi/fst](https://github.com/BurntSushi/fst) were adapted to our implementation.
### Are there tools to work with vellum files?
Under the cmd/vellum subdirectory, there's a command-line tool whose
subcommands let you create, inspect, and query vellum files.
### How can I generate a state transition diagram from a vellum file?
The vellum command-line tool has a "dot" subcommand that emits
graphviz dot data for an input vellum file. The dot file can in turn
be converted into an image using the graphviz tools. For example:
$ vellum dot myFile.vellum > output.dot
$ dot -Tpng output.dot -o output.png
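### Can I use vellum directly from Go?
Yes. A minimal sketch, assuming the Builder/Open/Get APIs as vendored
here (error handling elided for brevity):

```go
package main

import (
	"fmt"
	"os"

	"github.com/couchbase/vellum"
)

func main() {
	f, _ := os.Create("example.vellum")

	// Keys must be inserted in lexicographic order.
	b, _ := vellum.New(f, nil)
	b.Insert([]byte("cat"), 1)
	b.Insert([]byte("dog"), 2)
	b.Close()
	f.Close()

	fst, _ := vellum.Open("example.vellum")
	defer fst.Close()

	val, ok, _ := fst.Get([]byte("dog"))
	fmt.Println(val, ok) // prints: 2 true
}
```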
## Related Work
Much credit goes to two existing projects:

View File

@ -81,5 +81,5 @@ func (m *AlwaysMatch) Accept(int, byte) int {
return 0
}
// creating an alwaysMatchAutomaton to avoid unnecesary repeated allocations.
// creating an alwaysMatchAutomaton to avoid unnecessary repeated allocations.
var alwaysMatchAutomaton = &AlwaysMatch{}

View File

@ -219,7 +219,7 @@ func (f *fstStateV1) Final() bool {
}
func (f *fstStateV1) FinalOutput() uint64 {
if f.numTrans > 0 && f.final && f.outSize > 0 {
if f.final && f.outSize > 0 {
return readPackedUint(f.data[f.outFinal : f.outFinal+f.outSize])
}
return 0

View File

@ -74,8 +74,8 @@ func (f *FST) get(input []byte, prealloc fstState) (uint64, bool, error) {
if err != nil {
return 0, false, err
}
for i := range input {
_, curr, output := state.TransitionFor(input[i])
for _, c := range input {
_, curr, output := state.TransitionFor(c)
if curr == noneAddr {
return 0, false, nil
}

View File

@ -76,7 +76,8 @@ func newIterator(f *FST, startKeyInclusive, endKeyExclusive []byte,
// Reset resets the Iterator's internal state to allow for iterator
// reuse (e.g. pooling).
func (i *FSTIterator) Reset(f *FST, startKeyInclusive, endKeyExclusive []byte, aut Automaton) error {
func (i *FSTIterator) Reset(f *FST,
startKeyInclusive, endKeyExclusive []byte, aut Automaton) error {
if aut == nil {
aut = alwaysMatchAutomaton
}
@ -91,14 +92,14 @@ func (i *FSTIterator) Reset(f *FST, startKeyInclusive, endKeyExclusive []byte, a
// pointTo attempts to point us to the specified location
func (i *FSTIterator) pointTo(key []byte) error {
// tried to seek before start
if bytes.Compare(key, i.startKeyInclusive) < 0 {
key = i.startKeyInclusive
}
// trid to see past end
if i.endKeyExclusive != nil && bytes.Compare(key, i.endKeyExclusive) > 0 {
// tried to seek past end
if i.endKeyExclusive != nil &&
bytes.Compare(key, i.endKeyExclusive) > 0 {
key = i.endKeyExclusive
}
@ -121,21 +122,23 @@ func (i *FSTIterator) pointTo(key []byte) error {
i.statesStack = append(i.statesStack, root)
i.autStatesStack = append(i.autStatesStack, autStart)
for j := 0; j < len(key); j++ {
keyJ := key[j]
curr := i.statesStack[len(i.statesStack)-1]
autCurr := i.autStatesStack[len(i.autStatesStack)-1]
pos, nextAddr, nextVal := curr.TransitionFor(key[j])
pos, nextAddr, nextVal := curr.TransitionFor(keyJ)
if nextAddr == noneAddr {
// needed transition doesn't exist
// find last trans before the one we needed
for q := 0; q < curr.NumTransitions(); q++ {
if curr.TransitionAt(q) < key[j] {
for q := curr.NumTransitions() - 1; q >= 0; q-- {
if curr.TransitionAt(q) < keyJ {
maxQ = q
break
}
}
break
}
autNext := i.aut.Accept(autCurr, key[j])
autNext := i.aut.Accept(autCurr, keyJ)
next, err := i.f.decoder.stateAt(nextAddr, nil)
if err != nil {
@ -143,14 +146,16 @@ func (i *FSTIterator) pointTo(key []byte) error {
}
i.statesStack = append(i.statesStack, next)
i.keysStack = append(i.keysStack, key[j])
i.keysStack = append(i.keysStack, keyJ)
i.keysPosStack = append(i.keysPosStack, pos)
i.valsStack = append(i.valsStack, nextVal)
i.autStatesStack = append(i.autStatesStack, autNext)
continue
}
if !i.statesStack[len(i.statesStack)-1].Final() || !i.aut.IsMatch(i.autStatesStack[len(i.autStatesStack)-1]) || bytes.Compare(i.keysStack, key) < 0 {
if !i.statesStack[len(i.statesStack)-1].Final() ||
!i.aut.IsMatch(i.autStatesStack[len(i.autStatesStack)-1]) ||
bytes.Compare(i.keysStack, key) < 0 {
return i.next(maxQ)
}
@ -181,15 +186,12 @@ func (i *FSTIterator) Next() error {
}
func (i *FSTIterator) next(lastOffset int) error {
// remember where we started
if cap(i.nextStart) < len(i.keysStack) {
i.nextStart = make([]byte, len(i.keysStack))
} else {
i.nextStart = i.nextStart[0:len(i.keysStack)]
}
copy(i.nextStart, i.keysStack)
i.nextStart = append(i.nextStart[:0], i.keysStack...)
nextOffset := lastOffset + 1
OUTER:
for true {
curr := i.statesStack[len(i.statesStack)-1]
autCurr := i.autStatesStack[len(i.autStatesStack)-1]
@ -200,58 +202,62 @@ func (i *FSTIterator) next(lastOffset int) error {
return nil
}
nextOffset := lastOffset + 1
if nextOffset < curr.NumTransitions() {
numTrans := curr.NumTransitions()
INNER:
for nextOffset < numTrans {
t := curr.TransitionAt(nextOffset)
autNext := i.aut.Accept(autCurr, t)
if i.aut.CanMatch(autNext) {
pos, nextAddr, v := curr.TransitionFor(t)
// the next slot in the statesStack might have an
// fstState instance that we can reuse
var nextPrealloc fstState
if len(i.statesStack) < cap(i.statesStack) {
nextPrealloc = i.statesStack[0:cap(i.statesStack)][len(i.statesStack)]
}
// push onto stack
next, err := i.f.decoder.stateAt(nextAddr, nextPrealloc)
if err != nil {
return err
}
i.statesStack = append(i.statesStack, next)
i.keysStack = append(i.keysStack, t)
i.keysPosStack = append(i.keysPosStack, pos)
i.valsStack = append(i.valsStack, v)
i.autStatesStack = append(i.autStatesStack, autNext)
lastOffset = -1
// check to see if new keystack might have gone too far
if i.endKeyExclusive != nil && bytes.Compare(i.keysStack, i.endKeyExclusive) >= 0 {
return ErrIteratorDone
}
} else {
lastOffset = nextOffset
if !i.aut.CanMatch(autNext) {
nextOffset += 1
continue INNER
}
continue
pos, nextAddr, v := curr.TransitionFor(t)
// the next slot in the statesStack might have an
// fstState instance that we can reuse
var nextPrealloc fstState
if len(i.statesStack) < cap(i.statesStack) {
nextPrealloc = i.statesStack[0:cap(i.statesStack)][len(i.statesStack)]
}
// push onto stack
next, err := i.f.decoder.stateAt(nextAddr, nextPrealloc)
if err != nil {
return err
}
i.statesStack = append(i.statesStack, next)
i.keysStack = append(i.keysStack, t)
i.keysPosStack = append(i.keysPosStack, pos)
i.valsStack = append(i.valsStack, v)
i.autStatesStack = append(i.autStatesStack, autNext)
// check to see if new keystack might have gone too far
if i.endKeyExclusive != nil &&
bytes.Compare(i.keysStack, i.endKeyExclusive) >= 0 {
return ErrIteratorDone
}
nextOffset = 0
continue OUTER
}
if len(i.statesStack) > 1 {
// no transitions, and still room to pop
i.statesStack = i.statesStack[:len(i.statesStack)-1]
i.keysStack = i.keysStack[:len(i.keysStack)-1]
lastOffset = i.keysPosStack[len(i.keysPosStack)-1]
i.keysPosStack = i.keysPosStack[:len(i.keysPosStack)-1]
i.valsStack = i.valsStack[:len(i.valsStack)-1]
i.autStatesStack = i.autStatesStack[:len(i.autStatesStack)-1]
continue
} else {
if len(i.statesStack) <= 1 {
// stack len is 1 (root), can't go back further, we're done
break
}
// no transitions, and still room to pop
i.statesStack = i.statesStack[:len(i.statesStack)-1]
i.keysStack = i.keysStack[:len(i.keysStack)-1]
nextOffset = i.keysPosStack[len(i.keysPosStack)-1] + 1
i.keysPosStack = i.keysPosStack[:len(i.keysPosStack)-1]
i.valsStack = i.valsStack[:len(i.valsStack)-1]
i.autStatesStack = i.autStatesStack[:len(i.autStatesStack)-1]
}
return ErrIteratorDone
@ -262,15 +268,12 @@ func (i *FSTIterator) next(lastOffset int) error {
// seek operation would go past the last key, or outside the configured
// startKeyInclusive/endKeyExclusive then ErrIteratorDone is returned.
func (i *FSTIterator) Seek(key []byte) error {
err := i.pointTo(key)
if err != nil {
return err
}
return nil
return i.pointTo(key)
}
// Close will free any resources held by this iterator.
func (i *FSTIterator) Close() error {
// at the moment we don't do anything, but wanted this for API completeness
// at the moment we don't do anything,
// but wanted this for API completeness
return nil
}

View File

@ -19,9 +19,17 @@ import (
"fmt"
"unicode"
unicode_utf8 "unicode/utf8"
"github.com/couchbase/vellum/utf8"
)
var sequences0ToMaxRune utf8.Sequences
func init() {
sequences0ToMaxRune, _ = utf8.NewSequences(0, unicode.MaxRune)
}
type dfa struct {
states statesStack
}
@ -58,17 +66,25 @@ type dfaBuilder struct {
lev *dynamicLevenshtein
cache map[string]int
keyBuf []byte
sequences utf8.Sequences
rangeStack utf8.RangeStack
startBytes []byte
endBytes []byte
nexts []int
}
func newDfaBuilder(lev *dynamicLevenshtein) *dfaBuilder {
dfab := &dfaBuilder{
dfa: &dfa{
states: make([]*state, 0, 16),
states: make([]state, 0, 16),
},
lev: lev,
cache: make(map[string]int, 1024),
lev: lev,
cache: make(map[string]int, 1024),
startBytes: make([]byte, unicode_utf8.UTFMax),
endBytes: make([]byte, unicode_utf8.UTFMax),
}
dfab.newState(false) // create state 0, invalid
_, dfab.nexts = dfab.newState(false, nil) // create state 0, invalid
return dfab
}
@ -101,7 +117,7 @@ func (b *dfaBuilder) build() (*dfa, error) {
levNext := b.lev.accept(levState, &r)
nextSi := b.cachedState(levNext)
if nextSi != 0 {
err = b.addUtf8Sequences(true, dfaSi, nextSi, r, r)
err = b.addUtf8RuneRange(true, dfaSi, nextSi, r, r)
if err != nil {
return nil, err
}
@ -150,7 +166,7 @@ func (b *dfaBuilder) cached(levState []int) (int, bool) {
return v, true
}
match := b.lev.isMatch(levState)
b.dfa.states = b.dfa.states.Push(&state{
b.dfa.states = append(b.dfa.states, state{
next: make([]int, 256),
match: match,
})
@ -165,42 +181,64 @@ func (b *dfaBuilder) addMismatchUtf8States(fromSi int, levState []int) (int, []i
if toSi == 0 {
return 0, nil, nil
}
err := b.addUtf8Sequences(false, fromSi, toSi, 0, unicode.MaxRune)
if err != nil {
return 0, nil, err
}
b.addUtf8Sequences(false, fromSi, toSi, sequences0ToMaxRune)
return toSi, mmState, nil
}
func (b *dfaBuilder) addUtf8Sequences(overwrite bool, fromSi, toSi int, fromChar, toChar rune) error {
sequences, err := utf8.NewSequences(fromChar, toChar)
func (b *dfaBuilder) addUtf8RuneRange(overwrite bool, fromSi, toSi int,
fromChar, toChar rune) (
err error) {
b.sequences, b.rangeStack, err = utf8.NewSequencesPrealloc(fromChar, toChar,
b.sequences, b.rangeStack, b.startBytes, b.endBytes)
if err != nil {
return err
}
b.addUtf8Sequences(overwrite, fromSi, toSi, b.sequences)
return nil
}
func (b *dfaBuilder) addUtf8Sequences(overwrite bool, fromSi, toSi int,
sequences utf8.Sequences) {
for _, seq := range sequences {
fsi := fromSi
for _, utf8r := range seq[:len(seq)-1] {
tsi := b.newState(false)
var tsi int
tsi, b.nexts = b.newState(false, b.nexts)
b.addUtf8Range(overwrite, fsi, tsi, utf8r)
fsi = tsi
}
b.addUtf8Range(overwrite, fsi, toSi, seq[len(seq)-1])
}
return nil
}
func (b *dfaBuilder) addUtf8Range(overwrite bool, from, to int, rang *utf8.Range) {
for by := rang.Start; by <= rang.End; by++ {
if overwrite || b.dfa.states[from].next[by] == 0 {
b.dfa.states[from].next[by] = to
func (b *dfaBuilder) addUtf8Range(overwrite bool, from, to int, rang utf8.Range) {
fromNext := b.dfa.states[from].next
if overwrite {
for by := rang.Start; by <= rang.End; by++ {
fromNext[by] = to
}
} else {
for by := rang.Start; by <= rang.End; by++ {
if fromNext[by] == 0 {
fromNext[by] = to
}
}
}
}
func (b *dfaBuilder) newState(match bool) int {
b.dfa.states = append(b.dfa.states, &state{
next: make([]int, 256),
func (b *dfaBuilder) newState(match bool, prealloc []int) (int, []int) {
if len(prealloc) < 256 {
prealloc = make([]int, 16384)
}
next := prealloc[0:256]
prealloc = prealloc[256:]
b.dfa.states = append(b.dfa.states, state{
next: next,
match: match,
})
return len(b.dfa.states) - 1
return len(b.dfa.states) - 1, prealloc
}

View File

@ -22,7 +22,7 @@ import (
const StateLimit = 10000
// ErrTooManyStates is returned if you attempt to build a Levenshtein
// automaton which requries too many states.
// automaton which requires too many states.
var ErrTooManyStates = fmt.Errorf("dfa contains more than %d states", StateLimit)
// Levenshtein implements the vellum.Automaton interface for matching

View File

@ -41,14 +41,9 @@ func (d *dynamicLevenshtein) isMatch(state []int) bool {
}
func (d *dynamicLevenshtein) canMatch(state []int) bool {
if len(state) > 0 {
min := state[0]
for i := 1; i < len(state); i++ {
if state[i] < min {
min = state[i]
}
}
if uint(min) <= d.distance {
distance := int(d.distance)
for _, v := range state {
if v <= distance {
return true
}
}
@ -56,7 +51,8 @@ func (d *dynamicLevenshtein) canMatch(state []int) bool {
}
func (d *dynamicLevenshtein) accept(state []int, r *rune) []int {
next := []int{state[0] + 1}
next := make([]int, 0, len(d.query)+1)
next = append(next, state[0]+1)
i := 0
for _, c := range d.query {
var cost int

View File

@ -16,7 +16,7 @@ package levenshtein
import "fmt"
type statesStack []*state
type statesStack []state
func (s statesStack) String() string {
rv := ""
@ -30,10 +30,6 @@ func (s statesStack) String() string {
return rv
}
func (s statesStack) Push(v *state) statesStack {
return append(s, v)
}
type intsStack [][]int
func (s intsStack) Push(v []int) intsStack {

View File

@ -18,17 +18,27 @@ import (
"regexp/syntax"
"unicode"
unicode_utf8 "unicode/utf8"
"github.com/couchbase/vellum/utf8"
)
type compiler struct {
sizeLimit uint
insts prog
instsPool []inst
sequences utf8.Sequences
rangeStack utf8.RangeStack
startBytes []byte
endBytes []byte
}
func newCompiler(sizeLimit uint) *compiler {
return &compiler{
sizeLimit: sizeLimit,
sizeLimit: sizeLimit,
startBytes: make([]byte, unicode_utf8.UTFMax),
endBytes: make([]byte, unicode_utf8.UTFMax),
}
}
@ -37,13 +47,13 @@ func (c *compiler) compile(ast *syntax.Regexp) (prog, error) {
if err != nil {
return nil, err
}
c.insts = append(c.insts, &inst{
op: OpMatch,
})
inst := c.allocInst()
inst.op = OpMatch
c.insts = append(c.insts, inst)
return c.insts, nil
}
func (c *compiler) c(ast *syntax.Regexp) error {
func (c *compiler) c(ast *syntax.Regexp) (err error) {
if ast.Flags&syntax.NonGreedy > 1 {
return ErrNoLazy
}
@ -67,11 +77,12 @@ func (c *compiler) c(ast *syntax.Regexp) error {
next.Rune = next.Rune0[0:2]
return c.c(&next)
}
seqs, err := utf8.NewSequences(r, r)
c.sequences, c.rangeStack, err = utf8.NewSequencesPrealloc(
r, r, c.sequences, c.rangeStack, c.startBytes, c.endBytes)
if err != nil {
return err
}
for _, seq := range seqs {
for _, seq := range c.sequences {
c.compileUtf8Ranges(seq)
}
}
@ -106,8 +117,7 @@ func (c *compiler) c(ast *syntax.Regexp) error {
if len(ast.Sub) == 0 {
return nil
}
jmpsToEnd := []uint{}
jmpsToEnd := make([]uint, 0, len(ast.Sub)-1)
// does not handle last entry
for i := 0; i < len(ast.Sub)-1; i++ {
sub := ast.Sub[i]
@ -188,7 +198,8 @@ func (c *compiler) c(ast *syntax.Regexp) error {
return err
}
}
var splits, starts []uint
splits := make([]uint, 0, ast.Max-ast.Min)
starts := make([]uint, 0, ast.Max-ast.Min)
for i := ast.Min; i < ast.Max; i++ {
splits = append(splits, c.emptySplit())
starts = append(starts, uint(len(c.insts)))
@ -218,8 +229,7 @@ func (c *compiler) compileClass(ast *syntax.Regexp) error {
if len(ast.Rune) == 0 {
return nil
}
var jmps []uint
jmps := make([]uint, 0, len(ast.Rune)-2)
// does not do last pair
for i := 0; i < len(ast.Rune)-2; i += 2 {
rstart := ast.Rune[i]
@ -249,16 +259,16 @@ func (c *compiler) compileClass(ast *syntax.Regexp) error {
return nil
}
func (c *compiler) compileClassRange(startR, endR rune) error {
seqs, err := utf8.NewSequences(startR, endR)
func (c *compiler) compileClassRange(startR, endR rune) (err error) {
c.sequences, c.rangeStack, err = utf8.NewSequencesPrealloc(
startR, endR, c.sequences, c.rangeStack, c.startBytes, c.endBytes)
if err != nil {
return err
}
var jmps []uint
jmps := make([]uint, 0, len(c.sequences)-1)
// does not do last entry
for i := 0; i < len(seqs)-1; i++ {
seq := seqs[i]
for i := 0; i < len(c.sequences)-1; i++ {
seq := c.sequences[i]
split := c.emptySplit()
j1 := c.top()
c.compileUtf8Ranges(seq)
@ -267,7 +277,7 @@ func (c *compiler) compileClassRange(startR, endR rune) error {
c.setSplit(split, j1, j2)
}
// handle last entry
c.compileUtf8Ranges(seqs[len(seqs)-1])
c.compileUtf8Ranges(c.sequences[len(c.sequences)-1])
end := c.top()
for _, jmp := range jmps {
c.setJump(jmp, end)
@ -278,25 +288,25 @@ func (c *compiler) compileClassRange(startR, endR rune) error {
func (c *compiler) compileUtf8Ranges(seq utf8.Sequence) {
for _, r := range seq {
c.insts = append(c.insts, &inst{
op: OpRange,
rangeStart: r.Start,
rangeEnd: r.End,
})
inst := c.allocInst()
inst.op = OpRange
inst.rangeStart = r.Start
inst.rangeEnd = r.End
c.insts = append(c.insts, inst)
}
}
func (c *compiler) emptySplit() uint {
c.insts = append(c.insts, &inst{
op: OpSplit,
})
inst := c.allocInst()
inst.op = OpSplit
c.insts = append(c.insts, inst)
return c.top() - 1
}
func (c *compiler) emptyJump() uint {
c.insts = append(c.insts, &inst{
op: OpJmp,
})
inst := c.allocInst()
inst.op = OpJmp
c.insts = append(c.insts, inst)
return c.top() - 1
}
@ -314,3 +324,12 @@ func (c *compiler) setJump(i, pc uint) {
func (c *compiler) top() uint {
return uint(len(c.insts))
}
func (c *compiler) allocInst() *inst {
if len(c.instsPool) <= 0 {
c.instsPool = make([]inst, 16)
}
inst := &c.instsPool[0]
c.instsPool = c.instsPool[1:]
return inst
}

View File

@ -23,7 +23,7 @@ import (
const StateLimit = 10000
// ErrTooManyStates is returned if you attempt to build a Levenshtein
// automaton which requries too many states.
// automaton which requires too many states.
var ErrTooManyStates = fmt.Errorf("dfa contains more than %d states",
StateLimit)
@ -37,12 +37,12 @@ func newDfaBuilder(insts prog) *dfaBuilder {
d := &dfaBuilder{
dfa: &dfa{
insts: insts,
states: make([]*state, 0, 16),
states: make([]state, 0, 16),
},
cache: make(map[string]int, 1024),
}
// add 0 state that is invalid
d.dfa.states = append(d.dfa.states, &state{
d.dfa.states = append(d.dfa.states, state{
next: make([]int, 256),
match: false,
})
@ -54,13 +54,15 @@ func (d *dfaBuilder) build() (*dfa, error) {
next := newSparseSet(uint(len(d.dfa.insts)))
d.dfa.add(cur, 0)
states := intStack{d.cachedState(cur)}
ns, instsReuse := d.cachedState(cur, nil)
states := intStack{ns}
seen := make(map[int]struct{})
var s int
states, s = states.Pop()
for s != 0 {
for b := 0; b < 256; b++ {
ns := d.runState(cur, next, s, byte(b))
var ns int
ns, instsReuse = d.runState(cur, next, s, byte(b), instsReuse)
if ns != 0 {
if _, ok := seen[ns]; !ok {
seen[ns] = struct{}{}
@ -76,15 +78,17 @@ func (d *dfaBuilder) build() (*dfa, error) {
return d.dfa, nil
}
func (d *dfaBuilder) runState(cur, next *sparseSet, state int, b byte) int {
func (d *dfaBuilder) runState(cur, next *sparseSet, state int, b byte, instsReuse []uint) (
int, []uint) {
cur.Clear()
for _, ip := range d.dfa.states[state].insts {
cur.Add(ip)
}
d.dfa.run(cur, next, b)
nextState := d.cachedState(next)
var nextState int
nextState, instsReuse = d.cachedState(next, instsReuse)
d.dfa.states[state].next[b] = nextState
return nextState
return nextState, instsReuse
}
func instsKey(insts []uint, buf []byte) []byte {
@ -99,8 +103,12 @@ func instsKey(insts []uint, buf []byte) []byte {
return buf
}
func (d *dfaBuilder) cachedState(set *sparseSet) int {
var insts []uint
func (d *dfaBuilder) cachedState(set *sparseSet,
instsReuse []uint) (int, []uint) {
insts := instsReuse[:0]
if cap(insts) == 0 {
insts = make([]uint, 0, set.Len())
}
var isMatch bool
for i := uint(0); i < uint(set.Len()); i++ {
ip := set.Get(i)
@ -113,26 +121,26 @@ func (d *dfaBuilder) cachedState(set *sparseSet) int {
}
}
if len(insts) == 0 {
return 0
return 0, insts
}
d.keyBuf = instsKey(insts, d.keyBuf)
v, ok := d.cache[string(d.keyBuf)]
if ok {
return v
return v, insts
}
d.dfa.states = append(d.dfa.states, &state{
d.dfa.states = append(d.dfa.states, state{
insts: insts,
next: make([]int, 256),
match: isMatch,
})
newV := len(d.dfa.states) - 1
d.cache[string(d.keyBuf)] = newV
return newV
return newV, nil
}
type dfa struct {
insts prog
states []*state
states []state
}
func (d *dfa) add(set *sparseSet, ip uint) {

View File

@ -27,7 +27,7 @@ const (
OpRange
)
// instSize is the approxmiate size of the an inst struct in bytes
// instSize is the approximate size of an inst struct in bytes
const instSize = 40
type inst struct {

View File

@ -35,6 +35,8 @@ var ErrNoLazy = fmt.Errorf("lazy quantifiers are not allowed")
// too many instructions
var ErrCompiledTooBig = fmt.Errorf("too many instructions")
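// DefaultLimit is the default maximum size of the compiled finite state
// automaton; it is the limit used by New.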
var DefaultLimit = uint(10 * (1 << 20))
// Regexp implements the vellum.Automaton interface for matching a user
// specified regular expression.
type Regexp struct {
@ -47,7 +49,7 @@ type Regexp struct {
// compiled finite state automaton. If this size is exceeded,
// ErrCompiledTooBig will be returned.
func New(expr string) (*Regexp, error) {
return NewWithLimit(expr, 10*(1<<20))
return NewWithLimit(expr, DefaultLimit)
}
// NewRegexpWithLimit creates a new Regular Expression automaton with
@ -59,6 +61,10 @@ func NewWithLimit(expr string, size uint) (*Regexp, error) {
if err != nil {
return nil, err
}
return NewParsedWithLimit(expr, parsed, size)
}
func NewParsedWithLimit(expr string, parsed *syntax.Regexp, size uint) (*Regexp, error) {
compiler := newCompiler(size)
insts, err := compiler.compile(parsed)
if err != nil {
@ -103,7 +109,7 @@ func (r *Regexp) WillAlwaysMatch(int) bool {
return false
}
// Accept returns the new state, resulting from the transite byte b
// Accept returns the new state, resulting from the transition byte b
// when currently in the state s.
func (r *Regexp) Accept(s int, b byte) int {
if s < len(r.dfa.states) {

View File

@ -25,19 +25,39 @@ type Sequences []Sequence
// NewSequences constructs a collection of Sequence which describe the
// byte ranges covered between the start and end runes.
func NewSequences(start, end rune) (Sequences, error) {
var rv Sequences
rv, _, err := NewSequencesPrealloc(start, end, nil, nil, nil, nil)
return rv, err
}
var rangeStack rangeStack
rangeStack = rangeStack.Push(&scalarRange{start, end})
func NewSequencesPrealloc(start, end rune,
preallocSequences Sequences,
preallocRangeStack RangeStack,
preallocStartBytes, preallocEndBytes []byte) (Sequences, RangeStack, error) {
rv := preallocSequences[:0]
startBytes := preallocStartBytes
if cap(startBytes) < utf8.UTFMax {
startBytes = make([]byte, utf8.UTFMax)
}
startBytes = startBytes[:utf8.UTFMax]
endBytes := preallocEndBytes
if cap(endBytes) < utf8.UTFMax {
endBytes = make([]byte, utf8.UTFMax)
}
endBytes = endBytes[:utf8.UTFMax]
rangeStack := preallocRangeStack[:0]
rangeStack = rangeStack.Push(scalarRange{start, end})
rangeStack, r := rangeStack.Pop()
TOP:
for r != nil {
for r != nilScalarRange {
INNER:
for {
r1, r2 := r.split()
if r1 != nil {
rangeStack = rangeStack.Push(&scalarRange{r2.start, r2.end})
if r1 != nilScalarRange {
rangeStack = rangeStack.Push(scalarRange{r2.start, r2.end})
r.start = r1.start
r.end = r1.end
continue INNER
@ -49,13 +69,13 @@ TOP:
for i := 1; i < utf8.UTFMax; i++ {
max := maxScalarValue(i)
if r.start <= max && max < r.end {
rangeStack = rangeStack.Push(&scalarRange{max + 1, r.end})
rangeStack = rangeStack.Push(scalarRange{max + 1, r.end})
r.end = max
continue INNER
}
}
asciiRange := r.ascii()
if asciiRange != nil {
if asciiRange != nilRange {
rv = append(rv, Sequence{
asciiRange,
})
@ -66,23 +86,21 @@ TOP:
m := rune((1 << (6 * i)) - 1)
if (r.start & ^m) != (r.end & ^m) {
if (r.start & m) != 0 {
rangeStack = rangeStack.Push(&scalarRange{(r.start | m) + 1, r.end})
rangeStack = rangeStack.Push(scalarRange{(r.start | m) + 1, r.end})
r.end = r.start | m
continue INNER
}
if (r.end & m) != m {
rangeStack = rangeStack.Push(&scalarRange{r.end & ^m, r.end})
rangeStack = rangeStack.Push(scalarRange{r.end & ^m, r.end})
r.end = (r.end & ^m) - 1
continue INNER
}
}
}
start := make([]byte, utf8.UTFMax)
end := make([]byte, utf8.UTFMax)
n, m := r.encode(start, end)
seq, err := SequenceFromEncodedRange(start[0:n], end[0:m])
n, m := r.encode(startBytes, endBytes)
seq, err := SequenceFromEncodedRange(startBytes[0:n], endBytes[0:m])
if err != nil {
return nil, err
return nil, nil, err
}
rv = append(rv, seq)
rangeStack, r = rangeStack.Pop()
@ -90,11 +108,11 @@ TOP:
}
}
return rv, nil
return rv, rangeStack, nil
}
// Sequence is a collection of *Range
type Sequence []*Range
// Sequence is a collection of Range
type Sequence []Range
// SequenceFromEncodedRange creates sequence from the encoded bytes
func SequenceFromEncodedRange(start, end []byte) (Sequence, error) {
@ -104,21 +122,21 @@ func SequenceFromEncodedRange(start, end []byte) (Sequence, error) {
switch len(start) {
case 2:
return Sequence{
&Range{start[0], end[0]},
&Range{start[1], end[1]},
Range{start[0], end[0]},
Range{start[1], end[1]},
}, nil
case 3:
return Sequence{
&Range{start[0], end[0]},
&Range{start[1], end[1]},
&Range{start[2], end[2]},
Range{start[0], end[0]},
Range{start[1], end[1]},
Range{start[2], end[2]},
}, nil
case 4:
return Sequence{
&Range{start[0], end[0]},
&Range{start[1], end[1]},
&Range{start[2], end[2]},
&Range{start[3], end[3]},
Range{start[0], end[0]},
Range{start[1], end[1]},
Range{start[2], end[2]},
Range{start[3], end[3]},
}, nil
}
@ -159,6 +177,8 @@ type Range struct {
End byte
}
var nilRange = Range{0xff, 0}
func (u Range) matches(b byte) bool {
if u.Start <= b && b <= u.End {
return true
@ -178,37 +198,39 @@ type scalarRange struct {
end rune
}
var nilScalarRange = scalarRange{0xffff, 0}
func (s *scalarRange) String() string {
return fmt.Sprintf("ScalarRange(%d,%d)", s.start, s.end)
}
// split this scalar range if it overlaps with a surrogate codepoint
func (s *scalarRange) split() (*scalarRange, *scalarRange) {
func (s *scalarRange) split() (scalarRange, scalarRange) {
if s.start < 0xe000 && s.end > 0xd7ff {
return &scalarRange{
return scalarRange{
start: s.start,
end: 0xd7ff,
},
&scalarRange{
scalarRange{
start: 0xe000,
end: s.end,
}
}
return nil, nil
return nilScalarRange, nilScalarRange
}
func (s *scalarRange) valid() bool {
return s.start <= s.end
}
func (s *scalarRange) ascii() *Range {
func (s *scalarRange) ascii() Range {
if s.valid() && s.end <= 0x7f {
return &Range{
return Range{
Start: byte(s.start),
End: byte(s.end),
}
}
return nil
return nilRange
}
// start and end MUST have capacity for utf8.UTFMax bytes
@ -218,16 +240,16 @@ func (s *scalarRange) encode(start, end []byte) (int, int) {
return n, m
}
type rangeStack []*scalarRange
type RangeStack []scalarRange
func (s rangeStack) Push(v *scalarRange) rangeStack {
func (s RangeStack) Push(v scalarRange) RangeStack {
return append(s, v)
}
func (s rangeStack) Pop() (rangeStack, *scalarRange) {
func (s RangeStack) Pop() (RangeStack, scalarRange) {
l := len(s)
if l < 1 {
return s, nil
return s, nilScalarRange
}
return s[:l-1], s[l-1]
}
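// The value types above replace the earlier *Range/*scalarRange pointers;
// instead of nil, the impossible-by-construction sentinels nilRange and
// nilScalarRange signal "no range", so pushing and popping the RangeStack
// no longer allocates per element. A minimal sketch of the same pattern
// with a hypothetical span type (illustration only, not part of this API):
//
//	type span struct{ lo, hi int }
//
//	var nilSpan = span{1, 0} // lo > hi never occurs for a real span
//
//	func pop(s []span) ([]span, span) {
//		if len(s) == 0 {
//			return s, nilSpan // sentinel value instead of a nil pointer
//		}
//		return s[:len(s)-1], s[len(s)-1]
//	}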

View File

@ -2,7 +2,7 @@ ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and distribute this software for any
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

View File

@ -16,7 +16,9 @@
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build !js,!appengine,!safe,!disableunsafe
// Go versions prior to 1.4 are disabled because they use a different layout
// for interfaces which makes the implementation of unsafeReflectValue more complex.
// +build !js,!appengine,!safe,!disableunsafe,go1.4
package spew
@ -34,80 +36,49 @@ const (
ptrSize = unsafe.Sizeof((*byte)(nil))
)
var (
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
// internal reflect.Value fields. These values are valid before golang
// commit ecccf07e7f9d which changed the format. The are also valid
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
offsetPtr = uintptr(ptrSize)
offsetScalar = uintptr(0)
offsetFlag = uintptr(ptrSize * 2)
type flag uintptr
// flagKindWidth and flagKindShift indicate various bits that the
// reflect package uses internally to track kind information.
//
// flagRO indicates whether or not the value field of a reflect.Value is
// read-only.
//
// flagIndir indicates whether the value field of a reflect.Value is
// the actual data or a pointer to the data.
//
// These values are valid before golang commit 90a7c3c86944 which
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
flagKindShift = uintptr(flagKindWidth - 1)
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
var (
// flagRO indicates whether the value field of a reflect.Value
// is read-only.
flagRO flag
// flagAddr indicates whether the address of the reflect.Value's
// value may be taken.
flagAddr flag
)
func init() {
// Older versions of reflect.Value stored small integers directly in the
// ptr field (which is named val in the older versions). Versions
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
// scalar for this purpose which unfortunately came before the flag
// field, so the offset of the flag field is different for those
// versions.
//
// This code constructs a new reflect.Value from a known small integer
// and checks if the size of the reflect.Value struct indicates it has
// the scalar field. When it does, the offsets are updated accordingly.
vv := reflect.ValueOf(0xf00)
if unsafe.Sizeof(vv) == (ptrSize * 4) {
offsetScalar = ptrSize * 2
offsetFlag = ptrSize * 3
}
// flagKindMask holds the bits that make up the kind
// part of the flags field. In all the supported versions,
// it is in the lower 5 bits.
const flagKindMask = flag(0x1f)
// Commit 90a7c3c86944 changed the flag positions such that the low
// order bits are the kind. This code extracts the kind from the flags
// field and ensures it's the correct type. When it's not, the flag
// order has been changed to the newer format, so the flags are updated
// accordingly.
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
upfv := *(*uintptr)(upf)
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
flagKindShift = 0
flagRO = 1 << 5
flagIndir = 1 << 6
// Different versions of Go have used different
// bit layouts for the flags type. This table
// records the known combinations.
var okFlags = []struct {
ro, addr flag
}{{
// From Go 1.4 to 1.5
ro: 1 << 5,
addr: 1 << 7,
}, {
// Up to Go tip.
ro: 1<<5 | 1<<6,
addr: 1 << 8,
}}
// Commit adf9b30e5594 modified the flags to separate the
// flagRO flag into two bits which specifies whether or not the
// field is embedded. This causes flagIndir to move over a bit
// and means that flagRO is the combination of either of the
// original flagRO bit and the new bit.
//
// This code detects the change by extracting what used to be
// the indirect bit to ensure it's set. When it's not, the flag
// order has been changed to the newer format, so the flags are
// updated accordingly.
if upfv&flagIndir == 0 {
flagRO = 3 << 5
flagIndir = 1 << 7
}
var flagValOffset = func() uintptr {
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
if !ok {
panic("reflect.Value has no flag field")
}
return field.Offset
}()
// flagField returns a pointer to the flag field of a reflect.Value.
func flagField(v *reflect.Value) *flag {
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
@ -119,34 +90,56 @@ func init() {
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
indirects := 1
vt := v.Type()
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
if rvf&flagIndir != 0 {
vt = reflect.PtrTo(v.Type())
indirects++
} else if offsetScalar != 0 {
// The value is in the scalar field when it's not one of the
// reference types.
switch vt.Kind() {
case reflect.Uintptr:
case reflect.Chan:
case reflect.Func:
case reflect.Map:
case reflect.Ptr:
case reflect.UnsafePointer:
default:
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
offsetScalar)
func unsafeReflectValue(v reflect.Value) reflect.Value {
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
return v
}
flagFieldPtr := flagField(&v)
*flagFieldPtr &^= flagRO
*flagFieldPtr |= flagAddr
return v
}
// Sanity checks against future reflect package changes
// to the type or semantics of the Value.flag field.
func init() {
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
if !ok {
panic("reflect.Value has no flag field")
}
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
panic("reflect.Value flag field has changed kind")
}
type t0 int
var t struct {
A t0
// t0 will have flagEmbedRO set.
t0
// a will have flagStickyRO set
a t0
}
vA := reflect.ValueOf(t).FieldByName("A")
va := reflect.ValueOf(t).FieldByName("a")
vt0 := reflect.ValueOf(t).FieldByName("t0")
// Infer flagRO from the difference between the flags
// for the (otherwise identical) fields in t.
flagPublic := *flagField(&vA)
flagWithRO := *flagField(&va) | *flagField(&vt0)
flagRO = flagPublic ^ flagWithRO
// Infer flagAddr from the difference between a value
// taken from a pointer and not.
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
flagNoPtr := *flagField(&vA)
flagPtr := *flagField(&vPtrA)
flagAddr = flagNoPtr ^ flagPtr
// Check that the inferred flags tally with one of the known versions.
for _, f := range okFlags {
if flagRO == f.ro && flagAddr == f.addr {
return
}
}
pv := reflect.NewAt(vt, upv)
rv = pv
for i := 0; i < indirects; i++ {
rv = rv.Elem()
}
return rv
panic("reflect.Value read-only flag has changed semantics")
}
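// For context, this is what lets the spew printers work with unexported
// struct fields: formatting paths that need Interface(), such as the
// Stringer and error checks, go through unsafeReflectValue first. A tiny
// usage sketch (the config type here is made up):
//
//	type config struct {
//		name string // unexported
//		max  int
//	}
//
//	spew.Dump(config{name: "dispatch", max: 3})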

View File

@ -16,7 +16,7 @@
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe
// +build js appengine safe disableunsafe !go1.4
package spew

View File

@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
w.Write(closeParenBytes)
}
// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.

View File

@ -35,16 +35,16 @@ var (
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)
// dumpState contains information about the state of a dump operation.
@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
case nilFound == true:
case nilFound:
d.w.Write(nilAngleBytes)
case cycleFound == true:
case cycleFound:
d.w.Write(circularBytes)
default:

View File

@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {
// Display dereferenced value.
switch {
case nilFound == true:
case nilFound:
f.fs.Write(nilAngleBytes)
case cycleFound == true:
case cycleFound:
f.fs.Write(circularShortBytes)
default:

View File

@ -4,8 +4,12 @@
package brotli
import "io"
import "bufio"
import (
"bufio"
"io"
"github.com/dsnet/compress/internal/errors"
)
// The bitReader preserves the property that it will never read more bytes than
// is necessary. However, this feature dramatically hurts performance because
@ -101,7 +105,7 @@ func (br *bitReader) FeedBits(nb uint) {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
panic(err)
errors.Panic(err)
}
}
cnt := int(64-br.numBits) / 8
@ -125,7 +129,7 @@ func (br *bitReader) FeedBits(nb uint) {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
panic(err)
errors.Panic(err)
}
br.bufBits |= uint64(c) << br.numBits
br.numBits += 8
@ -137,7 +141,7 @@ func (br *bitReader) FeedBits(nb uint) {
// Read reads up to len(buf) bytes into buf.
func (br *bitReader) Read(buf []byte) (cnt int, err error) {
if br.numBits%8 != 0 {
return 0, Error("non-aligned bit buffer")
return 0, errorf(errors.Invalid, "non-aligned bit buffer")
}
if br.numBits > 0 {
for cnt = 0; len(buf) > cnt && br.numBits > 0; cnt++ {
@ -206,7 +210,7 @@ func (br *bitReader) TryReadSymbol(pd *prefixDecoder) (uint, bool) {
// ReadSymbol reads the next prefix symbol using the provided prefixDecoder.
func (br *bitReader) ReadSymbol(pd *prefixDecoder) uint {
if len(pd.chunks) == 0 {
panic(ErrCorrupt) // Decode with empty tree
errors.Panic(errInvalid) // Decode with empty tree
}
nb := uint(pd.minBits)
@ -256,12 +260,12 @@ func (br *bitReader) readSimplePrefixCode(pd *prefixDecoder, maxSyms uint) {
codes[i].sym = uint32(br.ReadBits(clen))
}
var copyLens = func(lens []uint) {
copyLens := func(lens []uint) {
for i := 0; i < nsym; i++ {
codes[i].len = uint32(lens[i])
}
}
var compareSwap = func(i, j int) {
compareSwap := func(i, j int) {
if codes[i].sym > codes[j].sym {
codes[i], codes[j] = codes[j], codes[i]
}
@ -291,7 +295,7 @@ func (br *bitReader) readSimplePrefixCode(pd *prefixDecoder, maxSyms uint) {
compareSwap(1, 2)
}
if uint(codes[nsym-1].sym) >= maxSyms {
panic(ErrCorrupt) // Symbol goes beyond range of alphabet
errors.Panic(errCorrupted) // Symbol goes beyond range of alphabet
}
pd.Init(codes[:nsym], true) // Must have 1..4 symbols
}
@ -317,7 +321,7 @@ func (br *bitReader) readComplexPrefixCode(pd *prefixDecoder, maxSyms, hskip uin
}
}
if len(codeCLens) < 1 {
panic(ErrCorrupt)
errors.Panic(errCorrupted)
}
br.prefix.Init(codeCLens, true) // Must have 1..len(complexLens) symbols
@ -367,7 +371,7 @@ func (br *bitReader) readComplexPrefixCode(pd *prefixDecoder, maxSyms, hskip uin
}
}
if len(codes) < 2 || sym > maxSyms {
panic(ErrCorrupt)
errors.Panic(errCorrupted)
}
pd.Init(codes, true) // Must have 2..maxSyms symbols
}

View File

@ -2,41 +2,49 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package brotli implements the Brotli compressed data format.
// Package brotli implements the Brotli compressed data format,
// described in RFC 7932.
package brotli
import "runtime"
import (
"fmt"
// Error is the wrapper type for errors specific to this library.
type Error string
func (e Error) Error() string { return "brotli: " + string(e) }
var (
ErrCorrupt error = Error("stream is corrupted")
"github.com/dsnet/compress/internal/errors"
)
func errRecover(err *error) {
switch ex := recover().(type) {
case nil:
// Do nothing.
case runtime.Error:
panic(ex)
case error:
*err = ex
default:
panic(ex)
func errorf(c int, f string, a ...interface{}) error {
return errors.Error{Code: c, Pkg: "brotli", Msg: fmt.Sprintf(f, a...)}
}
// errWrap converts a lower-level errors.Error into an error from this package.
// The replaceCode passed in will be used to replace the code for any errors
// with the errors.Invalid code.
//
// For the Reader, set this to errors.Corrupted.
// For the Writer, set this to errors.Internal.
func errWrap(err error, replaceCode int) error {
if cerr, ok := err.(errors.Error); ok {
if errors.IsInvalid(cerr) {
cerr.Code = replaceCode
}
err = errorf(cerr.Code, "%s", cerr.Msg)
}
return err
}
var (
errClosed = errorf(errors.Closed, "")
errCorrupted = errorf(errors.Corrupted, "")
errInvalid = errorf(errors.Invalid, "")
errUnaligned = errorf(errors.Invalid, "non-aligned bit buffer")
)
var (
reverseLUT [256]uint8
mtfLUT [256]uint8
)
func init() {
initLUTs()
printLUTs() // Only occurs in debug mode
}
func initLUTs() {
@ -47,9 +55,6 @@ func initLUTs() {
}
func initCommonLUTs() {
for i := range mtfLUT {
mtfLUT[i] = uint8(i)
}
for i := range reverseLUT {
b := uint8(i)
b = (b&0xaa)>>1 | (b&0x55)<<1
@ -61,7 +66,7 @@ func initCommonLUTs() {
// neededBits computes the minimum number of bits needed to encode n elements.
func neededBits(n uint32) (nb uint) {
for n -= 1; n > 0; n >>= 1 {
for n--; n > 0; n >>= 1 {
nb++
}
return
@ -81,55 +86,6 @@ func reverseBits(v uint32, n uint) uint32 {
return reverseUint32(v << (32 - n))
}
// moveToFront is a data structure that allows for more efficient move-to-front
// transformations (described in RFC section 7.3). Since most transformations
// only involve a fairly low number of symbols, it can be quite expensive
// filling out the dict with values 0..255 for every call. Instead, we remember
// what part of the dict was altered and make sure we reset it at the beginning
// of every encode and decode operation.
type moveToFront struct {
dict [256]uint8 // Mapping from indexes to values
tail int // Number of tail bytes that are already ordered
}
func (m *moveToFront) Encode(vals []uint8) {
// Reset dict to be identical to mtfLUT.
copy(m.dict[:], mtfLUT[:256-m.tail])
var max int
for i, val := range vals {
var idx uint8 // Reverse lookup idx in dict
for di, dv := range m.dict {
if dv == val {
idx = uint8(di)
break
}
}
vals[i] = idx
max |= int(idx)
copy(m.dict[1:], m.dict[:idx])
m.dict[0] = val
}
m.tail = 256 - max - 1
}
func (m *moveToFront) Decode(idxs []uint8) {
// Reset dict to be identical to mtfLUT.
copy(m.dict[:], mtfLUT[:256-m.tail])
var max int
for i, idx := range idxs {
val := m.dict[idx] // Forward lookup val in dict
idxs[i] = val
max |= int(idx)
copy(m.dict[1:], m.dict[:idx])
m.dict[0] = val
}
m.tail = 256 - max - 1
}
func allocUint8s(s []uint8, n int) []uint8 {
if cap(s) >= n {
return s[:n]

View File

@ -1,241 +0,0 @@
// Copyright 2015, Joe Tsai. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build debug
package brotli
import "os"
import "fmt"
import "strings"
const debug = true
func printLUTs() {
var output = os.Stderr
printVar := func(name string, obj interface{}) {
var body string
if bs, ok := obj.([]uint8); ok && len(bs) >= 256 {
// Special case handling for large []uint8 to form 16x16 blocks.
var ss []string
ss = append(ss, "{")
var s string
for i, b := range bs {
s += fmt.Sprintf("%02x ", b)
if i%16 == 15 || i+1 == len(bs) {
ss = append(ss, "\t"+s+"")
s = ""
}
if i%256 == 255 && (i+1 != len(bs)) {
ss = append(ss, "")
}
}
ss = append(ss, "}")
body = strings.Join(ss, "\n")
} else {
body = fmt.Sprintf("%v", obj)
}
fmt.Fprintf(output, "var %s %T = %v\n", name, obj, body)
}
// Common LUTs.
printVar("reverseLUT", reverseLUT[:])
printVar("mtfLUT", mtfLUT[:])
fmt.Fprintln(output)
// Context LUTs.
printVar("contextP1LUT", contextP1LUT[:])
printVar("contextP2LUT", contextP2LUT[:])
fmt.Fprintln(output)
// Static dictionary LUTs.
printVar("dictBitSizes", dictBitSizes)
printVar("dictSizes", dictSizes)
printVar("dictOffsets", dictOffsets)
fmt.Fprintln(output)
// Prefix LUTs.
printVar("simpleLens1", simpleLens1)
printVar("simpleLens2", simpleLens2)
printVar("simpleLens3", simpleLens3)
printVar("simpleLens4a", simpleLens4a)
printVar("simpleLens4b", simpleLens4b)
printVar("complexLens", complexLens)
fmt.Fprintln(output)
printVar("insLenRanges", rangeCodes(insLenRanges))
printVar("cpyLenRanges", rangeCodes(cpyLenRanges))
printVar("blkLenRanges", rangeCodes(blkLenRanges))
printVar("maxRLERanges", rangeCodes(maxRLERanges))
fmt.Fprintln(output)
printVar("codeCLens", prefixCodes(codeCLens))
printVar("decCLens", decCLens)
printVar("encCLens", encCLens)
fmt.Fprintln(output)
printVar("codeMaxRLE", prefixCodes(codeMaxRLE))
printVar("decMaxRLE", decMaxRLE)
printVar("encMaxRLE", encMaxRLE)
fmt.Fprintln(output)
printVar("codeWinBits", prefixCodes(codeWinBits))
printVar("decWinBits", decWinBits)
printVar("encWinBits", encWinBits)
fmt.Fprintln(output)
printVar("codeCounts", prefixCodes(codeCounts))
printVar("decCounts", decCounts)
printVar("encCounts", encCounts)
fmt.Fprintln(output)
printVar("iacLUT", typeIaCLUT(iacLUT))
printVar("distShortLUT", typeDistShortLUT(distShortLUT))
printVar("distLongLUT", typeDistLongLUT(distLongLUT))
fmt.Fprintln(output)
}
func tabs(s string, n int) string {
tabs := strings.Repeat("\t", n)
return strings.Join(strings.Split(s, "\n"), "\n"+tabs)
}
type rangeCodes []rangeCode
func (rc rangeCodes) String() (s string) {
var maxBits, maxBase int
for _, c := range rc {
if maxBits < int(c.bits) {
maxBits = int(c.bits)
}
if maxBase < int(c.base) {
maxBase = int(c.base)
}
}
var ss []string
ss = append(ss, "{")
maxSymDig := len(fmt.Sprintf("%d", len(rc)-1))
maxBitsDig := len(fmt.Sprintf("%d", maxBits))
maxBaseDig := len(fmt.Sprintf("%d", maxBase))
for i, c := range rc {
base := fmt.Sprintf(fmt.Sprintf("%%%dd", maxBaseDig), c.base)
if c.bits > 0 {
base += fmt.Sprintf("-%d", c.base+1<<c.bits-1)
}
ss = append(ss, fmt.Sprintf(
fmt.Sprintf("\t%%%dd: {bits: %%%dd, base: %%s},",
maxSymDig, maxBitsDig),
i, c.bits, base,
))
}
ss = append(ss, "}")
return strings.Join(ss, "\n")
}
type prefixCodes []prefixCode
func (pc prefixCodes) String() (s string) {
var maxSym, maxLen int
for _, c := range pc {
if maxSym < int(c.sym) {
maxSym = int(c.sym)
}
if maxLen < int(c.len) {
maxLen = int(c.len)
}
}
var ss []string
ss = append(ss, "{")
maxSymDig := len(fmt.Sprintf("%d", maxSym))
for _, c := range pc {
ss = append(ss, fmt.Sprintf(
fmt.Sprintf("\t%%%dd:%s%%0%db,",
maxSymDig, strings.Repeat(" ", 2+maxLen-int(c.len)), c.len),
c.sym, c.val,
))
}
ss = append(ss, "}")
return strings.Join(ss, "\n")
}
func (pd prefixDecoder) String() string {
var ss []string
ss = append(ss, "{")
if len(pd.chunks) > 0 {
ss = append(ss, "\tchunks: {")
for i, c := range pd.chunks {
l := "sym"
if uint(c&prefixCountMask) > uint(pd.chunkBits) {
l = "idx"
}
ss = append(ss, fmt.Sprintf(
fmt.Sprintf("\t\t%%0%db: {%%s: %%3d, len: %%2d},", pd.chunkBits),
i, l, c>>prefixCountBits, c&prefixCountMask,
))
}
ss = append(ss, "\t},")
for j, links := range pd.links {
ss = append(ss, fmt.Sprintf("\tlinks[%d]: {", j))
linkBits := len(fmt.Sprintf("%b", pd.linkMask))
for i, c := range links {
ss = append(ss, fmt.Sprintf(
fmt.Sprintf("\t\t%%0%db: {sym: %%3d, len: %%2d},", linkBits),
i, c>>prefixCountBits, c&prefixCountMask,
))
}
ss = append(ss, "\t},")
}
ss = append(ss, fmt.Sprintf("\tchunkMask: %b,", pd.chunkMask))
ss = append(ss, fmt.Sprintf("\tlinkMask: %b,", pd.linkMask))
ss = append(ss, fmt.Sprintf("\tchunkBits: %d,", pd.chunkBits))
ss = append(ss, fmt.Sprintf("\tminBits: %d,", pd.minBits))
ss = append(ss, fmt.Sprintf("\tnumSyms: %d,", pd.numSyms))
}
ss = append(ss, "}")
return strings.Join(ss, "\n")
}
type typeIaCLUT [numIaCSyms]struct{ ins, cpy rangeCode }
func (t typeIaCLUT) String() string {
var ss []string
var ins, cpy rangeCodes
for _, rec := range t {
ins = append(ins, rec.ins)
cpy = append(cpy, rec.cpy)
}
ss = append(ss, "{")
ss = append(ss, "\tins: "+tabs(ins.String(), 1)+",")
ss = append(ss, "\tcpy: "+tabs(cpy.String(), 1)+",")
ss = append(ss, "}")
return strings.Join(ss, "\n")
}
type typeDistShortLUT [16]struct{ index, delta int }
func (t typeDistShortLUT) String() string {
var ss []string
ss = append(ss, "{")
for i, rec := range t {
ss = append(ss, fmt.Sprintf("\t%2d: {index: %d, delta: %+2d},", i, rec.index, rec.delta))
}
ss = append(ss, "}")
return strings.Join(ss, "\n")
}
type typeDistLongLUT [4][]rangeCode
func (t typeDistLongLUT) String() string {
var ss []string
ss = append(ss, "{")
for i, rc := range t {
ss = append(ss, fmt.Sprintf("\t%d: %s,", i, tabs(rangeCodes(rc).String(), 1)))
}
ss = append(ss, "}")
return strings.Join(ss, "\n")
}

View File

@ -132,7 +132,7 @@ func initPrefixLUTs() {
}
func initPrefixRangeLUTs() {
var makeRanges = func(base uint, bits []uint) (rc []rangeCode) {
makeRanges := func(base uint, bits []uint) (rc []rangeCode) {
for _, nb := range bits {
rc = append(rc, rangeCode{base: uint32(base), bits: uint32(nb)})
base += 1 << nb
@ -158,7 +158,7 @@ func initPrefixCodeLUTs() {
// Prefix code for reading code lengths in RFC section 3.5.
codeCLens = nil
for sym, clen := range []uint{2, 4, 3, 2, 2, 4} {
var code = prefixCode{sym: uint32(sym), len: uint32(clen)}
code := prefixCode{sym: uint32(sym), len: uint32(clen)}
codeCLens = append(codeCLens, code)
}
decCLens.Init(codeCLens, true)
@ -167,7 +167,7 @@ func initPrefixCodeLUTs() {
// Prefix code for reading RLEMAX in RFC section 7.3.
codeMaxRLE = []prefixCode{{sym: 0, val: 0, len: 1}}
for i := uint32(0); i < 16; i++ {
var code = prefixCode{sym: i + 1, val: i<<1 | 1, len: 5}
code := prefixCode{sym: i + 1, val: i<<1 | 1, len: 5}
codeMaxRLE = append(codeMaxRLE, code)
}
decMaxRLE.Init(codeMaxRLE, false)
@ -196,12 +196,12 @@ func initPrefixCodeLUTs() {
// Prefix code for reading counts in RFC section 9.2.
// This is used for: NBLTYPESL, NBLTYPESI, NBLTYPESD, NTREESL, and NTREESD.
codeCounts = []prefixCode{{sym: 1, val: 0, len: 1}}
var code = codeCounts[len(codeCounts)-1]
code := codeCounts[len(codeCounts)-1]
for i := uint32(0); i < 8; i++ {
for j := uint32(0); j < 1<<i; j++ {
code.sym = code.sym + 1
code.val = j<<4 | i<<1 | 1
code.len = uint32(i + 4)
code.len = i + 4
codeCounts = append(codeCounts, code)
}
}
@ -254,9 +254,9 @@ func initLengthLUTs() {
case distSym < 4:
index, delta = distSym, 0
case distSym < 10:
index, delta = 0, int(distSym/2-1)
index, delta = 0, distSym/2-1
case distSym < 16:
index, delta = 1, int(distSym/2-4)
index, delta = 1, distSym/2-4
}
if distSym%2 == 0 {
delta *= -1

View File

@ -4,6 +4,10 @@
package brotli
import (
"github.com/dsnet/compress/internal/errors"
)
// The algorithm used to decode variable length codes is based on the lookup
// method in zlib. If the code is less-than-or-equal to prefixMaxChunkBits,
// then the symbol can be decoded using a single lookup into the chunks table.
@ -80,7 +84,7 @@ func (pd *prefixDecoder) Init(codes []prefixCode, assignCodes bool) {
minBits, maxBits, symLast := c0.len, c0.len, int(c0.sym)
for _, c := range codes[1:] {
if int(c.sym) <= symLast {
panic(ErrCorrupt) // Non-unique or non-monotonically increasing
errors.Panic(errCorrupted) // Non-unique or non-monotonically increasing
}
if minBits > c.len {
minBits = c.len
@ -92,10 +96,10 @@ func (pd *prefixDecoder) Init(codes []prefixCode, assignCodes bool) {
symLast = int(c.sym) // Keep track of last symbol
}
if maxBits >= 1<<prefixCountBits || minBits == 0 {
panic(ErrCorrupt) // Bit-width is too long or too short
errors.Panic(errCorrupted) // Bit-width is too long or too short
}
if symLast >= 1<<prefixSymbolBits {
panic(ErrCorrupt) // Alphabet cardinality too large
errors.Panic(errCorrupted) // Alphabet cardinality too large
}
// Compute the next code for a symbol of a given bit length.
@ -107,7 +111,7 @@ func (pd *prefixDecoder) Init(codes []prefixCode, assignCodes bool) {
code += bitCnts[i]
}
if code != 1<<maxBits {
panic(ErrCorrupt) // Tree is under or over subscribed
errors.Panic(errCorrupted) // Tree is under or over subscribed
}
// Allocate chunks table if necessary.
@ -134,7 +138,7 @@ func (pd *prefixDecoder) Init(codes []prefixCode, assignCodes bool) {
for linkIdx := range pd.links {
code := reverseBits(uint32(baseCode)+uint32(linkIdx), uint(pd.chunkBits))
pd.links[linkIdx] = allocUint32s(pd.links[linkIdx], numLinks)
pd.chunks[code] = uint32(linkIdx<<prefixCountBits) | uint32(pd.chunkBits+1)
pd.chunks[code] = uint32(linkIdx<<prefixCountBits) | (pd.chunkBits + 1)
}
} else {
for i := range pd.chunks {
@ -151,14 +155,14 @@ func (pd *prefixDecoder) Init(codes []prefixCode, assignCodes bool) {
linkIdx := len(pd.links)
pd.links = extendSliceUints32s(pd.links, len(pd.links)+1)
pd.links[linkIdx] = allocUint32s(pd.links[linkIdx], numLinks)
pd.chunks[code] = uint32(linkIdx<<prefixCountBits) | uint32(pd.chunkBits+1)
pd.chunks[code] = uint32(linkIdx<<prefixCountBits) | (pd.chunkBits + 1)
}
}
}
// Fill out chunks and links tables with values.
for i, c := range codes {
chunk := c.sym<<prefixCountBits | uint32(c.len)
chunk := c.sym<<prefixCountBits | c.len
if assignCodes {
codes[i].val = reverseBits(uint32(nextCodes[c.len]), uint(c.len))
nextCodes[c.len]++
@ -179,22 +183,4 @@ func (pd *prefixDecoder) Init(codes []prefixCode, assignCodes bool) {
}
}
}
if debug && !checkPrefixes(codes) {
panic(ErrCorrupt) // The codes do not form a valid prefix tree.
}
}
// checkPrefixes reports whether any codes have overlapping prefixes.
// This check is expensive and runs in O(n^2) time!
func checkPrefixes(codes []prefixCode) bool {
for i, c1 := range codes {
for j, c2 := range codes {
mask := uint32(1)<<c1.len - 1
if i != j && c1.len <= c2.len && c1.val&mask == c2.val&mask {
return false
}
}
}
return true
}

View File

@ -4,8 +4,13 @@
package brotli
import "io"
import "io/ioutil"
import (
"io"
"io/ioutil"
"github.com/dsnet/compress/internal"
"github.com/dsnet/compress/internal/errors"
)
type Reader struct {
InputOffset int64 // Total number of bytes read from underlying io.Reader
@ -22,11 +27,11 @@ type Reader struct {
step func(*Reader) // Single step of decompression work (can panic)
stepState int // The sub-step state for certain steps
mtf moveToFront // Local move-to-front decoder
dict dictDecoder // Dynamic sliding dictionary
iacBlk blockDecoder // Insert-and-copy block decoder
litBlk blockDecoder // Literal block decoder
distBlk blockDecoder // Distance block decoder
mtf internal.MoveToFront // Local move-to-front decoder
dict dictDecoder // Dynamic sliding dictionary
iacBlk blockDecoder // Insert-and-copy block decoder
litBlk blockDecoder // Literal block decoder
distBlk blockDecoder // Distance block decoder
// Literal decoding state fields.
litMapType []uint8 // The current literal context map for the current block type
@ -87,7 +92,7 @@ func (br *Reader) Read(buf []byte) (int, error) {
// Perform next step in decompression process.
br.rd.offset = br.InputOffset
func() {
defer errRecover(&br.err)
defer errors.Recover(&br.err)
br.step(br)
}()
br.InputOffset = br.rd.FlushOffset()
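// The step functions below propagate failures by panicking with an error
// (errors.Panic), and the closure above converts that panic back into
// br.err via errors.Recover. A generic sketch of the same pattern with
// hypothetical names (real implementations also re-panic runtime errors):
//
//	func recoverError(dst *error) {
//		if r := recover(); r != nil {
//			if err, ok := r.(error); ok {
//				*dst = err
//				return
//			}
//			panic(r) // not an error value: propagate
//		}
//	}
//
//	func (d *decoder) Read(p []byte) (int, error) {
//		func() {
//			defer recoverError(&d.err)
//			d.step(d) // may panic with an error deep in the decode path
//		}()
//		return d.copyOut(p), d.err
//	}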
@ -131,9 +136,9 @@ func (br *Reader) Reset(r io.Reader) error {
// readStreamHeader reads the Brotli stream header according to RFC section 9.1.
func (br *Reader) readStreamHeader() {
wbits := uint(br.rd.ReadSymbol(&decWinBits))
wbits := br.rd.ReadSymbol(&decWinBits)
if wbits == 0 {
panic(ErrCorrupt) // Reserved value used
errors.Panic(errCorrupted) // Reserved value used
}
size := int(1<<wbits) - 16
br.dict.Init(size)
@ -144,9 +149,9 @@ func (br *Reader) readStreamHeader() {
func (br *Reader) readBlockHeader() {
if br.last {
if br.rd.ReadPads() > 0 {
panic(ErrCorrupt)
errors.Panic(errCorrupted)
}
panic(io.EOF)
errors.Panic(io.EOF)
}
// Read ISLAST and ISLASTEMPTY.
@ -159,40 +164,39 @@ func (br *Reader) readBlockHeader() {
// Read MLEN and MNIBBLES and process meta data.
var blkLen int // 1..1<<24
if nibbles := br.rd.ReadBits(2) + 4; nibbles == 7 {
nibbles := br.rd.ReadBits(2) + 4
if nibbles == 7 {
if reserved := br.rd.ReadBits(1) == 1; reserved {
panic(ErrCorrupt)
errors.Panic(errCorrupted)
}
var skipLen int // 0..1<<24
if skipBytes := br.rd.ReadBits(2); skipBytes > 0 {
skipLen = int(br.rd.ReadBits(skipBytes * 8))
if skipBytes > 1 && skipLen>>((skipBytes-1)*8) == 0 {
panic(ErrCorrupt) // Shortest representation not used
errors.Panic(errCorrupted) // Shortest representation not used
}
skipLen++
}
if br.rd.ReadPads() > 0 {
panic(ErrCorrupt)
errors.Panic(errCorrupted)
}
br.blkLen = skipLen // Use blkLen to track meta data number of bytes
br.blkLen = skipLen // Use blkLen to track metadata number of bytes
br.readMetaData()
return
} else {
blkLen = int(br.rd.ReadBits(nibbles * 4))
if nibbles > 4 && blkLen>>((nibbles-1)*4) == 0 {
panic(ErrCorrupt) // Shortest representation not used
}
blkLen++
}
br.blkLen = blkLen
blkLen = int(br.rd.ReadBits(nibbles * 4))
if nibbles > 4 && blkLen>>((nibbles-1)*4) == 0 {
errors.Panic(errCorrupted) // Shortest representation not used
}
br.blkLen = blkLen + 1
// Read ISUNCOMPRESSED and process uncompressed data.
if !br.last {
if uncompressed := br.rd.ReadBits(1) == 1; uncompressed {
if br.rd.ReadPads() > 0 {
panic(ErrCorrupt)
errors.Panic(errCorrupted)
}
br.readRawData()
return
@ -209,9 +213,9 @@ func (br *Reader) readMetaData() {
br.metaBuf = make([]byte, 4096) // Lazy allocate
}
if cnt, err := io.CopyBuffer(br.metaWr, &br.metaRd, br.metaBuf); err != nil {
panic(err) // Will never panic with io.EOF
errors.Panic(err) // Will never panic with io.EOF
} else if cnt < int64(br.blkLen) {
panic(io.ErrUnexpectedEOF)
errors.Panic(io.ErrUnexpectedEOF)
}
br.step = (*Reader).readBlockHeader
}
@ -230,7 +234,7 @@ func (br *Reader) readRawData() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
panic(err)
errors.Panic(err)
}
if br.blkLen > 0 {
@ -402,9 +406,8 @@ startCommand:
br.distZero = iacSym < 128
if br.insLen > 0 {
goto readLiterals
} else {
goto readDistance
}
goto readDistance
}
readLiterals:
@ -443,11 +446,11 @@ readLiterals:
br.step = (*Reader).readCommands
br.stepState = stateLiterals // Need to continue work here
return
} else if br.blkLen > 0 {
goto readDistance
} else {
goto finishCommand
}
if br.blkLen > 0 {
goto readDistance
}
goto finishCommand
}
readDistance:
@ -484,7 +487,7 @@ readDistance:
}
br.distZero = bool(distSym == 0)
if br.dist <= 0 {
panic(ErrCorrupt)
errors.Panic(errCorrupted)
}
}
@ -496,9 +499,8 @@ readDistance:
br.dists[0] = br.dist
}
goto copyDynamicDict
} else {
goto copyStaticDict
}
goto copyStaticDict
}
copyDynamicDict:
@ -513,9 +515,8 @@ copyDynamicDict:
br.step = (*Reader).readCommands
br.stepState = stateDynamicDict // Need to continue work here
return
} else {
goto finishCommand
}
goto finishCommand
}
copyStaticDict:
@ -523,7 +524,7 @@ copyStaticDict:
{
if len(br.word) == 0 {
if br.cpyLen < minDictLen || br.cpyLen > maxDictLen {
panic(ErrCorrupt)
errors.Panic(errCorrupted)
}
wordIdx := br.dist - (br.dict.HistSize() + 1)
index := wordIdx % dictSizes[br.cpyLen]
@ -531,7 +532,7 @@ copyStaticDict:
baseWord := dictLUT[offset : offset+br.cpyLen]
transformIdx := wordIdx >> uint(dictBitSizes[br.cpyLen])
if transformIdx >= len(transformLUT) {
panic(ErrCorrupt)
errors.Panic(errCorrupted)
}
cnt := transformWord(br.wordBuf[:], baseWord, transformIdx)
br.word = br.wordBuf[:cnt]
@ -548,16 +549,16 @@ copyStaticDict:
br.step = (*Reader).readCommands
br.stepState = stateStaticDict // Need to continue work here
return
} else {
goto finishCommand
}
goto finishCommand
}
finishCommand:
// Finish off this command and check if we need to loop again.
if br.blkLen < 0 {
panic(ErrCorrupt)
} else if br.blkLen > 0 {
errors.Panic(errCorrupted)
}
if br.blkLen > 0 {
goto startCommand // More commands in this block
}
@ -565,7 +566,6 @@ finishCommand:
br.toRead = br.dict.ReadFlush()
br.step = (*Reader).readBlockHeader
br.stepState = stateInit // Next call to readCommands must start here
return
}
// readContextMap reads the context map according to RFC section 7.3.
@ -590,7 +590,7 @@ func (br *Reader) readContextMap(cm []uint8, numTrees uint) {
// Repeated zeros.
n := int(br.rd.ReadOffset(sym-1, maxRLERanges))
if i+n > len(cm) {
panic(ErrCorrupt)
errors.Panic(errCorrupted)
}
for j := i + n; i < j; i++ {
cm[i] = 0

View File

@ -30,6 +30,6 @@ func (bw *writer) Close() error {
return nil
}
func (bw *writer) Reset(w io.Writer) {
return
func (bw *writer) Reset(w io.Writer) error {
return nil
}

107
vendor/github.com/dsnet/compress/internal/common.go generated vendored Normal file
View File

@ -0,0 +1,107 @@
// Copyright 2015, Joe Tsai. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package internal is a collection of common compression algorithms.
//
// For performance reasons, these packages lack strong error checking and
// require the caller to ensure that strict invariants are kept.
package internal
var (
// IdentityLUT returns the input key itself.
IdentityLUT = func() (lut [256]byte) {
for i := range lut {
lut[i] = uint8(i)
}
return lut
}()
// ReverseLUT returns the input key with its bits reversed.
ReverseLUT = func() (lut [256]byte) {
for i := range lut {
b := uint8(i)
b = (b&0xaa)>>1 | (b&0x55)<<1
b = (b&0xcc)>>2 | (b&0x33)<<2
b = (b&0xf0)>>4 | (b&0x0f)<<4
lut[i] = b
}
return lut
}()
)
// ReverseUint32 reverses all bits of v.
func ReverseUint32(v uint32) (x uint32) {
x |= uint32(ReverseLUT[byte(v>>0)]) << 24
x |= uint32(ReverseLUT[byte(v>>8)]) << 16
x |= uint32(ReverseLUT[byte(v>>16)]) << 8
x |= uint32(ReverseLUT[byte(v>>24)]) << 0
return x
}
// ReverseUint32N reverses the lower n bits of v.
func ReverseUint32N(v uint32, n uint) (x uint32) {
return ReverseUint32(v << (32 - n))
}
// ReverseUint64 reverses all bits of v.
func ReverseUint64(v uint64) (x uint64) {
x |= uint64(ReverseLUT[byte(v>>0)]) << 56
x |= uint64(ReverseLUT[byte(v>>8)]) << 48
x |= uint64(ReverseLUT[byte(v>>16)]) << 40
x |= uint64(ReverseLUT[byte(v>>24)]) << 32
x |= uint64(ReverseLUT[byte(v>>32)]) << 24
x |= uint64(ReverseLUT[byte(v>>40)]) << 16
x |= uint64(ReverseLUT[byte(v>>48)]) << 8
x |= uint64(ReverseLUT[byte(v>>56)]) << 0
return x
}
// ReverseUint64N reverses the lower n bits of v.
func ReverseUint64N(v uint64, n uint) (x uint64) {
return ReverseUint64(v << (64 - n))
}
// MoveToFront is a data structure that allows for more efficient move-to-front
// transformations. This specific implementation assumes that the alphabet is
// densely packed within 0..255.
type MoveToFront struct {
dict [256]uint8 // Mapping from indexes to values
tail int // Number of tail bytes that are already ordered
}
func (m *MoveToFront) Encode(vals []uint8) {
copy(m.dict[:], IdentityLUT[:256-m.tail]) // Reset dict to be identity
var max int
for i, val := range vals {
var idx uint8 // Reverse lookup idx in dict
for di, dv := range m.dict {
if dv == val {
idx = uint8(di)
break
}
}
vals[i] = idx
max |= int(idx)
copy(m.dict[1:], m.dict[:idx])
m.dict[0] = val
}
m.tail = 256 - max - 1
}
func (m *MoveToFront) Decode(idxs []uint8) {
copy(m.dict[:], IdentityLUT[:256-m.tail]) // Reset dict to be identity
var max int
for i, idx := range idxs {
val := m.dict[idx] // Forward lookup val in dict
idxs[i] = val
max |= int(idx)
copy(m.dict[1:], m.dict[:idx])
m.dict[0] = val
}
m.tail = 256 - max - 1
}
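
The MoveToFront type above replaces the package-local decoder the brotli reader used before. A minimal round-trip sketch, written as if it lived inside the dsnet/compress module (an `internal` package cannot be imported from outside it):

```go
package internal

import "fmt"

// ExampleMoveToFront shows that Decode inverts Encode; both values start from
// the identity dictionary, so a separate encoder and decoder stay in sync.
func ExampleMoveToFront() {
	var enc, dec MoveToFront
	vals := []uint8{1, 1, 2, 0, 2, 1}
	enc.Encode(vals) // vals now holds move-to-front indexes
	dec.Decode(vals) // restores the original values
	fmt.Println(vals)
	// Output: [1 1 2 0 2 1]
}
```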

View File

@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build !debug
// +build debug,!gofuzz
package brotli
package internal
const debug = false
func printLUTs() {}
const (
Debug = true
GoFuzz = false
)

View File

@ -0,0 +1,120 @@
// Copyright 2016, Joe Tsai. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package errors implements functions to manipulate compression errors.
//
// In idiomatic Go, it is an anti-pattern to use panics as a form of error
// reporting in the API. Instead, the expected way to transmit errors is by
// returning an error value. Unfortunately, the checking of "err != nil" in
// tight loops commonly found in compression causes non-negligible performance
// degradation. While this may not be idiomatic, the internal packages of this
// repository rely on panics as a normal means to convey errors. In order to
// ensure that these panics do not leak across the public API, the public
// packages must recover from these panics and present an error value.
//
// The Panic and Recover functions in this package provide a safe way to
// recover from errors only generated from within this repository.
//
// Example usage:
// func Foo() (err error) {
// defer errors.Recover(&err)
//
// if rand.Intn(2) == 0 {
// // Unexpected panics will not be caught by Recover.
// io.Closer(nil).Close()
// } else {
// // Errors thrown by Panic will be caught by Recover.
// errors.Panic(errors.New("whoopsie"))
// }
// }
//
package errors
import "strings"
const (
// Unknown indicates that there is no classification for this error.
Unknown = iota
// Internal indicates that this error is due to an internal bug.
// Users should file an issue report if this type of error is encountered.
Internal
// Invalid indicates that this error is due to the user misusing the API
// and is indicative of a bug on the user's part.
Invalid
// Deprecated indicates the use of a deprecated and unsupported feature.
Deprecated
// Corrupted indicates that the input stream is corrupted.
Corrupted
// Closed indicates that the handlers are closed.
Closed
)
var codeMap = map[int]string{
Unknown: "unknown error",
Internal: "internal error",
Invalid: "invalid argument",
Deprecated: "deprecated format",
Corrupted: "corrupted input",
Closed: "closed handler",
}
type Error struct {
Code int // The error type
Pkg string // Name of the package where the error originated
Msg string // Descriptive message about the error (optional)
}
func (e Error) Error() string {
var ss []string
for _, s := range []string{e.Pkg, codeMap[e.Code], e.Msg} {
if s != "" {
ss = append(ss, s)
}
}
return strings.Join(ss, ": ")
}
func (e Error) CompressError() {}
func (e Error) IsInternal() bool { return e.Code == Internal }
func (e Error) IsInvalid() bool { return e.Code == Invalid }
func (e Error) IsDeprecated() bool { return e.Code == Deprecated }
func (e Error) IsCorrupted() bool { return e.Code == Corrupted }
func (e Error) IsClosed() bool { return e.Code == Closed }
func IsInternal(err error) bool { return isCode(err, Internal) }
func IsInvalid(err error) bool { return isCode(err, Invalid) }
func IsDeprecated(err error) bool { return isCode(err, Deprecated) }
func IsCorrupted(err error) bool { return isCode(err, Corrupted) }
func IsClosed(err error) bool { return isCode(err, Closed) }
func isCode(err error, code int) bool {
if cerr, ok := err.(Error); ok && cerr.Code == code {
return true
}
return false
}
// errWrap is used by Panic and Recover to ensure that only errors raised by
// Panic are recovered by Recover.
type errWrap struct{ e *error }
func Recover(err *error) {
switch ex := recover().(type) {
case nil:
// Do nothing.
case errWrap:
*err = *ex.e
default:
panic(ex)
}
}
func Panic(err error) {
panic(errWrap{&err})
}
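
A sketch of how callers are expected to pair Panic with Recover, assuming a hypothetical package that lives inside the dsnet/compress module (the internal/errors package is not importable from outside it):

```go
package example

import "github.com/dsnet/compress/internal/errors"

// errCorrupted is a package-level sentinel, mirroring how brotli uses one.
var errCorrupted = errors.Error{Code: errors.Corrupted, Pkg: "example"}

// Decode panics internally on bad input; errors.Recover converts the panic
// back into an ordinary error value at the exported API boundary.
func Decode(valid bool) (err error) {
	defer errors.Recover(&err)
	if !valid {
		errors.Panic(errCorrupted)
	}
	return nil
}
```

Callers can then classify the returned value with helpers such as errors.IsCorrupted(err).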

12
vendor/github.com/dsnet/compress/internal/gofuzz.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
// Copyright 2016, Joe Tsai. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build gofuzz
package internal
const (
Debug = true
GoFuzz = true
)

21
vendor/github.com/dsnet/compress/internal/release.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
// Copyright 2015, Joe Tsai. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build !debug,!gofuzz
package internal
// Debug indicates whether the debug build tag was set.
//
// If set, programs may choose to print with more human-readable
// debug information and also perform sanity checks that would otherwise be too
// expensive to run in a release build.
const Debug = false
// GoFuzz indicates whether the gofuzz build tag was set.
//
// If set, programs may choose to disable certain checks (like checksums) that
// would be nearly impossible for gofuzz to properly get right.
// If GoFuzz is set, it implies that Debug is set as well.
const GoFuzz = false

View File

@ -1,7 +1,4 @@
Go support for Protocol Buffers - Google's data interchange format
Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are

View File

@ -37,27 +37,9 @@ package proto
import (
"errors"
"fmt"
"reflect"
)
// RequiredNotSetError is the error returned if Marshal is called with
// a protocol buffer struct whose required fields have not
// all been initialized. It is also the error returned if Unmarshal is
// called with an encoded protocol buffer that does not include all the
// required fields.
//
// When printed, RequiredNotSetError reports the first unset required field in a
// message. If the field cannot be precisely determined, it is reported as
// "{Unknown}".
type RequiredNotSetError struct {
field string
}
func (e *RequiredNotSetError) Error() string {
return fmt.Sprintf("proto: required field %q not set", e.field)
}
var (
// errRepeatedHasNil is the error returned if Marshal is called with
// a struct with a repeated field containing a nil element.

View File

@ -265,7 +265,6 @@ package proto
import (
"encoding/json"
"errors"
"fmt"
"log"
"reflect"
@ -274,7 +273,66 @@ import (
"sync"
)
var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string")
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
// Marshal reports this when a required field is not initialized.
// Unmarshal reports this when a required field is missing from the wire data.
type RequiredNotSetError struct{ field string }
func (e *RequiredNotSetError) Error() string {
if e.field == "" {
return fmt.Sprintf("proto: required field not set")
}
return fmt.Sprintf("proto: required field %q not set", e.field)
}
func (e *RequiredNotSetError) RequiredNotSet() bool {
return true
}
type invalidUTF8Error struct{ field string }
func (e *invalidUTF8Error) Error() string {
if e.field == "" {
return "proto: invalid UTF-8 detected"
}
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
}
func (e *invalidUTF8Error) InvalidUTF8() bool {
return true
}
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
// This error should not be exposed to the external API as such errors should
// be recreated with the field information.
var errInvalidUTF8 = &invalidUTF8Error{}
// isNonFatal reports whether the error is either a RequiredNotSet error
// or a InvalidUTF8 error.
func isNonFatal(err error) bool {
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
return true
}
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
return true
}
return false
}
type nonFatal struct{ E error }
// Merge merges err into nf and reports whether it was successful.
// Otherwise it returns false for any fatal non-nil errors.
func (nf *nonFatal) Merge(err error) (ok bool) {
if err == nil {
return true // not an error
}
if !isNonFatal(err) {
return false // fatal error
}
if nf.E == nil {
nf.E = err // store first instance of non-fatal error
}
return true
}
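
A sketch of the accumulation pattern the marshal and unmarshal loops later in this diff rely on; nonFatal is unexported, so this only illustrates code inside package proto:

```go
package proto

// collectErrors mirrors the marshal loop: fatal errors abort immediately,
// while non-fatal ones (RequiredNotSet, InvalidUTF8) are remembered and the
// first of them is returned only after a complete pass.
func collectErrors(errs []error) error {
	var nerr nonFatal
	for _, err := range errs {
		if !nerr.Merge(err) {
			return err // fatal: stop here
		}
	}
	return nerr.E // first non-fatal error, or nil
}
```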
// Message is implemented by generated protocol buffer messages.
type Message interface {

View File

@ -139,7 +139,7 @@ type Properties struct {
Repeated bool
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
proto3 bool // whether this is known to be a proto3 field; set for []byte only
proto3 bool // whether this is known to be a proto3 field
oneof bool // whether this is a oneof field
Default string // default value
@ -148,9 +148,9 @@ type Properties struct {
stype reflect.Type // set for struct types only
sprop *StructProperties // set for struct types only
mtype reflect.Type // set for map types only
mkeyprop *Properties // set for map types only
mvalprop *Properties // set for map types only
mtype reflect.Type // set for map types only
MapKeyProp *Properties // set for map types only
MapValProp *Properties // set for map types only
}
// String formats the properties in the protobuf struct field tag style.
@ -275,16 +275,16 @@ func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, loc
case reflect.Map:
p.mtype = t1
p.mkeyprop = &Properties{}
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
p.mvalprop = &Properties{}
p.MapKeyProp = &Properties{}
p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
p.MapValProp = &Properties{}
vtype := p.mtype.Elem()
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
// The value type is not a message (*T) or bytes ([]byte),
// so we need encoders for the pointer to this type.
vtype = reflect.PtrTo(vtype)
}
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
if p.stype != nil {

View File

@ -231,7 +231,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
return b, err
}
var err, errreq error
var err, errLater error
// The old marshaler encodes extensions at beginning.
if u.extensions.IsValid() {
e := ptr.offset(u.extensions).toExtensions()
@ -252,11 +252,13 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
}
}
for _, f := range u.fields {
if f.required && errreq == nil {
if f.required {
if ptr.offset(f.field).getPointer().isNil() {
// Required field is not set.
// We record the error but keep going, to give a complete marshaling.
errreq = &RequiredNotSetError{f.name}
if errLater == nil {
errLater = &RequiredNotSetError{f.name}
}
continue
}
}
@ -269,14 +271,21 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
if err1, ok := err.(*RequiredNotSetError); ok {
// Required field in submessage is not set.
// We record the error but keep going, to give a complete marshaling.
if errreq == nil {
errreq = &RequiredNotSetError{f.name + "." + err1.field}
if errLater == nil {
errLater = &RequiredNotSetError{f.name + "." + err1.field}
}
continue
}
if err == errRepeatedHasNil {
err = errors.New("proto: repeated field " + f.name + " has nil element")
}
if err == errInvalidUTF8 {
if errLater == nil {
fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
errLater = &invalidUTF8Error{fullName}
}
continue
}
return b, err
}
}
@ -284,7 +293,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
s := *ptr.offset(u.unrecognized).toBytes()
b = append(b, s...)
}
return b, errreq
return b, errLater
}
// computeMarshalInfo initializes the marshal info.
@ -530,6 +539,7 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
packed := false
proto3 := false
validateUTF8 := true
for i := 2; i < len(tags); i++ {
if tags[i] == "packed" {
packed = true
@ -538,6 +548,7 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
proto3 = true
}
}
validateUTF8 = validateUTF8 && proto3
switch t.Kind() {
case reflect.Bool:
@ -735,6 +746,18 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
}
return sizeFloat64Value, appendFloat64Value
case reflect.String:
if validateUTF8 {
if pointer {
return sizeStringPtr, appendUTF8StringPtr
}
if slice {
return sizeStringSlice, appendUTF8StringSlice
}
if nozero {
return sizeStringValueNoZero, appendUTF8StringValueNoZero
}
return sizeStringValue, appendUTF8StringValue
}
if pointer {
return sizeStringPtr, appendStringPtr
}
@ -1984,9 +2007,6 @@ func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byt
}
func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
v := *ptr.toString()
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
@ -1997,9 +2017,6 @@ func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]b
if v == "" {
return b, nil
}
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
@ -2011,24 +2028,83 @@ func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, err
return b, nil
}
v := *p
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
return b, nil
}
func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
s := *ptr.toStringSlice()
for _, v := range s {
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
}
return b, nil
}
func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
var invalidUTF8 bool
v := *ptr.toString()
if !utf8.ValidString(v) {
invalidUTF8 = true
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
if invalidUTF8 {
return b, errInvalidUTF8
}
return b, nil
}
func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
var invalidUTF8 bool
v := *ptr.toString()
if v == "" {
return b, nil
}
if !utf8.ValidString(v) {
invalidUTF8 = true
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
if invalidUTF8 {
return b, errInvalidUTF8
}
return b, nil
}
func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
var invalidUTF8 bool
p := *ptr.toStringPtr()
if p == nil {
return b, nil
}
v := *p
if !utf8.ValidString(v) {
invalidUTF8 = true
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
if invalidUTF8 {
return b, errInvalidUTF8
}
return b, nil
}
func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
var invalidUTF8 bool
s := *ptr.toStringSlice()
for _, v := range s {
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
invalidUTF8 = true
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
}
if invalidUTF8 {
return b, errInvalidUTF8
}
return b, nil
}
func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
@ -2107,7 +2183,8 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
},
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
s := ptr.getPointerSlice()
var err, errreq error
var err error
var nerr nonFatal
for _, v := range s {
if v.isNil() {
return b, errRepeatedHasNil
@ -2115,22 +2192,14 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
b = appendVarint(b, wiretag) // start group
b, err = u.marshal(b, v, deterministic)
b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
if err != nil {
if _, ok := err.(*RequiredNotSetError); ok {
// Required field in submessage is not set.
// We record the error but keep going, to give a complete marshaling.
if errreq == nil {
errreq = err
}
continue
}
if !nerr.Merge(err) {
if err == ErrNil {
err = errRepeatedHasNil
}
return b, err
}
}
return b, errreq
return b, nerr.E
}
}
@ -2174,7 +2243,8 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
},
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
s := ptr.getPointerSlice()
var err, errreq error
var err error
var nerr nonFatal
for _, v := range s {
if v.isNil() {
return b, errRepeatedHasNil
@ -2184,22 +2254,14 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
b = appendVarint(b, uint64(siz))
b, err = u.marshal(b, v, deterministic)
if err != nil {
if _, ok := err.(*RequiredNotSetError); ok {
// Required field in submessage is not set.
// We record the error but keep going, to give a complete marshaling.
if errreq == nil {
errreq = err
}
continue
}
if !nerr.Merge(err) {
if err == ErrNil {
err = errRepeatedHasNil
}
return b, err
}
}
return b, errreq
return b, nerr.E
}
}
@ -2223,6 +2285,25 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
// value.
// Key cannot be pointer-typed.
valIsPtr := valType.Kind() == reflect.Ptr
// If value is a message with nested maps, calling
// valSizer in marshal may be quadratic. We should use
// cached version in marshal (but not in size).
// If value is not message type, we don't have size cache,
// but it cannot be nested either. Just use valSizer.
valCachedSizer := valSizer
if valIsPtr && valType.Elem().Kind() == reflect.Struct {
u := getMarshalInfo(valType.Elem())
valCachedSizer = func(ptr pointer, tagsize int) int {
// Same as message sizer, but use cache.
p := ptr.getPointer()
if p.isNil() {
return 0
}
siz := u.cachedsize(p)
return siz + SizeVarint(uint64(siz)) + tagsize
}
}
return func(ptr pointer, tagsize int) int {
m := ptr.asPointerTo(t).Elem() // the map
n := 0
@ -2243,24 +2324,26 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
if len(keys) > 1 && deterministic {
sort.Sort(mapKeys(keys))
}
var nerr nonFatal
for _, k := range keys {
ki := k.Interface()
vi := m.MapIndex(k).Interface()
kaddr := toAddrPointer(&ki, false) // pointer to key
vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
b = appendVarint(b, tag)
siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
b = appendVarint(b, uint64(siz))
b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
if err != nil {
if !nerr.Merge(err) {
return b, err
}
b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
if err != nil && err != ErrNil { // allow nil value in map
if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
return b, err
}
}
return b, nil
return b, nerr.E
}
}
@ -2333,6 +2416,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
defer mu.Unlock()
var err error
var nerr nonFatal
// Fast-path for common cases: zero or one extensions.
// Don't bother sorting the keys.
@ -2352,11 +2436,11 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
v := e.value
p := toAddrPointer(&v, ei.isptr)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
if err != nil {
if !nerr.Merge(err) {
return b, err
}
}
return b, nil
return b, nerr.E
}
// Sort the keys to provide a deterministic encoding.
@ -2383,11 +2467,11 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
v := e.value
p := toAddrPointer(&v, ei.isptr)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
if err != nil {
if !nerr.Merge(err) {
return b, err
}
}
return b, nil
return b, nerr.E
}
// message set format is:
@ -2444,6 +2528,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
defer mu.Unlock()
var err error
var nerr nonFatal
// Fast-path for common cases: zero or one extensions.
// Don't bother sorting the keys.
@ -2470,12 +2555,12 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
v := e.value
p := toAddrPointer(&v, ei.isptr)
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
if err != nil {
if !nerr.Merge(err) {
return b, err
}
b = append(b, 1<<3|WireEndGroup)
}
return b, nil
return b, nerr.E
}
// Sort the keys to provide a deterministic encoding.
@ -2509,11 +2594,11 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
p := toAddrPointer(&v, ei.isptr)
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
b = append(b, 1<<3|WireEndGroup)
if err != nil {
if !nerr.Merge(err) {
return b, err
}
}
return b, nil
return b, nerr.E
}
// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
@ -2556,6 +2641,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
sort.Ints(keys)
var err error
var nerr nonFatal
for _, k := range keys {
e := m[int32(k)]
if e.value == nil || e.desc == nil {
@ -2572,11 +2658,11 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
v := e.value
p := toAddrPointer(&v, ei.isptr)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
if err != nil {
if !nerr.Merge(err) {
return b, err
}
}
return b, nil
return b, nerr.E
}
// newMarshaler is the interface representing objects that can marshal themselves.

View File

@ -97,6 +97,8 @@ type unmarshalFieldInfo struct {
// if a required field, contains a single set bit at this field's index in the required field list.
reqMask uint64
name string // name of the field, for error reporting
}
var (
@ -136,8 +138,8 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
if u.isMessageSet {
return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
}
var reqMask uint64 // bitmask of required fields we've seen.
var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage.
var reqMask uint64 // bitmask of required fields we've seen.
var errLater error
for len(b) > 0 {
// Read tag and wire type.
// Special case 1 and 2 byte varints.
@ -176,11 +178,20 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
if r, ok := err.(*RequiredNotSetError); ok {
// Remember this error, but keep parsing. We need to produce
// a full parse even if a required field is missing.
rnse = r
if errLater == nil {
errLater = r
}
reqMask |= f.reqMask
continue
}
if err != errInternalBadWireType {
if err == errInvalidUTF8 {
if errLater == nil {
fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
errLater = &invalidUTF8Error{fullName}
}
continue
}
return err
}
// Fragments with bad wire type are treated as unknown fields.
@ -239,20 +250,16 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
emap[int32(tag)] = e
}
}
if rnse != nil {
// A required field of a submessage/group is missing. Return that error.
return rnse
}
if reqMask != u.reqMask {
if reqMask != u.reqMask && errLater == nil {
// A required field of this message is missing.
for _, n := range u.reqFields {
if reqMask&1 == 0 {
return &RequiredNotSetError{n}
errLater = &RequiredNotSetError{n}
}
reqMask >>= 1
}
}
return nil
return errLater
}
// computeUnmarshalInfo fills in u with information for use
@ -351,7 +358,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
}
// Store the info in the correct slot in the message.
u.setTag(tag, toField(&f), unmarshal, reqMask)
u.setTag(tag, toField(&f), unmarshal, reqMask, name)
}
// Find any types associated with oneof fields.
@ -366,10 +373,17 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
f := typ.Field(0) // oneof implementers have one field
baseUnmarshal := fieldUnmarshaler(&f)
tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1]
tag, err := strconv.Atoi(tagstr)
tags := strings.Split(f.Tag.Get("protobuf"), ",")
fieldNum, err := strconv.Atoi(tags[1])
if err != nil {
panic("protobuf tag field not an integer: " + tagstr)
panic("protobuf tag field not an integer: " + tags[1])
}
var name string
for _, tag := range tags {
if strings.HasPrefix(tag, "name=") {
name = strings.TrimPrefix(tag, "name=")
break
}
}
// Find the oneof field that this struct implements.
@ -380,7 +394,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
// That lets us know where this struct should be stored
// when we encounter it during unmarshaling.
unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
u.setTag(tag, of.field, unmarshal, 0)
u.setTag(fieldNum, of.field, unmarshal, 0, name)
}
}
}
@ -401,7 +415,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
}, 0)
}, 0, "")
// Set mask for required field check.
u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
@ -413,8 +427,9 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
// tag = tag # for field
// field/unmarshal = unmarshal info for that field.
// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64) {
i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask}
// name = short name of the field.
func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
n := u.typ.NumField()
if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
for len(u.dense) <= tag {
@ -442,11 +457,17 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
tagArray := strings.Split(tags, ",")
encoding := tagArray[0]
name := "unknown"
proto3 := false
validateUTF8 := true
for _, tag := range tagArray[3:] {
if strings.HasPrefix(tag, "name=") {
name = tag[5:]
}
if tag == "proto3" {
proto3 = true
}
}
validateUTF8 = validateUTF8 && proto3
// Figure out packaging (pointer, slice, or both)
slice := false
@ -594,6 +615,15 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
}
return unmarshalBytesValue
case reflect.String:
if validateUTF8 {
if pointer {
return unmarshalUTF8StringPtr
}
if slice {
return unmarshalUTF8StringSlice
}
return unmarshalUTF8StringValue
}
if pointer {
return unmarshalStringPtr
}
@ -1448,9 +1478,6 @@ func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
*f.toString() = v
return b[x:], nil
}
@ -1468,9 +1495,6 @@ func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
*f.toStringPtr() = &v
return b[x:], nil
}
@ -1488,14 +1512,72 @@ func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
s := f.toStringSlice()
*s = append(*s, v)
return b[x:], nil
}
func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return b, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
*f.toString() = v
if !utf8.ValidString(v) {
return b[x:], errInvalidUTF8
}
return b[x:], nil
}
func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return b, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
*f.toStringPtr() = &v
if !utf8.ValidString(v) {
return b[x:], errInvalidUTF8
}
return b[x:], nil
}
func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return b, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
s := f.toStringSlice()
*s = append(*s, v)
if !utf8.ValidString(v) {
return b[x:], errInvalidUTF8
}
return b[x:], nil
}
var emptyBuf [0]byte
func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
@ -1674,6 +1756,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
// Maps will be somewhat slow. Oh well.
// Read key and value from data.
var nerr nonFatal
k := reflect.New(kt)
v := reflect.New(vt)
for len(b) > 0 {
@ -1694,7 +1777,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
err = errInternalBadWireType // skip unknown tag
}
if err == nil {
if nerr.Merge(err) {
continue
}
if err != errInternalBadWireType {
@ -1717,7 +1800,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
// Insert into map.
m.SetMapIndex(k.Elem(), v.Elem())
return r, nil
return r, nerr.E
}
}
@ -1743,15 +1826,16 @@ func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshal
// Unmarshal data into holder.
// We unmarshal into the first field of the holder object.
var err error
var nerr nonFatal
b, err = unmarshal(b, valToPointer(v).offset(field0), w)
if err != nil {
if !nerr.Merge(err) {
return nil, err
}
// Write pointer to holder into target field.
f.asPointerTo(ityp).Elem().Set(v)
return b, nil
return b, nerr.E
}
}

View File

@ -353,7 +353,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@ -370,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
if err := tm.writeAny(w, val, props.mvalprop); err != nil {
if err := tm.writeAny(w, val, props.MapValProp); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {

View File

@ -630,17 +630,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.mkeyprop); err != nil {
if err := p.readAny(key, props.MapKeyProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
case "value":
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
return err
}
if err := p.readAny(val, props.mvalprop); err != nil {
if err := p.readAny(val, props.MapValProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {

View File

@ -2,10 +2,21 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package snappy implements the snappy block-based compression format.
// It aims for very high speeds and reasonable compression.
// Package snappy implements the Snappy compression format. It aims for very
// high speeds and reasonable compression.
//
// The C++ snappy implementation is at https://github.com/google/snappy
// There are actually two Snappy formats: block and stream. They are related,
// but different: trying to decompress block-compressed data as a Snappy stream
// will fail, and vice versa. The block format is the Decode and Encode
// functions and the stream format is the Reader and Writer types.
//
// The block format, the more common case, is used when the complete size (the
// number of bytes) of the original data is known upfront, at the time
// compression starts. The stream format, also known as the framing format, is
// for when that isn't always true.
//
// The canonical, C++ implementation is at https://github.com/google/snappy and
// it only implements the block format.
package snappy // import "github.com/golang/snappy"
import (

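A minimal sketch of the block format described above, which covers the common Encode/Decode case:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("hello, hello, hello, snappy")

	// Block format: the full input is known up front.
	compressed := snappy.Encode(nil, src)
	decompressed, err := snappy.Decode(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decompressed) == string(src)) // true
}
```

The stream (framing) format is used through snappy.NewReader and snappy.NewWriter instead.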
View File

@ -4,5 +4,6 @@
# Please keep the list sorted.
Gary Burd <gary@beagledreams.com>
Google LLC (https://opensource.google.com/)
Joachim Bauch <mail@joachim-bauch.de>

View File

@ -34,9 +34,11 @@ type Upgrader struct {
ReadBufferSize, WriteBufferSize int
// Subprotocols specifies the server's supported protocols in order of
// preference. If this field is set, then the Upgrade method negotiates a
// preference. If this field is not nil, then the Upgrade method negotiates a
// subprotocol by selecting the first match in this list with a protocol
// requested by the client.
// requested by the client. If there's no match, then no protocol is
// negotiated (the Sec-Websocket-Protocol header is not included in the
// handshake response).
Subprotocols []string
// Error specifies the function for generating HTTP error responses. If Error

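A sketch of the negotiation behaviour documented above; the subprotocol names and route are placeholders:

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{
	// If the client requests none of these, no subprotocol is negotiated and
	// the Sec-Websocket-Protocol header is omitted from the response.
	Subprotocols: []string{"chat.v2", "chat.v1"},
}

func wsHandler(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println("upgrade:", err)
		return
	}
	defer conn.Close()
	log.Println("negotiated subprotocol:", conn.Subprotocol())
}

func main() {
	http.HandleFunc("/ws", wsHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```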
3
vendor/github.com/hashicorp/hcl/go.mod generated vendored Normal file
View File

@ -0,0 +1,3 @@
module github.com/hashicorp/hcl
require github.com/davecgh/go-spew v1.1.1

2
vendor/github.com/hashicorp/hcl/go.sum generated vendored Normal file
View File

@ -0,0 +1,2 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=

View File

@ -116,4 +116,4 @@ https://godoc.org/github.com/jpillora/backoff
#### Credits
Forked from some JavaScript written by [@tj](https://github.com/tj)
Forked from [some JavaScript](https://github.com/segmentio/backo) written by [@tj](https://github.com/tj)

View File

@ -1,4 +1,4 @@
This is Go package to generate guid (globally unique id) with good properties
This is a Go package to generate guids (globally unique ids) with good properties.
Usage:
```go
@ -10,13 +10,13 @@ fmt.Printf("guid: '%s'\n", id)
Generated guids have good properties:
* they're 20 character strings, safe for inclusion in urls (don't require escaping)
* they're based on timestamp so that they sort **after** any existing ids
* they're based on timestamp; they sort **after** any existing ids
* they contain 72-bits of random data after the timestamp so that IDs won't
collide with other clients' IDs
* they sort **lexicographically** (so the timestamp is converted to characters
that will sort properly)
* they're monotonically increasing. Even if you generate more than one in the
same timestamp, thelatter ones will sort after the former ones. We do this
collide with other IDs
* they sort **lexicographically** (the timestamp is converted to a string
that will sort correctly)
* they're monotonically increasing. Even if you generate more than one in the
same timestamp, the latter ones will sort after the former ones. We do this
by using the previous random bits but "incrementing" them by 1 (only in the
case of a timestamp collision).
@ -24,3 +24,5 @@ Read https://www.firebase.com/blog/2015-02-11-firebase-unique-identifiers.html
for more info.
Based on https://gist.github.com/mikelehen/3596a30bd69384624c11
You can read [Generating good, random and unique ids in Go](https://blog.kowalczyk.info/article/JyRZ/generating-good-random-and-unique-ids-in-go.html) to see how it compares to other similar libraries.

View File

@ -24,14 +24,17 @@ var (
)
func init() {
// have to seed to get randomness
// seed to get randomness
rnd = rand.New(rand.NewSource(time.Now().UnixNano()))
for i := 0; i < 12; i++ {
}
func genRandPart() {
for i := 0; i < len(lastRandChars); i++ {
lastRandChars[i] = rnd.Intn(64)
}
}
// New creates a new guid.
// New creates a new random, unique id
func New() string {
var id [8 + 12]byte
mu.Lock()
@ -46,6 +49,8 @@ func New() string {
// increment the next byte
lastRandChars[i] = 0
}
} else {
genRandPart()
}
lastPushTimeMs = timeMs
// put random as the second part

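A minimal usage sketch of the package:

```go
package main

import (
	"fmt"

	"github.com/kjk/betterguid"
)

func main() {
	// Ids are 20 characters long and sort after ids generated earlier.
	a := betterguid.New()
	b := betterguid.New()
	fmt.Println(len(a) == 20, a < b) // true true
}
```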
View File

@ -6,5 +6,5 @@ go:
- 1.7.x
- 1.8.x
- 1.9.x
- "1.10"
- "1.10.x"
- tip

View File

@ -1,5 +1,13 @@
## Changelog
### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018
* [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading
This adds the option to disable property expansion during loading.
Thanks to [@kmala](https://github.com/kmala) for the patch.
### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018
* [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases.

View File

@ -1,4 +1,4 @@
// Copyright 2017 Frank Schroeder. All rights reserved.
// Copyright 2018 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

View File

@ -1,4 +1,4 @@
// Copyright 2017 Frank Schroeder. All rights reserved.
// Copyright 2018 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -73,7 +73,7 @@
// # refers to the users' home dir
// home = ${HOME}
//
// # local key takes precendence over env var: u = foo
// # local key takes precedence over env var: u = foo
// USER = foo
// u = ${USER}
//
@ -102,7 +102,7 @@
// v = p.GetString("key", "def")
// v = p.GetDuration("key", 999)
//
// As an alterantive properties may be applied with the standard
// As an alternative properties may be applied with the standard
// library's flag implementation at any time.
//
// # Standard configuration

View File

@ -1,4 +1,4 @@
// Copyright 2017 Frank Schroeder. All rights reserved.
// Copyright 2018 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

View File

@ -1,4 +1,4 @@
// Copyright 2017 Frank Schroeder. All rights reserved.
// Copyright 2018 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//

View File

@ -1,4 +1,4 @@
// Copyright 2017 Frank Schroeder. All rights reserved.
// Copyright 2018 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -16,21 +16,157 @@ import (
type Encoding uint
const (
// utf8Default is a private placeholder for the zero value of Encoding to
// ensure that it has the correct meaning. UTF8 is the default encoding but
// was assigned a non-zero value which cannot be changed without breaking
// existing code. Clients should continue to use the public constants.
utf8Default Encoding = iota
// UTF8 interprets the input data as UTF-8.
UTF8 Encoding = 1 << iota
UTF8
// ISO_8859_1 interprets the input data as ISO-8859-1.
ISO_8859_1
)
type Loader struct {
// Encoding determines how the data from files and byte buffers
// is interpreted. For URLs the Content-Type header is used
// to determine the encoding of the data.
Encoding Encoding
// DisableExpansion configures the property expansion of the
// returned property object. When set to true, the property values
// will not be expanded and the Property object will not be checked
// for invalid expansion expressions.
DisableExpansion bool
// IgnoreMissing configures whether missing files or URLs which return
// 404 are reported as errors. When set to true, missing files and 404
// status codes are not reported as errors.
IgnoreMissing bool
}
// LoadBytes reads a buffer into a Properties struct.
func (l *Loader) LoadBytes(buf []byte) (*Properties, error) {
return l.loadBytes(buf, l.Encoding)
}
// LoadAll reads the content of multiple URLs or files in the given order into
// a Properties struct. If IgnoreMissing is true then a 404 status code or
// missing file will not be reported as error. Encoding sets the encoding for
// files. For the URLs see LoadURL for the Content-Type header and the
// encoding.
func (l *Loader) LoadAll(names []string) (*Properties, error) {
all := NewProperties()
for _, name := range names {
n, err := expandName(name)
if err != nil {
return nil, err
}
var p *Properties
switch {
case strings.HasPrefix(n, "http://"):
p, err = l.LoadURL(n)
case strings.HasPrefix(n, "https://"):
p, err = l.LoadURL(n)
default:
p, err = l.LoadFile(n)
}
if err != nil {
return nil, err
}
all.Merge(p)
}
all.DisableExpansion = l.DisableExpansion
if all.DisableExpansion {
return all, nil
}
return all, all.check()
}
// LoadFile reads a file into a Properties struct.
// If IgnoreMissing is true then a missing file will not be
// reported as error.
func (l *Loader) LoadFile(filename string) (*Properties, error) {
data, err := ioutil.ReadFile(filename)
if err != nil {
if l.IgnoreMissing && os.IsNotExist(err) {
LogPrintf("properties: %s not found. skipping", filename)
return NewProperties(), nil
}
return nil, err
}
return l.loadBytes(data, l.Encoding)
}
// LoadURL reads the content of the URL into a Properties struct.
//
// The encoding is determined via the Content-Type header which
// should be set to 'text/plain'. If the 'charset' parameter is
// missing, 'iso-8859-1' or 'latin1' the encoding is set to
// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the
// encoding is set to UTF-8. A missing content type header is
// interpreted as 'text/plain; charset=utf-8'.
func (l *Loader) LoadURL(url string) (*Properties, error) {
resp, err := http.Get(url)
if err != nil {
return nil, fmt.Errorf("properties: error fetching %q. %s", url, err)
}
if resp.StatusCode == 404 && l.IgnoreMissing {
LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode)
return NewProperties(), nil
}
if resp.StatusCode != 200 {
return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("properties: %s error reading response. %s", url, err)
}
defer resp.Body.Close()
ct := resp.Header.Get("Content-Type")
var enc Encoding
switch strings.ToLower(ct) {
case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1":
enc = ISO_8859_1
case "", "text/plain; charset=utf-8":
enc = UTF8
default:
return nil, fmt.Errorf("properties: invalid content type %s", ct)
}
return l.loadBytes(body, enc)
}
func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) {
p, err := parse(convert(buf, enc))
if err != nil {
return nil, err
}
p.DisableExpansion = l.DisableExpansion
if p.DisableExpansion {
return p, nil
}
return p, p.check()
}
// Load reads a buffer into a Properties struct.
func Load(buf []byte, enc Encoding) (*Properties, error) {
return loadBuf(buf, enc)
l := &Loader{Encoding: enc}
return l.LoadBytes(buf)
}
// LoadString reads a UTF8 string into a Properties struct.
func LoadString(s string) (*Properties, error) {
return loadBuf([]byte(s), UTF8)
l := &Loader{Encoding: UTF8}
return l.LoadBytes([]byte(s))
}
// LoadMap creates a new Properties struct from a string map.
@ -44,34 +180,32 @@ func LoadMap(m map[string]string) *Properties {
// LoadFile reads a file into a Properties struct.
func LoadFile(filename string, enc Encoding) (*Properties, error) {
return loadAll([]string{filename}, enc, false)
l := &Loader{Encoding: enc}
return l.LoadAll([]string{filename})
}
// LoadFiles reads multiple files in the given order into
// a Properties struct. If 'ignoreMissing' is true then
// non-existent files will not be reported as error.
func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
return loadAll(filenames, enc, ignoreMissing)
l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing}
return l.LoadAll(filenames)
}
// LoadURL reads the content of the URL into a Properties struct.
//
// The encoding is determined via the Content-Type header which
// should be set to 'text/plain'. If the 'charset' parameter is
// missing, 'iso-8859-1' or 'latin1' the encoding is set to
// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the
// encoding is set to UTF-8. A missing content type header is
// interpreted as 'text/plain; charset=utf-8'.
// See Loader#LoadURL for details.
func LoadURL(url string) (*Properties, error) {
return loadAll([]string{url}, UTF8, false)
l := &Loader{Encoding: UTF8}
return l.LoadAll([]string{url})
}
// LoadURLs reads the content of multiple URLs in the given order into a
// Properties struct. If 'ignoreMissing' is true then a 404 status code will
// not be reported as error. See LoadURL for the Content-Type header
// Properties struct. If IgnoreMissing is true then a 404 status code will
// not be reported as error. See Loader#LoadURL for the Content-Type header
// and the encoding.
func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) {
return loadAll(urls, UTF8, ignoreMissing)
l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing}
return l.LoadAll(urls)
}
// LoadAll reads the content of multiple URLs or files in the given order into a
@ -79,7 +213,8 @@ func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) {
// not be reported as error. Encoding sets the encoding for files. For the URLs please see
// LoadURL for the Content-Type header and the encoding.
func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
return loadAll(names, enc, ignoreMissing)
l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing}
return l.LoadAll(names)
}
// MustLoadString reads an UTF8 string into a Properties struct and
@ -122,90 +257,6 @@ func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties {
return must(LoadAll(names, enc, ignoreMissing))
}
func loadBuf(buf []byte, enc Encoding) (*Properties, error) {
p, err := parse(convert(buf, enc))
if err != nil {
return nil, err
}
return p, p.check()
}
func loadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
result := NewProperties()
for _, name := range names {
n, err := expandName(name)
if err != nil {
return nil, err
}
var p *Properties
if strings.HasPrefix(n, "http://") || strings.HasPrefix(n, "https://") {
p, err = loadURL(n, ignoreMissing)
} else {
p, err = loadFile(n, enc, ignoreMissing)
}
if err != nil {
return nil, err
}
result.Merge(p)
}
return result, result.check()
}
func loadFile(filename string, enc Encoding, ignoreMissing bool) (*Properties, error) {
data, err := ioutil.ReadFile(filename)
if err != nil {
if ignoreMissing && os.IsNotExist(err) {
LogPrintf("properties: %s not found. skipping", filename)
return NewProperties(), nil
}
return nil, err
}
p, err := parse(convert(data, enc))
if err != nil {
return nil, err
}
return p, nil
}
func loadURL(url string, ignoreMissing bool) (*Properties, error) {
resp, err := http.Get(url)
if err != nil {
return nil, fmt.Errorf("properties: error fetching %q. %s", url, err)
}
if resp.StatusCode == 404 && ignoreMissing {
LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode)
return NewProperties(), nil
}
if resp.StatusCode != 200 {
return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("properties: %s error reading response. %s", url, err)
}
if err = resp.Body.Close(); err != nil {
return nil, fmt.Errorf("properties: %s error reading response. %s", url, err)
}
ct := resp.Header.Get("Content-Type")
var enc Encoding
switch strings.ToLower(ct) {
case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1":
enc = ISO_8859_1
case "", "text/plain; charset=utf-8":
enc = UTF8
default:
return nil, fmt.Errorf("properties: invalid content type %s", ct)
}
p, err := parse(convert(body, enc))
if err != nil {
return nil, err
}
return p, nil
}
func must(p *Properties, err error) *Properties {
if err != nil {
ErrorHandler(err)
@ -226,7 +277,7 @@ func expandName(name string) (string, error) {
// first 256 unicode code points cover ISO-8859-1.
func convert(buf []byte, enc Encoding) string {
switch enc {
case UTF8:
case utf8Default, UTF8:
return string(buf)
case ISO_8859_1:
runes := make([]rune, len(buf))

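A sketch of the new Loader API introduced above; the file names are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/magiconair/properties"
)

func main() {
	l := &properties.Loader{
		Encoding:      properties.UTF8,
		IgnoreMissing: true, // missing files are skipped instead of failing
	}
	p, err := l.LoadAll([]string{"app.properties", "local.properties"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.GetString("key", "default"))
}
```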
View File

@ -1,4 +1,4 @@
// Copyright 2017 Frank Schroeder. All rights reserved.
// Copyright 2018 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

View File

@ -1,4 +1,4 @@
// Copyright 2017 Frank Schroeder. All rights reserved.
// Copyright 2018 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -83,6 +83,17 @@ func NewProperties() *Properties {
}
}
// Load reads a buffer into the given Properties struct.
func (p *Properties) Load(buf []byte, enc Encoding) error {
l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion}
newProperties, err := l.LoadBytes(buf)
if err != nil {
return err
}
p.Merge(newProperties)
return nil
}
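
A sketch of the new Properties.Load method, which honours DisableExpansion set on the receiver:

```go
package main

import (
	"fmt"
	"log"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.NewProperties()
	p.DisableExpansion = true // keep ${...} references unexpanded

	if err := p.Load([]byte("greeting = hello ${name}"), properties.UTF8); err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.MustGetString("greeting")) // hello ${name}
}
```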
// Get returns the expanded value for the given key if exists.
// Otherwise, ok is false.
func (p *Properties) Get(key string) (value string, ok bool) {

View File

@ -1,4 +1,4 @@
// Copyright 2017 Frank Schroeder. All rights reserved.
// Copyright 2018 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

View File

@ -2,3 +2,4 @@
*_easyjson.go
*.iml
.idea
*.swp

View File

@ -37,6 +37,7 @@ generate: root build
.root/bin/easyjson .root/src/$(PKG)/tests/named_type.go
.root/bin/easyjson .root/src/$(PKG)/tests/custom_map_key_type.go
.root/bin/easyjson .root/src/$(PKG)/tests/embedded_type.go
.root/bin/easyjson -disallow_unknown_fields .root/src/$(PKG)/tests/disallow_unknown.go
test: generate root
go test \

View File

@ -53,6 +53,8 @@ Usage of easyjson:
use lowerCamelCase instead of CamelCase by default
-stubs
only generate stubs for marshaler/unmarshaler funcs
-disallow_unknown_fields
return error if some unknown field in json appeared
```
Using `-all` will generate marshalers/unmarshalers for all Go structs in the
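A hedged sketch of wiring the new flag into a project via go:generate; the package, type, and file name are illustrative, and the //easyjson:json marker follows the project's usual convention for selecting structs when -all is not used:

//go:generate easyjson -disallow_unknown_fields point.go

package geometry

//easyjson:json
type Point struct {
	X int `json:"x"`
	Y int `json:"y"`
}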

View File

@ -240,7 +240,7 @@ func (r *Lexer) fetchNumber() {
// findStringLen tries to scan into the string literal for ending quote char to determine required size.
// The size will be exact if no escapes are present and may be inexact if there are escaped chars.
func findStringLen(data []byte) (hasEscapes bool, length int) {
func findStringLen(data []byte) (isValid, hasEscapes bool, length int) {
delta := 0
for i := 0; i < len(data); i++ {
@ -252,11 +252,11 @@ func findStringLen(data []byte) (hasEscapes bool, length int) {
delta++
}
case '"':
return (delta > 0), (i - delta)
return true, (delta > 0), (i - delta)
}
}
return false, len(data)
return false, false, len(data)
}
// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
@ -342,7 +342,12 @@ func (r *Lexer) fetchString() {
r.pos++
data := r.Data[r.pos:]
hasEscapes, length := findStringLen(data)
isValid, hasEscapes, length := findStringLen(data)
if !isValid {
r.pos += length
r.errParse("unterminated string literal")
return
}
if !hasEscapes {
r.token.byteValue = data[:length]
r.pos += length + 1
@ -649,7 +654,7 @@ func (r *Lexer) Bytes() []byte {
return nil
}
ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue)))
len, err := base64.StdEncoding.Decode(ret, r.token.byteValue)
n, err := base64.StdEncoding.Decode(ret, r.token.byteValue)
if err != nil {
r.fatalError = &LexerError{
Reason: err.Error(),
@ -658,7 +663,7 @@ func (r *Lexer) Bytes() []byte {
}
r.consume()
return ret[:len]
return ret[:n]
}
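To illustrate the effect of the new isValid result, a small sketch against the exported jlexer API; the input literal is illustrative and the exact error text is not asserted:

package main

import (
	"fmt"

	"github.com/mailru/easyjson/jlexer"
)

func main() {
	// An unterminated string literal: findStringLen now reports
	// isValid == false, so the lexer records a parse error instead of
	// silently consuming the rest of the input.
	l := jlexer.Lexer{Data: []byte(`"unterminated`)}
	s := l.String()
	fmt.Printf("value=%q err=%v\n", s, l.Error())
}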
// Bool reads a true or false boolean keyword.

View File

@ -7,14 +7,21 @@ import (
"context"
"crypto/tls"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"strings"
"time"
)
const dnsTimeout time.Duration = 2 * time.Second
const tcpIdleTimeout time.Duration = 8 * time.Second
const (
dnsTimeout time.Duration = 2 * time.Second
tcpIdleTimeout time.Duration = 8 * time.Second
dohMimeType = "application/dns-message"
)
// A Conn represents a connection to a DNS server.
type Conn struct {
@ -37,6 +44,7 @@ type Client struct {
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
HTTPClient *http.Client // The http.Client to use for DNS-over-HTTPS
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
group singleflight
@ -81,11 +89,10 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
// create a new dialer with the appropriate timeout
var d net.Dialer
if c.Dialer == nil {
d = net.Dialer{}
d = net.Dialer{Timeout: c.getTimeoutForRequest(c.dialTimeout())}
} else {
d = net.Dialer(*c.Dialer)
}
d.Timeout = c.getTimeoutForRequest(c.writeTimeout())
network := "udp"
useTLS := false
@ -134,6 +141,11 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
// attribute appropriately
func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
if !c.SingleInflight {
if c.Net == "https" {
// TODO(tmthrgd): pipe timeouts into exchangeDOH
return c.exchangeDOH(context.TODO(), m, address)
}
return c.exchange(m, address)
}
@ -146,6 +158,11 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er
cl = cl1
}
r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
if c.Net == "https" {
// TODO(tmthrgd): pipe timeouts into exchangeDOH
return c.exchangeDOH(context.TODO(), m, address)
}
return c.exchange(m, address)
})
if r != nil && shared {
@ -191,6 +208,67 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
return r, rtt, err
}
func (c *Client) exchangeDOH(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
p, err := m.Pack()
if err != nil {
return nil, 0, err
}
req, err := http.NewRequest(http.MethodPost, a, bytes.NewReader(p))
if err != nil {
return nil, 0, err
}
req.Header.Set("Content-Type", dohMimeType)
req.Header.Set("Accept", dohMimeType)
hc := http.DefaultClient
if c.HTTPClient != nil {
hc = c.HTTPClient
}
if ctx != context.Background() && ctx != context.TODO() {
req = req.WithContext(ctx)
}
t := time.Now()
resp, err := hc.Do(req)
if err != nil {
return nil, 0, err
}
defer closeHTTPBody(resp.Body)
if resp.StatusCode != http.StatusOK {
return nil, 0, fmt.Errorf("dns: server returned HTTP %d error: %q", resp.StatusCode, resp.Status)
}
if ct := resp.Header.Get("Content-Type"); ct != dohMimeType {
return nil, 0, fmt.Errorf("dns: unexpected Content-Type %q; expected %q", ct, dohMimeType)
}
p, err = ioutil.ReadAll(resp.Body)
if err != nil {
return nil, 0, err
}
rtt = time.Since(t)
r = new(Msg)
if err := r.Unpack(p); err != nil {
return r, 0, err
}
// TODO: TSIG? Is it even supported over DoH?
return r, rtt, nil
}
func closeHTTPBody(r io.ReadCloser) error {
io.Copy(ioutil.Discard, io.LimitReader(r, 8<<20))
return r.Close()
}
// ReadMsg reads a message from the connection co.
// If the received message contains a TSIG record the transaction signature
// is verified. This method always tries to return the message, however if an
@ -490,6 +568,10 @@ func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout
// context, if present. If there is both a context deadline and a configured
// timeout on the client, the earliest of the two takes effect.
func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
if !c.SingleInflight && c.Net == "https" {
return c.exchangeDOH(ctx, m, a)
}
var timeout time.Duration
if deadline, ok := ctx.Deadline(); !ok {
timeout = 0
@ -498,6 +580,7 @@ func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg,
}
// not passing the context to the underlying calls, as the API does not support
// context. For timeouts you should set up Client.Dialer and call Client.Exchange.
// TODO(tmthrgd): this is a race condition
c.Dialer = &net.Dialer{Timeout: timeout}
return c.Exchange(m, a)
}
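A hedged sketch of issuing a query over the new DNS-over-HTTPS path; the resolver URL is illustrative, and routing via Net == "https" follows the Exchange changes above:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// With Net set to "https" the Exchange call goes through exchangeDOH.
	c := &dns.Client{Net: "https"}

	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)

	// For DoH the address is the resolver's HTTPS endpoint, not host:port.
	r, rtt, err := c.Exchange(m, "https://cloudflare-dns.com/dns-query")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rtt, r.Answer)
}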

View File

@ -101,7 +101,8 @@ Names:
// compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names
fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR) {\n")
fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR, initLen int) int {\n")
fmt.Fprint(b, "currentLen := initLen\n")
fmt.Fprint(b, "switch x := r.(type) {\n")
for _, name := range domainTypes {
o := scope.Lookup(name)
@ -109,7 +110,10 @@ Names:
fmt.Fprintf(b, "case *%s:\n", name)
for i := 1; i < st.NumFields(); i++ {
out := func(s string) { fmt.Fprintf(b, "compressionLenHelper(c, x.%s)\n", st.Field(i).Name()) }
out := func(s string) {
fmt.Fprintf(b, "currentLen -= len(x.%s) + 1\n", st.Field(i).Name())
fmt.Fprintf(b, "currentLen += compressionLenHelper(c, x.%s, currentLen)\n", st.Field(i).Name())
}
if _, ok := st.Field(i).Type().(*types.Slice); ok {
switch st.Tag(i) {
@ -118,8 +122,12 @@ Names:
case `dns:"cdomain-name"`:
// For HIP we need to slice over the elements in this slice.
fmt.Fprintf(b, `for i := range x.%s {
compressionLenHelper(c, x.%s[i])
}
currentLen -= len(x.%s[i]) + 1
}
`, st.Field(i).Name(), st.Field(i).Name())
fmt.Fprintf(b, `for i := range x.%s {
currentLen += compressionLenHelper(c, x.%s[i], currentLen)
}
`, st.Field(i).Name(), st.Field(i).Name())
}
continue
@ -133,11 +141,11 @@ Names:
}
}
}
fmt.Fprintln(b, "}\n}\n\n")
fmt.Fprintln(b, "}\nreturn currentLen - initLen\n}\n\n")
// compressionLenSearchType - search cdomain-tags types for compressible names.
fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool) {\n")
fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool, int) {\n")
fmt.Fprint(b, "switch x := r.(type) {\n")
for _, name := range cdomainTypes {
o := scope.Lookup(name)
@ -147,7 +155,7 @@ Names:
j := 1
for i := 1; i < st.NumFields(); i++ {
out := func(s string, j int) {
fmt.Fprintf(b, "k%d, ok%d := compressionLenSearch(c, x.%s)\n", j, j, st.Field(i).Name())
fmt.Fprintf(b, "k%d, ok%d, sz%d := compressionLenSearch(c, x.%s)\n", j, j, j, st.Field(i).Name())
}
// There are no slice types with names that can be compressed.
@ -160,13 +168,15 @@ Names:
}
k := "k1"
ok := "ok1"
sz := "sz1"
for i := 2; i < j; i++ {
k += fmt.Sprintf(" + k%d", i)
ok += fmt.Sprintf(" && ok%d", i)
sz += fmt.Sprintf(" + sz%d", i)
}
fmt.Fprintf(b, "return %s, %s\n", k, ok)
fmt.Fprintf(b, "return %s, %s, %s\n", k, ok, sz)
}
fmt.Fprintln(b, "}\nreturn 0, false\n}\n\n")
fmt.Fprintln(b, "}\nreturn 0, false, 0\n}\n\n")
// gofmt
res, err := format.Source(b.Bytes())

10
vendor/github.com/miekg/dns/dns.go generated vendored
View File

@ -55,16 +55,6 @@ func (h *RR_Header) Header() *RR_Header { return h }
// Just to implement the RR interface.
func (h *RR_Header) copy() RR { return nil }
func (h *RR_Header) copyHeader() *RR_Header {
r := new(RR_Header)
r.Name = h.Name
r.Rrtype = h.Rrtype
r.Class = h.Class
r.Ttl = h.Ttl
r.Rdlength = h.Rdlength
return r
}
func (h *RR_Header) String() string {
var s string

View File

@ -73,6 +73,7 @@ var StringToAlgorithm = reverseInt8(AlgorithmToString)
// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
var AlgorithmToHash = map[uint8]crypto.Hash{
RSAMD5: crypto.MD5, // Deprecated in RFC 6725
DSA: crypto.SHA1,
RSASHA1: crypto.SHA1,
RSASHA1NSEC3SHA1: crypto.SHA1,
RSASHA256: crypto.SHA256,
@ -239,7 +240,7 @@ func (k *DNSKEY) ToDS(h uint8) *DS {
// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record.
func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
c := &CDNSKEY{DNSKEY: *k}
c.Hdr = *k.Hdr.copyHeader()
c.Hdr = k.Hdr
c.Hdr.Rrtype = TypeCDNSKEY
return c
}
@ -247,7 +248,7 @@ func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
// ToCDS converts a DS record to a CDS record.
func (d *DS) ToCDS() *CDS {
c := &CDS{DS: *d}
c.Hdr = *d.Hdr.copyHeader()
c.Hdr = d.Hdr
c.Hdr.Rrtype = TypeCDS
return c
}
@ -541,20 +542,20 @@ func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
explen = uint16(keybuf[1])<<8 | uint16(keybuf[2])
keyoff = 3
}
if explen > 4 {
// Larger exponent than supported by the crypto package.
return nil
}
pubkey := new(rsa.PublicKey)
pubkey.N = big.NewInt(0)
shift := uint64((explen - 1) * 8)
expo := uint64(0)
for i := int(explen - 1); i > 0; i-- {
expo += uint64(keybuf[keyoff+i]) << shift
shift -= 8
for i := 0; i < int(explen); i++ {
expo <<= 8
expo |= uint64(keybuf[keyoff+i])
}
// Remainder
expo += uint64(keybuf[keyoff])
if expo > (2<<31)+1 {
// Larger expo than supported.
// println("dns: F5 primes (or larger) are not supported")
if expo > 1<<31-1 {
// Larger exponent than supported by the crypto package.
return nil
}
pubkey.E = int(expo)
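The new loop is plain big-endian decoding with an overflow guard; a standalone sketch of the same idea (decodeExponent is a hypothetical helper, not part of the package):

package main

import "fmt"

// decodeExponent mirrors the loop above: read explen bytes big-endian
// and reject values that do not fit the crypto package's int exponent.
func decodeExponent(keybuf []byte, keyoff, explen int) (int, bool) {
	var expo uint64
	for i := 0; i < explen; i++ {
		expo <<= 8
		expo |= uint64(keybuf[keyoff+i])
	}
	if expo > 1<<31-1 {
		return 0, false // larger exponent than supported
	}
	return int(expo), true
}

func main() {
	// 0x010001 == 65537, the usual RSA public exponent.
	e, ok := decodeExponent([]byte{0x01, 0x00, 0x01}, 0, 3)
	fmt.Println(e, ok)
}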

View File

@ -102,12 +102,15 @@ func (rr *OPT) SetVersion(v uint8) {
// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
func (rr *OPT) ExtendedRcode() int {
return int((rr.Hdr.Ttl & 0xFF000000) >> 24)
return int((rr.Hdr.Ttl&0xFF000000)>>24) + 15
}
// SetExtendedRcode sets the EDNS extended RCODE field.
func (rr *OPT) SetExtendedRcode(v uint8) {
rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v) << 24)
if v < RcodeBadVers { // Smaller than 16; use the 4 bits you have.
return
}
rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v-15) << 24)
}
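Independent of the new 15-offset above, the way PackBuffer (in msg.go further down) splits a full RCODE between the header and the OPT record is simple bit arithmetic; a sketch with an illustrative value:

package main

import "fmt"

func main() {
	// A 12-bit RCODE is split exactly as msg.go's PackBuffer does:
	// the low 4 bits stay in the header, the rest is handed to
	// opt.SetExtendedRcode as the EDNS0 extended-RCODE byte.
	rcode := 0x123                // illustrative 12-bit value
	headerBits := rcode & 0xF     // 0x3, packed into dh.Bits
	extended := uint8(rcode >> 4) // 0x12, passed to opt.SetExtendedRcode
	fmt.Printf("header=%#x extended=%#x\n", headerBits, extended)
}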
// UDPSize returns the UDP buffer size.

127
vendor/github.com/miekg/dns/msg.go generated vendored
View File

@ -691,18 +691,20 @@ func (dns *Msg) Pack() (msg []byte, err error) {
return dns.PackBuffer(nil)
}
// PackBuffer packs a Msg, using the given buffer buf. If buf is too small
// a new buffer is allocated.
// PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated.
func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
// We use a similar function in tsig.go's stripTsig.
var (
dh Header
compression map[string]int
)
var compression map[string]int
if dns.Compress {
compression = make(map[string]int) // Compression pointer mappings
compression = make(map[string]int) // Compression pointer mappings.
}
return dns.packBufferWithCompressionMap(buf, compression)
}
// packBufferWithCompressionMap packs a Msg, using the given buffer buf.
func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string]int) (msg []byte, err error) {
// We use a similar function in tsig.go's stripTsig.
var dh Header
if dns.Rcode < 0 || dns.Rcode > 0xFFF {
return nil, ErrRcode
@ -714,12 +716,11 @@ func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
return nil, ErrExtendedRcode
}
opt.SetExtendedRcode(uint8(dns.Rcode >> 4))
dns.Rcode &= 0xF
}
// Convert convenient Msg into wire-like Header.
dh.Id = dns.Id
dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode)
dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF)
if dns.Response {
dh.Bits |= _QR
}
@ -922,23 +923,27 @@ func (dns *Msg) String() string {
// than packing it, measuring the size and discarding the buffer.
func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) }
func compressedLenWithCompressionMap(dns *Msg, compression map[string]int) int {
l := 12 // Message header is always 12 bytes
for _, r := range dns.Question {
compressionLenHelper(compression, r.Name, l)
l += r.len()
}
l += compressionLenSlice(l, compression, dns.Answer)
l += compressionLenSlice(l, compression, dns.Ns)
l += compressionLenSlice(l, compression, dns.Extra)
return l
}
// compressedLen returns the message length when in compressed wire format
// when compress is true, otherwise the uncompressed length is returned.
func compressedLen(dns *Msg, compress bool) int {
// We always return one more than needed.
l := 12 // Message header is always 12 bytes
if compress {
compression := map[string]int{}
for _, r := range dns.Question {
l += r.len()
compressionLenHelper(compression, r.Name)
}
l += compressionLenSlice(l, compression, dns.Answer)
l += compressionLenSlice(l, compression, dns.Ns)
l += compressionLenSlice(l, compression, dns.Extra)
return l
return compressedLenWithCompressionMap(dns, compression)
}
l := 12 // Message header is always 12 bytes
for _, r := range dns.Question {
l += r.len()
@ -962,70 +967,94 @@ func compressedLen(dns *Msg, compress bool) int {
return l
}
func compressionLenSlice(len int, c map[string]int, rs []RR) int {
var l int
func compressionLenSlice(lenp int, c map[string]int, rs []RR) int {
initLen := lenp
for _, r := range rs {
if r == nil {
continue
}
// track this length, and the global length in len, while taking compression into account for both.
// tmpLen tracks the length of the record at 14-bit boundaries
tmpLen := lenp
x := r.len()
l += x
len += x
k, ok := compressionLenSearch(c, r.Header().Name)
// track this length, and the global length in len, while taking compression into account for both.
k, ok, _ := compressionLenSearch(c, r.Header().Name)
if ok {
l += 1 - k
len += 1 - k
// Size of x is reduced by k, but we add 1 since k includes the '.' and the label descriptor takes 2 bytes,
// so basically x := x - k - 1 + 2
x += 1 - k
}
if len < maxCompressionOffset {
compressionLenHelper(c, r.Header().Name)
}
k, ok = compressionLenSearchType(c, r)
tmpLen += compressionLenHelper(c, r.Header().Name, tmpLen)
k, ok, _ = compressionLenSearchType(c, r)
if ok {
l += 1 - k
len += 1 - k
x += 1 - k
}
lenp += x
tmpLen = lenp
tmpLen += compressionLenHelperType(c, r, tmpLen)
if len < maxCompressionOffset {
compressionLenHelperType(c, r)
}
}
return l
return lenp - initLen
}
// Put the parts of the name in the compression map.
func compressionLenHelper(c map[string]int, s string) {
// Put the parts of the name in the compression map, returning the number of bytes added to the payload
func compressionLenHelper(c map[string]int, s string, currentLen int) int {
if currentLen > maxCompressionOffset {
// We won't be able to add any label that could be re-used later anyway
return 0
}
if _, ok := c[s]; ok {
return 0
}
initLen := currentLen
pref := ""
prev := s
lbs := Split(s)
for j := len(lbs) - 1; j >= 0; j-- {
for j := 0; j < len(lbs); j++ {
pref = s[lbs[j]:]
currentLen += len(prev) - len(pref)
prev = pref
if _, ok := c[pref]; !ok {
c[pref] = len(pref)
// If the first label byte is within the first 14 bits, it might be re-used later
if currentLen < maxCompressionOffset {
c[pref] = currentLen
}
} else {
added := currentLen - initLen
if j > 0 {
// We added a new PTR
added += 2
}
return added
}
}
return currentLen - initLen
}
// Look for each part in the compression map and returns its length,
// keep on searching so we get the longest match.
func compressionLenSearch(c map[string]int, s string) (int, bool) {
// It returns the size of the compression found, whether a match was
// found, and the size of the record if added to the payload.
func compressionLenSearch(c map[string]int, s string) (int, bool, int) {
off := 0
end := false
if s == "" { // don't bork on bogus data
return 0, false
return 0, false, 0
}
fullSize := 0
for {
if _, ok := c[s[off:]]; ok {
return len(s[off:]), true
return len(s[off:]), true, fullSize + off
}
if end {
break
}
// Each label descriptor takes 2 bytes, add it
fullSize += 2
off, end = NextLabel(s, off)
}
return 0, false
return 0, false, fullSize + len(s)
}
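These helpers only feed the exported Len estimate; a hedged usage sketch with an illustrative record:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("www.example.com"), dns.TypeA)

	rr, err := dns.NewRR("www.example.com. 3600 IN A 192.0.2.1")
	if err != nil {
		log.Fatal(err)
	}
	m.Answer = append(m.Answer, rr)

	// Len estimates the compressed wire size via the helpers above
	// instead of packing the message and measuring the result.
	m.Compress = true
	fmt.Println("estimated length:", m.Len())
}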
// Copy returns a new RR which is a deep-copy of r.

Some files were not shown because too many files have changed in this diff.