Update dependencies

Ken-Håvard Lieng 2020-06-15 11:05:32 +02:00
parent 6985dd16da
commit fcf0c17682
101 changed files with 7033 additions and 2205 deletions

File diff suppressed because one or more lines are too long


@ -68,7 +68,7 @@
"formik": "^2.1.4",
"history": "^5.0.0-beta.8",
"hsluv": "^0.1.0",
"immer": "^6.0.9",
"immer": "^7.0.1",
"js-cookie": "^2.2.1",
"lodash": "^4.17.15",
"react": "16.13.1",

File diff suppressed because it is too large

go.mod (19 changed lines)

@ -4,8 +4,8 @@ go 1.14
require (
github.com/RoaringBitmap/roaring v0.4.23 // indirect
github.com/blevesearch/bleve v1.0.8
github.com/caddyserver/certmagic v0.10.13
github.com/blevesearch/bleve v1.0.9
github.com/caddyserver/certmagic v0.11.2
github.com/cenkalti/backoff/v4 v4.0.2 // indirect
github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
@ -21,11 +21,11 @@ require (
github.com/jmhodges/levigo v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0
github.com/kjk/betterguid v0.0.0-20170621091430-c442874ba63a
github.com/klauspost/cpuid v1.2.4
github.com/klauspost/cpuid v1.3.0
github.com/mailru/easyjson v0.7.2-0.20200424172602-f0a000e7a8e0
github.com/miekg/dns v1.1.29 // indirect
github.com/mitchellh/go-homedir v1.1.0
github.com/mitchellh/mapstructure v1.3.1 // indirect
github.com/mitchellh/mapstructure v1.3.2 // indirect
github.com/onsi/ginkgo v1.8.0 // indirect
github.com/onsi/gomega v1.5.0 // indirect
github.com/pelletier/go-toml v1.8.0 // indirect
@ -37,16 +37,17 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.7.0
github.com/stretchr/testify v1.5.1
github.com/tdewolff/minify/v2 v2.7.4
github.com/tdewolff/minify/v2 v2.7.6
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect
github.com/tinylib/msgp v1.1.2 // indirect
github.com/xdg-go/scram v0.0.0-20180814205039-7eeb5667e42c
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c // indirect
github.com/xdg/stringprep v1.0.0 // indirect
go.etcd.io/bbolt v1.3.4
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed // indirect
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2
golang.org/x/sys v0.0.0-20200523222454-059865788121 // indirect
gopkg.in/ini.v1 v1.56.0 // indirect
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9 // indirect
golang.org/x/net v0.0.0-20200602114024-627f9648deb9
golang.org/x/sys v0.0.0-20200610111108-226ff32320da // indirect
google.golang.org/protobuf v1.24.0 // indirect
gopkg.in/ini.v1 v1.57.0 // indirect
gopkg.in/square/go-jose.v2 v2.5.1 // indirect
)

go.sum (60 changed lines)

@ -61,8 +61,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blevesearch/bleve v1.0.8 h1:XBty4rmSs5dBqOhGOL4GojjeHGNuBmyzQLNTTEW6qSU=
github.com/blevesearch/bleve v1.0.8/go.mod h1:rVSPClRCgzWIJJqDjfiNjXxycE1jCODqVYWeTId8A+M=
github.com/blevesearch/bleve v1.0.9 h1:kqw/Ank/61UV9/Bx9kCcnfH6qWPgmS8O5LNfpsgzASg=
github.com/blevesearch/bleve v1.0.9/go.mod h1:tb04/rbU29clbtNgorgFd8XdJea4x3ybYaOjWKr+UBU=
github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040 h1:SjYVcfJVZoCfBlg+fkaq2eoZHTf5HaJfaTeTkOtyfHQ=
github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ=
github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo=
@ -73,14 +73,16 @@ github.com/blevesearch/segment v0.9.0 h1:5lG7yBCx98or7gK2cHMKPukPZ/31Kag7nONpoBt
github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ=
github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s=
github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs=
github.com/blevesearch/zap/v11 v11.0.8 h1:Pnes647E5BkBpXYiX94LYZeKG/6oeSnQ4AjeAFl+Nl8=
github.com/blevesearch/zap/v11 v11.0.8/go.mod h1:VlWmhlTeVIT1aItOGUjABKblNp9gpQXnHtpWQi3vhNw=
github.com/blevesearch/zap/v12 v12.0.8 h1:8aQEmkrJ/qNgmGYZomhDdE133ZxS+5tY58p2CgkV3mk=
github.com/blevesearch/zap/v12 v12.0.8/go.mod h1:GD0cwdeEUvaAW2qfK/8COKg6Ru8dl9DBotl+U2tvWmA=
github.com/blevesearch/zap/v13 v13.0.0 h1:AEPlDdoxOTOo5NFQlecIDzNZOJIHCIql4Uc7SdOrlLE=
github.com/blevesearch/zap/v13 v13.0.0/go.mod h1:bONNlOmL4gWrJaSYpuE78VnYmq+RJN8rwoG2+aEZrV4=
github.com/caddyserver/certmagic v0.10.13 h1:wfyYpXVXSSYMS1ZFpSr7HptwsC+j7elda5PUERrHtRc=
github.com/caddyserver/certmagic v0.10.13/go.mod h1:Yz6cSRUdddGy6Ut5JfrvcqBwrm1BqXxJRqJq2TwjPnA=
github.com/blevesearch/zap/v11 v11.0.9 h1:wlSrDBeGN1G4M51NQHIXca23ttwUfQpWaK7uhO5lRSo=
github.com/blevesearch/zap/v11 v11.0.9/go.mod h1:47hzinvmY2EvvJruzsSCJpro7so8L1neseaGjrtXHOY=
github.com/blevesearch/zap/v12 v12.0.9 h1:PpatkY+BLVFZf0Ok3/fwgI/I4RU0z5blXFGuQANmqXk=
github.com/blevesearch/zap/v12 v12.0.9/go.mod h1:paQuvxy7yXor+0Mx8p2KNmJgygQbQNN+W6HRfL5Hvwc=
github.com/blevesearch/zap/v13 v13.0.1 h1:NSCM6uKu77Vn/x9nlPp4pE1o/bftqcOWZEHSyZVpGBQ=
github.com/blevesearch/zap/v13 v13.0.1/go.mod h1:XmyNLMvMf8Z5FjLANXwUeDW3e1+o77TTGUWrth7T9WI=
github.com/blevesearch/zap/v14 v14.0.0 h1:HF8Ysjm13qxB0jTGaKLlatNXmJbQD8bY+PrPxm5v4hE=
github.com/blevesearch/zap/v14 v14.0.0/go.mod h1:sUc/gPGJlFbSQ2ZUh/wGRYwkKx+Dg/5p+dd+eq6QMXk=
github.com/caddyserver/certmagic v0.11.2 h1:nPBqyuFNHJEf2FwC1ixJjArtTKWyPqpaH6k4jl7gxYI=
github.com/caddyserver/certmagic v0.11.2/go.mod h1:fqY1IZk5iqhsj5FU3Vw20Sjq66tEKaanTFYNZ74soMY=
github.com/cenkalti/backoff/v4 v4.0.0 h1:6VeaLF9aI+MAUQ95106HwWzYZgJJpZ4stumjj6RFYAU=
github.com/cenkalti/backoff/v4 v4.0.0/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
github.com/cenkalti/backoff/v4 v4.0.2 h1:JIufpQLbh4DkbQoii76ItQIUFzevQSqOLZca4eamEDs=
@ -195,6 +197,7 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
@ -289,8 +292,8 @@ github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs=
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.4 h1:EBfaK0SWSwk+fgk6efYFWdzl8MwRWoOO1gkmiaTXPW4=
github.com/klauspost/cpuid v1.2.4/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.3.0 h1:2JqaNE1hGdABW2YbA3TenkO7RiPFRvSWnEnGqWh9sHE=
github.com/klauspost/cpuid v1.3.0/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
github.com/kljensen/snowball v0.6.0 h1:6DZLCcZeL0cLfodx+Md4/OLC6b/bfurWUOUGs1ydfOU=
github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw=
github.com/kolo/xmlrpc v0.0.0-20190717152603-07c4ee3fd181/go.mod h1:o03bZfuBwAXHetKXuInt4S7omeXUu62/A845kiycsSQ=
@ -334,8 +337,8 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.3.1 h1:cCBH2gTD2K0OtLlv/Y5H01VQCqmlDxz30kS5Y5bqfLA=
github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg=
github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@ -464,10 +467,10 @@ github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tdewolff/minify/v2 v2.7.4 h1:r0OZQ3QzWeDS5cXq53Bk4IFIBDZ7fiXIkw1a4bHONsw=
github.com/tdewolff/minify/v2 v2.7.4/go.mod h1:BkDSm8aMMT0ALGmpt7j3Ra7nLUgZL0qhyrAHXwxcy5w=
github.com/tdewolff/parse/v2 v2.4.2 h1:Bu2Qv6wepkc+Ou7iB/qHjAhEImlAP5vedzlQRUdj3BI=
github.com/tdewolff/parse/v2 v2.4.2/go.mod h1:WzaJpRSbwq++EIQHYIRTpbYKNA3gn9it1Ik++q4zyho=
github.com/tdewolff/minify/v2 v2.7.6 h1:b6UzNphZeDm3AVmk0a69orkNLPJzJx3k/AQ/W2xoMs8=
github.com/tdewolff/minify/v2 v2.7.6/go.mod h1:Mt3hGbK/ETDplEP9EMNZo1lPkM3TZq0rDIVV76nFgY0=
github.com/tdewolff/parse/v2 v2.4.3 h1:k24zHgTRGm7LkvbTEreuavyZTf0k8a/lIenggv62OiU=
github.com/tdewolff/parse/v2 v2.4.3/go.mod h1:WzaJpRSbwq++EIQHYIRTpbYKNA3gn9it1Ik++q4zyho=
github.com/tdewolff/test v1.0.6 h1:76mzYJQ83Op284kMT+63iCNCI7NEERsIN8dLM+RiKr4=
github.com/tdewolff/test v1.0.6/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE=
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok=
@ -527,8 +530,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed h1:g4KENRiCMEx58Q7/ecwfT0N2o8z35Fnbsjig/Alf2T4=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9 h1:vEg9joUBmeBcK9iSJftGNf3coIG4HqZElCPehJsfAYM=
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -587,8 +590,8 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 h1:eDrdRpKgkcCqKZQwyZRyeFZgfqt37SL7Kv3tok06cKE=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -641,8 +644,8 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200610111108-226ff32320da h1:bGb80FudwxpeucJUjPYJXuJ8Hk91vNtfvrymzwiei38=
golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -726,6 +729,7 @@ google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@ -742,8 +746,12 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -757,8 +765,8 @@ gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.51.1 h1:GyboHr4UqMiLUybYjd22ZjQIKEJEpgtLXtuGbR21Oho=
gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.56.0 h1:DPMeDvGTM54DXbPkVIZsp19fp/I2K7zwA/itHYHKo8Y=
gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ns1/ns1-go.v2 v2.0.0-20190730140822-b51389932cbc/go.mod h1:VV+3haRsgDiVLxyifmMBrBIuCWFBPYKbRssXB9z67Hw=
gopkg.in/resty.v1 v1.9.1/go.mod h1:vo52Hzryw9PnPHcJfPsBiFW62XhNx5OczbV9y+IMpgc=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=


@ -8,9 +8,10 @@ require (
github.com/blevesearch/go-porterstemmer v1.0.3
github.com/blevesearch/segment v0.9.0
github.com/blevesearch/snowballstem v0.9.0
github.com/blevesearch/zap/v11 v11.0.8
github.com/blevesearch/zap/v12 v12.0.8
github.com/blevesearch/zap/v13 v13.0.0
github.com/blevesearch/zap/v11 v11.0.9
github.com/blevesearch/zap/v12 v12.0.9
github.com/blevesearch/zap/v13 v13.0.1
github.com/blevesearch/zap/v14 v14.0.0
github.com/couchbase/ghistogram v0.1.0 // indirect
github.com/couchbase/moss v0.1.0
github.com/couchbase/vellum v1.0.1


@ -107,8 +107,9 @@ func (o *Builder) parseConfig(config map[string]interface{}) (err error) {
segPlugin, err := chooseSegmentPlugin(forcedSegmentType,
uint32(forcedSegmentVersion))
if err != nil {
o.segPlugin = segPlugin
return err
}
o.segPlugin = segPlugin
}
return nil


@ -142,17 +142,18 @@ func NewScorch(storeName string,
// configForceSegmentTypeVersion checks if the caller has requested a
// specific segment type/version
func configForceSegmentTypeVersion(config map[string]interface{}) (string, uint32, error) {
forcedSegmentVersion, ok := config["forceSegmentVersion"].(int)
if ok {
forcedSegmentType, ok2 := config["forceSegmentType"].(string)
if !ok2 {
return "", 0, fmt.Errorf(
"forceSegmentVersion set to %d, must also specify forceSegmentType", forcedSegmentVersion)
}
return forcedSegmentType, uint32(forcedSegmentVersion), nil
forcedSegmentVersion, err := parseToInteger(config["forceSegmentVersion"])
if err != nil {
return "", 0, nil
}
return "", 0, nil
forcedSegmentType, ok := config["forceSegmentType"].(string)
if !ok {
return "", 0, fmt.Errorf(
"forceSegmentVersion set to %d, must also specify forceSegmentType", forcedSegmentVersion)
}
return forcedSegmentType, uint32(forcedSegmentVersion), nil
}
func (s *Scorch) paused() uint64 {


@ -22,6 +22,7 @@ import (
zapv11 "github.com/blevesearch/zap/v11"
zapv12 "github.com/blevesearch/zap/v12"
zapv13 "github.com/blevesearch/zap/v13"
zapv14 "github.com/blevesearch/zap/v14"
)
var supportedSegmentPlugins map[string]map[uint32]segment.Plugin
@ -29,6 +30,7 @@ var defaultSegmentPlugin segment.Plugin
func init() {
ResetPlugins()
RegisterPlugin(zapv14.Plugin(), false)
RegisterPlugin(zapv13.Plugin(), false)
RegisterPlugin(zapv12.Plugin(), false)
RegisterPlugin(zapv11.Plugin(), true)


@ -4,7 +4,7 @@ go 1.12
require (
github.com/RoaringBitmap/roaring v0.4.21
github.com/blevesearch/bleve v1.0.8
github.com/blevesearch/bleve v1.0.9
github.com/blevesearch/mmap-go v1.0.2
github.com/couchbase/vellum v1.0.1
github.com/golang/snappy v0.0.1


@ -4,7 +4,7 @@ go 1.12
require (
github.com/RoaringBitmap/roaring v0.4.21
github.com/blevesearch/bleve v1.0.8
github.com/blevesearch/bleve v1.0.9
github.com/blevesearch/mmap-go v1.0.2
github.com/couchbase/vellum v1.0.1
github.com/golang/snappy v0.0.1


@ -4,7 +4,7 @@ go 1.12
require (
github.com/RoaringBitmap/roaring v0.4.21
github.com/blevesearch/bleve v1.0.8
github.com/blevesearch/bleve v1.0.9
github.com/blevesearch/mmap-go v1.0.2
github.com/couchbase/vellum v1.0.1
github.com/golang/snappy v0.0.1

vendor/github.com/blevesearch/zap/v14/.gitignore (new file, generated, vendored, 12 added lines)

@ -0,0 +1,12 @@
#*
*.sublime-*
*~
.#*
.project
.settings
**/.idea/
**/*.iml
.DS_Store
/cmd/zap/zap
*.test
tags

vendor/github.com/blevesearch/zap/v14/LICENSE (new file, generated, vendored, 202 added lines)

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/blevesearch/zap/v14/README.md (new file, generated, vendored, 158 added lines)

@ -0,0 +1,158 @@
# zap file format
Advanced ZAP File Format Documentation is [here](zap.md).
The file is written in the reverse order that we typically access data. This helps us write in one pass since later sections of the file require file offsets of things we've already written.
Current usage:
- mmap the entire file
- crc-32 bytes and version are in fixed position at end of the file
- reading remainder of footer could be version specific
- remainder of footer gives us:
- 3 important offsets (docValue , fields index and stored data index)
- 2 important values (number of docs and chunk factor)
- field data is processed once and memoized onto the heap so that we never have to go back to disk for it
- access to stored data by doc number means first navigating to the stored data index, then accessing a fixed position offset into that slice, which gives us the actual address of the data. the first bytes of that section tell us the size of data so that we know where it ends.
- access to all other indexed data follows the following pattern:
- first know the field name -> convert to id
- next navigate to term dictionary for that field
- some operations stop here and do dictionary ops
- next use dictionary to navigate to posting list for a specific term
- walk posting list
- if necessary, walk posting details as we go
- if location info is desired, consult location bitmap to see if it is there
## stored fields section
- for each document
- preparation phase:
- produce a slice of metadata bytes and data bytes
- produce these slices in field id order
- field value is appended to the data slice
- metadata slice is varint encoded with the following values for each field value
- field id (uint16)
- field type (byte)
- field value start offset in uncompressed data slice (uint64)
- field value length (uint64)
- field number of array positions (uint64)
- one additional value for each array position (uint64)
- compress the data slice using snappy
- file writing phase:
- remember the start offset for this document
- write out meta data length (varint uint64)
- write out compressed data length (varint uint64)
- write out the metadata bytes
- write out the compressed data bytes
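A rough sketch of that write phase in Go (illustrative only; `writeStoredDoc` and the simplified error handling are not part of the zap API):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/golang/snappy"
)

// writeStoredDoc appends one document's stored-fields record:
// varint metadata length, varint compressed length, the metadata
// bytes, then the snappy-compressed data bytes.
func writeStoredDoc(segment *bytes.Buffer, meta, data []byte) {
	compressed := snappy.Encode(nil, data)
	var buf [binary.MaxVarintLen64]byte

	n := binary.PutUvarint(buf[:], uint64(len(meta)))
	segment.Write(buf[:n]) // meta data length
	n = binary.PutUvarint(buf[:], uint64(len(compressed)))
	segment.Write(buf[:n]) // compressed data length

	segment.Write(meta)       // metadata bytes
	segment.Write(compressed) // compressed data bytes
}

func main() {
	var segment bytes.Buffer
	start := segment.Len() // remembered for the stored fields idx below
	writeStoredDoc(&segment, []byte{0x01, 0x02}, []byte("hello stored field"))
	fmt.Println("doc record at offset", start, "length", segment.Len()-start)
}
```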
## stored fields idx
- for each document
- write start offset (remembered from previous section) of stored data (big endian uint64)
With this index and a known document number, we have direct access to all the stored field data.
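For example, the lookup is a single fixed-width read (a sketch; `storedDocOffset` is an illustrative name, not the zap API):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// storedDocOffset returns the start offset of a document's stored-fields
// record: the idx holds one big-endian uint64 per document, so the entry
// for docNum lives at storedIndexOffset + 8*docNum.
func storedDocOffset(mem []byte, storedIndexOffset, docNum uint64) uint64 {
	pos := storedIndexOffset + 8*docNum
	return binary.BigEndian.Uint64(mem[pos : pos+8])
}

func main() {
	// toy idx holding offsets 0, 17 and 42 for docs 0, 1 and 2
	idx := make([]byte, 24)
	binary.BigEndian.PutUint64(idx[8:16], 17)
	binary.BigEndian.PutUint64(idx[16:24], 42)
	fmt.Println(storedDocOffset(idx, 0, 2)) // 42
}
```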
## posting details (freq/norm) section
- for each posting list
- produce a slice containing multiple consecutive chunks (each chunk is varint stream)
- produce a slice remembering offsets of where each chunk starts
- preparation phase:
- for each hit in the posting list
- if this hit is in next chunk close out encoding of last chunk and record offset start of next
- encode term frequency (uint64)
- encode norm factor (float32)
- file writing phase:
- remember start position for this posting list details
- write out number of chunks that follow (varint uint64)
- write out length of each chunk (each a varint uint64)
- write out the byte slice containing all the chunk data
If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it.
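A sketch of that jump (illustrative; it assumes the per-chunk lengths have already been decoded from their varints):

```go
package main

import "fmt"

// chunkRange returns the byte range of the chunk holding docNum within the
// concatenated chunk data, given the chunk lengths written out above.
func chunkRange(chunkLens []uint64, chunkFactor, docNum uint64) (start, end uint64) {
	target := docNum / chunkFactor // which chunk this doc falls into
	for _, l := range chunkLens[:target] {
		start += l // skip over the preceding chunks
	}
	return start, start + chunkLens[target]
}

func main() {
	lens := []uint64{120, 95, 130} // made-up chunk lengths
	s, e := chunkRange(lens, 1024, 2500)
	fmt.Printf("doc 2500 -> chunk %d, bytes [%d, %d)\n", 2500/1024, s, e)
}
```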
## posting details (location) section
- for each posting list
- produce a slice containing multiple consecutive chunks (each chunk is varint stream)
- produce a slice remembering offsets of where each chunk starts
- preparation phase:
- for each hit in the posting list
- if this hit is in next chunk close out encoding of last chunk and record offset start of next
- encode field (uint16)
- encode field pos (uint64)
- encode field start (uint64)
- encode field end (uint64)
- encode number of array positions to follow (uint64)
- encode each array position (each uint64)
- file writing phase:
- remember start position for this posting list details
- write out number of chunks that follow (varint uint64)
- write out length of each chunk (each a varint uint64)
- write out the byte slice containing all the chunk data
If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it.
## postings list section
- for each posting list
- preparation phase:
- encode roaring bitmap posting list to bytes (so we know the length)
- file writing phase:
- remember the start position for this posting list
- write freq/norm details offset (remembered from previous, as varint uint64)
- write location details offset (remembered from previous, as varint uint64)
- write length of encoded roaring bitmap
- write the serialized roaring bitmap data
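A hedged sketch of reading one entry back (it assumes the bitmap length is a varint like the preceding fields; `readPostingsList` is an illustrative name, not the zap API):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

// readPostingsList decodes a postings-list entry laid out as above:
// freq/norm details offset (varint), location details offset (varint),
// bitmap length (varint), then the serialized roaring bitmap.
func readPostingsList(mem []byte, postingsOffset uint64) (*roaring.Bitmap, error) {
	pos := postingsOffset
	freqOffset, n := binary.Uvarint(mem[pos:])
	pos += uint64(n)
	locOffset, n := binary.Uvarint(mem[pos:])
	pos += uint64(n)
	bitmapLen, n := binary.Uvarint(mem[pos:])
	pos += uint64(n)

	_ = freqOffset // would be followed to walk term frequencies and norms
	_ = locOffset  // would be followed to walk location details

	bm := roaring.New()
	_, err := bm.ReadFrom(bytes.NewReader(mem[pos : pos+bitmapLen]))
	return bm, err
}

func main() {
	// build a toy entry for a term that appears in docs 1, 5 and 9
	bitmap, _ := roaring.BitmapOf(1, 5, 9).ToBytes()
	var entry bytes.Buffer
	var tmp [binary.MaxVarintLen64]byte
	for _, v := range []uint64{0, 0, uint64(len(bitmap))} { // freq offset, loc offset, bitmap length
		n := binary.PutUvarint(tmp[:], v)
		entry.Write(tmp[:n])
	}
	entry.Write(bitmap)

	bm, err := readPostingsList(entry.Bytes(), 0)
	fmt.Println(bm.ToArray(), err) // [1 5 9] <nil>
}
```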
## dictionary
- for each field
- preparation phase:
- encode vellum FST with dictionary data pointing to file offset of posting list (remembered from previous)
- file writing phase:
- remember the start position of this persistDictionary
- write length of vellum data (varint uint64)
- write out vellum data
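The dictionary is a vellum FST keyed by term and valued with the postings-list offset; a small usage sketch (offsets are made up, mirroring the vellum calls that appear in dict.go further down):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/couchbase/vellum"
)

func main() {
	// build a toy term dictionary: term -> offset of its postings list
	var buf bytes.Buffer
	builder, err := vellum.New(&buf, nil)
	if err != nil {
		panic(err)
	}
	// vellum requires terms to be inserted in sorted order
	_ = builder.Insert([]byte("apple"), 1040)
	_ = builder.Insert([]byte("banana"), 2310)
	if err := builder.Close(); err != nil {
		panic(err)
	}

	// later: load the FST bytes (stored behind a varint length in the
	// segment) and resolve a term to its postings-list offset
	fst, err := vellum.Load(buf.Bytes())
	if err != nil {
		panic(err)
	}
	offset, exists, err := fst.Get([]byte("banana"))
	fmt.Println(offset, exists, err) // 2310 true <nil>
}
```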
## fields section
- for each field
- file writing phase:
- remember start offset for each field
- write dictionary address (remembered from previous) (varint uint64)
- write length of field name (varint uint64)
- write field name bytes
## fields idx
- for each field
- file writing phase:
- write big endian uint64 of start offset for each field
NOTE: currently we don't know or record the length of this fields index. Instead we rely on the fact that we know it immediately precedes a footer of known size.
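Putting the two previous sections together, resolving a field id to its name and dictionary address might look like this (a sketch with made-up names, not the zap API):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// fieldEntry follows one fields idx entry: a big-endian uint64 pointing at
// the field record, which holds the dictionary address (varint), the field
// name length (varint) and the name bytes.
func fieldEntry(mem []byte, fieldsIndexOffset, fieldID uint64) (dictAddr uint64, name string) {
	entryPos := fieldsIndexOffset + 8*fieldID
	fieldStart := binary.BigEndian.Uint64(mem[entryPos : entryPos+8])

	dictAddr, n := binary.Uvarint(mem[fieldStart:])
	pos := fieldStart + uint64(n)
	nameLen, n := binary.Uvarint(mem[pos:])
	pos += uint64(n)
	return dictAddr, string(mem[pos : pos+nameLen])
}

func main() {
	// toy segment: one field record followed by a one-entry fields idx
	var mem []byte
	var tmp [binary.MaxVarintLen64]byte

	fieldStart := uint64(len(mem))
	n := binary.PutUvarint(tmp[:], 512) // dictionary address (made up)
	mem = append(mem, tmp[:n]...)
	n = binary.PutUvarint(tmp[:], uint64(len("title"))) // field name length
	mem = append(mem, tmp[:n]...)
	mem = append(mem, "title"...)

	fieldsIndexOffset := uint64(len(mem))
	var be [8]byte
	binary.BigEndian.PutUint64(be[:], fieldStart)
	mem = append(mem, be[:]...)

	dictAddr, name := fieldEntry(mem, fieldsIndexOffset, 0)
	fmt.Println(dictAddr, name) // 512 title
}
```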
## fields DocValue
- for each field
- preparation phase:
- produce a slice containing multiple consecutive chunks, where each chunk is composed of a meta section followed by compressed columnar field data
- produce a slice remembering the length of each chunk
- file writing phase:
- remember the start position of this first field DocValue offset in the footer
- write out number of chunks that follow (varint uint64)
- write out length of each chunk (each a varint uint64)
- write out the byte slice containing all the chunk data
NOTE: currently the meta header inside each chunk indicates the location offsets and sizes of the data pertaining to a given docID, and any read operation leverages that meta information to extract the document-specific data from the file.
## footer
- file writing phase
- write number of docs (big endian uint64)
- write stored field index location (big endian uint64)
- write field index location (big endian uint64)
- write field docValue location (big endian uint64)
- write out chunk factor (big endian uint32)
- write out version (big endian uint32)
- write out file CRC of everything preceding this (big endian uint32)
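A minimal sketch of reading that footer back from an mmapped segment, assuming exactly the layout listed above (the struct and function names are illustrative, not the zap API):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// footer mirrors the values written above; names are illustrative.
type footer struct {
	numDocs        uint64
	storedIndexLoc uint64
	fieldIndexLoc  uint64
	docValueLoc    uint64
	chunkFactor    uint32
	version        uint32
	crc            uint32
}

// parseFooter walks backwards from the end of the segment, since the CRC
// and version sit in fixed positions at the very end of the file.
func parseFooter(mem []byte) (footer, error) {
	const footerLen = 4*8 + 3*4 // four uint64 values, three uint32 values
	if len(mem) < footerLen {
		return footer{}, fmt.Errorf("segment too small: %d bytes", len(mem))
	}
	end := len(mem)
	var f footer
	f.crc = binary.BigEndian.Uint32(mem[end-4 : end])
	f.version = binary.BigEndian.Uint32(mem[end-8 : end-4])
	f.chunkFactor = binary.BigEndian.Uint32(mem[end-12 : end-8])
	f.docValueLoc = binary.BigEndian.Uint64(mem[end-20 : end-12])
	f.fieldIndexLoc = binary.BigEndian.Uint64(mem[end-28 : end-20])
	f.storedIndexLoc = binary.BigEndian.Uint64(mem[end-36 : end-28])
	f.numDocs = binary.BigEndian.Uint64(mem[end-44 : end-36])
	if crc32.ChecksumIEEE(mem[:end-4]) != f.crc { // CRC of everything preceding it
		return footer{}, fmt.Errorf("crc mismatch")
	}
	return f, nil
}

func main() {
	// assemble a toy segment: an arbitrary body followed by a footer
	buf := []byte("segment body bytes")
	var be8 [8]byte
	var be4 [4]byte
	for _, v := range []uint64{1, 0, 0, 0} { // numDocs and the three locations
		binary.BigEndian.PutUint64(be8[:], v)
		buf = append(buf, be8[:]...)
	}
	for _, v := range []uint32{1024, 14} { // chunk factor, version
		binary.BigEndian.PutUint32(be4[:], v)
		buf = append(buf, be4[:]...)
	}
	binary.BigEndian.PutUint32(be4[:], crc32.ChecksumIEEE(buf))
	buf = append(buf, be4[:]...)

	f, err := parseFooter(buf)
	fmt.Printf("%+v %v\n", f, err)
}
```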

vendor/github.com/blevesearch/zap/v14/build.go (new file, generated, vendored, 156 added lines)

@ -0,0 +1,156 @@
// Copyright (c) 2017 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import (
"bufio"
"math"
"os"
"github.com/couchbase/vellum"
)
const Version uint32 = 14
const Type string = "zap"
const fieldNotUninverted = math.MaxUint64
func (sb *SegmentBase) Persist(path string) error {
return PersistSegmentBase(sb, path)
}
// PersistSegmentBase persists SegmentBase in the zap file format.
func PersistSegmentBase(sb *SegmentBase, path string) error {
flag := os.O_RDWR | os.O_CREATE
f, err := os.OpenFile(path, flag, 0600)
if err != nil {
return err
}
cleanup := func() {
_ = f.Close()
_ = os.Remove(path)
}
br := bufio.NewWriter(f)
_, err = br.Write(sb.mem)
if err != nil {
cleanup()
return err
}
err = persistFooter(sb.numDocs, sb.storedIndexOffset, sb.fieldsIndexOffset, sb.docValueOffset,
sb.chunkMode, sb.memCRC, br)
if err != nil {
cleanup()
return err
}
err = br.Flush()
if err != nil {
cleanup()
return err
}
err = f.Sync()
if err != nil {
cleanup()
return err
}
err = f.Close()
if err != nil {
cleanup()
return err
}
return nil
}
func persistStoredFieldValues(fieldID int,
storedFieldValues [][]byte, stf []byte, spf [][]uint64,
curr int, metaEncode varintEncoder, data []byte) (
int, []byte, error) {
for i := 0; i < len(storedFieldValues); i++ {
// encode field
_, err := metaEncode(uint64(fieldID))
if err != nil {
return 0, nil, err
}
// encode type
_, err = metaEncode(uint64(stf[i]))
if err != nil {
return 0, nil, err
}
// encode start offset
_, err = metaEncode(uint64(curr))
if err != nil {
return 0, nil, err
}
// end len
_, err = metaEncode(uint64(len(storedFieldValues[i])))
if err != nil {
return 0, nil, err
}
// encode number of array pos
_, err = metaEncode(uint64(len(spf[i])))
if err != nil {
return 0, nil, err
}
// encode all array positions
for _, pos := range spf[i] {
_, err = metaEncode(pos)
if err != nil {
return 0, nil, err
}
}
data = append(data, storedFieldValues[i]...)
curr += len(storedFieldValues[i])
}
return curr, data, nil
}
func InitSegmentBase(mem []byte, memCRC uint32, chunkMode uint32,
fieldsMap map[string]uint16, fieldsInv []string, numDocs uint64,
storedIndexOffset uint64, fieldsIndexOffset uint64, docValueOffset uint64,
dictLocs []uint64) (*SegmentBase, error) {
sb := &SegmentBase{
mem: mem,
memCRC: memCRC,
chunkMode: chunkMode,
fieldsMap: fieldsMap,
fieldsInv: fieldsInv,
numDocs: numDocs,
storedIndexOffset: storedIndexOffset,
fieldsIndexOffset: fieldsIndexOffset,
docValueOffset: docValueOffset,
dictLocs: dictLocs,
fieldDvReaders: make(map[uint16]*docValueReader),
fieldFSTs: make(map[uint16]*vellum.FST),
}
sb.updateSize()
err := sb.loadDvReaders()
if err != nil {
return nil, err
}
return sb, nil
}

vendor/github.com/blevesearch/zap/v14/chunk.go (new file, generated, vendored, 67 added lines)

@ -0,0 +1,67 @@
// Copyright (c) 2019 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import (
"fmt"
)
// LegacyChunkMode was the original chunk mode (always chunk size 1024)
// this mode is still used for chunking doc values.
var LegacyChunkMode uint32 = 1024
// DefaultChunkMode is the most recent improvement to chunking and should
// be used by default.
var DefaultChunkMode uint32 = 1026
func getChunkSize(chunkMode uint32, cardinality uint64, maxDocs uint64) (uint64, error) {
switch {
// any chunkMode <= 1024 will always chunk with chunkSize=chunkMode
case chunkMode <= 1024:
// legacy chunk size
return uint64(chunkMode), nil
case chunkMode == 1025:
// attempt at simple improvement
// theory - the point of chunking is to put a bound on the maximum number of
// calls to Next() needed to find a random document. ie, you should be able
// to do one jump to the correct chunk, and then walk through at most
// chunk-size items
// previously 1024 was chosen as the chunk size, but this is particularly
// wasteful for low cardinality terms. the observation is that if there
// are less than 1024 items, why not put them all in one chunk,
// this way you'll still achieve the same goal of visiting at most
// chunk-size items.
// no attempt is made to tweak any other case
if cardinality <= 1024 {
return maxDocs, nil
}
return 1024, nil
case chunkMode == 1026:
// improve upon the ideas tested in chunkMode 1025
// the observation that the fewest number of dense chunks is the most
// desirable layout, given the built-in assumptions of chunking
// (that we want to put an upper-bound on the number of items you must
// walk over without skipping, currently tuned to 1024)
//
// 1. compute the number of chunks needed (max 1024/chunk)
// 2. convert to chunkSize, dividing into maxDocs
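// for example (illustrative numbers, not from the original source):
// cardinality=3000 and maxDocs=100000 give numChunks = 3000/1024 + 1 = 3
// and chunkSize = 100000/3 = 33333, i.e. three dense chunks rather than
// ~98 mostly-empty chunks of size 1024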
numChunks := (cardinality / 1024) + 1
chunkSize := maxDocs / numChunks
return chunkSize, nil
}
return 0, fmt.Errorf("unknown chunk mode %d", chunkMode)
}

vendor/github.com/blevesearch/zap/v14/contentcoder.go (new file, generated, vendored, 243 added lines)

@ -0,0 +1,243 @@
// Copyright (c) 2017 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import (
"bytes"
"encoding/binary"
"io"
"reflect"
"github.com/golang/snappy"
)
var reflectStaticSizeMetaData int
func init() {
var md MetaData
reflectStaticSizeMetaData = int(reflect.TypeOf(md).Size())
}
var termSeparator byte = 0xff
var termSeparatorSplitSlice = []byte{termSeparator}
type chunkedContentCoder struct {
final []byte
chunkSize uint64
currChunk uint64
chunkLens []uint64
w io.Writer
progressiveWrite bool
chunkMetaBuf bytes.Buffer
chunkBuf bytes.Buffer
chunkMeta []MetaData
compressed []byte // temp buf for snappy compression
}
// MetaData represents the data information inside a
// chunk.
type MetaData struct {
DocNum uint64 // docNum of the data inside the chunk
DocDvOffset uint64 // offset of data inside the chunk for the given docid
}
// newChunkedContentCoder returns a new chunk content coder which
// packs data into chunks based on the provided chunkSize
func newChunkedContentCoder(chunkSize uint64, maxDocNum uint64,
w io.Writer, progressiveWrite bool) *chunkedContentCoder {
total := maxDocNum/chunkSize + 1
rv := &chunkedContentCoder{
chunkSize: chunkSize,
chunkLens: make([]uint64, total),
chunkMeta: make([]MetaData, 0, total),
w: w,
progressiveWrite: progressiveWrite,
}
return rv
}
// Reset lets you reuse this chunked content coder. Buffers are reset
// and reused. You cannot change the chunk size.
func (c *chunkedContentCoder) Reset() {
c.currChunk = 0
c.final = c.final[:0]
c.chunkBuf.Reset()
c.chunkMetaBuf.Reset()
for i := range c.chunkLens {
c.chunkLens[i] = 0
}
c.chunkMeta = c.chunkMeta[:0]
}
func (c *chunkedContentCoder) SetChunkSize(chunkSize uint64, maxDocNum uint64) {
total := int(maxDocNum/chunkSize + 1)
c.chunkSize = chunkSize
if cap(c.chunkLens) < total {
c.chunkLens = make([]uint64, total)
} else {
c.chunkLens = c.chunkLens[:total]
}
if cap(c.chunkMeta) < total {
c.chunkMeta = make([]MetaData, 0, total)
}
}
// Close indicates you are done calling Add(); this allows
// the final chunk to be encoded.
func (c *chunkedContentCoder) Close() error {
return c.flushContents()
}
func (c *chunkedContentCoder) flushContents() error {
// flush the contents, with meta information at first
buf := make([]byte, binary.MaxVarintLen64)
n := binary.PutUvarint(buf, uint64(len(c.chunkMeta)))
_, err := c.chunkMetaBuf.Write(buf[:n])
if err != nil {
return err
}
// write out the metaData slice
for _, meta := range c.chunkMeta {
_, err := writeUvarints(&c.chunkMetaBuf, meta.DocNum, meta.DocDvOffset)
if err != nil {
return err
}
}
// write the metadata to final data
metaData := c.chunkMetaBuf.Bytes()
c.final = append(c.final, c.chunkMetaBuf.Bytes()...)
// write the compressed data to the final data
c.compressed = snappy.Encode(c.compressed[:cap(c.compressed)], c.chunkBuf.Bytes())
c.final = append(c.final, c.compressed...)
c.chunkLens[c.currChunk] = uint64(len(c.compressed) + len(metaData))
if c.progressiveWrite {
_, err := c.w.Write(c.final)
if err != nil {
return err
}
c.final = c.final[:0]
}
return nil
}
// Add encodes the provided byte slice into the correct chunk for the provided
// doc num. You MUST call Add() with increasing docNums.
func (c *chunkedContentCoder) Add(docNum uint64, vals []byte) error {
chunk := docNum / c.chunkSize
if chunk != c.currChunk {
// flush out the previous chunk details
err := c.flushContents()
if err != nil {
return err
}
// clearing the chunk specific meta for next chunk
c.chunkBuf.Reset()
c.chunkMetaBuf.Reset()
c.chunkMeta = c.chunkMeta[:0]
c.currChunk = chunk
}
// get the starting offset for this doc
dvOffset := c.chunkBuf.Len()
dvSize, err := c.chunkBuf.Write(vals)
if err != nil {
return err
}
c.chunkMeta = append(c.chunkMeta, MetaData{
DocNum: docNum,
DocDvOffset: uint64(dvOffset + dvSize),
})
return nil
}
// Write commits all the encoded chunked contents to the provided writer.
//
// | ..... data ..... | chunk offsets (varints)
// | position of chunk offsets (uint64) | number of offsets (uint64) |
//
func (c *chunkedContentCoder) Write() (int, error) {
var tw int
if c.final != nil {
// write out the data section first
nw, err := c.w.Write(c.final)
tw += nw
if err != nil {
return tw, err
}
}
chunkOffsetsStart := uint64(tw)
if cap(c.final) < binary.MaxVarintLen64 {
c.final = make([]byte, binary.MaxVarintLen64)
} else {
c.final = c.final[0:binary.MaxVarintLen64]
}
chunkOffsets := modifyLengthsToEndOffsets(c.chunkLens)
// write out the chunk offsets
for _, chunkOffset := range chunkOffsets {
n := binary.PutUvarint(c.final, chunkOffset)
nw, err := c.w.Write(c.final[:n])
tw += nw
if err != nil {
return tw, err
}
}
chunkOffsetsLen := uint64(tw) - chunkOffsetsStart
c.final = c.final[0:8]
// write out the length of chunk offsets
binary.BigEndian.PutUint64(c.final, chunkOffsetsLen)
nw, err := c.w.Write(c.final)
tw += nw
if err != nil {
return tw, err
}
// write out the number of chunks
binary.BigEndian.PutUint64(c.final, uint64(len(c.chunkLens)))
nw, err = c.w.Write(c.final)
tw += nw
if err != nil {
return tw, err
}
c.final = c.final[:0]
return tw, nil
}
// ReadDocValueBoundary elicits the start, end offsets from a
// metaData header slice
func ReadDocValueBoundary(chunk int, metaHeaders []MetaData) (uint64, uint64) {
var start uint64
if chunk > 0 {
start = metaHeaders[chunk-1].DocDvOffset
}
return start, metaHeaders[chunk].DocDvOffset
}

vendor/github.com/blevesearch/zap/v14/count.go (new file, generated, vendored, 61 added lines)

@ -0,0 +1,61 @@
// Copyright (c) 2017 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import (
"hash/crc32"
"io"
"github.com/blevesearch/bleve/index/scorch/segment"
)
// CountHashWriter is a wrapper around a Writer which counts the number of
// bytes which have been written and computes a crc32 hash
type CountHashWriter struct {
w io.Writer
crc uint32
n int
s segment.StatsReporter
}
// NewCountHashWriter returns a CountHashWriter which wraps the provided Writer
func NewCountHashWriter(w io.Writer) *CountHashWriter {
return &CountHashWriter{w: w}
}
func NewCountHashWriterWithStatsReporter(w io.Writer, s segment.StatsReporter) *CountHashWriter {
return &CountHashWriter{w: w, s: s}
}
// Write writes the provided bytes to the wrapped writer and counts the bytes
func (c *CountHashWriter) Write(b []byte) (int, error) {
n, err := c.w.Write(b)
c.crc = crc32.Update(c.crc, crc32.IEEETable, b[:n])
c.n += n
if c.s != nil {
c.s.ReportBytesWritten(uint64(n))
}
return n, err
}
// Count returns the number of bytes written
func (c *CountHashWriter) Count() int {
return c.n
}
// Sum32 returns the CRC-32 hash of the content written to this writer
func (c *CountHashWriter) Sum32() uint32 {
return c.crc
}

vendor/github.com/blevesearch/zap/v14/dict.go (new file, generated, vendored, 263 added lines)

@ -0,0 +1,263 @@
// Copyright (c) 2017 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import (
"bytes"
"fmt"
"github.com/RoaringBitmap/roaring"
"github.com/blevesearch/bleve/index"
"github.com/blevesearch/bleve/index/scorch/segment"
"github.com/couchbase/vellum"
)
// Dictionary is the zap representation of the term dictionary
type Dictionary struct {
sb *SegmentBase
field string
fieldID uint16
fst *vellum.FST
fstReader *vellum.Reader
}
// PostingsList returns the postings list for the specified term
func (d *Dictionary) PostingsList(term []byte, except *roaring.Bitmap,
prealloc segment.PostingsList) (segment.PostingsList, error) {
var preallocPL *PostingsList
pl, ok := prealloc.(*PostingsList)
if ok && pl != nil {
preallocPL = pl
}
return d.postingsList(term, except, preallocPL)
}
func (d *Dictionary) postingsList(term []byte, except *roaring.Bitmap, rv *PostingsList) (*PostingsList, error) {
if d.fstReader == nil {
if rv == nil || rv == emptyPostingsList {
return emptyPostingsList, nil
}
return d.postingsListInit(rv, except), nil
}
postingsOffset, exists, err := d.fstReader.Get(term)
if err != nil {
return nil, fmt.Errorf("vellum err: %v", err)
}
if !exists {
if rv == nil || rv == emptyPostingsList {
return emptyPostingsList, nil
}
return d.postingsListInit(rv, except), nil
}
return d.postingsListFromOffset(postingsOffset, except, rv)
}
func (d *Dictionary) postingsListFromOffset(postingsOffset uint64, except *roaring.Bitmap, rv *PostingsList) (*PostingsList, error) {
rv = d.postingsListInit(rv, except)
err := rv.read(postingsOffset, d)
if err != nil {
return nil, err
}
return rv, nil
}
func (d *Dictionary) postingsListInit(rv *PostingsList, except *roaring.Bitmap) *PostingsList {
if rv == nil || rv == emptyPostingsList {
rv = &PostingsList{}
} else {
postings := rv.postings
if postings != nil {
postings.Clear()
}
*rv = PostingsList{} // clear the struct
rv.postings = postings
}
rv.sb = d.sb
rv.except = except
return rv
}
func (d *Dictionary) Contains(key []byte) (bool, error) {
return d.fst.Contains(key)
}
// Iterator returns an iterator for this dictionary
func (d *Dictionary) Iterator() segment.DictionaryIterator {
rv := &DictionaryIterator{
d: d,
}
if d.fst != nil {
itr, err := d.fst.Iterator(nil, nil)
if err == nil {
rv.itr = itr
} else if err != vellum.ErrIteratorDone {
rv.err = err
}
}
return rv
}
// PrefixIterator returns an iterator which only visits terms having
// the specified prefix
func (d *Dictionary) PrefixIterator(prefix string) segment.DictionaryIterator {
rv := &DictionaryIterator{
d: d,
}
kBeg := []byte(prefix)
kEnd := segment.IncrementBytes(kBeg)
if d.fst != nil {
itr, err := d.fst.Iterator(kBeg, kEnd)
if err == nil {
rv.itr = itr
} else if err != vellum.ErrIteratorDone {
rv.err = err
}
}
return rv
}
// RangeIterator returns an iterator which only visits terms between the
// start and end terms. NOTE: bleve.index API specifies the end is inclusive.
func (d *Dictionary) RangeIterator(start, end string) segment.DictionaryIterator {
rv := &DictionaryIterator{
d: d,
}
// need to increment the end position to be inclusive
var endBytes []byte
if len(end) > 0 {
endBytes = []byte(end)
if endBytes[len(endBytes)-1] < 0xff {
endBytes[len(endBytes)-1]++
} else {
endBytes = append(endBytes, 0xff)
}
}
if d.fst != nil {
itr, err := d.fst.Iterator([]byte(start), endBytes)
if err == nil {
rv.itr = itr
} else if err != vellum.ErrIteratorDone {
rv.err = err
}
}
return rv
}
// AutomatonIterator returns an iterator which only visits terms
// matching the vellum automaton within the start/end key range
func (d *Dictionary) AutomatonIterator(a vellum.Automaton,
startKeyInclusive, endKeyExclusive []byte) segment.DictionaryIterator {
rv := &DictionaryIterator{
d: d,
}
if d.fst != nil {
itr, err := d.fst.Search(a, startKeyInclusive, endKeyExclusive)
if err == nil {
rv.itr = itr
} else if err != vellum.ErrIteratorDone {
rv.err = err
}
}
return rv
}
func (d *Dictionary) OnlyIterator(onlyTerms [][]byte,
includeCount bool) segment.DictionaryIterator {
rv := &DictionaryIterator{
d: d,
omitCount: !includeCount,
}
var buf bytes.Buffer
builder, err := vellum.New(&buf, nil)
if err != nil {
rv.err = err
return rv
}
for _, term := range onlyTerms {
err = builder.Insert(term, 0)
if err != nil {
rv.err = err
return rv
}
}
err = builder.Close()
if err != nil {
rv.err = err
return rv
}
onlyFST, err := vellum.Load(buf.Bytes())
if err != nil {
rv.err = err
return rv
}
itr, err := d.fst.Search(onlyFST, nil, nil)
if err == nil {
rv.itr = itr
} else if err != vellum.ErrIteratorDone {
rv.err = err
}
return rv
}
// DictionaryIterator is an iterator for term dictionary
type DictionaryIterator struct {
d *Dictionary
itr vellum.Iterator
err error
tmp PostingsList
entry index.DictEntry
omitCount bool
}
// Next returns the next entry in the dictionary
func (i *DictionaryIterator) Next() (*index.DictEntry, error) {
if i.err != nil && i.err != vellum.ErrIteratorDone {
return nil, i.err
} else if i.itr == nil || i.err == vellum.ErrIteratorDone {
return nil, nil
}
term, postingsOffset := i.itr.Current()
i.entry.Term = string(term)
if !i.omitCount {
i.err = i.tmp.read(postingsOffset, i.d)
if i.err != nil {
return nil, i.err
}
i.entry.Count = i.tmp.Count()
}
i.err = i.itr.Next()
return &i.entry, nil
}

vendor/github.com/blevesearch/zap/v14/docvalues.go (new file, generated, vendored, 312 added lines)

@ -0,0 +1,312 @@
// Copyright (c) 2017 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import (
"bytes"
"encoding/binary"
"fmt"
"math"
"reflect"
"sort"
"github.com/blevesearch/bleve/index"
"github.com/blevesearch/bleve/index/scorch/segment"
"github.com/blevesearch/bleve/size"
"github.com/golang/snappy"
)
var reflectStaticSizedocValueReader int
func init() {
var dvi docValueReader
reflectStaticSizedocValueReader = int(reflect.TypeOf(dvi).Size())
}
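// docNumTermsVisitor is a callback invoked with each document number and
// that document's raw, term-separator delimited doc value terms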
type docNumTermsVisitor func(docNum uint64, terms []byte) error
type docVisitState struct {
dvrs map[uint16]*docValueReader
segment *SegmentBase
}
type docValueReader struct {
field string
curChunkNum uint64
chunkOffsets []uint64
dvDataLoc uint64
curChunkHeader []MetaData
curChunkData []byte // compressed data cache
uncompressed []byte // temp buf for snappy decompression
}
func (di *docValueReader) size() int {
return reflectStaticSizedocValueReader + size.SizeOfPtr +
len(di.field) +
len(di.chunkOffsets)*size.SizeOfUint64 +
len(di.curChunkHeader)*reflectStaticSizeMetaData +
len(di.curChunkData)
}
func (di *docValueReader) cloneInto(rv *docValueReader) *docValueReader {
if rv == nil {
rv = &docValueReader{}
}
rv.field = di.field
rv.curChunkNum = math.MaxUint64
rv.chunkOffsets = di.chunkOffsets // immutable, so it's sharable
rv.dvDataLoc = di.dvDataLoc
rv.curChunkHeader = rv.curChunkHeader[:0]
rv.curChunkData = nil
rv.uncompressed = rv.uncompressed[:0]
return rv
}
func (di *docValueReader) curChunkNumber() uint64 {
return di.curChunkNum
}
func (s *SegmentBase) loadFieldDocValueReader(field string,
fieldDvLocStart, fieldDvLocEnd uint64) (*docValueReader, error) {
// get the docValue offset for the given field
if fieldDvLocStart == fieldNotUninverted {
// no docValues found, nothing to do
return nil, nil
}
// read the number of chunks, and chunk offsets position
var numChunks, chunkOffsetsPosition uint64
if fieldDvLocEnd-fieldDvLocStart > 16 {
numChunks = binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-8 : fieldDvLocEnd])
// read the length of chunk offsets
chunkOffsetsLen := binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-16 : fieldDvLocEnd-8])
// acquire position of chunk offsets
chunkOffsetsPosition = (fieldDvLocEnd - 16) - chunkOffsetsLen
} else {
return nil, fmt.Errorf("loadFieldDocValueReader: fieldDvLoc too small: %d-%d", fieldDvLocEnd, fieldDvLocStart)
}
fdvIter := &docValueReader{
curChunkNum: math.MaxUint64,
field: field,
chunkOffsets: make([]uint64, int(numChunks)),
}
// read the chunk offsets
var offset uint64
for i := 0; i < int(numChunks); i++ {
loc, read := binary.Uvarint(s.mem[chunkOffsetsPosition+offset : chunkOffsetsPosition+offset+binary.MaxVarintLen64])
if read <= 0 {
return nil, fmt.Errorf("corrupted chunk offset during segment load")
}
fdvIter.chunkOffsets[i] = loc
offset += uint64(read)
}
// set the data offset
fdvIter.dvDataLoc = fieldDvLocStart
return fdvIter, nil
}
func (di *docValueReader) loadDvChunk(chunkNumber uint64, s *SegmentBase) error {
// advance to the chunk where the docValues
// reside for the given docNum
destChunkDataLoc, curChunkEnd := di.dvDataLoc, di.dvDataLoc
start, end := readChunkBoundary(int(chunkNumber), di.chunkOffsets)
if start >= end {
di.curChunkHeader = di.curChunkHeader[:0]
di.curChunkData = nil
di.curChunkNum = chunkNumber
di.uncompressed = di.uncompressed[:0]
return nil
}
destChunkDataLoc += start
curChunkEnd += end
// read the number of docs residing in the chunk
numDocs, read := binary.Uvarint(s.mem[destChunkDataLoc : destChunkDataLoc+binary.MaxVarintLen64])
if read <= 0 {
return fmt.Errorf("failed to read the chunk")
}
chunkMetaLoc := destChunkDataLoc + uint64(read)
offset := uint64(0)
if cap(di.curChunkHeader) < int(numDocs) {
di.curChunkHeader = make([]MetaData, int(numDocs))
} else {
di.curChunkHeader = di.curChunkHeader[:int(numDocs)]
}
for i := 0; i < int(numDocs); i++ {
di.curChunkHeader[i].DocNum, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64])
offset += uint64(read)
di.curChunkHeader[i].DocDvOffset, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64])
offset += uint64(read)
}
compressedDataLoc := chunkMetaLoc + offset
dataLength := curChunkEnd - compressedDataLoc
di.curChunkData = s.mem[compressedDataLoc : compressedDataLoc+dataLength]
di.curChunkNum = chunkNumber
di.uncompressed = di.uncompressed[:0]
return nil
}
func (di *docValueReader) iterateAllDocValues(s *SegmentBase, visitor docNumTermsVisitor) error {
for i := 0; i < len(di.chunkOffsets); i++ {
err := di.loadDvChunk(uint64(i), s)
if err != nil {
return err
}
if di.curChunkData == nil || len(di.curChunkHeader) == 0 {
continue
}
// uncompress the already loaded data
uncompressed, err := snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData)
if err != nil {
return err
}
di.uncompressed = uncompressed
start := uint64(0)
for _, entry := range di.curChunkHeader {
err = visitor(entry.DocNum, uncompressed[start:entry.DocDvOffset])
if err != nil {
return err
}
start = entry.DocDvOffset
}
}
return nil
}
func (di *docValueReader) visitDocValues(docNum uint64,
visitor index.DocumentFieldTermVisitor) error {
// binary search the term locations for the docNum
start, end := di.getDocValueLocs(docNum)
if start == math.MaxUint64 || end == math.MaxUint64 || start == end {
return nil
}
var uncompressed []byte
var err error
// use the uncompressed copy if available
if len(di.uncompressed) > 0 {
uncompressed = di.uncompressed
} else {
// uncompress the already loaded data
uncompressed, err = snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData)
if err != nil {
return err
}
di.uncompressed = uncompressed
}
// pick the terms for the given docNum
uncompressed = uncompressed[start:end]
for {
i := bytes.Index(uncompressed, termSeparatorSplitSlice)
if i < 0 {
break
}
visitor(di.field, uncompressed[0:i])
uncompressed = uncompressed[i+1:]
}
return nil
}
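// getDocValueLocs returns the start and end offsets of the given doc's
// values within the current chunk, or math.MaxUint64 for both when the
// doc has no values in this chunk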
func (di *docValueReader) getDocValueLocs(docNum uint64) (uint64, uint64) {
i := sort.Search(len(di.curChunkHeader), func(i int) bool {
return di.curChunkHeader[i].DocNum >= docNum
})
if i < len(di.curChunkHeader) && di.curChunkHeader[i].DocNum == docNum {
return ReadDocValueBoundary(i, di.curChunkHeader)
}
return math.MaxUint64, math.MaxUint64
}
// VisitDocumentFieldTerms is an implementation of the
// DocumentFieldTermVisitable interface
func (s *SegmentBase) VisitDocumentFieldTerms(localDocNum uint64, fields []string,
visitor index.DocumentFieldTermVisitor, dvsIn segment.DocVisitState) (
segment.DocVisitState, error) {
dvs, ok := dvsIn.(*docVisitState)
if !ok || dvs == nil {
dvs = &docVisitState{}
} else {
if dvs.segment != s {
dvs.segment = s
dvs.dvrs = nil
}
}
var fieldIDPlus1 uint16
if dvs.dvrs == nil {
dvs.dvrs = make(map[uint16]*docValueReader, len(fields))
for _, field := range fields {
if fieldIDPlus1, ok = s.fieldsMap[field]; !ok {
continue
}
fieldID := fieldIDPlus1 - 1
if dvIter, exists := s.fieldDvReaders[fieldID]; exists &&
dvIter != nil {
dvs.dvrs[fieldID] = dvIter.cloneInto(dvs.dvrs[fieldID])
}
}
}
// find the chunkNumber where the docValues are stored
// NOTE: doc values continue to use legacy chunk mode
chunkFactor, err := getChunkSize(LegacyChunkMode, 0, 0)
if err != nil {
return nil, err
}
docInChunk := localDocNum / chunkFactor
var dvr *docValueReader
for _, field := range fields {
if fieldIDPlus1, ok = s.fieldsMap[field]; !ok {
continue
}
fieldID := fieldIDPlus1 - 1
if dvr, ok = dvs.dvrs[fieldID]; ok && dvr != nil {
// check if the chunk is already loaded
if docInChunk != dvr.curChunkNumber() {
err := dvr.loadDvChunk(docInChunk, s)
if err != nil {
return dvs, err
}
}
_ = dvr.visitDocValues(localDocNum, visitor)
}
}
return dvs, nil
}
// VisitableDocValueFields returns the list of fields with
// persisted doc value terms ready to be visitable using the
// VisitDocumentFieldTerms method.
func (s *SegmentBase) VisitableDocValueFields() ([]string, error) {
return s.fieldDvNames, nil
}
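// Editor's note: the following is an illustrative sketch, not part of the
// vendored zap source. It shows the intended calling pattern for the two
// methods above: ask the segment which fields have persisted doc values,
// then visit one document's terms, reusing the returned DocVisitState on
// subsequent calls. The helper name is an assumption made for the example.
func visitOneDoc(sb *SegmentBase, localDocNum uint64) error {
	fields, err := sb.VisitableDocValueFields()
	if err != nil {
		return err
	}
	var dvs segment.DocVisitState
	dvs, err = sb.VisitDocumentFieldTerms(localDocNum, fields,
		func(field string, term []byte) {
			fmt.Printf("doc %d field %q term %q\n", localDocNum, field, term)
		}, dvs)
	_ = dvs // keep dvs around to reuse it for the next document
	return err
}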

138
vendor/github.com/blevesearch/zap/v14/enumerator.go generated vendored Normal file
View File

@ -0,0 +1,138 @@
// Copyright (c) 2018 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import (
"bytes"
"github.com/couchbase/vellum"
)
// enumerator provides an ordered traversal of multiple vellum
// iterators. Like JOIN of iterators, the enumerator produces a
// sequence of (key, iteratorIndex, value) tuples, sorted by key ASC,
// then iteratorIndex ASC, where the same key might be repeated
// across multiple child iterators.
type enumerator struct {
itrs []vellum.Iterator
currKs [][]byte
currVs []uint64
lowK []byte
lowIdxs []int
lowCurr int
}
// newEnumerator returns a new enumerator over the vellum Iterators
func newEnumerator(itrs []vellum.Iterator) (*enumerator, error) {
rv := &enumerator{
itrs: itrs,
currKs: make([][]byte, len(itrs)),
currVs: make([]uint64, len(itrs)),
lowIdxs: make([]int, 0, len(itrs)),
}
for i, itr := range rv.itrs {
rv.currKs[i], rv.currVs[i] = itr.Current()
}
rv.updateMatches(false)
if rv.lowK == nil && len(rv.lowIdxs) == 0 {
return rv, vellum.ErrIteratorDone
}
return rv, nil
}
// updateMatches maintains the low key matches based on the currKs
func (m *enumerator) updateMatches(skipEmptyKey bool) {
m.lowK = nil
m.lowIdxs = m.lowIdxs[:0]
m.lowCurr = 0
for i, key := range m.currKs {
if (key == nil && m.currVs[i] == 0) || // in case of empty iterator
(len(key) == 0 && skipEmptyKey) { // skip empty keys
continue
}
cmp := bytes.Compare(key, m.lowK)
if cmp < 0 || len(m.lowIdxs) == 0 {
// reached a new low
m.lowK = key
m.lowIdxs = m.lowIdxs[:0]
m.lowIdxs = append(m.lowIdxs, i)
} else if cmp == 0 {
m.lowIdxs = append(m.lowIdxs, i)
}
}
}
// Current returns the enumerator's current key, iterator-index, and
// value. If the enumerator is not pointing at a valid value (because
// Next returned an error previously), Current will return nil,0,0.
func (m *enumerator) Current() ([]byte, int, uint64) {
var i int
var v uint64
if m.lowCurr < len(m.lowIdxs) {
i = m.lowIdxs[m.lowCurr]
v = m.currVs[i]
}
return m.lowK, i, v
}
// GetLowIdxsAndValues will return all of the iterator indices
// which point to the current key, and their corresponding
// values. This can be used by an advanced caller which may need
// to peek into these other sets of data before processing.
func (m *enumerator) GetLowIdxsAndValues() ([]int, []uint64) {
values := make([]uint64, 0, len(m.lowIdxs))
for _, idx := range m.lowIdxs {
values = append(values, m.currVs[idx])
}
return m.lowIdxs, values
}
// Next advances the enumerator to the next key/iterator/value result;
// vellum.ErrIteratorDone is returned once all iterators are exhausted.
func (m *enumerator) Next() error {
m.lowCurr += 1
if m.lowCurr >= len(m.lowIdxs) {
// move all the current low iterators forwards
for _, vi := range m.lowIdxs {
err := m.itrs[vi].Next()
if err != nil && err != vellum.ErrIteratorDone {
return err
}
m.currKs[vi], m.currVs[vi] = m.itrs[vi].Current()
}
// can skip any empty keys encountered at this point
m.updateMatches(true)
}
if m.lowK == nil && len(m.lowIdxs) == 0 {
return vellum.ErrIteratorDone
}
return nil
}
// Close closes all the underlying Iterators. The first error, if any, will
// be returned.
func (m *enumerator) Close() error {
var rv error
for _, itr := range m.itrs {
err := itr.Close()
if rv == nil {
rv = err
}
}
return rv
}
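// Editor's note: the following is an illustrative sketch, not part of the
// vendored zap source. It mirrors the drive loop persistMergedRest uses in
// merge.go: read Current(), act on the tuple, then Next() until
// vellum.ErrIteratorDone. The helper name is an assumption made for the
// example.
func drainEnumerator(itrs []vellum.Iterator) error {
	en, err := newEnumerator(itrs)
	for err == nil {
		key, itrIdx, val := en.Current()
		_, _, _ = key, itrIdx, val // a real caller would process the tuple here
		err = en.Next()
	}
	if err != vellum.ErrIteratorDone {
		return err
	}
	return en.Close()
}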

12
vendor/github.com/blevesearch/zap/v14/go.mod generated vendored Normal file
View File

@ -0,0 +1,12 @@
module github.com/blevesearch/zap/v14
go 1.12
require (
github.com/RoaringBitmap/roaring v0.4.21
github.com/blevesearch/bleve v1.0.9
github.com/blevesearch/mmap-go v1.0.2
github.com/couchbase/vellum v1.0.1
github.com/golang/snappy v0.0.1
github.com/spf13/cobra v0.0.5
)

111
vendor/github.com/blevesearch/zap/v14/intDecoder.go generated vendored Normal file
View File

@ -0,0 +1,111 @@
// Copyright (c) 2019 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import (
"encoding/binary"
"fmt"
"github.com/blevesearch/bleve/index/scorch/segment"
)
type chunkedIntDecoder struct {
startOffset uint64
dataStartOffset uint64
chunkOffsets []uint64
curChunkBytes []byte
data []byte
r *segment.MemUvarintReader
}
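// newChunkedIntDecoder prepares a decoder over the chunked integer data
// starting at offset within buf; an offset of termNotEncoded yields an
// empty decoder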
func newChunkedIntDecoder(buf []byte, offset uint64) *chunkedIntDecoder {
rv := &chunkedIntDecoder{startOffset: offset, data: buf}
var n, numChunks uint64
var read int
if offset == termNotEncoded {
numChunks = 0
} else {
numChunks, read = binary.Uvarint(buf[offset+n : offset+n+binary.MaxVarintLen64])
}
n += uint64(read)
if cap(rv.chunkOffsets) >= int(numChunks) {
rv.chunkOffsets = rv.chunkOffsets[:int(numChunks)]
} else {
rv.chunkOffsets = make([]uint64, int(numChunks))
}
for i := 0; i < int(numChunks); i++ {
rv.chunkOffsets[i], read = binary.Uvarint(buf[offset+n : offset+n+binary.MaxVarintLen64])
n += uint64(read)
}
rv.dataStartOffset = offset + n
return rv
}
func (d *chunkedIntDecoder) loadChunk(chunk int) error {
if d.startOffset == termNotEncoded {
d.r = segment.NewMemUvarintReader([]byte(nil))
return nil
}
if chunk >= len(d.chunkOffsets) {
return fmt.Errorf("tried to load freq chunk that doesn't exist %d/(%d)",
chunk, len(d.chunkOffsets))
}
end, start := d.dataStartOffset, d.dataStartOffset
s, e := readChunkBoundary(chunk, d.chunkOffsets)
start += s
end += e
d.curChunkBytes = d.data[start:end]
if d.r == nil {
d.r = segment.NewMemUvarintReader(d.curChunkBytes)
} else {
d.r.Reset(d.curChunkBytes)
}
return nil
}
func (d *chunkedIntDecoder) reset() {
d.startOffset = 0
d.dataStartOffset = 0
d.chunkOffsets = d.chunkOffsets[:0]
d.curChunkBytes = d.curChunkBytes[:0]
d.data = d.data[:0]
if d.r != nil {
d.r.Reset([]byte(nil))
}
}
func (d *chunkedIntDecoder) isNil() bool {
return d.curChunkBytes == nil
}
func (d *chunkedIntDecoder) readUvarint() (uint64, error) {
return d.r.ReadUvarint()
}
func (d *chunkedIntDecoder) SkipUvarint() {
d.r.SkipUvarint()
}
func (d *chunkedIntDecoder) SkipBytes(count int) {
d.r.SkipBytes(count)
}
func (d *chunkedIntDecoder) Len() int {
return d.r.Len()
}
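// Editor's note: the following is an illustrative sketch, not part of the
// vendored zap source. It round-trips a couple of uvarints through
// chunkedIntCoder (defined in intcoder.go) and this decoder. Because an
// offset of 0 means termNotEncoded, the encoded data is written after a
// one-byte placeholder, and slack bytes are appended so the decoder's
// fixed-size uvarint slicing stays within bounds; the bytes import and the
// helper name are assumptions made for the example.
func exampleIntRoundTrip() (uint64, error) {
	enc := newChunkedIntCoder(1024, 0) // a single chunk covering docNum 0
	if err := enc.Add(0, 7, 42); err != nil {
		return 0, err
	}
	enc.Close()
	var buf bytes.Buffer
	buf.WriteByte(0) // keep the real data away from offset 0 (termNotEncoded)
	if _, err := enc.Write(&buf); err != nil {
		return 0, err
	}
	buf.Write(make([]byte, binary.MaxVarintLen64)) // trailing slack
	dec := newChunkedIntDecoder(buf.Bytes(), 1)
	if err := dec.loadChunk(0); err != nil {
		return 0, err
	}
	return dec.readUvarint() // returns 7; a second call would return 42
}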

206
vendor/github.com/blevesearch/zap/v14/intcoder.go generated vendored Normal file
View File

@ -0,0 +1,206 @@
// Copyright (c) 2017 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import (
"bytes"
"encoding/binary"
"io"
)
// We can safely use 0 to represent termNotEncoded since 0
// could never be a valid address for term location information.
// (stored field index is always non-empty and earlier in the
// file)
const termNotEncoded = 0
type chunkedIntCoder struct {
final []byte
chunkSize uint64
chunkBuf bytes.Buffer
chunkLens []uint64
currChunk uint64
buf []byte
}
// newChunkedIntCoder returns a new chunked int coder which packs data into
// chunks based on the provided chunkSize and supports up to the specified
// maxDocNum
func newChunkedIntCoder(chunkSize uint64, maxDocNum uint64) *chunkedIntCoder {
total := maxDocNum/chunkSize + 1
rv := &chunkedIntCoder{
chunkSize: chunkSize,
chunkLens: make([]uint64, total),
final: make([]byte, 0, 64),
}
return rv
}
// Reset lets you reuse this chunked int coder. Buffers are reset and reused
// from the previous use. You cannot change the chunk size or max doc num.
func (c *chunkedIntCoder) Reset() {
c.final = c.final[:0]
c.chunkBuf.Reset()
c.currChunk = 0
for i := range c.chunkLens {
c.chunkLens[i] = 0
}
}
// SetChunkSize changes the chunk size. It is only valid to do so
// with a new chunkedIntCoder, or immediately after calling Reset()
func (c *chunkedIntCoder) SetChunkSize(chunkSize uint64, maxDocNum uint64) {
total := int(maxDocNum/chunkSize + 1)
c.chunkSize = chunkSize
if cap(c.chunkLens) < total {
c.chunkLens = make([]uint64, total)
} else {
c.chunkLens = c.chunkLens[:total]
}
}
// Add encodes the provided integers into the correct chunk for the provided
// doc num. You MUST call Add() with increasing docNums.
func (c *chunkedIntCoder) Add(docNum uint64, vals ...uint64) error {
chunk := docNum / c.chunkSize
if chunk != c.currChunk {
// starting a new chunk
c.Close()
c.chunkBuf.Reset()
c.currChunk = chunk
}
if len(c.buf) < binary.MaxVarintLen64 {
c.buf = make([]byte, binary.MaxVarintLen64)
}
for _, val := range vals {
wb := binary.PutUvarint(c.buf, val)
_, err := c.chunkBuf.Write(c.buf[:wb])
if err != nil {
return err
}
}
return nil
}
func (c *chunkedIntCoder) AddBytes(docNum uint64, buf []byte) error {
chunk := docNum / c.chunkSize
if chunk != c.currChunk {
// starting a new chunk
c.Close()
c.chunkBuf.Reset()
c.currChunk = chunk
}
_, err := c.chunkBuf.Write(buf)
return err
}
// Close indicates you are done calling Add(); this allows the final chunk
// to be encoded.
func (c *chunkedIntCoder) Close() {
encodingBytes := c.chunkBuf.Bytes()
c.chunkLens[c.currChunk] = uint64(len(encodingBytes))
c.final = append(c.final, encodingBytes...)
c.currChunk = uint64(cap(c.chunkLens)) // sentinel to detect double close
}
// Write commits all the encoded chunked integers to the provided writer.
func (c *chunkedIntCoder) Write(w io.Writer) (int, error) {
bufNeeded := binary.MaxVarintLen64 * (1 + len(c.chunkLens))
if len(c.buf) < bufNeeded {
c.buf = make([]byte, bufNeeded)
}
buf := c.buf
// convert the chunk lengths into chunk offsets
chunkOffsets := modifyLengthsToEndOffsets(c.chunkLens)
// write out the number of chunks & each chunk offsets
n := binary.PutUvarint(buf, uint64(len(chunkOffsets)))
for _, chunkOffset := range chunkOffsets {
n += binary.PutUvarint(buf[n:], chunkOffset)
}
tw, err := w.Write(buf[:n])
if err != nil {
return tw, err
}
// write out the data
nw, err := w.Write(c.final)
tw += nw
if err != nil {
return tw, err
}
return tw, nil
}
// writeAt commits all the encoded chunked integers to the provided writer
// and returns the starting offset, total bytes written and an error
func (c *chunkedIntCoder) writeAt(w io.Writer) (uint64, int, error) {
startOffset := uint64(termNotEncoded)
if len(c.final) <= 0 {
return startOffset, 0, nil
}
if chw, ok := w.(*CountHashWriter); ok && chw != nil {
startOffset = uint64(chw.Count())
}
tw, err := c.Write(w)
return startOffset, tw, err
}
func (c *chunkedIntCoder) FinalSize() int {
return len(c.final)
}
// modifyLengthsToEndOffsets converts the chunk length array
// to a chunk offset array. The readChunkBoundary
// will figure out the start and end of every chunk from
// these offsets. The starting offset of the i'th chunk is
// stored at position i-1 (except for chunk 0, whose start is
// always zero), and its ending offset is stored at position i.
// eg:
// Lens -> 5 5 5 5 => 5 10 15 20
// Lens -> 0 5 0 5 => 0 5 5 10
// Lens -> 0 0 0 5 => 0 0 0 5
// Lens -> 5 0 0 0 => 5 5 5 5
// Lens -> 0 5 0 0 => 0 5 5 5
// Lens -> 0 0 5 0 => 0 0 5 5
func modifyLengthsToEndOffsets(lengths []uint64) []uint64 {
var runningOffset uint64
var index, i int
for i = 1; i <= len(lengths); i++ {
runningOffset += lengths[i-1]
lengths[index] = runningOffset
index++
}
return lengths
}
func readChunkBoundary(chunk int, offsets []uint64) (uint64, uint64) {
var start uint64
if chunk > 0 {
start = offsets[chunk-1]
}
return start, offsets[chunk]
}
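// Editor's note: the following is an illustrative sketch, not part of the
// vendored zap source. It works one of the examples from the comment above:
// lengths {5, 5, 5, 5} become end offsets {5, 10, 15, 20}, so chunk 2 spans
// bytes [10, 15). The helper name is an assumption made for the example.
func exampleChunkBoundary() (uint64, uint64) {
	offsets := modifyLengthsToEndOffsets([]uint64{5, 5, 5, 5})
	return readChunkBoundary(2, offsets) // returns (10, 15)
}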

847
vendor/github.com/blevesearch/zap/v14/merge.go generated vendored Normal file
View File

@ -0,0 +1,847 @@
// Copyright (c) 2017 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"math"
"os"
"sort"
"github.com/RoaringBitmap/roaring"
seg "github.com/blevesearch/bleve/index/scorch/segment"
"github.com/couchbase/vellum"
"github.com/golang/snappy"
)
var DefaultFileMergerBufferSize = 1024 * 1024
const docDropped = math.MaxUint64 // sentinel docNum to represent a deleted doc
// Merge takes a slice of segments and bit masks describing which
// documents may be dropped, and creates a new segment containing the
// remaining data. This new segment is built at the specified path.
func (*ZapPlugin) Merge(segments []seg.Segment, drops []*roaring.Bitmap, path string,
closeCh chan struct{}, s seg.StatsReporter) (
[][]uint64, uint64, error) {
segmentBases := make([]*SegmentBase, len(segments))
for segmenti, segment := range segments {
switch segmentx := segment.(type) {
case *Segment:
segmentBases[segmenti] = &segmentx.SegmentBase
case *SegmentBase:
segmentBases[segmenti] = segmentx
default:
panic(fmt.Sprintf("oops, unexpected segment type: %T", segment))
}
}
return mergeSegmentBases(segmentBases, drops, path, DefaultChunkMode, closeCh, s)
}
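// mergeSegmentBases merges the provided in-memory segment bases, minus any
// dropped documents, into a new segment file at path, returning the new doc
// number mappings and the total number of bytes written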
func mergeSegmentBases(segmentBases []*SegmentBase, drops []*roaring.Bitmap, path string,
chunkMode uint32, closeCh chan struct{}, s seg.StatsReporter) (
[][]uint64, uint64, error) {
flag := os.O_RDWR | os.O_CREATE
f, err := os.OpenFile(path, flag, 0600)
if err != nil {
return nil, 0, err
}
cleanup := func() {
_ = f.Close()
_ = os.Remove(path)
}
// buffer the output
br := bufio.NewWriterSize(f, DefaultFileMergerBufferSize)
// wrap it for counting (tracking offsets)
cr := NewCountHashWriterWithStatsReporter(br, s)
newDocNums, numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset, _, _, _, err :=
MergeToWriter(segmentBases, drops, chunkMode, cr, closeCh)
if err != nil {
cleanup()
return nil, 0, err
}
err = persistFooter(numDocs, storedIndexOffset, fieldsIndexOffset,
docValueOffset, chunkMode, cr.Sum32(), cr)
if err != nil {
cleanup()
return nil, 0, err
}
err = br.Flush()
if err != nil {
cleanup()
return nil, 0, err
}
err = f.Sync()
if err != nil {
cleanup()
return nil, 0, err
}
err = f.Close()
if err != nil {
cleanup()
return nil, 0, err
}
return newDocNums, uint64(cr.Count()), nil
}
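// MergeToWriter merges the segment bases into the provided CountHashWriter
// and returns the new doc number mappings along with the offsets and field
// information needed to persist the segment footer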
func MergeToWriter(segments []*SegmentBase, drops []*roaring.Bitmap,
chunkMode uint32, cr *CountHashWriter, closeCh chan struct{}) (
newDocNums [][]uint64,
numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset uint64,
dictLocs []uint64, fieldsInv []string, fieldsMap map[string]uint16,
err error) {
docValueOffset = uint64(fieldNotUninverted)
var fieldsSame bool
fieldsSame, fieldsInv = mergeFields(segments)
fieldsMap = mapFields(fieldsInv)
numDocs = computeNewDocCount(segments, drops)
if isClosed(closeCh) {
return nil, 0, 0, 0, 0, nil, nil, nil, seg.ErrClosed
}
if numDocs > 0 {
storedIndexOffset, newDocNums, err = mergeStoredAndRemap(segments, drops,
fieldsMap, fieldsInv, fieldsSame, numDocs, cr, closeCh)
if err != nil {
return nil, 0, 0, 0, 0, nil, nil, nil, err
}
dictLocs, docValueOffset, err = persistMergedRest(segments, drops,
fieldsInv, fieldsMap, fieldsSame,
newDocNums, numDocs, chunkMode, cr, closeCh)
if err != nil {
return nil, 0, 0, 0, 0, nil, nil, nil, err
}
} else {
dictLocs = make([]uint64, len(fieldsInv))
}
fieldsIndexOffset, err = persistFields(fieldsInv, cr, dictLocs)
if err != nil {
return nil, 0, 0, 0, 0, nil, nil, nil, err
}
return newDocNums, numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset, dictLocs, fieldsInv, fieldsMap, nil
}
// mapFields takes the fieldsInv list and returns a map of fieldName
// to fieldID+1
func mapFields(fields []string) map[string]uint16 {
rv := make(map[string]uint16, len(fields))
for i, fieldName := range fields {
rv[fieldName] = uint16(i) + 1
}
return rv
}
// computeNewDocCount determines how many documents will be in the newly
// merged segment when obsoleted docs are dropped
func computeNewDocCount(segments []*SegmentBase, drops []*roaring.Bitmap) uint64 {
var newDocCount uint64
for segI, segment := range segments {
newDocCount += segment.numDocs
if drops[segI] != nil {
newDocCount -= drops[segI].GetCardinality()
}
}
return newDocCount
}
func persistMergedRest(segments []*SegmentBase, dropsIn []*roaring.Bitmap,
fieldsInv []string, fieldsMap map[string]uint16, fieldsSame bool,
newDocNumsIn [][]uint64, newSegDocCount uint64, chunkMode uint32,
w *CountHashWriter, closeCh chan struct{}) ([]uint64, uint64, error) {
var bufMaxVarintLen64 []byte = make([]byte, binary.MaxVarintLen64)
var bufLoc []uint64
var postings *PostingsList
var postItr *PostingsIterator
rv := make([]uint64, len(fieldsInv))
fieldDvLocsStart := make([]uint64, len(fieldsInv))
fieldDvLocsEnd := make([]uint64, len(fieldsInv))
// these int coders are initialized with chunk size 1024
// however this will be reset to the correct chunk size
// while processing each individual field-term section
tfEncoder := newChunkedIntCoder(1024, newSegDocCount-1)
locEncoder := newChunkedIntCoder(1024, newSegDocCount-1)
var vellumBuf bytes.Buffer
newVellum, err := vellum.New(&vellumBuf, nil)
if err != nil {
return nil, 0, err
}
newRoaring := roaring.NewBitmap()
// for each field
for fieldID, fieldName := range fieldsInv {
// collect FST iterators from all active segments for this field
var newDocNums [][]uint64
var drops []*roaring.Bitmap
var dicts []*Dictionary
var itrs []vellum.Iterator
var segmentsInFocus []*SegmentBase
for segmentI, segment := range segments {
// check for the closure in meantime
if isClosed(closeCh) {
return nil, 0, seg.ErrClosed
}
dict, err2 := segment.dictionary(fieldName)
if err2 != nil {
return nil, 0, err2
}
if dict != nil && dict.fst != nil {
itr, err2 := dict.fst.Iterator(nil, nil)
if err2 != nil && err2 != vellum.ErrIteratorDone {
return nil, 0, err2
}
if itr != nil {
newDocNums = append(newDocNums, newDocNumsIn[segmentI])
if dropsIn[segmentI] != nil && !dropsIn[segmentI].IsEmpty() {
drops = append(drops, dropsIn[segmentI])
} else {
drops = append(drops, nil)
}
dicts = append(dicts, dict)
itrs = append(itrs, itr)
segmentsInFocus = append(segmentsInFocus, segment)
}
}
}
var prevTerm []byte
newRoaring.Clear()
var lastDocNum, lastFreq, lastNorm uint64
// determines whether to use "1-hit" encoding optimization
// when a term appears in only 1 doc, with no loc info,
// has freq of 1, and the docNum fits into 31-bits
use1HitEncoding := func(termCardinality uint64) (bool, uint64, uint64) {
if termCardinality == uint64(1) && locEncoder.FinalSize() <= 0 {
docNum := uint64(newRoaring.Minimum())
if under32Bits(docNum) && docNum == lastDocNum && lastFreq == 1 {
return true, docNum, lastNorm
}
}
return false, 0, 0
}
finishTerm := func(term []byte) error {
tfEncoder.Close()
locEncoder.Close()
postingsOffset, err := writePostings(newRoaring,
tfEncoder, locEncoder, use1HitEncoding, w, bufMaxVarintLen64)
if err != nil {
return err
}
if postingsOffset > 0 {
err = newVellum.Insert(term, postingsOffset)
if err != nil {
return err
}
}
newRoaring.Clear()
tfEncoder.Reset()
locEncoder.Reset()
lastDocNum = 0
lastFreq = 0
lastNorm = 0
return nil
}
enumerator, err := newEnumerator(itrs)
for err == nil {
term, itrI, postingsOffset := enumerator.Current()
if !bytes.Equal(prevTerm, term) {
// check for the closure in meantime
if isClosed(closeCh) {
return nil, 0, seg.ErrClosed
}
// if the term changed, write out the info collected
// for the previous term
err = finishTerm(prevTerm)
if err != nil {
return nil, 0, err
}
}
if !bytes.Equal(prevTerm, term) || prevTerm == nil {
// compute cardinality of field-term in new seg
var newCard uint64
lowItrIdxs, lowItrVals := enumerator.GetLowIdxsAndValues()
for i, idx := range lowItrIdxs {
pl, err := dicts[idx].postingsListFromOffset(lowItrVals[i], drops[idx], nil)
if err != nil {
return nil, 0, err
}
newCard += pl.Count()
}
// compute correct chunk size with this
chunkSize, err := getChunkSize(chunkMode, newCard, newSegDocCount)
if err != nil {
return nil, 0, err
}
// update encoders chunk
tfEncoder.SetChunkSize(chunkSize, newSegDocCount-1)
locEncoder.SetChunkSize(chunkSize, newSegDocCount-1)
}
postings, err = dicts[itrI].postingsListFromOffset(
postingsOffset, drops[itrI], postings)
if err != nil {
return nil, 0, err
}
postItr = postings.iterator(true, true, true, postItr)
// can no longer optimize by copying, since chunk factor could have changed
lastDocNum, lastFreq, lastNorm, bufLoc, err = mergeTermFreqNormLocs(
fieldsMap, term, postItr, newDocNums[itrI], newRoaring,
tfEncoder, locEncoder, bufLoc)
if err != nil {
return nil, 0, err
}
prevTerm = prevTerm[:0] // copy to prevTerm in case Next() reuses term mem
prevTerm = append(prevTerm, term...)
err = enumerator.Next()
}
if err != vellum.ErrIteratorDone {
return nil, 0, err
}
err = finishTerm(prevTerm)
if err != nil {
return nil, 0, err
}
dictOffset := uint64(w.Count())
err = newVellum.Close()
if err != nil {
return nil, 0, err
}
vellumData := vellumBuf.Bytes()
// write out the length of the vellum data
n := binary.PutUvarint(bufMaxVarintLen64, uint64(len(vellumData)))
_, err = w.Write(bufMaxVarintLen64[:n])
if err != nil {
return nil, 0, err
}
// write this vellum to disk
_, err = w.Write(vellumData)
if err != nil {
return nil, 0, err
}
rv[fieldID] = dictOffset
// get the field doc value offset (start)
fieldDvLocsStart[fieldID] = uint64(w.Count())
// update the field doc values
// NOTE: doc values continue to use legacy chunk mode
chunkSize, err := getChunkSize(LegacyChunkMode, 0, 0)
if err != nil {
return nil, 0, err
}
fdvEncoder := newChunkedContentCoder(chunkSize, newSegDocCount-1, w, true)
fdvReadersAvailable := false
var dvIterClone *docValueReader
for segmentI, segment := range segmentsInFocus {
// check for the closure in meantime
if isClosed(closeCh) {
return nil, 0, seg.ErrClosed
}
fieldIDPlus1 := uint16(segment.fieldsMap[fieldName])
if dvIter, exists := segment.fieldDvReaders[fieldIDPlus1-1]; exists &&
dvIter != nil {
fdvReadersAvailable = true
dvIterClone = dvIter.cloneInto(dvIterClone)
err = dvIterClone.iterateAllDocValues(segment, func(docNum uint64, terms []byte) error {
if newDocNums[segmentI][docNum] == docDropped {
return nil
}
err := fdvEncoder.Add(newDocNums[segmentI][docNum], terms)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, 0, err
}
}
}
if fdvReadersAvailable {
err = fdvEncoder.Close()
if err != nil {
return nil, 0, err
}
// persist the doc value details for this field
_, err = fdvEncoder.Write()
if err != nil {
return nil, 0, err
}
// get the field doc value offset (end)
fieldDvLocsEnd[fieldID] = uint64(w.Count())
} else {
fieldDvLocsStart[fieldID] = fieldNotUninverted
fieldDvLocsEnd[fieldID] = fieldNotUninverted
}
// reset vellum buffer and vellum builder
vellumBuf.Reset()
err = newVellum.Reset(&vellumBuf)
if err != nil {
return nil, 0, err
}
}
fieldDvLocsOffset := uint64(w.Count())
buf := bufMaxVarintLen64
for i := 0; i < len(fieldDvLocsStart); i++ {
n := binary.PutUvarint(buf, fieldDvLocsStart[i])
_, err := w.Write(buf[:n])
if err != nil {
return nil, 0, err
}
n = binary.PutUvarint(buf, fieldDvLocsEnd[i])
_, err = w.Write(buf[:n])
if err != nil {
return nil, 0, err
}
}
return rv, fieldDvLocsOffset, nil
}
func mergeTermFreqNormLocs(fieldsMap map[string]uint16, term []byte, postItr *PostingsIterator,
newDocNums []uint64, newRoaring *roaring.Bitmap,
tfEncoder *chunkedIntCoder, locEncoder *chunkedIntCoder, bufLoc []uint64) (
lastDocNum uint64, lastFreq uint64, lastNorm uint64, bufLocOut []uint64, err error) {
next, err := postItr.Next()
for next != nil && err == nil {
hitNewDocNum := newDocNums[next.Number()]
if hitNewDocNum == docDropped {
return 0, 0, 0, nil, fmt.Errorf("see hit with dropped docNum")
}
newRoaring.Add(uint32(hitNewDocNum))
nextFreq := next.Frequency()
nextNorm := uint64(math.Float32bits(float32(next.Norm())))
locs := next.Locations()
err = tfEncoder.Add(hitNewDocNum,
encodeFreqHasLocs(nextFreq, len(locs) > 0), nextNorm)
if err != nil {
return 0, 0, 0, nil, err
}
if len(locs) > 0 {
numBytesLocs := 0
for _, loc := range locs {
ap := loc.ArrayPositions()
numBytesLocs += totalUvarintBytes(uint64(fieldsMap[loc.Field()]-1),
loc.Pos(), loc.Start(), loc.End(), uint64(len(ap)), ap)
}
err = locEncoder.Add(hitNewDocNum, uint64(numBytesLocs))
if err != nil {
return 0, 0, 0, nil, err
}
for _, loc := range locs {
ap := loc.ArrayPositions()
if cap(bufLoc) < 5+len(ap) {
bufLoc = make([]uint64, 0, 5+len(ap))
}
args := bufLoc[0:5]
args[0] = uint64(fieldsMap[loc.Field()] - 1)
args[1] = loc.Pos()
args[2] = loc.Start()
args[3] = loc.End()
args[4] = uint64(len(ap))
args = append(args, ap...)
err = locEncoder.Add(hitNewDocNum, args...)
if err != nil {
return 0, 0, 0, nil, err
}
}
}
lastDocNum = hitNewDocNum
lastFreq = nextFreq
lastNorm = nextNorm
next, err = postItr.Next()
}
return lastDocNum, lastFreq, lastNorm, bufLoc, err
}
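// writePostings writes out the term frequency/norm, location, and postings
// bitmap data for a single term and returns the offset of the postings
// entry; it applies the 1-hit encoding optimization when possible, and
// returns offset 0 for an empty postings list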
func writePostings(postings *roaring.Bitmap, tfEncoder, locEncoder *chunkedIntCoder,
use1HitEncoding func(uint64) (bool, uint64, uint64),
w *CountHashWriter, bufMaxVarintLen64 []byte) (
offset uint64, err error) {
termCardinality := postings.GetCardinality()
if termCardinality <= 0 {
return 0, nil
}
if use1HitEncoding != nil {
encodeAs1Hit, docNum1Hit, normBits1Hit := use1HitEncoding(termCardinality)
if encodeAs1Hit {
return FSTValEncode1Hit(docNum1Hit, normBits1Hit), nil
}
}
var tfOffset uint64
tfOffset, _, err = tfEncoder.writeAt(w)
if err != nil {
return 0, err
}
var locOffset uint64
locOffset, _, err = locEncoder.writeAt(w)
if err != nil {
return 0, err
}
postingsOffset := uint64(w.Count())
n := binary.PutUvarint(bufMaxVarintLen64, tfOffset)
_, err = w.Write(bufMaxVarintLen64[:n])
if err != nil {
return 0, err
}
n = binary.PutUvarint(bufMaxVarintLen64, locOffset)
_, err = w.Write(bufMaxVarintLen64[:n])
if err != nil {
return 0, err
}
_, err = writeRoaringWithLen(postings, w, bufMaxVarintLen64)
if err != nil {
return 0, err
}
return postingsOffset, nil
}
type varintEncoder func(uint64) (int, error)
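// mergeStoredAndRemap writes out the stored fields of all surviving
// documents, assigning them new doc numbers in the merged segment, and
// returns the stored index offset along with the per-segment old-to-new
// doc number mappings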
func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap,
fieldsMap map[string]uint16, fieldsInv []string, fieldsSame bool, newSegDocCount uint64,
w *CountHashWriter, closeCh chan struct{}) (uint64, [][]uint64, error) {
var rv [][]uint64 // The remapped or newDocNums for each segment.
var newDocNum uint64
var curr int
var data, compressed []byte
var metaBuf bytes.Buffer
varBuf := make([]byte, binary.MaxVarintLen64)
metaEncode := func(val uint64) (int, error) {
wb := binary.PutUvarint(varBuf, val)
return metaBuf.Write(varBuf[:wb])
}
vals := make([][][]byte, len(fieldsInv))
typs := make([][]byte, len(fieldsInv))
poss := make([][][]uint64, len(fieldsInv))
var posBuf []uint64
docNumOffsets := make([]uint64, newSegDocCount)
vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx)
defer visitDocumentCtxPool.Put(vdc)
// for each segment
for segI, segment := range segments {
// check for the closure in meantime
if isClosed(closeCh) {
return 0, nil, seg.ErrClosed
}
segNewDocNums := make([]uint64, segment.numDocs)
dropsI := drops[segI]
// optimize when the field mapping is the same across all
// segments and there are no deletions, via byte-copying
// of stored docs bytes directly to the writer
if fieldsSame && (dropsI == nil || dropsI.GetCardinality() == 0) {
err := segment.copyStoredDocs(newDocNum, docNumOffsets, w)
if err != nil {
return 0, nil, err
}
for i := uint64(0); i < segment.numDocs; i++ {
segNewDocNums[i] = newDocNum
newDocNum++
}
rv = append(rv, segNewDocNums)
continue
}
// for each doc num
for docNum := uint64(0); docNum < segment.numDocs; docNum++ {
// TODO: roaring's API limits docNums to 32-bits?
if dropsI != nil && dropsI.Contains(uint32(docNum)) {
segNewDocNums[docNum] = docDropped
continue
}
segNewDocNums[docNum] = newDocNum
curr = 0
metaBuf.Reset()
data = data[:0]
posTemp := posBuf
// collect all the data
for i := 0; i < len(fieldsInv); i++ {
vals[i] = vals[i][:0]
typs[i] = typs[i][:0]
poss[i] = poss[i][:0]
}
err := segment.visitDocument(vdc, docNum, func(field string, typ byte, value []byte, pos []uint64) bool {
fieldID := int(fieldsMap[field]) - 1
vals[fieldID] = append(vals[fieldID], value)
typs[fieldID] = append(typs[fieldID], typ)
// copy array positions to preserve them beyond the scope of this callback
var curPos []uint64
if len(pos) > 0 {
if cap(posTemp) < len(pos) {
posBuf = make([]uint64, len(pos)*len(fieldsInv))
posTemp = posBuf
}
curPos = posTemp[0:len(pos)]
copy(curPos, pos)
posTemp = posTemp[len(pos):]
}
poss[fieldID] = append(poss[fieldID], curPos)
return true
})
if err != nil {
return 0, nil, err
}
// _id field special case optimizes ExternalID() lookups
idFieldVal := vals[uint16(0)][0]
_, err = metaEncode(uint64(len(idFieldVal)))
if err != nil {
return 0, nil, err
}
// now walk the non-"_id" fields in order
for fieldID := 1; fieldID < len(fieldsInv); fieldID++ {
storedFieldValues := vals[fieldID]
stf := typs[fieldID]
spf := poss[fieldID]
var err2 error
curr, data, err2 = persistStoredFieldValues(fieldID,
storedFieldValues, stf, spf, curr, metaEncode, data)
if err2 != nil {
return 0, nil, err2
}
}
metaBytes := metaBuf.Bytes()
compressed = snappy.Encode(compressed[:cap(compressed)], data)
// record where we're about to start writing
docNumOffsets[newDocNum] = uint64(w.Count())
// write out the meta len and compressed data len
_, err = writeUvarints(w,
uint64(len(metaBytes)),
uint64(len(idFieldVal)+len(compressed)))
if err != nil {
return 0, nil, err
}
// now write the meta
_, err = w.Write(metaBytes)
if err != nil {
return 0, nil, err
}
// now write the _id field val (counted as part of the 'compressed' data)
_, err = w.Write(idFieldVal)
if err != nil {
return 0, nil, err
}
// now write the compressed data
_, err = w.Write(compressed)
if err != nil {
return 0, nil, err
}
newDocNum++
}
rv = append(rv, segNewDocNums)
}
// return value is the start of the stored index
storedIndexOffset := uint64(w.Count())
// now write out the stored doc index
for _, docNumOffset := range docNumOffsets {
err := binary.Write(w, binary.BigEndian, docNumOffset)
if err != nil {
return 0, nil, err
}
}
return storedIndexOffset, rv, nil
}
// copyStoredDocs writes out a segment's stored doc info, optimized by
// using a single Write() call for the entire set of bytes. The
// newDocNumOffsets slice is filled with the new offsets for each doc.
func (s *SegmentBase) copyStoredDocs(newDocNum uint64, newDocNumOffsets []uint64,
w *CountHashWriter) error {
if s.numDocs <= 0 {
return nil
}
indexOffset0, storedOffset0, _, _, _ :=
s.getDocStoredOffsets(0) // the segment's first doc
indexOffsetN, storedOffsetN, readN, metaLenN, dataLenN :=
s.getDocStoredOffsets(s.numDocs - 1) // the segment's last doc
storedOffset0New := uint64(w.Count())
storedBytes := s.mem[storedOffset0 : storedOffsetN+readN+metaLenN+dataLenN]
_, err := w.Write(storedBytes)
if err != nil {
return err
}
// remap the storedOffset's for the docs into new offsets relative
// to storedOffset0New, filling the given docNumOffsetsOut array
for indexOffset := indexOffset0; indexOffset <= indexOffsetN; indexOffset += 8 {
storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8])
storedOffsetNew := storedOffset - storedOffset0 + storedOffset0New
newDocNumOffsets[newDocNum] = storedOffsetNew
newDocNum += 1
}
return nil
}
// mergeFields builds a unified list of fields used across all the
// input segments, and computes whether the fields are the same across
// segments (which requires the fields to be sorted in the same way
// across segments)
func mergeFields(segments []*SegmentBase) (bool, []string) {
fieldsSame := true
var segment0Fields []string
if len(segments) > 0 {
segment0Fields = segments[0].Fields()
}
fieldsExist := map[string]struct{}{}
for _, segment := range segments {
fields := segment.Fields()
for fieldi, field := range fields {
fieldsExist[field] = struct{}{}
if len(segment0Fields) != len(fields) || segment0Fields[fieldi] != field {
fieldsSame = false
}
}
}
rv := make([]string, 0, len(fieldsExist))
// ensure _id stays first
rv = append(rv, "_id")
for k := range fieldsExist {
if k != "_id" {
rv = append(rv, k)
}
}
sort.Strings(rv[1:]) // leave _id as first
return fieldsSame, rv
}
func isClosed(closeCh chan struct{}) bool {
select {
case <-closeCh:
return true
default:
return false
}
}
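// Editor's note: the following is an illustrative sketch, not part of the
// vendored zap source. It shows how the closeCh parameter threads
// cancellation through a merge: closing the channel makes the periodic
// isClosed checks above return seg.ErrClosed. The helper name, the
// one-minute deadline and the time import are assumptions made for the
// example.
func mergeWithDeadline(segments []seg.Segment, drops []*roaring.Bitmap,
	path string, stats seg.StatsReporter) ([][]uint64, uint64, error) {
	closeCh := make(chan struct{})
	timer := time.AfterFunc(time.Minute, func() { close(closeCh) })
	defer timer.Stop()
	return (&ZapPlugin{}).Merge(segments, drops, path, closeCh, stats)
}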

860
vendor/github.com/blevesearch/zap/v14/new.go generated vendored Normal file
View File

@ -0,0 +1,860 @@
// Copyright (c) 2018 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import (
"bytes"
"encoding/binary"
"math"
"sort"
"sync"
"github.com/RoaringBitmap/roaring"
"github.com/blevesearch/bleve/analysis"
"github.com/blevesearch/bleve/document"
"github.com/blevesearch/bleve/index"
"github.com/blevesearch/bleve/index/scorch/segment"
"github.com/couchbase/vellum"
"github.com/golang/snappy"
)
var NewSegmentBufferNumResultsBump int = 100
var NewSegmentBufferNumResultsFactor float64 = 1.0
var NewSegmentBufferAvgBytesPerDocFactor float64 = 1.0
// ValidateDocFields can be set by applications to perform additional checks
// on fields in a document being added to a new segment; by default it does
// nothing.
// This API is experimental and may be removed at any time.
var ValidateDocFields = func(field document.Field) error {
return nil
}
// New produces an in-memory zap-encoded SegmentBase from the
// provided analysis results
func (z *ZapPlugin) New(results []*index.AnalysisResult) (
segment.Segment, uint64, error) {
return z.newWithChunkMode(results, DefaultChunkMode)
}
func (*ZapPlugin) newWithChunkMode(results []*index.AnalysisResult,
chunkMode uint32) (segment.Segment, uint64, error) {
s := interimPool.Get().(*interim)
var br bytes.Buffer
if s.lastNumDocs > 0 {
// use previous results to initialize the buf with an estimate
// size, but note that the interim instance comes from a
// global interimPool, so multiple scorch instances indexing
// different docs can lead to low quality estimates
estimateAvgBytesPerDoc := int(float64(s.lastOutSize/s.lastNumDocs) *
NewSegmentBufferAvgBytesPerDocFactor)
estimateNumResults := int(float64(len(results)+NewSegmentBufferNumResultsBump) *
NewSegmentBufferNumResultsFactor)
br.Grow(estimateAvgBytesPerDoc * estimateNumResults)
}
s.results = results
s.chunkMode = chunkMode
s.w = NewCountHashWriter(&br)
storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets,
err := s.convert()
if err != nil {
return nil, uint64(0), err
}
sb, err := InitSegmentBase(br.Bytes(), s.w.Sum32(), chunkMode,
s.FieldsMap, s.FieldsInv, uint64(len(results)),
storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets)
if err == nil && s.reset() == nil {
s.lastNumDocs = len(results)
s.lastOutSize = len(br.Bytes())
interimPool.Put(s)
}
return sb, uint64(len(br.Bytes())), err
}
var interimPool = sync.Pool{New: func() interface{} { return &interim{} }}
// interim holds temporary working data used while converting from
// analysis results to a zap-encoded segment
type interim struct {
results []*index.AnalysisResult
chunkMode uint32
w *CountHashWriter
// FieldsMap adds 1 to field id to avoid zero value issues
// name -> field id + 1
FieldsMap map[string]uint16
// FieldsInv is the inverse of FieldsMap
// field id -> name
FieldsInv []string
// Term dictionaries for each field
// field id -> term -> postings list id + 1
Dicts []map[string]uint64
// Terms for each field, where terms are sorted ascending
// field id -> []term
DictKeys [][]string
// Fields whose IncludeDocValues is true
// field id -> bool
IncludeDocValues []bool
// postings id -> bitmap of docNums
Postings []*roaring.Bitmap
// postings id -> freq/norm's, one for each docNum in postings
FreqNorms [][]interimFreqNorm
freqNormsBacking []interimFreqNorm
// postings id -> locs, one for each freq
Locs [][]interimLoc
locsBacking []interimLoc
numTermsPerPostingsList []int // key is postings list id
numLocsPerPostingsList []int // key is postings list id
builder *vellum.Builder
builderBuf bytes.Buffer
metaBuf bytes.Buffer
tmp0 []byte
tmp1 []byte
lastNumDocs int
lastOutSize int
}
func (s *interim) reset() (err error) {
s.results = nil
s.chunkMode = 0
s.w = nil
s.FieldsMap = nil
s.FieldsInv = nil
for i := range s.Dicts {
s.Dicts[i] = nil
}
s.Dicts = s.Dicts[:0]
for i := range s.DictKeys {
s.DictKeys[i] = s.DictKeys[i][:0]
}
s.DictKeys = s.DictKeys[:0]
for i := range s.IncludeDocValues {
s.IncludeDocValues[i] = false
}
s.IncludeDocValues = s.IncludeDocValues[:0]
for _, idn := range s.Postings {
idn.Clear()
}
s.Postings = s.Postings[:0]
s.FreqNorms = s.FreqNorms[:0]
for i := range s.freqNormsBacking {
s.freqNormsBacking[i] = interimFreqNorm{}
}
s.freqNormsBacking = s.freqNormsBacking[:0]
s.Locs = s.Locs[:0]
for i := range s.locsBacking {
s.locsBacking[i] = interimLoc{}
}
s.locsBacking = s.locsBacking[:0]
s.numTermsPerPostingsList = s.numTermsPerPostingsList[:0]
s.numLocsPerPostingsList = s.numLocsPerPostingsList[:0]
s.builderBuf.Reset()
if s.builder != nil {
err = s.builder.Reset(&s.builderBuf)
}
s.metaBuf.Reset()
s.tmp0 = s.tmp0[:0]
s.tmp1 = s.tmp1[:0]
s.lastNumDocs = 0
s.lastOutSize = 0
return err
}
func (s *interim) grabBuf(size int) []byte {
buf := s.tmp0
if cap(buf) < size {
buf = make([]byte, size)
s.tmp0 = buf
}
return buf[0:size]
}
type interimStoredField struct {
vals [][]byte
typs []byte
arrayposs [][]uint64 // array positions
}
type interimFreqNorm struct {
freq uint64
norm float32
numLocs int
}
type interimLoc struct {
fieldID uint16
pos uint64
start uint64
end uint64
arrayposs []uint64
}
func (s *interim) convert() (uint64, uint64, uint64, []uint64, error) {
s.FieldsMap = map[string]uint16{}
s.getOrDefineField("_id") // _id field is fieldID 0
for _, result := range s.results {
for _, field := range result.Document.CompositeFields {
s.getOrDefineField(field.Name())
}
for _, field := range result.Document.Fields {
s.getOrDefineField(field.Name())
}
}
sort.Strings(s.FieldsInv[1:]) // keep _id as first field
for fieldID, fieldName := range s.FieldsInv {
s.FieldsMap[fieldName] = uint16(fieldID + 1)
}
if cap(s.IncludeDocValues) >= len(s.FieldsInv) {
s.IncludeDocValues = s.IncludeDocValues[:len(s.FieldsInv)]
} else {
s.IncludeDocValues = make([]bool, len(s.FieldsInv))
}
s.prepareDicts()
for _, dict := range s.DictKeys {
sort.Strings(dict)
}
s.processDocuments()
storedIndexOffset, err := s.writeStoredFields()
if err != nil {
return 0, 0, 0, nil, err
}
var fdvIndexOffset uint64
var dictOffsets []uint64
if len(s.results) > 0 {
fdvIndexOffset, dictOffsets, err = s.writeDicts()
if err != nil {
return 0, 0, 0, nil, err
}
} else {
dictOffsets = make([]uint64, len(s.FieldsInv))
}
fieldsIndexOffset, err := persistFields(s.FieldsInv, s.w, dictOffsets)
if err != nil {
return 0, 0, 0, nil, err
}
return storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets, nil
}
func (s *interim) getOrDefineField(fieldName string) int {
fieldIDPlus1, exists := s.FieldsMap[fieldName]
if !exists {
fieldIDPlus1 = uint16(len(s.FieldsInv) + 1)
s.FieldsMap[fieldName] = fieldIDPlus1
s.FieldsInv = append(s.FieldsInv, fieldName)
s.Dicts = append(s.Dicts, make(map[string]uint64))
n := len(s.DictKeys)
if n < cap(s.DictKeys) {
s.DictKeys = s.DictKeys[:n+1]
s.DictKeys[n] = s.DictKeys[n][:0]
} else {
s.DictKeys = append(s.DictKeys, []string(nil))
}
}
return int(fieldIDPlus1 - 1)
}
// fill Dicts and DictKeys from analysis results
func (s *interim) prepareDicts() {
var pidNext int
var totTFs int
var totLocs int
visitField := func(fieldID uint16, tfs analysis.TokenFrequencies) {
dict := s.Dicts[fieldID]
dictKeys := s.DictKeys[fieldID]
for term, tf := range tfs {
pidPlus1, exists := dict[term]
if !exists {
pidNext++
pidPlus1 = uint64(pidNext)
dict[term] = pidPlus1
dictKeys = append(dictKeys, term)
s.numTermsPerPostingsList = append(s.numTermsPerPostingsList, 0)
s.numLocsPerPostingsList = append(s.numLocsPerPostingsList, 0)
}
pid := pidPlus1 - 1
s.numTermsPerPostingsList[pid] += 1
s.numLocsPerPostingsList[pid] += len(tf.Locations)
totLocs += len(tf.Locations)
}
totTFs += len(tfs)
s.DictKeys[fieldID] = dictKeys
}
for _, result := range s.results {
// walk each composite field
for _, field := range result.Document.CompositeFields {
fieldID := uint16(s.getOrDefineField(field.Name()))
_, tf := field.Analyze()
visitField(fieldID, tf)
}
// walk each field
for i, field := range result.Document.Fields {
fieldID := uint16(s.getOrDefineField(field.Name()))
tf := result.Analyzed[i]
visitField(fieldID, tf)
}
}
numPostingsLists := pidNext
if cap(s.Postings) >= numPostingsLists {
s.Postings = s.Postings[:numPostingsLists]
} else {
postings := make([]*roaring.Bitmap, numPostingsLists)
copy(postings, s.Postings[:cap(s.Postings)])
for i := 0; i < numPostingsLists; i++ {
if postings[i] == nil {
postings[i] = roaring.New()
}
}
s.Postings = postings
}
if cap(s.FreqNorms) >= numPostingsLists {
s.FreqNorms = s.FreqNorms[:numPostingsLists]
} else {
s.FreqNorms = make([][]interimFreqNorm, numPostingsLists)
}
if cap(s.freqNormsBacking) >= totTFs {
s.freqNormsBacking = s.freqNormsBacking[:totTFs]
} else {
s.freqNormsBacking = make([]interimFreqNorm, totTFs)
}
freqNormsBacking := s.freqNormsBacking
for pid, numTerms := range s.numTermsPerPostingsList {
s.FreqNorms[pid] = freqNormsBacking[0:0]
freqNormsBacking = freqNormsBacking[numTerms:]
}
if cap(s.Locs) >= numPostingsLists {
s.Locs = s.Locs[:numPostingsLists]
} else {
s.Locs = make([][]interimLoc, numPostingsLists)
}
if cap(s.locsBacking) >= totLocs {
s.locsBacking = s.locsBacking[:totLocs]
} else {
s.locsBacking = make([]interimLoc, totLocs)
}
locsBacking := s.locsBacking
for pid, numLocs := range s.numLocsPerPostingsList {
s.Locs[pid] = locsBacking[0:0]
locsBacking = locsBacking[numLocs:]
}
}
func (s *interim) processDocuments() {
numFields := len(s.FieldsInv)
reuseFieldLens := make([]int, numFields)
reuseFieldTFs := make([]analysis.TokenFrequencies, numFields)
for docNum, result := range s.results {
for i := 0; i < numFields; i++ { // clear these for reuse
reuseFieldLens[i] = 0
reuseFieldTFs[i] = nil
}
s.processDocument(uint64(docNum), result,
reuseFieldLens, reuseFieldTFs)
}
}
func (s *interim) processDocument(docNum uint64,
result *index.AnalysisResult,
fieldLens []int, fieldTFs []analysis.TokenFrequencies) {
visitField := func(fieldID uint16, fieldName string,
ln int, tf analysis.TokenFrequencies) {
fieldLens[fieldID] += ln
existingFreqs := fieldTFs[fieldID]
if existingFreqs != nil {
existingFreqs.MergeAll(fieldName, tf)
} else {
fieldTFs[fieldID] = tf
}
}
// walk each composite field
for _, field := range result.Document.CompositeFields {
fieldID := uint16(s.getOrDefineField(field.Name()))
ln, tf := field.Analyze()
visitField(fieldID, field.Name(), ln, tf)
}
// walk each field
for i, field := range result.Document.Fields {
fieldID := uint16(s.getOrDefineField(field.Name()))
ln := result.Length[i]
tf := result.Analyzed[i]
visitField(fieldID, field.Name(), ln, tf)
}
// now that it's been rolled up into fieldTFs, walk that
for fieldID, tfs := range fieldTFs {
dict := s.Dicts[fieldID]
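// the norm for this field is 1/sqrt(field length), stored as a float32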
norm := float32(1.0 / math.Sqrt(float64(fieldLens[fieldID])))
for term, tf := range tfs {
pid := dict[term] - 1
bs := s.Postings[pid]
bs.Add(uint32(docNum))
s.FreqNorms[pid] = append(s.FreqNorms[pid],
interimFreqNorm{
freq: uint64(tf.Frequency()),
norm: norm,
numLocs: len(tf.Locations),
})
if len(tf.Locations) > 0 {
locs := s.Locs[pid]
for _, loc := range tf.Locations {
var locf = uint16(fieldID)
if loc.Field != "" {
locf = uint16(s.getOrDefineField(loc.Field))
}
var arrayposs []uint64
if len(loc.ArrayPositions) > 0 {
arrayposs = loc.ArrayPositions
}
locs = append(locs, interimLoc{
fieldID: locf,
pos: uint64(loc.Position),
start: uint64(loc.Start),
end: uint64(loc.End),
arrayposs: arrayposs,
})
}
s.Locs[pid] = locs
}
}
}
}
func (s *interim) writeStoredFields() (
storedIndexOffset uint64, err error) {
varBuf := make([]byte, binary.MaxVarintLen64)
metaEncode := func(val uint64) (int, error) {
wb := binary.PutUvarint(varBuf, val)
return s.metaBuf.Write(varBuf[:wb])
}
data, compressed := s.tmp0[:0], s.tmp1[:0]
defer func() { s.tmp0, s.tmp1 = data, compressed }()
// keyed by docNum
docStoredOffsets := make([]uint64, len(s.results))
// keyed by fieldID, for the current doc in the loop
docStoredFields := map[uint16]interimStoredField{}
for docNum, result := range s.results {
for fieldID := range docStoredFields { // reset for next doc
delete(docStoredFields, fieldID)
}
for _, field := range result.Document.Fields {
fieldID := uint16(s.getOrDefineField(field.Name()))
opts := field.Options()
if opts.IsStored() {
isf := docStoredFields[fieldID]
isf.vals = append(isf.vals, field.Value())
isf.typs = append(isf.typs, encodeFieldType(field))
isf.arrayposs = append(isf.arrayposs, field.ArrayPositions())
docStoredFields[fieldID] = isf
}
if opts.IncludeDocValues() {
s.IncludeDocValues[fieldID] = true
}
err := ValidateDocFields(field)
if err != nil {
return 0, err
}
}
var curr int
s.metaBuf.Reset()
data = data[:0]
// _id field special case optimizes ExternalID() lookups
idFieldVal := docStoredFields[uint16(0)].vals[0]
_, err = metaEncode(uint64(len(idFieldVal)))
if err != nil {
return 0, err
}
// handle non-"_id" fields
for fieldID := 1; fieldID < len(s.FieldsInv); fieldID++ {
isf, exists := docStoredFields[uint16(fieldID)]
if exists {
curr, data, err = persistStoredFieldValues(
fieldID, isf.vals, isf.typs, isf.arrayposs,
curr, metaEncode, data)
if err != nil {
return 0, err
}
}
}
metaBytes := s.metaBuf.Bytes()
compressed = snappy.Encode(compressed[:cap(compressed)], data)
docStoredOffsets[docNum] = uint64(s.w.Count())
_, err := writeUvarints(s.w,
uint64(len(metaBytes)),
uint64(len(idFieldVal)+len(compressed)))
if err != nil {
return 0, err
}
_, err = s.w.Write(metaBytes)
if err != nil {
return 0, err
}
_, err = s.w.Write(idFieldVal)
if err != nil {
return 0, err
}
_, err = s.w.Write(compressed)
if err != nil {
return 0, err
}
}
storedIndexOffset = uint64(s.w.Count())
for _, docStoredOffset := range docStoredOffsets {
err = binary.Write(s.w, binary.BigEndian, docStoredOffset)
if err != nil {
return 0, err
}
}
return storedIndexOffset, nil
}
func (s *interim) writeDicts() (fdvIndexOffset uint64, dictOffsets []uint64, err error) {
dictOffsets = make([]uint64, len(s.FieldsInv))
fdvOffsetsStart := make([]uint64, len(s.FieldsInv))
fdvOffsetsEnd := make([]uint64, len(s.FieldsInv))
buf := s.grabBuf(binary.MaxVarintLen64)
// these int coders are initialized with chunk size 1024
// however this will be reset to the correct chunk size
// while processing each individual field-term section
tfEncoder := newChunkedIntCoder(1024, uint64(len(s.results)-1))
locEncoder := newChunkedIntCoder(1024, uint64(len(s.results)-1))
var docTermMap [][]byte
if s.builder == nil {
s.builder, err = vellum.New(&s.builderBuf, nil)
if err != nil {
return 0, nil, err
}
}
for fieldID, terms := range s.DictKeys {
if cap(docTermMap) < len(s.results) {
docTermMap = make([][]byte, len(s.results))
} else {
docTermMap = docTermMap[0:len(s.results)]
for docNum := range docTermMap { // reset the docTermMap
docTermMap[docNum] = docTermMap[docNum][:0]
}
}
dict := s.Dicts[fieldID]
for _, term := range terms { // terms are already sorted
pid := dict[term] - 1
postingsBS := s.Postings[pid]
freqNorms := s.FreqNorms[pid]
freqNormOffset := 0
locs := s.Locs[pid]
locOffset := 0
chunkSize, err := getChunkSize(s.chunkMode, postingsBS.GetCardinality(), uint64(len(s.results)))
if err != nil {
return 0, nil, err
}
tfEncoder.SetChunkSize(chunkSize, uint64(len(s.results)-1))
locEncoder.SetChunkSize(chunkSize, uint64(len(s.results)-1))
postingsItr := postingsBS.Iterator()
for postingsItr.HasNext() {
docNum := uint64(postingsItr.Next())
freqNorm := freqNorms[freqNormOffset]
err = tfEncoder.Add(docNum,
encodeFreqHasLocs(freqNorm.freq, freqNorm.numLocs > 0),
uint64(math.Float32bits(freqNorm.norm)))
if err != nil {
return 0, nil, err
}
if freqNorm.numLocs > 0 {
numBytesLocs := 0
for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] {
numBytesLocs += totalUvarintBytes(
uint64(loc.fieldID), loc.pos, loc.start, loc.end,
uint64(len(loc.arrayposs)), loc.arrayposs)
}
err = locEncoder.Add(docNum, uint64(numBytesLocs))
if err != nil {
return 0, nil, err
}
for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] {
err = locEncoder.Add(docNum,
uint64(loc.fieldID), loc.pos, loc.start, loc.end,
uint64(len(loc.arrayposs)))
if err != nil {
return 0, nil, err
}
err = locEncoder.Add(docNum, loc.arrayposs...)
if err != nil {
return 0, nil, err
}
}
locOffset += freqNorm.numLocs
}
freqNormOffset++
docTermMap[docNum] = append(
append(docTermMap[docNum], term...),
termSeparator)
}
tfEncoder.Close()
locEncoder.Close()
postingsOffset, err :=
writePostings(postingsBS, tfEncoder, locEncoder, nil, s.w, buf)
if err != nil {
return 0, nil, err
}
if postingsOffset > uint64(0) {
err = s.builder.Insert([]byte(term), postingsOffset)
if err != nil {
return 0, nil, err
}
}
tfEncoder.Reset()
locEncoder.Reset()
}
err = s.builder.Close()
if err != nil {
return 0, nil, err
}
// record where this dictionary starts
dictOffsets[fieldID] = uint64(s.w.Count())
vellumData := s.builderBuf.Bytes()
// write out the length of the vellum data
n := binary.PutUvarint(buf, uint64(len(vellumData)))
_, err = s.w.Write(buf[:n])
if err != nil {
return 0, nil, err
}
// write this vellum to disk
_, err = s.w.Write(vellumData)
if err != nil {
return 0, nil, err
}
// reset vellum for reuse
s.builderBuf.Reset()
err = s.builder.Reset(&s.builderBuf)
if err != nil {
return 0, nil, err
}
// write the field doc values
// NOTE: doc values continue to use legacy chunk mode
chunkSize, err := getChunkSize(LegacyChunkMode, 0, 0)
if err != nil {
return 0, nil, err
}
fdvEncoder := newChunkedContentCoder(chunkSize, uint64(len(s.results)-1), s.w, false)
if s.IncludeDocValues[fieldID] {
for docNum, docTerms := range docTermMap {
if len(docTerms) > 0 {
err = fdvEncoder.Add(uint64(docNum), docTerms)
if err != nil {
return 0, nil, err
}
}
}
err = fdvEncoder.Close()
if err != nil {
return 0, nil, err
}
fdvOffsetsStart[fieldID] = uint64(s.w.Count())
_, err = fdvEncoder.Write()
if err != nil {
return 0, nil, err
}
fdvOffsetsEnd[fieldID] = uint64(s.w.Count())
fdvEncoder.Reset()
} else {
fdvOffsetsStart[fieldID] = fieldNotUninverted
fdvOffsetsEnd[fieldID] = fieldNotUninverted
}
}
fdvIndexOffset = uint64(s.w.Count())
for i := 0; i < len(fdvOffsetsStart); i++ {
n := binary.PutUvarint(buf, fdvOffsetsStart[i])
_, err := s.w.Write(buf[:n])
if err != nil {
return 0, nil, err
}
n = binary.PutUvarint(buf, fdvOffsetsEnd[i])
_, err = s.w.Write(buf[:n])
if err != nil {
return 0, nil, err
}
}
return fdvIndexOffset, dictOffsets, nil
}
func encodeFieldType(f document.Field) byte {
fieldType := byte('x')
switch f.(type) {
case *document.TextField:
fieldType = 't'
case *document.NumericField:
fieldType = 'n'
case *document.DateTimeField:
fieldType = 'd'
case *document.BooleanField:
fieldType = 'b'
case *document.GeoPointField:
fieldType = 'g'
case *document.CompositeField:
fieldType = 'c'
}
return fieldType
}
// returns the total # of bytes needed to encode the given uint64s
// into binary.PutUvarint() encoding
func totalUvarintBytes(a, b, c, d, e uint64, more []uint64) (n int) {
n = numUvarintBytes(a)
n += numUvarintBytes(b)
n += numUvarintBytes(c)
n += numUvarintBytes(d)
n += numUvarintBytes(e)
for _, v := range more {
n += numUvarintBytes(v)
}
return n
}
// returns # of bytes needed to encode x in binary.PutUvarint() encoding
func numUvarintBytes(x uint64) (n int) {
for x >= 0x80 {
x >>= 7
n++
}
return n + 1
}
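// A minimal illustrative sketch of the byte counts computed above: each varint
// byte carries 7 payload bits, so values below 0x80 take one byte, values up
// to 0x3fff take two, and so on. The helper name is illustrative only.
func exampleUvarintByteCounts() (int, int, int) {
	return numUvarintBytes(127), numUvarintBytes(128), numUvarintBytes(16384) // 1, 2, 3
}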

37
vendor/github.com/blevesearch/zap/v14/plugin.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
// Copyright (c) 2020 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import (
"github.com/blevesearch/bleve/index/scorch/segment"
)
// ZapPlugin implements the Plugin interface of
// the blevesearch/bleve/index/scorch/segment pkg
type ZapPlugin struct{}
func (*ZapPlugin) Type() string {
return Type
}
func (*ZapPlugin) Version() uint32 {
return Version
}
// Plugin returns an instance segment.Plugin for use
// by the Scorch indexing scheme
func Plugin() segment.Plugin {
return &ZapPlugin{}
}

798
vendor/github.com/blevesearch/zap/v14/posting.go generated vendored Normal file
View File

@ -0,0 +1,798 @@
// Copyright (c) 2017 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import (
"encoding/binary"
"fmt"
"math"
"reflect"
"github.com/RoaringBitmap/roaring"
"github.com/blevesearch/bleve/index/scorch/segment"
"github.com/blevesearch/bleve/size"
)
var reflectStaticSizePostingsList int
var reflectStaticSizePostingsIterator int
var reflectStaticSizePosting int
var reflectStaticSizeLocation int
func init() {
var pl PostingsList
reflectStaticSizePostingsList = int(reflect.TypeOf(pl).Size())
var pi PostingsIterator
reflectStaticSizePostingsIterator = int(reflect.TypeOf(pi).Size())
var p Posting
reflectStaticSizePosting = int(reflect.TypeOf(p).Size())
var l Location
reflectStaticSizeLocation = int(reflect.TypeOf(l).Size())
}
// FST or vellum value (uint64) encoding is determined by the top two
// highest-order or most significant bits...
//
// encoding : MSB
// name : 63 62 61...to...bit #0 (LSB)
// ----------+---+---+---------------------------------------------------
// general : 0 | 0 | 62-bits of postingsOffset.
// ~ : 0 | 1 | reserved for future.
// 1-hit : 1 | 0 | 31-bits of positive float31 norm | 31-bits docNum.
// ~ : 1 | 1 | reserved for future.
//
// Encoding "general" is able to handle all cases, where the
// postingsOffset points to more information about the postings for
// the term.
//
// Encoding "1-hit" is used to optimize a commonly seen case when a
// term has only a single hit. For example, a term in the _id field
// will have only 1 hit. The "1-hit" encoding is used for a term
// in a field when...
//
// - term vector info is disabled for that field;
// - and, the term appears in only a single doc for that field;
// - and, the term's freq is exactly 1 in that single doc for that field;
// - and, the docNum must fit into 31-bits;
//
// Otherwise, the "general" encoding is used instead.
//
// In the "1-hit" encoding, the field in that single doc may have
// other terms, which is supported in the "1-hit" encoding by the
// positive float31 norm.
const FSTValEncodingMask = uint64(0xc000000000000000)
const FSTValEncodingGeneral = uint64(0x0000000000000000)
const FSTValEncoding1Hit = uint64(0x8000000000000000)
func FSTValEncode1Hit(docNum uint64, normBits uint64) uint64 {
return FSTValEncoding1Hit | ((mask31Bits & normBits) << 31) | (mask31Bits & docNum)
}
func FSTValDecode1Hit(v uint64) (docNum uint64, normBits uint64) {
return (mask31Bits & v), (mask31Bits & (v >> 31))
}
const mask31Bits = uint64(0x000000007fffffff)
func under32Bits(x uint64) bool {
return x <= mask31Bits
}
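// A minimal illustrative sketch (the helper name is not part of the upstream
// API) of how a "1-hit" value round-trips through the functions above: docNum
// 42 with a norm of 1.0 (NormBits1Hit) packs into one uint64 and decodes back.
func example1HitRoundTrip() (docNum uint64, norm float32) {
	v := FSTValEncode1Hit(42, NormBits1Hit) // MSBs "10", then 31 norm bits, then 31 docNum bits
	dn, nb := FSTValDecode1Hit(v)           // dn == 42, nb == NormBits1Hit
	return dn, math.Float32frombits(uint32(nb))
}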
const DocNum1HitFinished = math.MaxUint64
var NormBits1Hit = uint64(math.Float32bits(float32(1)))
// PostingsList is an in-memory representation of a postings list
type PostingsList struct {
sb *SegmentBase
postingsOffset uint64
freqOffset uint64
locOffset uint64
postings *roaring.Bitmap
except *roaring.Bitmap
// when normBits1Hit != 0, then this postings list came from a
// 1-hit encoding, and only the docNum1Hit & normBits1Hit apply
docNum1Hit uint64
normBits1Hit uint64
}
// represents an immutable, empty postings list
var emptyPostingsList = &PostingsList{}
func (p *PostingsList) Size() int {
sizeInBytes := reflectStaticSizePostingsList + size.SizeOfPtr
if p.except != nil {
sizeInBytes += int(p.except.GetSizeInBytes())
}
return sizeInBytes
}
func (p *PostingsList) OrInto(receiver *roaring.Bitmap) {
if p.normBits1Hit != 0 {
receiver.Add(uint32(p.docNum1Hit))
return
}
if p.postings != nil {
receiver.Or(p.postings)
}
}
// Iterator returns an iterator for this postings list
func (p *PostingsList) Iterator(includeFreq, includeNorm, includeLocs bool,
prealloc segment.PostingsIterator) segment.PostingsIterator {
if p.normBits1Hit == 0 && p.postings == nil {
return emptyPostingsIterator
}
var preallocPI *PostingsIterator
pi, ok := prealloc.(*PostingsIterator)
if ok && pi != nil {
preallocPI = pi
}
if preallocPI == emptyPostingsIterator {
preallocPI = nil
}
return p.iterator(includeFreq, includeNorm, includeLocs, preallocPI)
}
func (p *PostingsList) iterator(includeFreq, includeNorm, includeLocs bool,
rv *PostingsIterator) *PostingsIterator {
if rv == nil {
rv = &PostingsIterator{}
} else {
freqNormReader := rv.freqNormReader
if freqNormReader != nil {
freqNormReader.reset()
}
locReader := rv.locReader
if locReader != nil {
locReader.reset()
}
nextLocs := rv.nextLocs[:0]
nextSegmentLocs := rv.nextSegmentLocs[:0]
buf := rv.buf
*rv = PostingsIterator{} // clear the struct
rv.freqNormReader = freqNormReader
rv.locReader = locReader
rv.nextLocs = nextLocs
rv.nextSegmentLocs = nextSegmentLocs
rv.buf = buf
}
rv.postings = p
rv.includeFreqNorm = includeFreq || includeNorm || includeLocs
rv.includeLocs = includeLocs
if p.normBits1Hit != 0 {
// "1-hit" encoding
rv.docNum1Hit = p.docNum1Hit
rv.normBits1Hit = p.normBits1Hit
if p.except != nil && p.except.Contains(uint32(rv.docNum1Hit)) {
rv.docNum1Hit = DocNum1HitFinished
}
return rv
}
// "general" encoding, check if empty
if p.postings == nil {
return rv
}
// initialize freq chunk reader
if rv.includeFreqNorm {
rv.freqNormReader = newChunkedIntDecoder(p.sb.mem, p.freqOffset)
}
// initialize the loc chunk reader
if rv.includeLocs {
rv.locReader = newChunkedIntDecoder(p.sb.mem, p.locOffset)
}
rv.all = p.postings.Iterator()
if p.except != nil {
rv.ActualBM = roaring.AndNot(p.postings, p.except)
rv.Actual = rv.ActualBM.Iterator()
} else {
rv.ActualBM = p.postings
rv.Actual = rv.all // Optimize to use same iterator for all & Actual.
}
return rv
}
// Count returns the number of items on this postings list
func (p *PostingsList) Count() uint64 {
var n, e uint64
if p.normBits1Hit != 0 {
n = 1
if p.except != nil && p.except.Contains(uint32(p.docNum1Hit)) {
e = 1
}
} else if p.postings != nil {
n = p.postings.GetCardinality()
if p.except != nil {
e = p.postings.AndCardinality(p.except)
}
}
return n - e
}
func (rv *PostingsList) read(postingsOffset uint64, d *Dictionary) error {
rv.postingsOffset = postingsOffset
// handle "1-hit" encoding special case
if rv.postingsOffset&FSTValEncodingMask == FSTValEncoding1Hit {
return rv.init1Hit(postingsOffset)
}
// read the location of the freq/norm details
var n uint64
var read int
rv.freqOffset, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+binary.MaxVarintLen64])
n += uint64(read)
rv.locOffset, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+n+binary.MaxVarintLen64])
n += uint64(read)
var postingsLen uint64
postingsLen, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+n+binary.MaxVarintLen64])
n += uint64(read)
roaringBytes := d.sb.mem[postingsOffset+n : postingsOffset+n+postingsLen]
if rv.postings == nil {
rv.postings = roaring.NewBitmap()
}
_, err := rv.postings.FromBuffer(roaringBytes)
if err != nil {
return fmt.Errorf("error loading roaring bitmap: %v", err)
}
return nil
}
func (rv *PostingsList) init1Hit(fstVal uint64) error {
docNum, normBits := FSTValDecode1Hit(fstVal)
rv.docNum1Hit = docNum
rv.normBits1Hit = normBits
return nil
}
// PostingsIterator provides a way to iterate through the postings list
type PostingsIterator struct {
postings *PostingsList
all roaring.IntPeekable
Actual roaring.IntPeekable
ActualBM *roaring.Bitmap
currChunk uint32
freqNormReader *chunkedIntDecoder
locReader *chunkedIntDecoder
next Posting // reused across Next() calls
nextLocs []Location // reused across Next() calls
nextSegmentLocs []segment.Location // reused across Next() calls
docNum1Hit uint64
normBits1Hit uint64
buf []byte
includeFreqNorm bool
includeLocs bool
}
var emptyPostingsIterator = &PostingsIterator{}
func (i *PostingsIterator) Size() int {
sizeInBytes := reflectStaticSizePostingsIterator + size.SizeOfPtr +
i.next.Size()
// account for freqNormReader, locReader if we start using this.
for _, entry := range i.nextLocs {
sizeInBytes += entry.Size()
}
return sizeInBytes
}
func (i *PostingsIterator) loadChunk(chunk int) error {
if i.includeFreqNorm {
err := i.freqNormReader.loadChunk(chunk)
if err != nil {
return err
}
}
if i.includeLocs {
err := i.locReader.loadChunk(chunk)
if err != nil {
return err
}
}
i.currChunk = uint32(chunk)
return nil
}
func (i *PostingsIterator) readFreqNormHasLocs() (uint64, uint64, bool, error) {
if i.normBits1Hit != 0 {
return 1, i.normBits1Hit, false, nil
}
freqHasLocs, err := i.freqNormReader.readUvarint()
if err != nil {
return 0, 0, false, fmt.Errorf("error reading frequency: %v", err)
}
freq, hasLocs := decodeFreqHasLocs(freqHasLocs)
normBits, err := i.freqNormReader.readUvarint()
if err != nil {
return 0, 0, false, fmt.Errorf("error reading norm: %v", err)
}
return freq, normBits, hasLocs, nil
}
func (i *PostingsIterator) skipFreqNormReadHasLocs() (bool, error) {
if i.normBits1Hit != 0 {
return false, nil
}
freqHasLocs, err := i.freqNormReader.readUvarint()
if err != nil {
return false, fmt.Errorf("error reading freqHasLocs: %v", err)
}
i.freqNormReader.SkipUvarint() // Skip normBits.
return freqHasLocs&0x01 != 0, nil // See decodeFreqHasLocs() / hasLocs.
}
func encodeFreqHasLocs(freq uint64, hasLocs bool) uint64 {
rv := freq << 1
if hasLocs {
rv = rv | 0x01 // 0'th LSB encodes whether there are locations
}
return rv
}
func decodeFreqHasLocs(freqHasLocs uint64) (uint64, bool) {
freq := freqHasLocs >> 1
hasLocs := freqHasLocs&0x01 != 0
return freq, hasLocs
}
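// A minimal illustrative sketch of the packing above: a term frequency of 3
// with locations present encodes as (3<<1)|1 == 7 and decodes back to (3, true).
// The helper name is illustrative only.
func exampleFreqHasLocsRoundTrip() (uint64, bool) {
	packed := encodeFreqHasLocs(3, true) // == 7
	return decodeFreqHasLocs(packed)     // == 3, true
}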
// readLocation processes all the integers on the stream representing a single
// location.
func (i *PostingsIterator) readLocation(l *Location) error {
// read off field
fieldID, err := i.locReader.readUvarint()
if err != nil {
return fmt.Errorf("error reading location field: %v", err)
}
// read off pos
pos, err := i.locReader.readUvarint()
if err != nil {
return fmt.Errorf("error reading location pos: %v", err)
}
// read off start
start, err := i.locReader.readUvarint()
if err != nil {
return fmt.Errorf("error reading location start: %v", err)
}
// read off end
end, err := i.locReader.readUvarint()
if err != nil {
return fmt.Errorf("error reading location end: %v", err)
}
// read off num array pos
numArrayPos, err := i.locReader.readUvarint()
if err != nil {
return fmt.Errorf("error reading location num array pos: %v", err)
}
l.field = i.postings.sb.fieldsInv[fieldID]
l.pos = pos
l.start = start
l.end = end
if cap(l.ap) < int(numArrayPos) {
l.ap = make([]uint64, int(numArrayPos))
} else {
l.ap = l.ap[:int(numArrayPos)]
}
// read off array positions
for k := 0; k < int(numArrayPos); k++ {
ap, err := i.locReader.readUvarint()
if err != nil {
return fmt.Errorf("error reading array position: %v", err)
}
l.ap[k] = ap
}
return nil
}
// Next returns the next posting on the postings list, or nil at the end
func (i *PostingsIterator) Next() (segment.Posting, error) {
return i.nextAtOrAfter(0)
}
// Advance returns the posting at the specified docNum, or if it is not
// present, the next posting; nil is returned if the end is reached
func (i *PostingsIterator) Advance(docNum uint64) (segment.Posting, error) {
return i.nextAtOrAfter(docNum)
}
// nextAtOrAfter returns the next posting at or after atOrAfter, or nil at the end
func (i *PostingsIterator) nextAtOrAfter(atOrAfter uint64) (segment.Posting, error) {
docNum, exists, err := i.nextDocNumAtOrAfter(atOrAfter)
if err != nil || !exists {
return nil, err
}
i.next = Posting{} // clear the struct
rv := &i.next
rv.docNum = docNum
if !i.includeFreqNorm {
return rv, nil
}
var normBits uint64
var hasLocs bool
rv.freq, normBits, hasLocs, err = i.readFreqNormHasLocs()
if err != nil {
return nil, err
}
rv.norm = math.Float32frombits(uint32(normBits))
if i.includeLocs && hasLocs {
// prepare locations into reused slices, where we assume
// rv.freq >= "number of locs", since in a composite field,
// some component fields might have their IncludeTermVector
// flags disabled while other component fields are enabled
if cap(i.nextLocs) >= int(rv.freq) {
i.nextLocs = i.nextLocs[0:rv.freq]
} else {
i.nextLocs = make([]Location, rv.freq, rv.freq*2)
}
if cap(i.nextSegmentLocs) < int(rv.freq) {
i.nextSegmentLocs = make([]segment.Location, rv.freq, rv.freq*2)
}
rv.locs = i.nextSegmentLocs[:0]
numLocsBytes, err := i.locReader.readUvarint()
if err != nil {
return nil, fmt.Errorf("error reading location numLocsBytes: %v", err)
}
j := 0
startBytesRemaining := i.locReader.Len() // # bytes remaining in the locReader
for startBytesRemaining-i.locReader.Len() < int(numLocsBytes) {
err := i.readLocation(&i.nextLocs[j])
if err != nil {
return nil, err
}
rv.locs = append(rv.locs, &i.nextLocs[j])
j++
}
}
return rv, nil
}
// nextDocNumAtOrAfter returns the next docNum on the postings list that is at
// or after atOrAfter, and also sets up the currChunk / loc related fields of
// the iterator.
func (i *PostingsIterator) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool, error) {
if i.normBits1Hit != 0 {
if i.docNum1Hit == DocNum1HitFinished {
return 0, false, nil
}
if i.docNum1Hit < atOrAfter {
// advanced past our 1-hit
i.docNum1Hit = DocNum1HitFinished // consume our 1-hit docNum
return 0, false, nil
}
docNum := i.docNum1Hit
i.docNum1Hit = DocNum1HitFinished // consume our 1-hit docNum
return docNum, true, nil
}
if i.Actual == nil || !i.Actual.HasNext() {
return 0, false, nil
}
if i.postings == nil || i.postings.postings == i.ActualBM {
return i.nextDocNumAtOrAfterClean(atOrAfter)
}
i.Actual.AdvanceIfNeeded(uint32(atOrAfter))
if !i.Actual.HasNext() {
// couldn't find anything
return 0, false, nil
}
n := i.Actual.Next()
allN := i.all.Next()
chunkSize, err := getChunkSize(i.postings.sb.chunkMode, i.postings.postings.GetCardinality(), i.postings.sb.numDocs)
if err != nil {
return 0, false, err
}
nChunk := n / uint32(chunkSize)
// when allN becomes >= to here, then allN is in the same chunk as nChunk.
allNReachesNChunk := nChunk * uint32(chunkSize)
// n is the next actual hit (excluding some postings), and
// allN is the next hit in the full postings, and
// if they don't match, move 'all' forwards until they do
for allN != n {
// we've reached same chunk, so move the freq/norm/loc decoders forward
if i.includeFreqNorm && allN >= allNReachesNChunk {
err := i.currChunkNext(nChunk)
if err != nil {
return 0, false, err
}
}
allN = i.all.Next()
}
if i.includeFreqNorm && (i.currChunk != nChunk || i.freqNormReader.isNil()) {
err := i.loadChunk(int(nChunk))
if err != nil {
return 0, false, fmt.Errorf("error loading chunk: %v", err)
}
}
return uint64(n), true, nil
}
// optimization when the postings list is "clean" (e.g., no updates &
// no deletions) where the all bitmap is the same as the actual bitmap
func (i *PostingsIterator) nextDocNumAtOrAfterClean(
atOrAfter uint64) (uint64, bool, error) {
if !i.includeFreqNorm {
i.Actual.AdvanceIfNeeded(uint32(atOrAfter))
if !i.Actual.HasNext() {
return 0, false, nil // couldn't find anything
}
return uint64(i.Actual.Next()), true, nil
}
chunkSize, err := getChunkSize(i.postings.sb.chunkMode, i.postings.postings.GetCardinality(), i.postings.sb.numDocs)
if err != nil {
return 0, false, err
}
// freq-norm's needed, so maintain freq-norm chunk reader
sameChunkNexts := 0 // # of times we called Next() in the same chunk
n := i.Actual.Next()
nChunk := n / uint32(chunkSize)
for uint64(n) < atOrAfter && i.Actual.HasNext() {
n = i.Actual.Next()
nChunkPrev := nChunk
nChunk = n / uint32(chunkSize)
if nChunk != nChunkPrev {
sameChunkNexts = 0
} else {
sameChunkNexts += 1
}
}
if uint64(n) < atOrAfter {
// couldn't find anything
return 0, false, nil
}
for j := 0; j < sameChunkNexts; j++ {
err := i.currChunkNext(nChunk)
if err != nil {
return 0, false, fmt.Errorf("error optimized currChunkNext: %v", err)
}
}
if i.currChunk != nChunk || i.freqNormReader.isNil() {
err := i.loadChunk(int(nChunk))
if err != nil {
return 0, false, fmt.Errorf("error loading chunk: %v", err)
}
}
return uint64(n), true, nil
}
func (i *PostingsIterator) currChunkNext(nChunk uint32) error {
if i.currChunk != nChunk || i.freqNormReader.isNil() {
err := i.loadChunk(int(nChunk))
if err != nil {
return fmt.Errorf("error loading chunk: %v", err)
}
}
// read off freq/offsets even though we don't care about them
hasLocs, err := i.skipFreqNormReadHasLocs()
if err != nil {
return err
}
if i.includeLocs && hasLocs {
numLocsBytes, err := i.locReader.readUvarint()
if err != nil {
return fmt.Errorf("error reading location numLocsBytes: %v", err)
}
// skip over all the location bytes
i.locReader.SkipBytes(int(numLocsBytes))
}
return nil
}
// DocNum1Hit returns the docNum and true if this is "1-hit" optimized
// and the docNum is available.
func (p *PostingsIterator) DocNum1Hit() (uint64, bool) {
if p.normBits1Hit != 0 && p.docNum1Hit != DocNum1HitFinished {
return p.docNum1Hit, true
}
return 0, false
}
// ActualBitmap returns the underlying actual bitmap
// which can be used up the stack for optimizations
func (p *PostingsIterator) ActualBitmap() *roaring.Bitmap {
return p.ActualBM
}
// ReplaceActual replaces the ActualBM with the provided
// bitmap
func (p *PostingsIterator) ReplaceActual(abm *roaring.Bitmap) {
p.ActualBM = abm
p.Actual = abm.Iterator()
}
// PostingsIteratorFromBitmap constructs a PostingsIterator given an
// "actual" bitmap.
func PostingsIteratorFromBitmap(bm *roaring.Bitmap,
includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) {
return &PostingsIterator{
ActualBM: bm,
Actual: bm.Iterator(),
includeFreqNorm: includeFreqNorm,
includeLocs: includeLocs,
}, nil
}
// PostingsIteratorFrom1Hit constructs a PostingsIterator given a
// 1-hit docNum.
func PostingsIteratorFrom1Hit(docNum1Hit uint64,
includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) {
return &PostingsIterator{
docNum1Hit: docNum1Hit,
normBits1Hit: NormBits1Hit,
includeFreqNorm: includeFreqNorm,
includeLocs: includeLocs,
}, nil
}
// Posting is a single entry in a postings list
type Posting struct {
docNum uint64
freq uint64
norm float32
locs []segment.Location
}
func (p *Posting) Size() int {
sizeInBytes := reflectStaticSizePosting
for _, entry := range p.locs {
sizeInBytes += entry.Size()
}
return sizeInBytes
}
// Number returns the document number of this posting in this segment
func (p *Posting) Number() uint64 {
return p.docNum
}
// Frequency returns the frequency of occurrence of this term in this doc/field
func (p *Posting) Frequency() uint64 {
return p.freq
}
// Norm returns the normalization factor for this posting
func (p *Posting) Norm() float64 {
return float64(p.norm)
}
// Locations returns the location information for each occurrence
func (p *Posting) Locations() []segment.Location {
return p.locs
}
// Location represents the location of a single occurrence
type Location struct {
field string
pos uint64
start uint64
end uint64
ap []uint64
}
func (l *Location) Size() int {
return reflectStaticSizeLocation +
len(l.field) +
len(l.ap)*size.SizeOfUint64
}
// Field returns the name of the field (useful in composite fields to know
// which original field the value came from)
func (l *Location) Field() string {
return l.field
}
// Start returns the start byte offset of this occurrence
func (l *Location) Start() uint64 {
return l.start
}
// End returns the end byte offset of this occurrence
func (l *Location) End() uint64 {
return l.end
}
// Pos returns the 1-based phrase position of this occurrence
func (l *Location) Pos() uint64 {
return l.pos
}
// ArrayPositions returns the array position vector associated with this occurrence
func (l *Location) ArrayPositions() []uint64 {
return l.ap
}

43
vendor/github.com/blevesearch/zap/v14/read.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
// Copyright (c) 2017 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import "encoding/binary"
func (s *SegmentBase) getDocStoredMetaAndCompressed(docNum uint64) ([]byte, []byte) {
_, storedOffset, n, metaLen, dataLen := s.getDocStoredOffsets(docNum)
meta := s.mem[storedOffset+n : storedOffset+n+metaLen]
data := s.mem[storedOffset+n+metaLen : storedOffset+n+metaLen+dataLen]
return meta, data
}
func (s *SegmentBase) getDocStoredOffsets(docNum uint64) (
uint64, uint64, uint64, uint64, uint64) {
indexOffset := s.storedIndexOffset + (8 * docNum)
storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8])
var n uint64
metaLen, read := binary.Uvarint(s.mem[storedOffset : storedOffset+binary.MaxVarintLen64])
n += uint64(read)
dataLen, read := binary.Uvarint(s.mem[storedOffset+n : storedOffset+n+binary.MaxVarintLen64])
n += uint64(read)
return indexOffset, storedOffset, n, metaLen, dataLen
}

572
vendor/github.com/blevesearch/zap/v14/segment.go generated vendored Normal file
View File

@ -0,0 +1,572 @@
// Copyright (c) 2017 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"os"
"sync"
"unsafe"
"github.com/RoaringBitmap/roaring"
"github.com/blevesearch/bleve/index/scorch/segment"
"github.com/blevesearch/bleve/size"
"github.com/couchbase/vellum"
mmap "github.com/blevesearch/mmap-go"
"github.com/golang/snappy"
)
var reflectStaticSizeSegmentBase int
func init() {
var sb SegmentBase
reflectStaticSizeSegmentBase = int(unsafe.Sizeof(sb))
}
// Open returns a zap impl of a segment
func (*ZapPlugin) Open(path string) (segment.Segment, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
mm, err := mmap.Map(f, mmap.RDONLY, 0)
if err != nil {
// mmap failed, try to close the file
_ = f.Close()
return nil, err
}
rv := &Segment{
SegmentBase: SegmentBase{
mem: mm[0 : len(mm)-FooterSize],
fieldsMap: make(map[string]uint16),
fieldDvReaders: make(map[uint16]*docValueReader),
fieldFSTs: make(map[uint16]*vellum.FST),
},
f: f,
mm: mm,
path: path,
refs: 1,
}
rv.SegmentBase.updateSize()
err = rv.loadConfig()
if err != nil {
_ = rv.Close()
return nil, err
}
err = rv.loadFields()
if err != nil {
_ = rv.Close()
return nil, err
}
err = rv.loadDvReaders()
if err != nil {
_ = rv.Close()
return nil, err
}
return rv, nil
}
// SegmentBase is a memory only, read-only implementation of the
// segment.Segment interface, using zap's data representation.
type SegmentBase struct {
mem []byte
memCRC uint32
chunkMode uint32
fieldsMap map[string]uint16 // fieldName -> fieldID+1
fieldsInv []string // fieldID -> fieldName
numDocs uint64
storedIndexOffset uint64
fieldsIndexOffset uint64
docValueOffset uint64
dictLocs []uint64
fieldDvReaders map[uint16]*docValueReader // naive chunk cache per field
fieldDvNames []string // field names cached in fieldDvReaders
size uint64
m sync.Mutex
fieldFSTs map[uint16]*vellum.FST
}
func (sb *SegmentBase) Size() int {
return int(sb.size)
}
func (sb *SegmentBase) updateSize() {
sizeInBytes := reflectStaticSizeSegmentBase +
cap(sb.mem)
// fieldsMap
for k := range sb.fieldsMap {
sizeInBytes += (len(k) + size.SizeOfString) + size.SizeOfUint16
}
// fieldsInv, dictLocs
for _, entry := range sb.fieldsInv {
sizeInBytes += len(entry) + size.SizeOfString
}
sizeInBytes += len(sb.dictLocs) * size.SizeOfUint64
// fieldDvReaders
for _, v := range sb.fieldDvReaders {
sizeInBytes += size.SizeOfUint16 + size.SizeOfPtr
if v != nil {
sizeInBytes += v.size()
}
}
sb.size = uint64(sizeInBytes)
}
func (sb *SegmentBase) AddRef() {}
func (sb *SegmentBase) DecRef() (err error) { return nil }
func (sb *SegmentBase) Close() (err error) { return nil }
// Segment implements a persisted segment.Segment interface, by
// embedding an mmap()'ed SegmentBase.
type Segment struct {
SegmentBase
f *os.File
mm mmap.MMap
path string
version uint32
crc uint32
m sync.Mutex // Protects the fields that follow.
refs int64
}
func (s *Segment) Size() int {
// 8 /* size of file pointer */
// 4 /* size of version -> uint32 */
// 4 /* size of crc -> uint32 */
sizeOfUints := 16
sizeInBytes := (len(s.path) + size.SizeOfString) + sizeOfUints
// mutex, refs -> int64
sizeInBytes += 16
// do not include the mmap'ed part
return sizeInBytes + s.SegmentBase.Size() - cap(s.mem)
}
func (s *Segment) AddRef() {
s.m.Lock()
s.refs++
s.m.Unlock()
}
func (s *Segment) DecRef() (err error) {
s.m.Lock()
s.refs--
if s.refs == 0 {
err = s.closeActual()
}
s.m.Unlock()
return err
}
func (s *Segment) loadConfig() error {
crcOffset := len(s.mm) - 4
s.crc = binary.BigEndian.Uint32(s.mm[crcOffset : crcOffset+4])
verOffset := crcOffset - 4
s.version = binary.BigEndian.Uint32(s.mm[verOffset : verOffset+4])
if s.version != Version {
return fmt.Errorf("unsupported version %d", s.version)
}
chunkOffset := verOffset - 4
s.chunkMode = binary.BigEndian.Uint32(s.mm[chunkOffset : chunkOffset+4])
docValueOffset := chunkOffset - 8
s.docValueOffset = binary.BigEndian.Uint64(s.mm[docValueOffset : docValueOffset+8])
fieldsIndexOffset := docValueOffset - 8
s.fieldsIndexOffset = binary.BigEndian.Uint64(s.mm[fieldsIndexOffset : fieldsIndexOffset+8])
storedIndexOffset := fieldsIndexOffset - 8
s.storedIndexOffset = binary.BigEndian.Uint64(s.mm[storedIndexOffset : storedIndexOffset+8])
numDocsOffset := storedIndexOffset - 8
s.numDocs = binary.BigEndian.Uint64(s.mm[numDocsOffset : numDocsOffset+8])
return nil
}
func (s *SegmentBase) loadFields() error {
// NOTE: for now we assume the fields index immediately precedes the footer;
// if this changes, adjust accordingly (or store an explicit length).
// s.mem was sliced from s.mm in Open().
fieldsIndexEnd := uint64(len(s.mem))
// iterate through fields index
var fieldID uint64
for s.fieldsIndexOffset+(8*fieldID) < fieldsIndexEnd {
addr := binary.BigEndian.Uint64(s.mem[s.fieldsIndexOffset+(8*fieldID) : s.fieldsIndexOffset+(8*fieldID)+8])
dictLoc, read := binary.Uvarint(s.mem[addr:fieldsIndexEnd])
n := uint64(read)
s.dictLocs = append(s.dictLocs, dictLoc)
var nameLen uint64
nameLen, read = binary.Uvarint(s.mem[addr+n : fieldsIndexEnd])
n += uint64(read)
name := string(s.mem[addr+n : addr+n+nameLen])
s.fieldsInv = append(s.fieldsInv, name)
s.fieldsMap[name] = uint16(fieldID + 1)
fieldID++
}
return nil
}
// Dictionary returns the term dictionary for the specified field
func (s *SegmentBase) Dictionary(field string) (segment.TermDictionary, error) {
dict, err := s.dictionary(field)
if err == nil && dict == nil {
return &segment.EmptyDictionary{}, nil
}
return dict, err
}
func (sb *SegmentBase) dictionary(field string) (rv *Dictionary, err error) {
fieldIDPlus1 := sb.fieldsMap[field]
if fieldIDPlus1 > 0 {
rv = &Dictionary{
sb: sb,
field: field,
fieldID: fieldIDPlus1 - 1,
}
dictStart := sb.dictLocs[rv.fieldID]
if dictStart > 0 {
var ok bool
sb.m.Lock()
if rv.fst, ok = sb.fieldFSTs[rv.fieldID]; !ok {
// read the length of the vellum data
vellumLen, read := binary.Uvarint(sb.mem[dictStart : dictStart+binary.MaxVarintLen64])
fstBytes := sb.mem[dictStart+uint64(read) : dictStart+uint64(read)+vellumLen]
rv.fst, err = vellum.Load(fstBytes)
if err != nil {
sb.m.Unlock()
return nil, fmt.Errorf("dictionary field %s vellum err: %v", field, err)
}
sb.fieldFSTs[rv.fieldID] = rv.fst
}
sb.m.Unlock()
rv.fstReader, err = rv.fst.Reader()
if err != nil {
return nil, fmt.Errorf("dictionary field %s vellum reader err: %v", field, err)
}
}
}
return rv, nil
}
// visitDocumentCtx holds data structures that are reusable across
// multiple VisitDocument() calls to avoid memory allocations
type visitDocumentCtx struct {
buf []byte
reader bytes.Reader
arrayPos []uint64
}
var visitDocumentCtxPool = sync.Pool{
New: func() interface{} {
reuse := &visitDocumentCtx{}
return reuse
},
}
// VisitDocument invokes the DocumentFieldValueVisitor for each stored field
// for the specified doc number
func (s *SegmentBase) VisitDocument(num uint64, visitor segment.DocumentFieldValueVisitor) error {
vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx)
defer visitDocumentCtxPool.Put(vdc)
return s.visitDocument(vdc, num, visitor)
}
func (s *SegmentBase) visitDocument(vdc *visitDocumentCtx, num uint64,
visitor segment.DocumentFieldValueVisitor) error {
// first make sure this is a valid number in this segment
if num < s.numDocs {
meta, compressed := s.getDocStoredMetaAndCompressed(num)
vdc.reader.Reset(meta)
// handle _id field special case
idFieldValLen, err := binary.ReadUvarint(&vdc.reader)
if err != nil {
return err
}
idFieldVal := compressed[:idFieldValLen]
keepGoing := visitor("_id", byte('t'), idFieldVal, nil)
if !keepGoing {
visitDocumentCtxPool.Put(vdc)
return nil
}
// handle non-"_id" fields
compressed = compressed[idFieldValLen:]
uncompressed, err := snappy.Decode(vdc.buf[:cap(vdc.buf)], compressed)
if err != nil {
return err
}
for keepGoing {
field, err := binary.ReadUvarint(&vdc.reader)
if err == io.EOF {
break
}
if err != nil {
return err
}
typ, err := binary.ReadUvarint(&vdc.reader)
if err != nil {
return err
}
offset, err := binary.ReadUvarint(&vdc.reader)
if err != nil {
return err
}
l, err := binary.ReadUvarint(&vdc.reader)
if err != nil {
return err
}
numap, err := binary.ReadUvarint(&vdc.reader)
if err != nil {
return err
}
var arrayPos []uint64
if numap > 0 {
if cap(vdc.arrayPos) < int(numap) {
vdc.arrayPos = make([]uint64, numap)
}
arrayPos = vdc.arrayPos[:numap]
for i := 0; i < int(numap); i++ {
ap, err := binary.ReadUvarint(&vdc.reader)
if err != nil {
return err
}
arrayPos[i] = ap
}
}
value := uncompressed[offset : offset+l]
keepGoing = visitor(s.fieldsInv[field], byte(typ), value, arrayPos)
}
vdc.buf = uncompressed
}
return nil
}
// DocID returns the value of the _id field for the given docNum
func (s *SegmentBase) DocID(num uint64) ([]byte, error) {
if num >= s.numDocs {
return nil, nil
}
vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx)
meta, compressed := s.getDocStoredMetaAndCompressed(num)
vdc.reader.Reset(meta)
// handle _id field special case
idFieldValLen, err := binary.ReadUvarint(&vdc.reader)
if err != nil {
return nil, err
}
idFieldVal := compressed[:idFieldValLen]
visitDocumentCtxPool.Put(vdc)
return idFieldVal, nil
}
// Count returns the number of documents in this segment.
func (s *SegmentBase) Count() uint64 {
return s.numDocs
}
// DocNumbers returns a bitset corresponding to the doc numbers of all the
// provided _id strings
func (s *SegmentBase) DocNumbers(ids []string) (*roaring.Bitmap, error) {
rv := roaring.New()
if len(s.fieldsMap) > 0 {
idDict, err := s.dictionary("_id")
if err != nil {
return nil, err
}
postingsList := emptyPostingsList
sMax, err := idDict.fst.GetMaxKey()
if err != nil {
return nil, err
}
sMaxStr := string(sMax)
filteredIds := make([]string, 0, len(ids))
for _, id := range ids {
if id <= sMaxStr {
filteredIds = append(filteredIds, id)
}
}
for _, id := range filteredIds {
postingsList, err = idDict.postingsList([]byte(id), nil, postingsList)
if err != nil {
return nil, err
}
postingsList.OrInto(rv)
}
}
return rv, nil
}
// Fields returns the field names used in this segment
func (s *SegmentBase) Fields() []string {
return s.fieldsInv
}
// Path returns the path of this segment on disk
func (s *Segment) Path() string {
return s.path
}
// Close releases all resources associated with this segment
func (s *Segment) Close() (err error) {
return s.DecRef()
}
func (s *Segment) closeActual() (err error) {
if s.mm != nil {
err = s.mm.Unmap()
}
// try to close file even if unmap failed
if s.f != nil {
err2 := s.f.Close()
if err == nil {
// try to return first error
err = err2
}
}
return
}
// some helpers I started adding for the command-line utility
// Data returns the underlying mmap'ed data slice
func (s *Segment) Data() []byte {
return s.mm
}
// CRC returns the CRC value stored in the file footer
func (s *Segment) CRC() uint32 {
return s.crc
}
// Version returns the file version in the file footer
func (s *Segment) Version() uint32 {
return s.version
}
// ChunkMode returns the chunk mode in the file footer
func (s *Segment) ChunkMode() uint32 {
return s.chunkMode
}
// FieldsIndexOffset returns the fields index offset in the file footer
func (s *Segment) FieldsIndexOffset() uint64 {
return s.fieldsIndexOffset
}
// StoredIndexOffset returns the stored value index offset in the file footer
func (s *Segment) StoredIndexOffset() uint64 {
return s.storedIndexOffset
}
// DocValueOffset returns the docValue offset in the file footer
func (s *Segment) DocValueOffset() uint64 {
return s.docValueOffset
}
// NumDocs returns the number of documents in the file footer
func (s *Segment) NumDocs() uint64 {
return s.numDocs
}
// DictAddr is a helper function to compute the file offset where the
// dictionary is stored for the specified field.
func (s *Segment) DictAddr(field string) (uint64, error) {
fieldIDPlus1, ok := s.fieldsMap[field]
if !ok {
return 0, fmt.Errorf("no such field '%s'", field)
}
return s.dictLocs[fieldIDPlus1-1], nil
}
func (s *SegmentBase) loadDvReaders() error {
if s.docValueOffset == fieldNotUninverted || s.numDocs == 0 {
return nil
}
var read uint64
for fieldID, field := range s.fieldsInv {
var fieldLocStart, fieldLocEnd uint64
var n int
fieldLocStart, n = binary.Uvarint(s.mem[s.docValueOffset+read : s.docValueOffset+read+binary.MaxVarintLen64])
if n <= 0 {
return fmt.Errorf("loadDvReaders: failed to read the docvalue offset start for field %d", fieldID)
}
read += uint64(n)
fieldLocEnd, n = binary.Uvarint(s.mem[s.docValueOffset+read : s.docValueOffset+read+binary.MaxVarintLen64])
if n <= 0 {
return fmt.Errorf("loadDvReaders: failed to read the docvalue offset end for field %d", fieldID)
}
read += uint64(n)
fieldDvReader, err := s.loadFieldDocValueReader(field, fieldLocStart, fieldLocEnd)
if err != nil {
return err
}
if fieldDvReader != nil {
s.fieldDvReaders[uint16(fieldID)] = fieldDvReader
s.fieldDvNames = append(s.fieldDvNames, field)
}
}
return nil
}

145
vendor/github.com/blevesearch/zap/v14/write.go generated vendored Normal file
View File

@ -0,0 +1,145 @@
// Copyright (c) 2017 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zap
import (
"encoding/binary"
"io"
"github.com/RoaringBitmap/roaring"
)
// writes out the length of the roaring bitmap in bytes as a varint,
// then writes out the roaring bitmap itself
func writeRoaringWithLen(r *roaring.Bitmap, w io.Writer,
reuseBufVarint []byte) (int, error) {
buf, err := r.ToBytes()
if err != nil {
return 0, err
}
var tw int
// write out the length
n := binary.PutUvarint(reuseBufVarint, uint64(len(buf)))
nw, err := w.Write(reuseBufVarint[:n])
tw += nw
if err != nil {
return tw, err
}
// write out the roaring bytes
nw, err = w.Write(buf)
tw += nw
if err != nil {
return tw, err
}
return tw, nil
}
func persistFields(fieldsInv []string, w *CountHashWriter, dictLocs []uint64) (uint64, error) {
var rv uint64
var fieldsOffsets []uint64
for fieldID, fieldName := range fieldsInv {
// record start of this field
fieldsOffsets = append(fieldsOffsets, uint64(w.Count()))
// write out the dict location and field name length
_, err := writeUvarints(w, dictLocs[fieldID], uint64(len(fieldName)))
if err != nil {
return 0, err
}
// write out the field name
_, err = w.Write([]byte(fieldName))
if err != nil {
return 0, err
}
}
// now write out the fields index
rv = uint64(w.Count())
for fieldID := range fieldsInv {
err := binary.Write(w, binary.BigEndian, fieldsOffsets[fieldID])
if err != nil {
return 0, err
}
}
return rv, nil
}
// FooterSize is the size of the footer record in bytes
// crc + ver + chunk + field offset + stored offset + num docs + docValueOffset
const FooterSize = 4 + 4 + 4 + 8 + 8 + 8 + 8
func persistFooter(numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset uint64,
chunkMode uint32, crcBeforeFooter uint32, writerIn io.Writer) error {
w := NewCountHashWriter(writerIn)
w.crc = crcBeforeFooter
// write out the number of docs
err := binary.Write(w, binary.BigEndian, numDocs)
if err != nil {
return err
}
// write out the stored field index location:
err = binary.Write(w, binary.BigEndian, storedIndexOffset)
if err != nil {
return err
}
// write out the field index location
err = binary.Write(w, binary.BigEndian, fieldsIndexOffset)
if err != nil {
return err
}
// write out the fieldDocValue location
err = binary.Write(w, binary.BigEndian, docValueOffset)
if err != nil {
return err
}
// write out 32-bit chunk factor
err = binary.Write(w, binary.BigEndian, chunkMode)
if err != nil {
return err
}
// write out 32-bit version
err = binary.Write(w, binary.BigEndian, Version)
if err != nil {
return err
}
// write out CRC-32 of everything up to but not including this CRC
err = binary.Write(w, binary.BigEndian, w.crc)
if err != nil {
return err
}
return nil
}
func writeUvarints(w io.Writer, vals ...uint64) (tw int, err error) {
buf := make([]byte, binary.MaxVarintLen64)
for _, val := range vals {
n := binary.PutUvarint(buf, val)
var nw int
nw, err = w.Write(buf[:n])
tw += nw
if err != nil {
return tw, err
}
}
return tw, err
}

177
vendor/github.com/blevesearch/zap/v14/zap.md generated vendored Normal file
View File

@ -0,0 +1,177 @@
# ZAP File Format
## Legend
### Sections
|========|
| | section
|========|
### Fixed-size fields
|--------| |----| |--| |-|
| | uint64 | | uint32 | | uint16 | | uint8
|--------| |----| |--| |-|
### Varints
|~~~~~~~~|
| | varint(up to uint64)
|~~~~~~~~|
### Arbitrary-length fields
|--------...---|
| | arbitrary-length field (string, vellum, roaring bitmap)
|--------...---|
### Chunked data
[--------]
[ ]
[--------]
## Overview
The Footer section describes the configuration of a particular ZAP file. The footer format is version-dependent, so it is necessary to check the `V` field before parsing.
|==================================================|
| Stored Fields |
|==================================================|
|-----> | Stored Fields Index |
| |==================================================|
| | Dictionaries + Postings + DocValues |
| |==================================================|
| |---> | DocValues Index |
| | |==================================================|
| | | Fields |
| | |==================================================|
| | |-> | Fields Index |
| | | |========|========|========|========|====|====|====|
| | | | D# | SF | F | FDV | CF | V | CC | (Footer)
| | | |========|====|===|====|===|====|===|====|====|====|
| | | | | |
|-+-+-----------------| | |
| |--------------------------| |
|-------------------------------------|
D#. Number of Docs.
SF. Stored Fields Index Offset.
F. Field Index Offset.
FDV. Field DocValue Offset.
CF. Chunk Factor.
V. Version.
CC. CRC32.
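For concreteness, a minimal decoding sketch, assuming `data` holds the whole mmapped file and using Go's `encoding/binary`; the package and helper names are illustrative only, the offsets mirror the 44-byte footer layout above.
package zapdoc
import "encoding/binary"
// parseFooter reads the footer fields described above from the last 44 bytes.
func parseFooter(data []byte) (numDocs, sf, f, fdv uint64, cf, v, cc uint32) {
	n := len(data)
	cc = binary.BigEndian.Uint32(data[n-4 : n])          // CC
	v = binary.BigEndian.Uint32(data[n-8 : n-4])         // V
	cf = binary.BigEndian.Uint32(data[n-12 : n-8])       // CF
	fdv = binary.BigEndian.Uint64(data[n-20 : n-12])     // FDV
	f = binary.BigEndian.Uint64(data[n-28 : n-20])       // F
	sf = binary.BigEndian.Uint64(data[n-36 : n-28])      // SF
	numDocs = binary.BigEndian.Uint64(data[n-44 : n-36]) // D#
	return
}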
## Stored Fields
The Stored Fields Index is `D#` consecutive 64-bit unsigned integers: offsets at which the relevant Stored Fields Data records are located.
0 [SF] [SF + D# * 8]
| Stored Fields | Stored Fields Index |
|================================|==================================|
| | |
| |--------------------| ||--------|--------|. . .|--------||
| |-> | Stored Fields Data | || 0 | 1 | | D# - 1 ||
| | |--------------------| ||--------|----|---|. . .|--------||
| | | | |
|===|============================|==============|===================|
| |
|-------------------------------------------|
Stored Fields Data is an arbitrary-size record, which consists of metadata and [Snappy](https://github.com/golang/snappy)-compressed data.
Stored Fields Data
|~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~|
| MDS | CDS | MD | CD |
|~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~|
MDS. Metadata size.
CDS. Compressed data size.
MD. Metadata.
CD. Snappy-compressed data.
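A minimal sketch, assuming `rec` points at the start of one Stored Fields Data record (the package and helper names are illustrative only), of splitting the record into its metadata and compressed data.
package zapdoc
import "encoding/binary"
// splitStoredFieldsData reads the MDS and CDS varints, then slices out MD and CD.
func splitStoredFieldsData(rec []byte) (meta, compressed []byte) {
	mds, n1 := binary.Uvarint(rec)      // MDS: metadata size
	cds, n2 := binary.Uvarint(rec[n1:]) // CDS: compressed data size
	off := uint64(n1 + n2)
	return rec[off : off+mds], rec[off+mds : off+mds+cds]
}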
## Fields
The Fields Index section is located between addresses `F` and `len(file) - len(footer)` and consists of `uint64` values (`F1`, `F2`, ...) which are offsets to records in the Fields section. We have `F# = (len(file) - len(footer) - F) / sizeof(uint64)` fields.
(...) [F] [F + F#]
| Fields | Fields Index. |
|================================|================================|
| | |
| |~~~~~~~~|~~~~~~~~|---...---|||--------|--------|...|--------||
||->| Dict | Length | Name ||| 0 | 1 | | F# - 1 ||
|| |~~~~~~~~|~~~~~~~~|---...---|||--------|----|---|...|--------||
|| | | |
||===============================|==============|=================|
| |
|----------------------------------------------|
## Dictionaries + Postings
Each field has its own dictionary, encoded in [Vellum](https://github.com/couchbase/vellum) format. The dictionary consists of `(term, offset)` pairs, where `offset` indicates the position of the postings (the list of documents) for that particular term (a minimal lookup sketch follows the diagram below).
|================================================================|- Dictionaries +
| | Postings +
| | DocValues
| Freq/Norm (chunked) |
| [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] |
| |->[ Freq | Norm (float32 under varint) ] |
| | [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] |
| | |
| |------------------------------------------------------------| |
| Location Details (chunked) | |
| [~~~~~~|~~~~~|~~~~~~~|~~~~~|~~~~~~|~~~~~~~~|~~~~~] | |
| |->[ Size | Pos | Start | End | Arr# | ArrPos | ... ] | |
| | [~~~~~~|~~~~~|~~~~~~~|~~~~~|~~~~~~|~~~~~~~~|~~~~~] | |
| | | |
| |----------------------| | |
| Postings List | | |
| |~~~~~~~~|~~~~~|~~|~~~~~~~~|-----------...--| | |
| |->| F/N | LD | Length | ROARING BITMAP | | |
| | |~~~~~|~~|~~~~~~~~|~~~~~~~~|-----------...--| | |
| | |----------------------------------------------| |
| |--------------------------------------| |
| Dictionary | |
| |~~~~~~~~|--------------------------|-...-| |
| |->| Length | VELLUM DATA : (TERM -> OFFSET) | |
| | |~~~~~~~~|----------------------------...-| |
| | |
|======|=========================================================|- DocValues Index
| | |
|======|=========================================================|- Fields
| | |
| |~~~~|~~~|~~~~~~~~|---...---| |
| | Dict | Length | Name | |
| |~~~~~~~~|~~~~~~~~|---...---| |
| |
|================================================================|
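A minimal lookup sketch, assuming the `github.com/couchbase/vellum` API and that `fstBytes` holds one field's VELLUM DATA region; the package and helper names are illustrative only, and the returned value is the term's postings offset if the term is present.
package zapdoc
import "github.com/couchbase/vellum"
// termPostingsOffset loads a field's dictionary FST and looks up a single term.
func termPostingsOffset(fstBytes []byte, term string) (offset uint64, found bool, err error) {
	fst, err := vellum.Load(fstBytes)
	if err != nil {
		return 0, false, err
	}
	return fst.Get([]byte(term))
}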
## DocValues
The DocValues Index is `F#` pairs of varints, one pair per field. Each pair indicates the start and end points of that field's DocValues slice.
|================================================================|
| |------...--| |
| |->| DocValues |<-| |
| | |------...--| | |
|==|=================|===========================================|- DocValues Index
||~|~~~~~~~~~|~~~~~~~|~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~||
|| DV1 START | DV1 STOP | . . . . . | DV(F#) START | DV(F#) END ||
||~~~~~~~~~~~|~~~~~~~~~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~||
|================================================================|
DocValues are chunked, Snappy-compressed values for each document and field.
[~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-]
[ Doc# in Chunk | Doc1 | Offset1 | ... | DocN | OffsetN | SNAPPY COMPRESSED DATA ]
[~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-]
The last 16 bytes describe the chunks.
|~~~~~~~~~~~~...~|----------------|----------------|
| Chunk Sizes | Chunk Size Arr | Chunk# |
|~~~~~~~~~~~~...~|----------------|----------------|

View File

@ -17,7 +17,6 @@ package certmagic
import (
"context"
"crypto/tls"
"encoding/base64"
"fmt"
"log"
weakrand "math/rand"
@ -96,13 +95,6 @@ func (am *ACMEManager) newACMEClient(useTestCA, interactive bool) (*acmeClient,
var caURL string
if useTestCA {
caURL = am.TestCA
// only use the default test CA if the CA is also
// the default CA; no point in testing against
// Let's Encrypt's staging server if we are not
// using their production server too
if caURL == "" && am.CA == DefaultACME.CA {
caURL = DefaultACME.TestCA
}
}
if caURL == "" {
caURL = am.CA
@ -175,7 +167,7 @@ func (am *ACMEManager) newACMEClient(useTestCA, interactive bool) (*acmeClient,
reg, err = client.Registration.RegisterWithExternalAccountBinding(registration.RegisterEABOptions{
TermsOfServiceAgreed: am.Agreed,
Kid: am.ExternalAccount.KeyID,
HmacEncoded: base64.StdEncoding.EncodeToString(am.ExternalAccount.HMAC),
HmacEncoded: am.ExternalAccount.HMAC,
})
} else {
reg, err = client.Registration.Register(registration.RegisterOptions{

View File

@ -108,7 +108,11 @@ func NewACMEManager(cfg *Config, template ACMEManager) *ACMEManager {
if template.CA == "" {
template.CA = DefaultACME.CA
}
if template.TestCA == "" {
if template.TestCA == "" && template.CA == DefaultACME.CA {
// only use the default test CA if the CA is also
// the default CA; no point in testing against
// Let's Encrypt's staging server if we are not
// using their production server too
template.TestCA = DefaultACME.TestCA
}
if template.Email == "" {
@ -332,7 +336,7 @@ func (am *ACMEManager) Revoke(ctx context.Context, cert CertificateResource) err
// binding an external account to an ACME account.
type ExternalAccountBinding struct {
KeyID string
HMAC []byte
HMAC string
}
// DefaultACME specifies the default settings

View File

@ -17,6 +17,7 @@ package certmagic
import (
"fmt"
"log"
weakrand "math/rand" // seeded elsewhere
"strings"
"sync"
"time"
@ -94,6 +95,9 @@ func NewCache(opts CacheOptions) *Cache {
if opts.RenewCheckInterval <= 0 {
opts.RenewCheckInterval = DefaultRenewCheckInterval
}
if opts.Capacity < 0 {
opts.Capacity = 0
}
// this must be set, because we cannot not
// safely assume that the Default Config
@ -110,7 +114,7 @@ func NewCache(opts CacheOptions) *Cache {
doneChan: make(chan struct{}),
}
go c.maintainAssets()
go c.maintainAssets(0)
return c
}
@ -153,6 +157,11 @@ type CacheOptions struct {
// How often to check certificates for renewal;
// if unset, DefaultRenewCheckInterval will be used.
RenewCheckInterval time.Duration
// Maximum number of certificates to allow in the cache.
// If reached, certificates will be randomly evicted to
// make room for new ones. 0 means unlimited.
Capacity int
}
// ConfigGetter is a function that returns a prepared,
@ -181,6 +190,25 @@ func (certCache *Cache) unsyncedCacheCertificate(cert Certificate) {
return
}
// if the cache is at capacity, make room for new cert
cacheSize := len(certCache.cache)
if certCache.options.Capacity > 0 && cacheSize >= certCache.options.Capacity {
// Go maps are "nondeterministic" but not actually random,
// so although we could just chop off the "front" of the
// map with less code, that is a heavily skewed eviction
// strategy; generating random numbers is cheap and
// ensures a much better distribution.
rnd := weakrand.Intn(cacheSize)
i := 0
for _, randomCert := range certCache.cache {
if i == rnd {
certCache.removeCertificate(randomCert)
break
}
i++
}
}
// store the certificate
certCache.cache[cert.hash] = cert

View File

@ -388,7 +388,7 @@ func (cfg *Config) obtainWithIssuer(ctx context.Context, issuer Issuer, name str
// ensure idempotency of the obtain operation for this name
lockKey := cfg.lockKey("cert_acme", name)
err := obtainLock(cfg.Storage, lockKey)
err := acquireLock(ctx, cfg.Storage, lockKey)
if err != nil {
return err
}
@ -474,7 +474,7 @@ func (cfg *Config) renewWithIssuer(ctx context.Context, issuer Issuer, name stri
// ensure idempotency of the renew operation for this name
lockKey := cfg.lockKey("cert_acme", name)
err := obtainLock(cfg.Storage, lockKey)
err := acquireLock(ctx, cfg.Storage, lockKey)
if err != nil {
return err
}

View File

@ -15,6 +15,7 @@
package certmagic
import (
"context"
"encoding/json"
"fmt"
"io"
@ -124,7 +125,7 @@ func (fs *FileStorage) Filename(key string) string {
// Lock obtains a lock named by the given key. It blocks
// until the lock can be obtained or an error is returned.
func (fs *FileStorage) Lock(key string) error {
func (fs *FileStorage) Lock(ctx context.Context, key string) error {
filename := fs.lockFilename(key)
for {
@ -168,8 +169,13 @@ func (fs *FileStorage) Lock(key string) error {
default:
// lockfile exists and is not stale;
// just wait a moment and try again
time.Sleep(fileLockPollInterval)
// just wait a moment and try again,
// or return if context cancelled
select {
case <-time.After(fileLockPollInterval):
case <-ctx.Done():
return ctx.Err()
}
}
}
}

View File

@ -1,6 +1,6 @@
module github.com/caddyserver/certmagic
go 1.13
go 1.14
require (
github.com/go-acme/lego/v3 v3.7.0

View File

@ -32,13 +32,19 @@ import (
// that loops indefinitely and, on a regular schedule, checks
// certificates for expiration and initiates a renewal of certs
// that are expiring soon. It also updates OCSP stapling. It
// should only be called once per cache. Panics are recovered.
func (certCache *Cache) maintainAssets() {
// should only be called once per cache. Panics are recovered,
// and if panicCount < 10, the function is called recursively,
// incrementing panicCount each time. Initial invocation should
// start panicCount at 0.
func (certCache *Cache) maintainAssets(panicCount int) {
defer func() {
if err := recover(); err != nil {
buf := make([]byte, stackTraceBufferSize)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic: certificate maintenance: %v\n%s", err, buf)
if panicCount < 10 {
certCache.maintainAssets(panicCount + 1)
}
}
}()

View File

@ -15,6 +15,7 @@
package certmagic
import (
"context"
"log"
"path"
"regexp"
@ -86,8 +87,11 @@ type Locker interface {
// To prevent deadlocks, all implementations (where this concern
// is relevant) should put a reasonable expiration on the lock in
// case Unlock is unable to be called due to some sort of network
// failure or system crash.
Lock(key string) error
// failure or system crash. Additionally, implementations should
// honor context cancellation as much as possible (in case the
// caller wishes to give up and free resources before the lock
// can be obtained).
Lock(ctx context.Context, key string) error
// Unlock releases the lock for key. This method must ONLY be
// called after a successful call to Lock, and only after the
@ -223,8 +227,8 @@ func CleanUpOwnLocks() {
}
}
func obtainLock(storage Storage, lockKey string) error {
err := storage.Lock(lockKey)
func acquireLock(ctx context.Context, storage Storage, lockKey string) error {
err := storage.Lock(ctx, lockKey)
if err == nil {
locksMu.Lock()
locks[lockKey] = storage
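Since Lock now takes a context, custom Locker implementations are expected to stop waiting when the caller cancels. A minimal in-memory sketch (not part of certmagic; assumes the context and sync packages are imported):

// memLocker is a hypothetical in-memory Locker illustrating how an
// implementation can honor context cancellation while waiting.
type memLocker struct {
	mu    sync.Mutex
	locks map[string]chan struct{}
}

func (m *memLocker) Lock(ctx context.Context, key string) error {
	for {
		m.mu.Lock()
		if m.locks == nil {
			m.locks = make(map[string]chan struct{})
		}
		ch, held := m.locks[key]
		if !held {
			m.locks[key] = make(chan struct{})
			m.mu.Unlock()
			return nil // lock acquired
		}
		m.mu.Unlock()
		select {
		case <-ch: // lock released; loop and try again
		case <-ctx.Done():
			return ctx.Err() // caller gave up
		}
	}
}

func (m *memLocker) Unlock(key string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if ch, ok := m.locks[key]; ok {
		delete(m.locks, key)
		close(ch) // wake any waiters
	}
	return nil
}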

View File

@ -1,23 +1,46 @@
language: go
sudo: false
os:
- linux
- osx
- osx
- windows
arch:
- amd64
- arm64
go:
- 1.11.x
- 1.12.x
- 1.13.x
- 1.14.x
- master
script:
- go vet ./...
- go test -v ./...
- go test -race ./...
- diff <(gofmt -d .) <("")
script:
- go vet ./...
- go test -race ./...
- go test -tags=noasm ./...
stages:
- gofmt
- test
matrix:
allow_failures:
- go: 'master'
fast_finish: true
fast_finish: true
include:
- stage: gofmt
go: 1.14.x
os: linux
arch: amd64
script:
- diff <(gofmt -d .) <(printf "")
- diff <(gofmt -d ./private) <(printf "")
- go install github.com/klauspost/asmfmt/cmd/asmfmt
- diff <(asmfmt -d .) <(printf "")
- stage: i386
go: 1.14.x
os: linux
arch: amd64
script:
- GOOS=linux GOARCH=386 go test .

View File

@ -12,11 +12,12 @@ Package home: https://github.com/klauspost/cpuid
[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg
[2]: https://godoc.org/github.com/klauspost/cpuid
[3]: https://travis-ci.org/klauspost/cpuid.svg
[3]: https://travis-ci.org/klauspost/cpuid.svg?branch=master
[4]: https://travis-ci.org/klauspost/cpuid
# features
## CPU Instructions
## x86 CPU Instructions
* **CMOV** (i686 CMOV)
* **NX** (NX (No-Execute) bit)
* **AMD3DNOW** (AMD 3DNOW)
@ -83,6 +84,35 @@ Package home: https://github.com/klauspost/cpuid
* **Cache line** (Probable size of a cache line).
* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs.
## ARM CPU features
Currently only `arm64` platforms are implemented.
* **FP** Single-precision and double-precision floating point
* **ASIMD** Advanced SIMD
* **EVTSTRM** Generic timer
* **AES** AES instructions
* **PMULL** Polynomial Multiply instructions (PMULL/PMULL2)
* **SHA1** SHA-1 instructions (SHA1C, etc)
* **SHA2** SHA-2 instructions (SHA256H, etc)
* **CRC32** CRC32/CRC32C instructions
* **ATOMICS** Large System Extensions (LSE)
* **FPHP** Half-precision floating point
* **ASIMDHP** Advanced SIMD half-precision floating point
* **ARMCPUID** Some CPU ID registers readable at user-level
* **ASIMDRDM** Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH)
* **JSCVT** Javascript-style double->int convert (FJCVTZS)
* **FCMA** Floating point complex number addition and multiplication
* **LRCPC** Weaker release consistency (LDAPR, etc)
* **DCPOP** Data cache clean to Point of Persistence (DC CVAP)
* **SHA3** SHA-3 instructions (EOR3, RAXI, XAR, BCAX)
* **SM3** SM3 instructions
* **SM4** SM4 instructions
* **ASIMDDP** SIMD Dot Product
* **SHA512** SHA512 instructions
* **SVE** Scalable Vector Extension
* **GPA** Generic Pointer Authentication
## CPU Vendor/VM
* **Intel**
* **AMD**
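The new ARM feature flags are queried the same way as the x86 ones, through the shared CPU variable. A small illustrative program:

package main

import (
	"fmt"

	"github.com/klauspost/cpuid"
)

func main() {
	fmt.Println("vendor:", cpuid.CPU.VendorString)
	fmt.Println("ARM features:", cpuid.CPU.Arm.Strings())
	if cpuid.CPU.ArmAES() && cpuid.CPU.ArmPMULL() {
		fmt.Println("hardware AES/PMULL acceleration available")
	}
}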

View File

@ -3,7 +3,7 @@
// Package cpuid provides information about the CPU running the current program.
//
// CPU features are detected on startup, and kept for fast access through the life of the application.
// Currently x86 / x64 (AMD64) is supported.
// Currently x86 / x64 (AMD64) as well as arm64 is supported.
//
// You can access the CPU information by accessing the shared CPU variable of the cpuid library.
//
@ -34,6 +34,8 @@ const (
XenHVM
Bhyve
Hygon
SiS
RDC
)
const (
@ -173,18 +175,76 @@ var flagNames = map[Flags]string{
}
/* all special features for arm64 should be defined here */
const (
/* extension instructions */
FP ArmFlags = 1 << iota
ASIMD
EVTSTRM
AES
PMULL
SHA1
SHA2
CRC32
ATOMICS
FPHP
ASIMDHP
ARMCPUID
ASIMDRDM
JSCVT
FCMA
LRCPC
DCPOP
SHA3
SM3
SM4
ASIMDDP
SHA512
SVE
GPA
)
var flagNamesArm = map[ArmFlags]string{
FP: "FP", // Single-precision and double-precision floating point
ASIMD: "ASIMD", // Advanced SIMD
EVTSTRM: "EVTSTRM", // Generic timer
AES: "AES", // AES instructions
PMULL: "PMULL", // Polynomial Multiply instructions (PMULL/PMULL2)
SHA1: "SHA1", // SHA-1 instructions (SHA1C, etc)
SHA2: "SHA2", // SHA-2 instructions (SHA256H, etc)
CRC32: "CRC32", // CRC32/CRC32C instructions
ATOMICS: "ATOMICS", // Large System Extensions (LSE)
FPHP: "FPHP", // Half-precision floating point
ASIMDHP: "ASIMDHP", // Advanced SIMD half-precision floating point
ARMCPUID: "CPUID", // Some CPU ID registers readable at user-level
ASIMDRDM: "ASIMDRDM", // Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH)
JSCVT: "JSCVT", // Javascript-style double->int convert (FJCVTZS)
FCMA: "FCMA", // Floating point complex number addition and multiplication
LRCPC: "LRCPC", // Weaker release consistency (LDAPR, etc)
DCPOP: "DCPOP", // Data cache clean to Point of Persistence (DC CVAP)
SHA3: "SHA3", // SHA-3 instructions (EOR3, RAXI, XAR, BCAX)
SM3: "SM3", // SM3 instructions
SM4: "SM4", // SM4 instructions
ASIMDDP: "ASIMDDP", // SIMD Dot Product
SHA512: "SHA512", // SHA512 instructions
SVE: "SVE", // Scalable Vector Extension
GPA: "GPA", // Generic Pointer Authentication
}
// CPUInfo contains information about the detected system CPU.
type CPUInfo struct {
BrandName string // Brand name reported by the CPU
VendorID Vendor // Comparable CPU vendor ID
Features Flags // Features of the CPU
PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable.
ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable.
LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
Family int // CPU family number
Model int // CPU model number
CacheLine int // Cache line size in bytes. Will be 0 if undetectable.
Hz int64 // Clock speed, if known
BrandName string // Brand name reported by the CPU
VendorID Vendor // Comparable CPU vendor ID
VendorString string // Raw vendor string.
Features Flags // Features of the CPU (x64)
Arm ArmFlags // Features of the CPU (arm)
PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable.
ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable.
LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
Family int // CPU family number
Model int // CPU model number
CacheLine int // Cache line size in bytes. Will be 0 if undetectable.
Hz int64 // Clock speed, if known
Cache struct {
L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected
L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected
@ -204,8 +264,7 @@ var rdtscpAsm func() (eax, ebx, ecx, edx uint32)
// CPU contains information about the CPU as detected on startup,
// or when Detect last was called.
//
// Use this as the primary entry point to you data,
// this way queries are
// Use this as the primary entry point to your data.
var CPU CPUInfo
func init() {
@ -221,19 +280,13 @@ func init() {
// If you call this, you must ensure that no other goroutine is accessing the
// exported CPU variable.
func Detect() {
CPU.maxFunc = maxFunctionID()
CPU.maxExFunc = maxExtendedFunction()
CPU.BrandName = brandName()
CPU.CacheLine = cacheLine()
CPU.Family, CPU.Model = familyModel()
CPU.Features = support()
CPU.SGX = hasSGX(CPU.Features&SGX != 0, CPU.Features&SGXLC != 0)
CPU.ThreadsPerCore = threadsPerCore()
CPU.LogicalCores = logicalCores()
CPU.PhysicalCores = physicalCores()
CPU.VendorID = vendorID()
CPU.Hz = hertz(CPU.BrandName)
CPU.cacheSize()
// Set defaults
CPU.ThreadsPerCore = 1
CPU.Cache.L1I = -1
CPU.Cache.L1D = -1
CPU.Cache.L2 = -1
CPU.Cache.L3 = -1
addInfo(&CPU)
}
// Generated here: http://play.golang.org/p/BxFH2Gdc0G
@ -679,29 +732,49 @@ func (c CPUInfo) VM() bool {
return false
}
// Flags contains detected cpu features and caracteristics
// Flags contains detected cpu features and characteristics
type Flags uint64
// ArmFlags contains detected ARM cpu features and characteristics
type ArmFlags uint64
// String returns a string representation of the detected
// CPU features.
func (f Flags) String() string {
return strings.Join(f.Strings(), ",")
}
// Strings returns and array of the detected features.
// Strings returns an array of the detected features.
func (f Flags) Strings() []string {
s := support()
r := make([]string, 0, 20)
for i := uint(0); i < 64; i++ {
key := Flags(1 << i)
val := flagNames[key]
if s&key != 0 {
if f&key != 0 {
r = append(r, val)
}
}
return r
}
// String returns a string representation of the detected
// CPU features.
func (f ArmFlags) String() string {
return strings.Join(f.Strings(), ",")
}
// Strings returns an array of the detected features.
func (f ArmFlags) Strings() []string {
r := make([]string, 0, 20)
for i := uint(0); i < 64; i++ {
key := ArmFlags(1 << i)
val := flagNamesArm[key]
if f&key != 0 {
r = append(r, val)
}
}
return r
}
func maxExtendedFunction() uint32 {
eax, _, _, _ := cpuid(0x80000000)
return eax
@ -726,12 +799,14 @@ func brandName() string {
func threadsPerCore() int {
mfi := maxFunctionID()
if mfi < 0x4 || (vendorID() != Intel && vendorID() != AMD) {
vend, _ := vendorID()
if mfi < 0x4 || (vend != Intel && vend != AMD) {
return 1
}
if mfi < 0xb {
if vendorID() != Intel {
if vend != Intel {
return 1
}
_, b, _, d := cpuid(1)
@ -758,7 +833,8 @@ func threadsPerCore() int {
func logicalCores() int {
mfi := maxFunctionID()
switch vendorID() {
v, _ := vendorID()
switch v {
case Intel:
// Use this on old Intel processors
if mfi < 0xb {
@ -793,7 +869,8 @@ func familyModel() (int, int) {
}
func physicalCores() int {
switch vendorID() {
v, _ := vendorID()
switch v {
case Intel:
return logicalCores() / threadsPerCore()
case AMD, Hygon:
@ -828,16 +905,20 @@ var vendorMapping = map[string]Vendor{
"XenVMMXenVMM": XenHVM,
"bhyve bhyve ": Bhyve,
"HygonGenuine": Hygon,
"Vortex86 SoC": SiS,
"SiS SiS SiS ": SiS,
"RiseRiseRise": SiS,
"Genuine RDC": RDC,
}
func vendorID() Vendor {
func vendorID() (Vendor, string) {
_, b, c, d := cpuid(0)
v := valAsString(b, d, c)
vend, ok := vendorMapping[string(v)]
v := string(valAsString(b, d, c))
vend, ok := vendorMapping[v]
if !ok {
return Other
return Other, v
}
return vend
return vend, v
}
func cacheLine() int {
@ -860,7 +941,7 @@ func (c *CPUInfo) cacheSize() {
c.Cache.L1I = -1
c.Cache.L2 = -1
c.Cache.L3 = -1
vendor := vendorID()
vendor, _ := vendorID()
switch vendor {
case Intel:
if maxFunctionID() < 4 {
@ -1015,7 +1096,7 @@ func hasSGX(available, lc bool) (rval SGXSupport) {
func support() Flags {
mfi := maxFunctionID()
vend := vendorID()
vend, _ := vendorID()
if mfi < 0x1 {
return 0
}
@ -1243,7 +1324,7 @@ func support() Flags {
AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case
so that SSE2 is used unless explicitly disabled by checking
AV_CPU_FLAG_SSE2SLOW. */
if vendorID() != Intel &&
if vend != Intel &&
rval&SSE2 != 0 && (c&0x00000040) == 0 {
rval |= SSE2SLOW
}
@ -1259,7 +1340,7 @@ func support() Flags {
}
}
if vendorID() == Intel {
if vend == Intel {
family, model := familyModel()
if family == 6 && (model == 9 || model == 13 || model == 14) {
/* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
@ -1306,3 +1387,118 @@ func valAsString(values ...uint32) []byte {
}
return r
}
// Single-precision and double-precision floating point
func (c CPUInfo) ArmFP() bool {
return c.Arm&FP != 0
}
// Advanced SIMD
func (c CPUInfo) ArmASIMD() bool {
return c.Arm&ASIMD != 0
}
// Generic timer
func (c CPUInfo) ArmEVTSTRM() bool {
return c.Arm&EVTSTRM != 0
}
// AES instructions
func (c CPUInfo) ArmAES() bool {
return c.Arm&AES != 0
}
// Polynomial Multiply instructions (PMULL/PMULL2)
func (c CPUInfo) ArmPMULL() bool {
return c.Arm&PMULL != 0
}
// SHA-1 instructions (SHA1C, etc)
func (c CPUInfo) ArmSHA1() bool {
return c.Arm&SHA1 != 0
}
// SHA-2 instructions (SHA256H, etc)
func (c CPUInfo) ArmSHA2() bool {
return c.Arm&SHA2 != 0
}
// CRC32/CRC32C instructions
func (c CPUInfo) ArmCRC32() bool {
return c.Arm&CRC32 != 0
}
// Large System Extensions (LSE)
func (c CPUInfo) ArmATOMICS() bool {
return c.Arm&ATOMICS != 0
}
// Half-precision floating point
func (c CPUInfo) ArmFPHP() bool {
return c.Arm&FPHP != 0
}
// Advanced SIMD half-precision floating point
func (c CPUInfo) ArmASIMDHP() bool {
return c.Arm&ASIMDHP != 0
}
// Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH)
func (c CPUInfo) ArmASIMDRDM() bool {
return c.Arm&ASIMDRDM != 0
}
// Javascript-style double->int convert (FJCVTZS)
func (c CPUInfo) ArmJSCVT() bool {
return c.Arm&JSCVT != 0
}
// Floating point complex number addition and multiplication
func (c CPUInfo) ArmFCMA() bool {
return c.Arm&FCMA != 0
}
// Weaker release consistency (LDAPR, etc)
func (c CPUInfo) ArmLRCPC() bool {
return c.Arm&LRCPC != 0
}
// Data cache clean to Point of Persistence (DC CVAP)
func (c CPUInfo) ArmDCPOP() bool {
return c.Arm&DCPOP != 0
}
// SHA-3 instructions (EOR3, RAXI, XAR, BCAX)
func (c CPUInfo) ArmSHA3() bool {
return c.Arm&SHA3 != 0
}
// SM3 instructions
func (c CPUInfo) ArmSM3() bool {
return c.Arm&SM3 != 0
}
// SM4 instructions
func (c CPUInfo) ArmSM4() bool {
return c.Arm&SM4 != 0
}
// SIMD Dot Product
func (c CPUInfo) ArmASIMDDP() bool {
return c.Arm&ASIMDDP != 0
}
// SHA512 instructions
func (c CPUInfo) ArmSHA512() bool {
return c.Arm&SHA512 != 0
}
// Scalable Vector Extension
func (c CPUInfo) ArmSVE() bool {
return c.Arm&SVE != 0
}
// Generic Pointer Authentication
func (c CPUInfo) ArmGPA() bool {
return c.Arm&GPA != 0
}

View File

@ -1,6 +1,6 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build 386,!gccgo
//+build 386,!gccgo,!noasm,!appengine
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0

View File

@ -1,6 +1,6 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
//+build amd64,!gccgo
//+build amd64,!gccgo,!noasm,!appengine
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0

26
vendor/github.com/klauspost/cpuid/cpuid_arm64.s generated vendored Normal file
View File

@ -0,0 +1,26 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
//+build arm64,!gccgo
// See https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt
// func getMidr
TEXT ·getMidr(SB), 7, $0
WORD $0xd5380000 // mrs x0, midr_el1 /* Main ID Register */
MOVD R0, midr+0(FP)
RET
// func getProcFeatures
TEXT ·getProcFeatures(SB), 7, $0
WORD $0xd5380400 // mrs x0, id_aa64pfr0_el1 /* Processor Feature Register 0 */
MOVD R0, procFeatures+0(FP)
RET
// func getInstAttributes
TEXT ·getInstAttributes(SB), 7, $0
WORD $0xd5380600 // mrs x0, id_aa64isar0_el1 /* Instruction Set Attribute Register 0 */
WORD $0xd5380621 // mrs x1, id_aa64isar1_el1 /* Instruction Set Attribute Register 1 */
MOVD R0, instAttrReg0+0(FP)
MOVD R1, instAttrReg1+8(FP)
RET

215
vendor/github.com/klauspost/cpuid/detect_arm64.go generated vendored Normal file
View File

@ -0,0 +1,215 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
//+build arm64,!gccgo,!noasm,!appengine
package cpuid
func getMidr() (midr uint64)
func getProcFeatures() (procFeatures uint64)
func getInstAttributes() (instAttrReg0, instAttrReg1 uint64)
func initCPU() {
cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
xgetbv = func(uint32) (a, b uint32) { return 0, 0 }
rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 }
}
func addInfo(c *CPUInfo) {
// midr := getMidr()
// MIDR_EL1 - Main ID Register
// x--------------------------------------------------x
// | Name | bits | visible |
// |--------------------------------------------------|
// | Implementer | [31-24] | y |
// |--------------------------------------------------|
// | Variant | [23-20] | y |
// |--------------------------------------------------|
// | Architecture | [19-16] | y |
// |--------------------------------------------------|
// | PartNum | [15-4] | y |
// |--------------------------------------------------|
// | Revision | [3-0] | y |
// x--------------------------------------------------x
// fmt.Printf(" implementer: 0x%02x\n", (midr>>24)&0xff)
// fmt.Printf(" variant: 0x%01x\n", (midr>>20)&0xf)
// fmt.Printf("architecture: 0x%01x\n", (midr>>16)&0xf)
// fmt.Printf(" part num: 0x%03x\n", (midr>>4)&0xfff)
// fmt.Printf(" revision: 0x%01x\n", (midr>>0)&0xf)
procFeatures := getProcFeatures()
// ID_AA64PFR0_EL1 - Processor Feature Register 0
// x--------------------------------------------------x
// | Name | bits | visible |
// |--------------------------------------------------|
// | DIT | [51-48] | y |
// |--------------------------------------------------|
// | SVE | [35-32] | y |
// |--------------------------------------------------|
// | GIC | [27-24] | n |
// |--------------------------------------------------|
// | AdvSIMD | [23-20] | y |
// |--------------------------------------------------|
// | FP | [19-16] | y |
// |--------------------------------------------------|
// | EL3 | [15-12] | n |
// |--------------------------------------------------|
// | EL2 | [11-8] | n |
// |--------------------------------------------------|
// | EL1 | [7-4] | n |
// |--------------------------------------------------|
// | EL0 | [3-0] | n |
// x--------------------------------------------------x
var f ArmFlags
// if procFeatures&(0xf<<48) != 0 {
// fmt.Println("DIT")
// }
if procFeatures&(0xf<<32) != 0 {
f |= SVE
}
if procFeatures&(0xf<<20) != 15<<20 {
f |= ASIMD
if procFeatures&(0xf<<20) == 1<<20 {
// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64pfr0_el1
// 0b0001 --> As for 0b0000, and also includes support for half-precision floating-point arithmetic.
f |= FPHP
f |= ASIMDHP
}
}
if procFeatures&(0xf<<16) != 0 {
f |= FP
}
instAttrReg0, instAttrReg1 := getInstAttributes()
// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
//
// ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0
// x--------------------------------------------------x
// | Name | bits | visible |
// |--------------------------------------------------|
// | TS | [55-52] | y |
// |--------------------------------------------------|
// | FHM | [51-48] | y |
// |--------------------------------------------------|
// | DP | [47-44] | y |
// |--------------------------------------------------|
// | SM4 | [43-40] | y |
// |--------------------------------------------------|
// | SM3 | [39-36] | y |
// |--------------------------------------------------|
// | SHA3 | [35-32] | y |
// |--------------------------------------------------|
// | RDM | [31-28] | y |
// |--------------------------------------------------|
// | ATOMICS | [23-20] | y |
// |--------------------------------------------------|
// | CRC32 | [19-16] | y |
// |--------------------------------------------------|
// | SHA2 | [15-12] | y |
// |--------------------------------------------------|
// | SHA1 | [11-8] | y |
// |--------------------------------------------------|
// | AES | [7-4] | y |
// x--------------------------------------------------x
// if instAttrReg0&(0xf<<52) != 0 {
// fmt.Println("TS")
// }
// if instAttrReg0&(0xf<<48) != 0 {
// fmt.Println("FHM")
// }
if instAttrReg0&(0xf<<44) != 0 {
f |= ASIMDDP
}
if instAttrReg0&(0xf<<40) != 0 {
f |= SM4
}
if instAttrReg0&(0xf<<36) != 0 {
f |= SM3
}
if instAttrReg0&(0xf<<32) != 0 {
f |= SHA3
}
if instAttrReg0&(0xf<<28) != 0 {
f |= ASIMDRDM
}
if instAttrReg0&(0xf<<20) != 0 {
f |= ATOMICS
}
if instAttrReg0&(0xf<<16) != 0 {
f |= CRC32
}
if instAttrReg0&(0xf<<12) != 0 {
f |= SHA2
}
if instAttrReg0&(0xf<<12) == 2<<12 {
// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
// 0b0010 --> As 0b0001, plus SHA512H, SHA512H2, SHA512SU0, and SHA512SU1 instructions implemented.
f |= SHA512
}
if instAttrReg0&(0xf<<8) != 0 {
f |= SHA1
}
if instAttrReg0&(0xf<<4) != 0 {
f |= AES
}
if instAttrReg0&(0xf<<4) == 2<<4 {
// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
// 0b0010 --> As for 0b0001, plus PMULL/PMULL2 instructions operating on 64-bit data quantities.
f |= PMULL
}
// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar1_el1
//
// ID_AA64ISAR1_EL1 - Instruction set attribute register 1
// x--------------------------------------------------x
// | Name | bits | visible |
// |--------------------------------------------------|
// | GPI | [31-28] | y |
// |--------------------------------------------------|
// | GPA | [27-24] | y |
// |--------------------------------------------------|
// | LRCPC | [23-20] | y |
// |--------------------------------------------------|
// | FCMA | [19-16] | y |
// |--------------------------------------------------|
// | JSCVT | [15-12] | y |
// |--------------------------------------------------|
// | API | [11-8] | y |
// |--------------------------------------------------|
// | APA | [7-4] | y |
// |--------------------------------------------------|
// | DPB | [3-0] | y |
// x--------------------------------------------------x
// if instAttrReg1&(0xf<<28) != 0 {
// fmt.Println("GPI")
// }
if instAttrReg1&(0xf<<28) != 24 {
f |= GPA
}
if instAttrReg1&(0xf<<20) != 0 {
f |= LRCPC
}
if instAttrReg1&(0xf<<16) != 0 {
f |= FCMA
}
if instAttrReg1&(0xf<<12) != 0 {
f |= JSCVT
}
// if instAttrReg1&(0xf<<8) != 0 {
// fmt.Println("API")
// }
// if instAttrReg1&(0xf<<4) != 0 {
// fmt.Println("APA")
// }
if instAttrReg1&(0xf<<0) != 0 {
f |= DCPOP
}
c.Arm = f
}

View File

@ -1,6 +1,6 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build 386,!gccgo amd64,!gccgo
//+build 386,!gccgo amd64,!gccgo,!noasm,!appengine
package cpuid
@ -15,3 +15,19 @@ func initCPU() {
xgetbv = asmXgetbv
rdtscpAsm = asmRdtscpAsm
}
func addInfo(c *CPUInfo) {
c.maxFunc = maxFunctionID()
c.maxExFunc = maxExtendedFunction()
c.BrandName = brandName()
c.CacheLine = cacheLine()
c.Family, c.Model = familyModel()
c.Features = support()
c.SGX = hasSGX(c.Features&SGX != 0, c.Features&SGXLC != 0)
c.ThreadsPerCore = threadsPerCore()
c.LogicalCores = logicalCores()
c.PhysicalCores = physicalCores()
c.VendorID, c.VendorString = vendorID()
c.Hz = hertz(c.BrandName)
c.cacheSize()
}

View File

@ -1,23 +1,14 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build !amd64,!386 gccgo
//+build !amd64,!386,!arm64 gccgo noasm appengine
package cpuid
func initCPU() {
cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
xgetbv = func(index uint32) (eax, edx uint32) {
return 0, 0
}
rdtscpAsm = func() (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
xgetbv = func(uint32) (a, b uint32) { return 0, 0 }
rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 }
}
func addInfo(info *CPUInfo) {}

View File

@ -1,4 +0,0 @@
package cpuid
//go:generate go run private-gen.go
//go:generate gofmt -w ./private

3
vendor/github.com/klauspost/cpuid/go.mod generated vendored Normal file
View File

@ -0,0 +1,3 @@
module github.com/klauspost/cpuid
go 1.12

View File

@ -1,3 +1,7 @@
## 1.3.2
* Decode into interface type with a struct value is supported [GH-187]
## 1.3.1
* Squash should only squash embedded structs. [GH-194]

View File

@ -463,7 +463,34 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
// value to "data" of that type.
func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
if val.IsValid() && val.Elem().IsValid() {
return d.decode(name, data, val.Elem())
elem := val.Elem()
// If we can't address this element, then it's not writable. Instead,
// we make a copy of the value (which is a pointer and therefore
// writable), decode into that, and replace the whole value.
copied := false
if !elem.CanAddr() {
copied = true
// Make *T
copy := reflect.New(elem.Type())
// *T = elem
copy.Elem().Set(elem)
// Set elem so we decode into it
elem = copy
}
// Decode. If we have an error then return. We also return right
// away if we're not a copy because that means we decoded directly.
if err := d.decode(name, data, elem); err != nil || !copied {
return err
}
// If we're a copy, we need to set the final result
val.Set(elem.Elem())
return nil
}
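A self-contained sketch (not mapstructure code; assumes fmt and reflect are imported) of why the copy is needed: a value reached through an interface is not addressable, so it has to be copied into a fresh pointer, modified there, and written back as a whole:

var box interface{} = struct{ N int }{N: 1}
v := reflect.ValueOf(&box).Elem() // addressable interface value
inner := v.Elem()                 // concrete struct inside the interface: NOT addressable
// inner.Field(0).SetInt(2)       // would panic: value is unaddressable

cp := reflect.New(inner.Type())   // *T we can write to
cp.Elem().Set(inner)              // copy the old contents
cp.Elem().Field(0).SetInt(2)      // mutate the copy
v.Set(cp.Elem())                  // replace the value held by the interface
fmt.Println(box)                  // {2}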
dataVal := reflect.ValueOf(data)
@ -847,21 +874,21 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
// Determine the name of the key in the map
if index := strings.Index(tagValue, ","); index != -1 {
if tagValue[:index] == "-" {
continue;
continue
}
// If "omitempty" is specified in the tag, it ignores empty values.
if strings.Index(tagValue[index + 1:], "omitempty") != -1 && isEmptyValue(v) {
if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) {
continue
}
// If "squash" is specified in the tag, we squash the field down.
squash = !squash && strings.Index(tagValue[index + 1:], "squash") != -1
squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1
if squash && v.Kind() != reflect.Struct {
return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
}
keyName = tagValue[:index]
} else if len(tagValue) > 0 {
if tagValue == "-" {
if tagValue == "-" {
continue
}
keyName = tagValue

View File

@ -1 +1,2 @@
benchmarks/sample_* linguist-generated=true
benchmarks/sample_* linguist-generated
tests/*/corpus/* linguist-generated

View File

@ -2,3 +2,8 @@ dist/
benchmarks/*
!benchmarks/*.go
!benchmarks/sample_*
tests/*/fuzz-fuzz.zip
tests/*/crashers
tests/*/suppressions
tests/*/corpus/*
!tests/*/corpus/*.*

View File

@ -1,4 +1,4 @@
# Minify <a name="minify"></a> [![Build Status](https://travis-ci.org/tdewolff/minify.svg?branch=master)](https://travis-ci.org/tdewolff/minify) [![GoDoc](http://godoc.org/github.com/tdewolff/minify?status.svg)](http://godoc.org/github.com/tdewolff/minify) [![Coverage Status](https://coveralls.io/repos/github/tdewolff/minify/badge.svg?branch=master)](https://coveralls.io/github/tdewolff/minify?branch=master)
# Minify <a name="minify"></a> [![Build Status](https://travis-ci.org/tdewolff/minify.svg?branch=master)](https://travis-ci.org/tdewolff/minify) [![API reference](https://img.shields.io/badge/godoc-reference-5272B4)](https://pkg.go.dev/github.com/tdewolff/minify/v2?tab=doc) [![Coverage Status](https://coveralls.io/repos/github/tdewolff/minify/badge.svg?branch=master)](https://coveralls.io/github/tdewolff/minify?branch=master)
**[Online demo](https://go.tacodewolff.nl/minify) if you need to minify files *now*.**

View File

@ -8,7 +8,7 @@ require (
github.com/fsnotify/fsnotify v1.4.7
github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2
github.com/spf13/pflag v1.0.3
github.com/tdewolff/parse/v2 v2.4.2
github.com/tdewolff/parse/v2 v2.4.3
github.com/tdewolff/test v1.0.6
golang.org/x/sys v0.0.0-20181031143558-9b800f95dbbc // indirect
)

View File

@ -8,10 +8,8 @@ github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2 h1:JAEbJn3j/FrhdWA9jW8
github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2/go.mod h1:0KeJpeMD6o+O4hW7qJOT7vyQPKrWmj26uf5wMc/IiIs=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/tdewolff/parse/v2 v2.4.2-0.20191217133525-7b246f800500 h1:FlcpXrF3rpbZ8lxK0BcPnl3NEDuebxk+A9Wwm/tjDH4=
github.com/tdewolff/parse/v2 v2.4.2-0.20191217133525-7b246f800500/go.mod h1:WzaJpRSbwq++EIQHYIRTpbYKNA3gn9it1Ik++q4zyho=
github.com/tdewolff/parse/v2 v2.4.2 h1:Bu2Qv6wepkc+Ou7iB/qHjAhEImlAP5vedzlQRUdj3BI=
github.com/tdewolff/parse/v2 v2.4.2/go.mod h1:WzaJpRSbwq++EIQHYIRTpbYKNA3gn9it1Ik++q4zyho=
github.com/tdewolff/parse/v2 v2.4.3 h1:k24zHgTRGm7LkvbTEreuavyZTf0k8a/lIenggv62OiU=
github.com/tdewolff/parse/v2 v2.4.3/go.mod h1:WzaJpRSbwq++EIQHYIRTpbYKNA3gn9it1Ik++q4zyho=
github.com/tdewolff/test v1.0.6 h1:76mzYJQ83Op284kMT+63iCNCI7NEERsIN8dLM+RiKr4=
github.com/tdewolff/test v1.0.6/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE=
golang.org/x/sys v0.0.0-20181031143558-9b800f95dbbc h1:SdCq5U4J+PpbSDIl9bM0V1e1Ug1jsnBkAFvTs1htn7U=

View File

@ -64,7 +64,6 @@ func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, _ map[string]st
tb := NewTokenBuffer(l)
for {
t := *tb.Shift()
SWITCH:
switch t.TokenType {
case html.ErrorToken:
if l.Err() == io.EOF {
@ -229,11 +228,12 @@ func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, _ map[string]st
if !hasAttributes && (!o.KeepDocumentTags && (t.Hash == Html || t.Hash == Head || t.Hash == Body) || t.Hash == Colgroup) {
break
} else if t.TokenType == html.EndTagToken {
omitEndTag := false
if !o.KeepEndTags {
if t.Hash == Thead || t.Hash == Tbody || t.Hash == Tfoot || t.Hash == Tr || t.Hash == Th || t.Hash == Td ||
t.Hash == Optgroup || t.Hash == Option || t.Hash == Dd || t.Hash == Dt ||
t.Hash == Li || t.Hash == Rb || t.Hash == Rt || t.Hash == Rtc || t.Hash == Rp {
break
if t.Hash == Thead || t.Hash == Tbody || t.Hash == Tfoot || t.Hash == Tr || t.Hash == Th ||
t.Hash == Td || t.Hash == Option || t.Hash == Dd || t.Hash == Dt || t.Hash == Li ||
t.Hash == Rb || t.Hash == Rt || t.Hash == Rtc || t.Hash == Rp {
omitEndTag = true // omit end tags
} else if t.Hash == P {
i := 0
for {
@ -244,7 +244,21 @@ func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, _ map[string]st
continue
}
if next.TokenType == html.ErrorToken || next.TokenType == html.EndTagToken && next.Traits&keepPTag == 0 || next.TokenType == html.StartTagToken && next.Traits&omitPTag != 0 {
break SWITCH // omit p end tag
omitEndTag = true // omit p end tag
}
break
}
} else if t.Hash == Optgroup {
i := 0
for {
next := tb.Peek(i)
i++
// continue if text token
if next.TokenType == html.TextToken {
continue
}
if next.TokenType == html.ErrorToken || next.Hash != Option {
omitEndTag = true // omit optgroup end tag
}
break
}
@ -257,12 +271,21 @@ func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, _ map[string]st
omitSpace = true // omit spaces after block elements
}
if len(t.Data) > 3+len(t.Text) {
t.Data[2+len(t.Text)] = '>'
t.Data = t.Data[:3+len(t.Text)]
if !omitEndTag {
if len(t.Data) > 3+len(t.Text) {
t.Data[2+len(t.Text)] = '>'
t.Data = t.Data[:3+len(t.Text)]
}
if _, err := w.Write(t.Data); err != nil {
return err
}
}
if _, err := w.Write(t.Data); err != nil {
return err
// skip text in select and optgroup tags
if t.Hash == Option || t.Hash == Optgroup {
if next := tb.Peek(0); next.TokenType == html.TextToken {
tb.Shift()
}
}
break
}
@ -473,10 +496,19 @@ func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, _ map[string]st
}
}
}
} else {
_ = tb.Shift() // StartTagClose
}
if _, err := w.Write(gtBytes); err != nil {
return err
}
// skip text in select and optgroup tags
if t.Hash == Select || t.Hash == Optgroup {
if next := tb.Peek(0); next.TokenType == html.TextToken {
tb.Shift()
}
}
}
}
}
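Rough illustration of the effect of the new omitEndTag handling, using the public minify v2 API (minify.New, AddFunc, String); the exact output depends on the configured options:

m := minify.New()
m.AddFunc("text/html", html.Minify)
out, err := m.String("text/html",
	`<select><optgroup label="a"><option>x</option></optgroup></select>`)
if err != nil {
	log.Fatal(err)
}
fmt.Println(out) // optional end tags such as </option> and </optgroup> are dropped,
                 // e.g. <select><optgroup label=a><option>x</select>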

1
vendor/github.com/tdewolff/parse/v2/.gitattributes generated vendored Normal file
View File

@ -0,0 +1 @@
tests/*/corpus/* linguist-generated

5
vendor/github.com/tdewolff/parse/v2/.gitignore generated vendored Normal file
View File

@ -0,0 +1,5 @@
tests/*/fuzz-fuzz.zip
tests/*/crashers
tests/*/suppressions
tests/*/corpus/*
!tests/*/corpus/*.*

View File

@ -6,5 +6,5 @@ env:
before_install:
- go get github.com/mattn/goveralls
script:
- go test -covermode=count -coverprofile=profile.cov . ./buffer ./css ./html ./js ./json ./strconv ./svg ./xml
- go test -covermode=count -coverprofile=profile.cov . ./buffer ./css ./html ./js ./json ./strconv ./xml
- goveralls -coverprofile=profile.cov -service travis-ci

View File

@ -1,10 +1,4 @@
# Parse [![Build Status](https://travis-ci.org/tdewolff/parse.svg?branch=master)](https://travis-ci.org/tdewolff/parse) [![GoDoc](http://godoc.org/github.com/tdewolff/parse?status.svg)](http://godoc.org/github.com/tdewolff/parse) [![Coverage Status](https://coveralls.io/repos/github/tdewolff/parse/badge.svg?branch=master)](https://coveralls.io/github/tdewolff/parse?branch=master)
***BE AWARE: YOU NEED GO 1.9.7+, 1.10.3+, 1.11 to run the latest release!!!***
If you cannot upgrade Go, please pin to **parse@v2.3.4**
---
# Parse [![Build Status](https://travis-ci.org/tdewolff/parse.svg?branch=master)](https://travis-ci.org/tdewolff/parse) [![API reference](https://img.shields.io/badge/godoc-reference-5272B4)](https://pkg.go.dev/github.com/tdewolff/parse/v2?tab=doc) [![Coverage Status](https://coveralls.io/repos/github/tdewolff/parse/badge.svg?branch=master)](https://coveralls.io/github/tdewolff/parse?branch=master)
This package contains several lexers and parsers written in [Go][1]. All subpackages are built to be streaming, high performance and to be in accordance with the official (latest) specifications.

View File

@ -33,7 +33,7 @@ func NewLexer(r io.Reader) *Lexer {
b, err = ioutil.ReadAll(r)
if err != nil {
return &Lexer{
buf: []byte{0},
buf: nullBuffer,
err: err,
}
}
@ -132,7 +132,7 @@ func (z *Lexer) Rewind(pos int) {
// Lexeme returns the bytes of the current selection.
func (z *Lexer) Lexeme() []byte {
return z.buf[z.start:z.pos]
return z.buf[z.start:z.pos:z.pos]
}
// Skip collapses the position to the end of the selection.
@ -142,7 +142,7 @@ func (z *Lexer) Skip() {
// Shift returns the bytes of the current selection and collapses the position to the end of the selection.
func (z *Lexer) Shift() []byte {
b := z.buf[z.start:z.pos]
b := z.buf[z.start:z.pos:z.pos]
z.start = z.pos
return b
}
@ -154,5 +154,5 @@ func (z *Lexer) Offset() int {
// Bytes returns the underlying buffer.
func (z *Lexer) Bytes() []byte {
return z.buf[:len(z.buf)-1]
return z.buf[: len(z.buf)-1 : len(z.buf)-1]
}
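The switch to three-index (full) slice expressions caps the capacity of the returned slices, so a caller that appends to a Lexeme/Shift/Bytes result cannot overwrite bytes the lexer has not handed out yet. A standalone illustration of the difference:

buf := []byte("abcdef")
a := buf[0:3]      // len 3, cap 6: append writes into buf's backing array
b := buf[0:3:3]    // len 3, cap 3: append must allocate a new array
_ = append(a, 'X') // overwrites buf[3]
_ = append(b, 'Y') // leaves buf untouched
fmt.Println(string(buf)) // "abcXef"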

View File

@ -95,18 +95,18 @@ func Mediatype(b []byte) ([]byte, map[string]string) {
if b[i] == ';' || b[i] == ' ' {
mimetype = b[:i]
if b[i] == ' ' {
i++
i++ // space
for i < n && b[i] == ' ' {
i++
}
if i < n && b[i] != ';' {
if n <= i || b[i] != ';' {
break
}
}
params = map[string]string{}
s := string(b)
PARAM:
i++
i++ // semicolon
for i < n && s[i] == ' ' {
i++
}

View File

@ -10,239 +10,15 @@ type Hash uint32
// Unique hash definitions to be used instead of strings
const (
A Hash = 0x1 // a
Abbr Hash = 0x37a04 // abbr
About Hash = 0x5 // about
Accept Hash = 0x1106 // accept
Accept_Charset Hash = 0x110e // accept-charset
Action Hash = 0x23f06 // action
Address Hash = 0x5a07 // address
Align Hash = 0x32705 // align
Alink Hash = 0x7005 // alink
Allowfullscreen Hash = 0x2ad0f // allowfullscreen
Amp_Boilerplate Hash = 0x610f // amp-boilerplate
Area Hash = 0x1e304 // area
Article Hash = 0x2707 // article
Aside Hash = 0xb405 // aside
Async Hash = 0xac05 // async
Audio Hash = 0xd105 // audio
Autofocus Hash = 0xe409 // autofocus
Autoplay Hash = 0x10808 // autoplay
Axis Hash = 0x11004 // axis
B Hash = 0x101 // b
Background Hash = 0x300a // background
Base Hash = 0x19604 // base
Bb Hash = 0x37b02 // bb
Bdi Hash = 0x7503 // bdi
Bdo Hash = 0x31f03 // bdo
Bgcolor Hash = 0x12607 // bgcolor
Blockquote Hash = 0x13e0a // blockquote
Body Hash = 0xd04 // body
Br Hash = 0x37c02 // br
Button Hash = 0x14806 // button
Canvas Hash = 0xb006 // canvas
Caption Hash = 0x21f07 // caption
Charset Hash = 0x1807 // charset
Checked Hash = 0x1b307 // checked
Cite Hash = 0xfb04 // cite
Class Hash = 0x15905 // class
Classid Hash = 0x15907 // classid
Clear Hash = 0x2b05 // clear
Code Hash = 0x19204 // code
Codebase Hash = 0x19208 // codebase
Codetype Hash = 0x1a408 // codetype
Col Hash = 0x12803 // col
Colgroup Hash = 0x1bb08 // colgroup
Color Hash = 0x12805 // color
Cols Hash = 0x1cf04 // cols
Colspan Hash = 0x1cf07 // colspan
Compact Hash = 0x1ec07 // compact
Content Hash = 0x28407 // content
Controls Hash = 0x20108 // controls
Data Hash = 0x1f04 // data
Datalist Hash = 0x1f08 // datalist
Datatype Hash = 0x4d08 // datatype
Dd Hash = 0x5b02 // dd
Declare Hash = 0xb707 // declare
Default Hash = 0x7f07 // default
DefaultChecked Hash = 0x1730e // defaultChecked
DefaultMuted Hash = 0x7f0c // defaultMuted
DefaultSelected Hash = 0x8a0f // defaultSelected
Defer Hash = 0x9805 // defer
Del Hash = 0x10503 // del
Details Hash = 0x15f07 // details
Dfn Hash = 0x16c03 // dfn
Dialog Hash = 0xa606 // dialog
Dir Hash = 0x7603 // dir
Disabled Hash = 0x18008 // disabled
Div Hash = 0x18703 // div
Dl Hash = 0x1b902 // dl
Dt Hash = 0x23102 // dt
Em Hash = 0x4302 // em
Embed Hash = 0x4905 // embed
Enabled Hash = 0x26c07 // enabled
Enctype Hash = 0x1fa07 // enctype
Face Hash = 0x5604 // face
Fieldset Hash = 0x21408 // fieldset
Figcaption Hash = 0x21c0a // figcaption
Figure Hash = 0x22606 // figure
Footer Hash = 0xdb06 // footer
For Hash = 0x23b03 // for
Form Hash = 0x23b04 // form
Formaction Hash = 0x23b0a // formaction
Formnovalidate Hash = 0x2450e // formnovalidate
Frame Hash = 0x28c05 // frame
Frameborder Hash = 0x28c0b // frameborder
H1 Hash = 0x2e002 // h1
H2 Hash = 0x25302 // h2
H3 Hash = 0x25502 // h3
H4 Hash = 0x25702 // h4
H5 Hash = 0x25902 // h5
H6 Hash = 0x25b02 // h6
Head Hash = 0x2d204 // head
Header Hash = 0x2d206 // header
Hgroup Hash = 0x25d06 // hgroup
Hidden Hash = 0x26806 // hidden
Hr Hash = 0x32d02 // hr
Href Hash = 0x32d04 // href
Hreflang Hash = 0x32d08 // hreflang
Html Hash = 0x27304 // html
Http_Equiv Hash = 0x2770a // http-equiv
I Hash = 0x2401 // i
Icon Hash = 0x28304 // icon
Id Hash = 0xb602 // id
Iframe Hash = 0x28b06 // iframe
Img Hash = 0x29703 // img
Inert Hash = 0xf605 // inert
Inlist Hash = 0x29a06 // inlist
Input Hash = 0x2a405 // input
Ins Hash = 0x2a903 // ins
Ismap Hash = 0x11205 // ismap
Itemscope Hash = 0xfc09 // itemscope
Kbd Hash = 0x7403 // kbd
Keygen Hash = 0x1f606 // keygen
Label Hash = 0xbe05 // label
Lang Hash = 0x33104 // lang
Language Hash = 0x33108 // language
Legend Hash = 0x2c506 // legend
Li Hash = 0x2302 // li
Link Hash = 0x7104 // link
Longdesc Hash = 0xc208 // longdesc
Main Hash = 0xf404 // main
Manifest Hash = 0x2bc08 // manifest
Map Hash = 0xee03 // map
Mark Hash = 0x2cb04 // mark
Math Hash = 0x2cf04 // math
Max Hash = 0x2d803 // max
Maxlength Hash = 0x2d809 // maxlength
Media Hash = 0xa405 // media
Menu Hash = 0x12204 // menu
Meta Hash = 0x2e204 // meta
Meter Hash = 0x2f705 // meter
Method Hash = 0x2fc06 // method
Multiple Hash = 0x30208 // multiple
Muted Hash = 0x30a05 // muted
Name Hash = 0xa204 // name
Nav Hash = 0x32403 // nav
Nohref Hash = 0x32b06 // nohref
Noresize Hash = 0x13608 // noresize
Noscript Hash = 0x14d08 // noscript
Noshade Hash = 0x16e07 // noshade
Novalidate Hash = 0x2490a // novalidate
Nowrap Hash = 0x1d506 // nowrap
Object Hash = 0xd506 // object
Ol Hash = 0xcb02 // ol
Open Hash = 0x32104 // open
Optgroup Hash = 0x35608 // optgroup
Option Hash = 0x30f06 // option
Output Hash = 0x206 // output
P Hash = 0x501 // p
Param Hash = 0xf005 // param
Pauseonexit Hash = 0x1160b // pauseonexit
Picture Hash = 0x1c207 // picture
Plaintext Hash = 0x1da09 // plaintext
Poster Hash = 0x26206 // poster
Pre Hash = 0x35d03 // pre
Prefix Hash = 0x35d06 // prefix
Profile Hash = 0x36407 // profile
Progress Hash = 0x34208 // progress
Property Hash = 0x31508 // property
Q Hash = 0x14301 // q
Rb Hash = 0x2f02 // rb
Readonly Hash = 0x1e408 // readonly
Rel Hash = 0xbc03 // rel
Required Hash = 0x22a08 // required
Resource Hash = 0x1c708 // resource
Rev Hash = 0x7803 // rev
Reversed Hash = 0x7808 // reversed
Rows Hash = 0x9c04 // rows
Rowspan Hash = 0x9c07 // rowspan
Rp Hash = 0x6a02 // rp
Rt Hash = 0x2802 // rt
Rtc Hash = 0xf903 // rtc
Ruby Hash = 0xe004 // ruby
Rules Hash = 0x12c05 // rules
S Hash = 0x1c01 // s
Samp Hash = 0x6004 // samp
Scope Hash = 0x10005 // scope
Scoped Hash = 0x10006 // scoped
Script Hash = 0x14f06 // script
Scrolling Hash = 0xc809 // scrolling
Seamless Hash = 0x19808 // seamless
Section Hash = 0x13007 // section
Select Hash = 0x16506 // select
Selected Hash = 0x16508 // selected
Shape Hash = 0x19f05 // shape
Size Hash = 0x13a04 // size
Slot Hash = 0x20804 // slot
Small Hash = 0x2ab05 // small
Sortable Hash = 0x2ef08 // sortable
Source Hash = 0x1c906 // source
Span Hash = 0x9f04 // span
Src Hash = 0x34903 // src
Srcset Hash = 0x34906 // srcset
Start Hash = 0x2505 // start
Strong Hash = 0x29e06 // strong
Style Hash = 0x2c205 // style
Sub Hash = 0x31d03 // sub
Summary Hash = 0x33907 // summary
Sup Hash = 0x34003 // sup
Svg Hash = 0x34f03 // svg
Tabindex Hash = 0x2e408 // tabindex
Table Hash = 0x2f205 // table
Target Hash = 0x706 // target
Tbody Hash = 0xc05 // tbody
Td Hash = 0x1e02 // td
Template Hash = 0x4208 // template
Text Hash = 0x1df04 // text
Textarea Hash = 0x1df08 // textarea
Tfoot Hash = 0xda05 // tfoot
Th Hash = 0x2d102 // th
Thead Hash = 0x2d105 // thead
Time Hash = 0x12004 // time
Title Hash = 0x15405 // title
Tr Hash = 0x1f202 // tr
Track Hash = 0x1f205 // track
Translate Hash = 0x20b09 // translate
Truespeed Hash = 0x23209 // truespeed
Type Hash = 0x5104 // type
Typemustmatch Hash = 0x1a80d // typemustmatch
Typeof Hash = 0x5106 // typeof
U Hash = 0x301 // u
Ul Hash = 0x8302 // ul
Undeterminate Hash = 0x370d // undeterminate
Usemap Hash = 0xeb06 // usemap
Valign Hash = 0x32606 // valign
Value Hash = 0x18905 // value
Valuetype Hash = 0x18909 // valuetype
Var Hash = 0x28003 // var
Video Hash = 0x35205 // video
Visible Hash = 0x36b07 // visible
Vlink Hash = 0x37205 // vlink
Vocab Hash = 0x37705 // vocab
Wbr Hash = 0x37e03 // wbr
Xmlns Hash = 0x2eb05 // xmlns
Xmp Hash = 0x36203 // xmp
Iframe Hash = 0x6 // iframe
Math Hash = 0x604 // math
Plaintext Hash = 0x1e09 // plaintext
Script Hash = 0xa06 // script
Style Hash = 0x1405 // style
Svg Hash = 0x1903 // svg
Textarea Hash = 0x2308 // textarea
Title Hash = 0xf05 // title
Xmp Hash = 0x1c03 // xmp
)
// String returns the hash' name.
@ -289,255 +65,17 @@ NEXT:
}
const _Hash_hash0 = 0x9acb0442
const _Hash_maxLen = 15
const _Hash_text = "aboutputargetbodyaccept-charsetdatalistarticlearbackgroundet" +
"erminatemplatembedatatypeofaceaddressamp-boilerplatealinkbdi" +
"reversedefaultMutedefaultSelectedeferowspanamedialogasyncanv" +
"asideclarelabelongdescrollingaudiobjectfooterubyautofocusema" +
"paramainertcitemscopedelautoplayaxismapauseonexitimenubgcolo" +
"rulesectionoresizeblockquotebuttonoscriptitleclassidetailsel" +
"ectedfnoshadefaultCheckedisabledivaluetypecodebaseamlesshape" +
"codetypemustmatcheckedlcolgroupicturesourcecolspanowraplaint" +
"extareadonlycompactrackeygenctypecontrolslotranslatefieldset" +
"figcaptionfigurequiredtruespeedformactionformnovalidateh2h3h" +
"4h5h6hgrouposterhiddenabledhtmlhttp-equivaricontentiframebor" +
"derimginlistronginputinsmallowfullscreenmanifestylegendmarkm" +
"atheadermaxlength1metabindexmlnsortablemetermethodmultiplemu" +
"tedoptionpropertysubdopenavalignohreflanguagesummarysuprogre" +
"ssrcsetsvgvideoptgrouprefixmprofilevisiblevlinkvocabbrwbr"
const _Hash_maxLen = 9
const _Hash_text = "iframemathscriptitlestylesvgxmplaintextarea"
var _Hash_table = [1 << 9]Hash{
0x0: 0x1df08, // textarea
0x4: 0x32d02, // hr
0x8: 0x1c207, // picture
0xb: 0x18905, // value
0xf: 0x2e408, // tabindex
0x12: 0x15905, // class
0x15: 0x37e03, // wbr
0x18: 0x1a80d, // typemustmatch
0x1a: 0x1b902, // dl
0x1d: 0xf903, // rtc
0x1e: 0x25702, // h4
0x22: 0x2ef08, // sortable
0x24: 0x4208, // template
0x25: 0x28c0b, // frameborder
0x28: 0x37a04, // abbr
0x29: 0x28b06, // iframe
0x2a: 0x610f, // amp-boilerplate
0x2c: 0x1e408, // readonly
0x30: 0x23f06, // action
0x33: 0x28c05, // frame
0x35: 0x12c05, // rules
0x36: 0x30208, // multiple
0x38: 0x31f03, // bdo
0x39: 0x1d506, // nowrap
0x3e: 0x21408, // fieldset
0x3f: 0x7503, // bdi
0x46: 0x7f0c, // defaultMuted
0x49: 0x35205, // video
0x4c: 0x19808, // seamless
0x4d: 0x13608, // noresize
0x4f: 0xb602, // id
0x51: 0x25d06, // hgroup
0x52: 0x23102, // dt
0x55: 0x12805, // color
0x56: 0x34003, // sup
0x59: 0x370d, // undeterminate
0x5a: 0x35608, // optgroup
0x5b: 0x2d206, // header
0x5c: 0xb405, // aside
0x5f: 0x10005, // scope
0x60: 0x101, // b
0x61: 0xcb02, // ol
0x64: 0x32b06, // nohref
0x65: 0x1da09, // plaintext
0x66: 0x20804, // slot
0x67: 0x11004, // axis
0x68: 0x12803, // col
0x69: 0x32606, // valign
0x6c: 0x2d105, // thead
0x70: 0x34906, // srcset
0x71: 0x26806, // hidden
0x76: 0x1bb08, // colgroup
0x78: 0x34f03, // svg
0x7b: 0x2cb04, // mark
0x7e: 0x33104, // lang
0x81: 0x1cf04, // cols
0x86: 0x5a07, // address
0x8b: 0xf404, // main
0x8c: 0x4302, // em
0x8f: 0x32d08, // hreflang
0x93: 0x1b307, // checked
0x94: 0x25902, // h5
0x95: 0x301, // u
0x96: 0x32705, // align
0x97: 0x14301, // q
0x99: 0xd506, // object
0x9b: 0x28407, // content
0x9d: 0xc809, // scrolling
0x9f: 0x36407, // profile
0xa0: 0x34903, // src
0xa1: 0xda05, // tfoot
0xa3: 0x2f705, // meter
0xa4: 0x37705, // vocab
0xa6: 0xd04, // body
0xa8: 0x19204, // code
0xac: 0x20108, // controls
0xb0: 0x2ab05, // small
0xb1: 0x18008, // disabled
0xb5: 0x5604, // face
0xb6: 0x501, // p
0xb9: 0x2302, // li
0xbb: 0xe409, // autofocus
0xbf: 0x27304, // html
0xc2: 0x4d08, // datatype
0xc6: 0x35d06, // prefix
0xcb: 0x35d03, // pre
0xcc: 0x1106, // accept
0xd1: 0x23b03, // for
0xd5: 0x29e06, // strong
0xd6: 0x9c07, // rowspan
0xd7: 0x25502, // h3
0xd8: 0x2cf04, // math
0xde: 0x16e07, // noshade
0xdf: 0x19f05, // shape
0xe1: 0x10006, // scoped
0xe3: 0x706, // target
0xe6: 0x21c0a, // figcaption
0xe9: 0x1df04, // text
0xea: 0x1c708, // resource
0xec: 0xee03, // map
0xf0: 0x29a06, // inlist
0xf1: 0x16506, // select
0xf2: 0x1f606, // keygen
0xf3: 0x5106, // typeof
0xf6: 0xb006, // canvas
0xf7: 0x30f06, // option
0xf8: 0xbe05, // label
0xf9: 0xbc03, // rel
0xfb: 0x1f04, // data
0xfd: 0x6004, // samp
0x100: 0x110e, // accept-charset
0x101: 0xeb06, // usemap
0x103: 0x2bc08, // manifest
0x109: 0xa204, // name
0x10a: 0x14806, // button
0x10b: 0x2b05, // clear
0x10e: 0x33907, // summary
0x10f: 0x2e204, // meta
0x110: 0x33108, // language
0x112: 0x300a, // background
0x113: 0x2707, // article
0x116: 0x23b0a, // formaction
0x119: 0x1, // a
0x11b: 0x5, // about
0x11c: 0xfc09, // itemscope
0x11e: 0x14d08, // noscript
0x11f: 0x15907, // classid
0x120: 0x36203, // xmp
0x121: 0x19604, // base
0x123: 0x1c01, // s
0x124: 0x36b07, // visible
0x126: 0x37b02, // bb
0x127: 0x9c04, // rows
0x12d: 0x2450e, // formnovalidate
0x131: 0x1f205, // track
0x135: 0x18703, // div
0x136: 0xac05, // async
0x137: 0x31508, // property
0x13a: 0x16c03, // dfn
0x13e: 0xf605, // inert
0x142: 0x10503, // del
0x144: 0x25302, // h2
0x147: 0x2c205, // style
0x149: 0x29703, // img
0x14a: 0xc05, // tbody
0x14b: 0x7603, // dir
0x14c: 0x2eb05, // xmlns
0x14e: 0x1f08, // datalist
0x14f: 0x32d04, // href
0x150: 0x1f202, // tr
0x151: 0x13e0a, // blockquote
0x152: 0x18909, // valuetype
0x155: 0xdb06, // footer
0x157: 0x14f06, // script
0x158: 0x1cf07, // colspan
0x15d: 0x1730e, // defaultChecked
0x15f: 0x2490a, // novalidate
0x164: 0x1a408, // codetype
0x165: 0x2c506, // legend
0x16b: 0x1160b, // pauseonexit
0x16c: 0x21f07, // caption
0x16f: 0x26c07, // enabled
0x173: 0x26206, // poster
0x175: 0x30a05, // muted
0x176: 0x11205, // ismap
0x178: 0x2a903, // ins
0x17a: 0xe004, // ruby
0x17b: 0x37c02, // br
0x17c: 0x8a0f, // defaultSelected
0x17d: 0x7403, // kbd
0x17f: 0x1c906, // source
0x182: 0x9f04, // span
0x184: 0x2d803, // max
0x18a: 0x5b02, // dd
0x18b: 0x13a04, // size
0x18c: 0xa405, // media
0x18d: 0x19208, // codebase
0x18f: 0x4905, // embed
0x192: 0x5104, // type
0x193: 0xf005, // param
0x194: 0x25b02, // h6
0x197: 0x28304, // icon
0x198: 0x12607, // bgcolor
0x199: 0x2ad0f, // allowfullscreen
0x19a: 0x12004, // time
0x19b: 0x7803, // rev
0x19d: 0x34208, // progress
0x19e: 0x22606, // figure
0x1a0: 0x6a02, // rp
0x1a2: 0xa606, // dialog
0x1a4: 0x2802, // rt
0x1a7: 0x1e304, // area
0x1a8: 0x7808, // reversed
0x1aa: 0x32104, // open
0x1ac: 0x2d204, // head
0x1ad: 0x7005, // alink
0x1af: 0x28003, // var
0x1b0: 0x15f07, // details
0x1b1: 0x2401, // i
0x1b3: 0x1e02, // td
0x1b4: 0xb707, // declare
0x1b5: 0x8302, // ul
0x1ba: 0x2fc06, // method
0x1bd: 0x13007, // section
0x1be: 0x22a08, // required
0x1c2: 0x9805, // defer
0x1c3: 0x37205, // vlink
0x1c4: 0x15405, // title
0x1c5: 0x2770a, // http-equiv
0x1c6: 0x1fa07, // enctype
0x1c7: 0x1ec07, // compact
0x1c8: 0x2d809, // maxlength
0x1c9: 0x16508, // selected
0x1cc: 0xd105, // audio
0x1cd: 0xc208, // longdesc
0x1d1: 0xfb04, // cite
0x1da: 0x2505, // start
0x1de: 0x2d102, // th
0x1df: 0x10808, // autoplay
0x1e2: 0x7104, // link
0x1e3: 0x206, // output
0x1e5: 0x12204, // menu
0x1e6: 0x2a405, // input
0x1eb: 0x32403, // nav
0x1ec: 0x31d03, // sub
0x1ee: 0x1807, // charset
0x1ef: 0x7f07, // default
0x1f3: 0x2f205, // table
0x1f4: 0x23b04, // form
0x1f5: 0x23209, // truespeed
0x1f6: 0x2f02, // rb
0x1fb: 0x20b09, // translate
0x1fd: 0x2e002, // h1
var _Hash_table = [1 << 4]Hash{
0x0: 0x2308, // textarea
0x2: 0x6, // iframe
0x4: 0xf05, // title
0x5: 0x1e09, // plaintext
0x7: 0x1405, // style
0x8: 0x604, // math
0x9: 0xa06, // script
0xa: 0x1903, // svg
0xb: 0x1c03, // xmp
}

File diff suppressed because it is too large Load Diff

View File

@ -19,7 +19,7 @@ func ParseFloat(b []byte) (float64, int) {
neg = b[i] == '-'
i++
}
start := i
dot := -1
trunk := -1
n := uint64(0)
@ -40,6 +40,9 @@ func ParseFloat(b []byte) (float64, int) {
break
}
}
if i == start || i == start+1 && dot == start {
return 0.0, 0
}
f := float64(n)
if neg {
@ -57,10 +60,13 @@ func ParseFloat(b []byte) (float64, int) {
}
expExp := int64(0)
if i < len(b) && (b[i] == 'e' || b[i] == 'E') {
startExp := i
i++
if e, expLen := ParseInt(b[i:]); expLen > 0 {
expExp = e
i += expLen
} else {
i = startExp
}
}
exp := expExp - mantExp

View File

@ -13,6 +13,7 @@ func ParseInt(b []byte) (int64, int) {
neg = b[0] == '-'
i++
}
start := i
n := uint64(0)
for i < len(b) {
c := b[i]
@ -26,6 +27,9 @@ func ParseInt(b []byte) (int64, int) {
}
i++
}
if i == start {
return 0, 0
}
if !neg && n > uint64(math.MaxInt64) || n > uint64(math.MaxInt64)+1 {
return 0, 0
} else if neg {

View File

@ -97,6 +97,12 @@ func IoctlSetRTCTime(fd int, value *RTCTime) error {
return err
}
func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error {
err := ioctl(fd, RTC_WKALM_SET, uintptr(unsafe.Pointer(value)))
runtime.KeepAlive(value)
return err
}
func IoctlGetUint32(fd int, req uint) (uint32, error) {
var value uint32
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
@ -109,6 +115,12 @@ func IoctlGetRTCTime(fd int) (*RTCTime, error) {
return &value, err
}
func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) {
var value RTCWkAlrm
err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value)))
return &value, err
}
//sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error)
func Link(oldpath string, newpath string) (err error) {
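A hypothetical sketch of the new wake-alarm helpers: arm the RTC to fire a few minutes from now. The /dev/rtc0 path is illustrative, and the RTCWkAlrm/RTCTime field layout (Enabled, Time, Sec, Min, ...) is assumed from x/sys/unix rather than shown in this hunk:

fd, err := unix.Open("/dev/rtc0", unix.O_RDWR, 0)
if err != nil {
	log.Fatal(err)
}
defer unix.Close(fd)

now, err := unix.IoctlGetRTCTime(fd)
if err != nil {
	log.Fatal(err)
}

alarm := unix.RTCWkAlrm{Enabled: 1, Time: *now}
alarm.Time.Min = (now.Min + 10) % 60 // naive: ignores hour rollover
if err := unix.IoctlSetRTCWkAlrm(fd, &alarm); err != nil {
	log.Fatal(err)
}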

View File

@ -160,78 +160,28 @@ const (
BPF_A = 0x10
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff
BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38
BPF_ALU = 0x4
BPF_ALU64 = 0x7
BPF_AND = 0x50
BPF_ANY = 0x0
BPF_ARSH = 0xc0
BPF_B = 0x10
BPF_BUILD_ID_SIZE = 0x14
BPF_CALL = 0x80
BPF_DEVCG_ACC_MKNOD = 0x1
BPF_DEVCG_ACC_READ = 0x2
BPF_DEVCG_ACC_WRITE = 0x4
BPF_DEVCG_DEV_BLOCK = 0x1
BPF_DEVCG_DEV_CHAR = 0x2
BPF_DIV = 0x30
BPF_DW = 0x18
BPF_END = 0xd0
BPF_EXIST = 0x2
BPF_EXIT = 0x90
BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1
BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4
BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2
BPF_FROM_BE = 0x8
BPF_FROM_LE = 0x0
BPF_FS_MAGIC = 0xcafe4a11
BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2
BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4
BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10
BPF_F_ADJ_ROOM_FIXED_GSO = 0x1
BPF_F_ALLOW_MULTI = 0x2
BPF_F_ALLOW_OVERRIDE = 0x1
BPF_F_ANY_ALIGNMENT = 0x2
BPF_F_CLONE = 0x200
BPF_F_CTXLEN_MASK = 0xfffff00000000
BPF_F_CURRENT_CPU = 0xffffffff
BPF_F_CURRENT_NETNS = -0x1
BPF_F_DONT_FRAGMENT = 0x4
BPF_F_FAST_STACK_CMP = 0x200
BPF_F_HDR_FIELD_MASK = 0xf
BPF_F_INDEX_MASK = 0xffffffff
BPF_F_INGRESS = 0x1
BPF_F_INVALIDATE_HASH = 0x2
BPF_F_LOCK = 0x4
BPF_F_MARK_ENFORCE = 0x40
BPF_F_MARK_MANGLED_0 = 0x20
BPF_F_MMAPABLE = 0x400
BPF_F_NO_COMMON_LRU = 0x2
BPF_F_NO_PREALLOC = 0x1
BPF_F_NUMA_NODE = 0x4
BPF_F_PSEUDO_HDR = 0x10
BPF_F_QUERY_EFFECTIVE = 0x1
BPF_F_RDONLY = 0x8
BPF_F_RDONLY_PROG = 0x80
BPF_F_RECOMPUTE_CSUM = 0x1
BPF_F_REPLACE = 0x4
BPF_F_REUSE_STACKID = 0x400
BPF_F_SEQ_NUMBER = 0x8
BPF_F_SKIP_FIELD_MASK = 0xff
BPF_F_STACK_BUILD_ID = 0x20
BPF_F_STRICT_ALIGNMENT = 0x1
BPF_F_SYSCTL_BASE_NAME = 0x1
BPF_F_TEST_RND_HI32 = 0x4
BPF_F_TEST_STATE_FREQ = 0x8
BPF_F_TUNINFO_IPV6 = 0x1
BPF_F_USER_BUILD_ID = 0x800
BPF_F_USER_STACK = 0x100
BPF_F_WRONLY = 0x10
BPF_F_WRONLY_PROG = 0x100
BPF_F_ZERO_CSUM_TX = 0x2
BPF_F_ZERO_SEED = 0x40
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@ -267,7 +217,6 @@ const (
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
BPF_NOEXIST = 0x1
BPF_OBJ_NAME_LEN = 0x10
BPF_OR = 0x40
BPF_PSEUDO_CALL = 0x1
@ -275,12 +224,6 @@ const (
BPF_PSEUDO_MAP_VALUE = 0x2
BPF_RET = 0x6
BPF_RSH = 0x70
BPF_SK_STORAGE_GET_F_CREATE = 0x1
BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf
BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
BPF_SOCK_OPS_RTT_CB_FLAG = 0x8
BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
@ -378,12 +321,14 @@ const (
CLOCK_TXINT = 0x3
CLONE_ARGS_SIZE_VER0 = 0x40
CLONE_ARGS_SIZE_VER1 = 0x50
CLONE_ARGS_SIZE_VER2 = 0x58
CLONE_CHILD_CLEARTID = 0x200000
CLONE_CHILD_SETTID = 0x1000000
CLONE_CLEAR_SIGHAND = 0x100000000
CLONE_DETACHED = 0x400000
CLONE_FILES = 0x400
CLONE_FS = 0x200
CLONE_INTO_CGROUP = 0x200000000
CLONE_IO = 0x80000000
CLONE_NEWCGROUP = 0x2000000
CLONE_NEWIPC = 0x8000000
@ -598,7 +543,9 @@ const (
FAN_DELETE = 0x200
FAN_DELETE_SELF = 0x400
FAN_DENY = 0x2
FAN_DIR_MODIFY = 0x80000
FAN_ENABLE_AUDIT = 0x40
FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2
FAN_EVENT_INFO_TYPE_FID = 0x1
FAN_EVENT_METADATA_LEN = 0x18
FAN_EVENT_ON_CHILD = 0x8000000
@ -2108,8 +2055,6 @@ const (
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
TCP_BPF_IW = 0x3e9
TCP_BPF_SNDCWND_CLAMP = 0x3ea
TCP_CC_INFO = 0x1a
TCP_CM_INQ = 0x24
TCP_CONGESTION = 0xd
@ -2384,8 +2329,9 @@ const (
XDP_COPY = 0x2
XDP_FLAGS_DRV_MODE = 0x4
XDP_FLAGS_HW_MODE = 0x8
XDP_FLAGS_MASK = 0xf
XDP_FLAGS_MASK = 0x1f
XDP_FLAGS_MODES = 0xe
XDP_FLAGS_REPLACE = 0x10
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1

View File

@ -75,6 +75,7 @@ const (
FP_XSTATE_MAGIC2 = 0x46505845
FS_IOC_ENABLE_VERITY = 0x40806685
FS_IOC_GETFLAGS = 0x80046601
FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613

View File

@ -75,6 +75,7 @@ const (
FP_XSTATE_MAGIC2 = 0x46505845
FS_IOC_ENABLE_VERITY = 0x40806685
FS_IOC_GETFLAGS = 0x80086601
FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613

View File

@ -74,6 +74,7 @@ const (
FLUSHO = 0x1000
FS_IOC_ENABLE_VERITY = 0x40806685
FS_IOC_GETFLAGS = 0x80046601
FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613

View File

@ -77,6 +77,7 @@ const (
FPSIMD_MAGIC = 0x46508001
FS_IOC_ENABLE_VERITY = 0x40806685
FS_IOC_GETFLAGS = 0x80086601
FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613

View File

@ -74,6 +74,7 @@ const (
FLUSHO = 0x2000
FS_IOC_ENABLE_VERITY = 0x80806685
FS_IOC_GETFLAGS = 0x40046601
FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613

View File

@ -74,6 +74,7 @@ const (
FLUSHO = 0x2000
FS_IOC_ENABLE_VERITY = 0x80806685
FS_IOC_GETFLAGS = 0x40086601
FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613

View File

@ -74,6 +74,7 @@ const (
FLUSHO = 0x2000
FS_IOC_ENABLE_VERITY = 0x80806685
FS_IOC_GETFLAGS = 0x40086601
FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613

View File

@ -74,6 +74,7 @@ const (
FLUSHO = 0x2000
FS_IOC_ENABLE_VERITY = 0x80806685
FS_IOC_GETFLAGS = 0x40046601
FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613

View File

@ -74,6 +74,7 @@ const (
FLUSHO = 0x800000
FS_IOC_ENABLE_VERITY = 0x80806685
FS_IOC_GETFLAGS = 0x40086601
FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613

View File

@ -74,6 +74,7 @@ const (
FLUSHO = 0x800000
FS_IOC_ENABLE_VERITY = 0x80806685
FS_IOC_GETFLAGS = 0x40086601
FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613

View File

@ -74,6 +74,7 @@ const (
FLUSHO = 0x1000
FS_IOC_ENABLE_VERITY = 0x40806685
FS_IOC_GETFLAGS = 0x80086601
FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613

View File

@ -74,6 +74,7 @@ const (
FLUSHO = 0x1000
FS_IOC_ENABLE_VERITY = 0x40806685
FS_IOC_GETFLAGS = 0x80086601
FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613

View File

@ -78,6 +78,7 @@ const (
FLUSHO = 0x1000
FS_IOC_ENABLE_VERITY = 0x80806685
FS_IOC_GETFLAGS = 0x40086601
FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613

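The repeated per-architecture hunks above all add FS_IOC_GET_ENCRYPTION_NONCE, but the value is 0x8010661b on asm-generic platforms and 0x4010661b on powerpc, mips, and sparc because those ports place the ioctl direction field at a different bit offset. A small illustrative sketch, not part of the diff, that reproduces both encodings of _IOR('f', 0x1b, 16):

package main

import "fmt"

// iorNumber builds a read-direction ioctl request number with the layout used by
// the Linux headers: dir | size<<16 | type<<8 | nr. Only the direction shift
// differs between the asm-generic and the powerpc/mips/sparc encodings.
func iorNumber(dirRead uint32, dirShift uint, typ, nr, size uint32) uint32 {
	return dirRead<<dirShift | size<<16 | typ<<8 | nr
}

func main() {
	// asm-generic (x86, arm, arm64, riscv, s390x): _IOC_READ = 2, direction at bit 30.
	fmt.Printf("generic:        %#x\n", iorNumber(2, 30, 'f', 0x1b, 16)) // 0x8010661b
	// powerpc/mips/sparc: _IOC_READ = 2, direction at bit 29 (13 size bits).
	fmt.Printf("ppc/mips/sparc: %#x\n", iorNumber(2, 29, 'f', 0x1b, 16)) // 0x4010661b
}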
View File

@ -1871,175 +1871,249 @@ const (
)
const (
BPF_REG_0 = 0x0
BPF_REG_1 = 0x1
BPF_REG_2 = 0x2
BPF_REG_3 = 0x3
BPF_REG_4 = 0x4
BPF_REG_5 = 0x5
BPF_REG_6 = 0x6
BPF_REG_7 = 0x7
BPF_REG_8 = 0x8
BPF_REG_9 = 0x9
BPF_REG_10 = 0xa
BPF_MAP_CREATE = 0x0
BPF_MAP_LOOKUP_ELEM = 0x1
BPF_MAP_UPDATE_ELEM = 0x2
BPF_MAP_DELETE_ELEM = 0x3
BPF_MAP_GET_NEXT_KEY = 0x4
BPF_PROG_LOAD = 0x5
BPF_OBJ_PIN = 0x6
BPF_OBJ_GET = 0x7
BPF_PROG_ATTACH = 0x8
BPF_PROG_DETACH = 0x9
BPF_PROG_TEST_RUN = 0xa
BPF_PROG_GET_NEXT_ID = 0xb
BPF_MAP_GET_NEXT_ID = 0xc
BPF_PROG_GET_FD_BY_ID = 0xd
BPF_MAP_GET_FD_BY_ID = 0xe
BPF_OBJ_GET_INFO_BY_FD = 0xf
BPF_PROG_QUERY = 0x10
BPF_RAW_TRACEPOINT_OPEN = 0x11
BPF_BTF_LOAD = 0x12
BPF_BTF_GET_FD_BY_ID = 0x13
BPF_TASK_FD_QUERY = 0x14
BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
BPF_MAP_FREEZE = 0x16
BPF_BTF_GET_NEXT_ID = 0x17
BPF_MAP_TYPE_UNSPEC = 0x0
BPF_MAP_TYPE_HASH = 0x1
BPF_MAP_TYPE_ARRAY = 0x2
BPF_MAP_TYPE_PROG_ARRAY = 0x3
BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
BPF_MAP_TYPE_PERCPU_HASH = 0x5
BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
BPF_MAP_TYPE_STACK_TRACE = 0x7
BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
BPF_MAP_TYPE_LRU_HASH = 0x9
BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
BPF_MAP_TYPE_LPM_TRIE = 0xb
BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
BPF_MAP_TYPE_DEVMAP = 0xe
BPF_MAP_TYPE_SOCKMAP = 0xf
BPF_MAP_TYPE_CPUMAP = 0x10
BPF_MAP_TYPE_XSKMAP = 0x11
BPF_MAP_TYPE_SOCKHASH = 0x12
BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
BPF_MAP_TYPE_QUEUE = 0x16
BPF_MAP_TYPE_STACK = 0x17
BPF_MAP_TYPE_SK_STORAGE = 0x18
BPF_MAP_TYPE_DEVMAP_HASH = 0x19
BPF_PROG_TYPE_UNSPEC = 0x0
BPF_PROG_TYPE_SOCKET_FILTER = 0x1
BPF_PROG_TYPE_KPROBE = 0x2
BPF_PROG_TYPE_SCHED_CLS = 0x3
BPF_PROG_TYPE_SCHED_ACT = 0x4
BPF_PROG_TYPE_TRACEPOINT = 0x5
BPF_PROG_TYPE_XDP = 0x6
BPF_PROG_TYPE_PERF_EVENT = 0x7
BPF_PROG_TYPE_CGROUP_SKB = 0x8
BPF_PROG_TYPE_CGROUP_SOCK = 0x9
BPF_PROG_TYPE_LWT_IN = 0xa
BPF_PROG_TYPE_LWT_OUT = 0xb
BPF_PROG_TYPE_LWT_XMIT = 0xc
BPF_PROG_TYPE_SOCK_OPS = 0xd
BPF_PROG_TYPE_SK_SKB = 0xe
BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
BPF_PROG_TYPE_SK_MSG = 0x10
BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
BPF_PROG_TYPE_LIRC_MODE2 = 0x14
BPF_PROG_TYPE_SK_REUSEPORT = 0x15
BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
BPF_PROG_TYPE_CGROUP_SYSCTL = 0x17
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 0x18
BPF_PROG_TYPE_CGROUP_SOCKOPT = 0x19
BPF_PROG_TYPE_TRACING = 0x1a
BPF_CGROUP_INET_INGRESS = 0x0
BPF_CGROUP_INET_EGRESS = 0x1
BPF_CGROUP_INET_SOCK_CREATE = 0x2
BPF_CGROUP_SOCK_OPS = 0x3
BPF_SK_SKB_STREAM_PARSER = 0x4
BPF_SK_SKB_STREAM_VERDICT = 0x5
BPF_CGROUP_DEVICE = 0x6
BPF_SK_MSG_VERDICT = 0x7
BPF_CGROUP_INET4_BIND = 0x8
BPF_CGROUP_INET6_BIND = 0x9
BPF_CGROUP_INET4_CONNECT = 0xa
BPF_CGROUP_INET6_CONNECT = 0xb
BPF_CGROUP_INET4_POST_BIND = 0xc
BPF_CGROUP_INET6_POST_BIND = 0xd
BPF_CGROUP_UDP4_SENDMSG = 0xe
BPF_CGROUP_UDP6_SENDMSG = 0xf
BPF_LIRC_MODE2 = 0x10
BPF_FLOW_DISSECTOR = 0x11
BPF_CGROUP_SYSCTL = 0x12
BPF_CGROUP_UDP4_RECVMSG = 0x13
BPF_CGROUP_UDP6_RECVMSG = 0x14
BPF_CGROUP_GETSOCKOPT = 0x15
BPF_CGROUP_SETSOCKOPT = 0x16
BPF_TRACE_RAW_TP = 0x17
BPF_TRACE_FENTRY = 0x18
BPF_TRACE_FEXIT = 0x19
BPF_STACK_BUILD_ID_EMPTY = 0x0
BPF_STACK_BUILD_ID_VALID = 0x1
BPF_STACK_BUILD_ID_IP = 0x2
BPF_ADJ_ROOM_NET = 0x0
BPF_ADJ_ROOM_MAC = 0x1
BPF_HDR_START_MAC = 0x0
BPF_HDR_START_NET = 0x1
BPF_LWT_ENCAP_SEG6 = 0x0
BPF_LWT_ENCAP_SEG6_INLINE = 0x1
BPF_LWT_ENCAP_IP = 0x2
BPF_OK = 0x0
BPF_DROP = 0x2
BPF_REDIRECT = 0x7
BPF_LWT_REROUTE = 0x80
BPF_SOCK_OPS_VOID = 0x0
BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
BPF_SOCK_OPS_RWND_INIT = 0x2
BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
BPF_SOCK_OPS_NEEDS_ECN = 0x6
BPF_SOCK_OPS_BASE_RTT = 0x7
BPF_SOCK_OPS_RTO_CB = 0x8
BPF_SOCK_OPS_RETRANS_CB = 0x9
BPF_SOCK_OPS_STATE_CB = 0xa
BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
BPF_SOCK_OPS_RTT_CB = 0xc
BPF_TCP_ESTABLISHED = 0x1
BPF_TCP_SYN_SENT = 0x2
BPF_TCP_SYN_RECV = 0x3
BPF_TCP_FIN_WAIT1 = 0x4
BPF_TCP_FIN_WAIT2 = 0x5
BPF_TCP_TIME_WAIT = 0x6
BPF_TCP_CLOSE = 0x7
BPF_TCP_CLOSE_WAIT = 0x8
BPF_TCP_LAST_ACK = 0x9
BPF_TCP_LISTEN = 0xa
BPF_TCP_CLOSING = 0xb
BPF_TCP_NEW_SYN_RECV = 0xc
BPF_TCP_MAX_STATES = 0xd
BPF_FIB_LKUP_RET_SUCCESS = 0x0
BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
BPF_FIB_LKUP_RET_PROHIBIT = 0x3
BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
BPF_FD_TYPE_TRACEPOINT = 0x1
BPF_FD_TYPE_KPROBE = 0x2
BPF_FD_TYPE_KRETPROBE = 0x3
BPF_FD_TYPE_UPROBE = 0x4
BPF_FD_TYPE_URETPROBE = 0x5
BPF_REG_0 = 0x0
BPF_REG_1 = 0x1
BPF_REG_2 = 0x2
BPF_REG_3 = 0x3
BPF_REG_4 = 0x4
BPF_REG_5 = 0x5
BPF_REG_6 = 0x6
BPF_REG_7 = 0x7
BPF_REG_8 = 0x8
BPF_REG_9 = 0x9
BPF_REG_10 = 0xa
BPF_MAP_CREATE = 0x0
BPF_MAP_LOOKUP_ELEM = 0x1
BPF_MAP_UPDATE_ELEM = 0x2
BPF_MAP_DELETE_ELEM = 0x3
BPF_MAP_GET_NEXT_KEY = 0x4
BPF_PROG_LOAD = 0x5
BPF_OBJ_PIN = 0x6
BPF_OBJ_GET = 0x7
BPF_PROG_ATTACH = 0x8
BPF_PROG_DETACH = 0x9
BPF_PROG_TEST_RUN = 0xa
BPF_PROG_GET_NEXT_ID = 0xb
BPF_MAP_GET_NEXT_ID = 0xc
BPF_PROG_GET_FD_BY_ID = 0xd
BPF_MAP_GET_FD_BY_ID = 0xe
BPF_OBJ_GET_INFO_BY_FD = 0xf
BPF_PROG_QUERY = 0x10
BPF_RAW_TRACEPOINT_OPEN = 0x11
BPF_BTF_LOAD = 0x12
BPF_BTF_GET_FD_BY_ID = 0x13
BPF_TASK_FD_QUERY = 0x14
BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
BPF_MAP_FREEZE = 0x16
BPF_BTF_GET_NEXT_ID = 0x17
BPF_MAP_LOOKUP_BATCH = 0x18
BPF_MAP_LOOKUP_AND_DELETE_BATCH = 0x19
BPF_MAP_UPDATE_BATCH = 0x1a
BPF_MAP_DELETE_BATCH = 0x1b
BPF_LINK_CREATE = 0x1c
BPF_LINK_UPDATE = 0x1d
BPF_MAP_TYPE_UNSPEC = 0x0
BPF_MAP_TYPE_HASH = 0x1
BPF_MAP_TYPE_ARRAY = 0x2
BPF_MAP_TYPE_PROG_ARRAY = 0x3
BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
BPF_MAP_TYPE_PERCPU_HASH = 0x5
BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
BPF_MAP_TYPE_STACK_TRACE = 0x7
BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
BPF_MAP_TYPE_LRU_HASH = 0x9
BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
BPF_MAP_TYPE_LPM_TRIE = 0xb
BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
BPF_MAP_TYPE_DEVMAP = 0xe
BPF_MAP_TYPE_SOCKMAP = 0xf
BPF_MAP_TYPE_CPUMAP = 0x10
BPF_MAP_TYPE_XSKMAP = 0x11
BPF_MAP_TYPE_SOCKHASH = 0x12
BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
BPF_MAP_TYPE_QUEUE = 0x16
BPF_MAP_TYPE_STACK = 0x17
BPF_MAP_TYPE_SK_STORAGE = 0x18
BPF_MAP_TYPE_DEVMAP_HASH = 0x19
BPF_MAP_TYPE_STRUCT_OPS = 0x1a
BPF_PROG_TYPE_UNSPEC = 0x0
BPF_PROG_TYPE_SOCKET_FILTER = 0x1
BPF_PROG_TYPE_KPROBE = 0x2
BPF_PROG_TYPE_SCHED_CLS = 0x3
BPF_PROG_TYPE_SCHED_ACT = 0x4
BPF_PROG_TYPE_TRACEPOINT = 0x5
BPF_PROG_TYPE_XDP = 0x6
BPF_PROG_TYPE_PERF_EVENT = 0x7
BPF_PROG_TYPE_CGROUP_SKB = 0x8
BPF_PROG_TYPE_CGROUP_SOCK = 0x9
BPF_PROG_TYPE_LWT_IN = 0xa
BPF_PROG_TYPE_LWT_OUT = 0xb
BPF_PROG_TYPE_LWT_XMIT = 0xc
BPF_PROG_TYPE_SOCK_OPS = 0xd
BPF_PROG_TYPE_SK_SKB = 0xe
BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
BPF_PROG_TYPE_SK_MSG = 0x10
BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
BPF_PROG_TYPE_LIRC_MODE2 = 0x14
BPF_PROG_TYPE_SK_REUSEPORT = 0x15
BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
BPF_PROG_TYPE_CGROUP_SYSCTL = 0x17
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 0x18
BPF_PROG_TYPE_CGROUP_SOCKOPT = 0x19
BPF_PROG_TYPE_TRACING = 0x1a
BPF_PROG_TYPE_STRUCT_OPS = 0x1b
BPF_PROG_TYPE_EXT = 0x1c
BPF_PROG_TYPE_LSM = 0x1d
BPF_CGROUP_INET_INGRESS = 0x0
BPF_CGROUP_INET_EGRESS = 0x1
BPF_CGROUP_INET_SOCK_CREATE = 0x2
BPF_CGROUP_SOCK_OPS = 0x3
BPF_SK_SKB_STREAM_PARSER = 0x4
BPF_SK_SKB_STREAM_VERDICT = 0x5
BPF_CGROUP_DEVICE = 0x6
BPF_SK_MSG_VERDICT = 0x7
BPF_CGROUP_INET4_BIND = 0x8
BPF_CGROUP_INET6_BIND = 0x9
BPF_CGROUP_INET4_CONNECT = 0xa
BPF_CGROUP_INET6_CONNECT = 0xb
BPF_CGROUP_INET4_POST_BIND = 0xc
BPF_CGROUP_INET6_POST_BIND = 0xd
BPF_CGROUP_UDP4_SENDMSG = 0xe
BPF_CGROUP_UDP6_SENDMSG = 0xf
BPF_LIRC_MODE2 = 0x10
BPF_FLOW_DISSECTOR = 0x11
BPF_CGROUP_SYSCTL = 0x12
BPF_CGROUP_UDP4_RECVMSG = 0x13
BPF_CGROUP_UDP6_RECVMSG = 0x14
BPF_CGROUP_GETSOCKOPT = 0x15
BPF_CGROUP_SETSOCKOPT = 0x16
BPF_TRACE_RAW_TP = 0x17
BPF_TRACE_FENTRY = 0x18
BPF_TRACE_FEXIT = 0x19
BPF_MODIFY_RETURN = 0x1a
BPF_LSM_MAC = 0x1b
BPF_ANY = 0x0
BPF_NOEXIST = 0x1
BPF_EXIST = 0x2
BPF_F_LOCK = 0x4
BPF_F_NO_PREALLOC = 0x1
BPF_F_NO_COMMON_LRU = 0x2
BPF_F_NUMA_NODE = 0x4
BPF_F_RDONLY = 0x8
BPF_F_WRONLY = 0x10
BPF_F_STACK_BUILD_ID = 0x20
BPF_F_ZERO_SEED = 0x40
BPF_F_RDONLY_PROG = 0x80
BPF_F_WRONLY_PROG = 0x100
BPF_F_CLONE = 0x200
BPF_F_MMAPABLE = 0x400
BPF_STACK_BUILD_ID_EMPTY = 0x0
BPF_STACK_BUILD_ID_VALID = 0x1
BPF_STACK_BUILD_ID_IP = 0x2
BPF_F_RECOMPUTE_CSUM = 0x1
BPF_F_INVALIDATE_HASH = 0x2
BPF_F_HDR_FIELD_MASK = 0xf
BPF_F_PSEUDO_HDR = 0x10
BPF_F_MARK_MANGLED_0 = 0x20
BPF_F_MARK_ENFORCE = 0x40
BPF_F_INGRESS = 0x1
BPF_F_TUNINFO_IPV6 = 0x1
BPF_F_SKIP_FIELD_MASK = 0xff
BPF_F_USER_STACK = 0x100
BPF_F_FAST_STACK_CMP = 0x200
BPF_F_REUSE_STACKID = 0x400
BPF_F_USER_BUILD_ID = 0x800
BPF_F_ZERO_CSUM_TX = 0x2
BPF_F_DONT_FRAGMENT = 0x4
BPF_F_SEQ_NUMBER = 0x8
BPF_F_INDEX_MASK = 0xffffffff
BPF_F_CURRENT_CPU = 0xffffffff
BPF_F_CTXLEN_MASK = 0xfffff00000000
BPF_F_CURRENT_NETNS = -0x1
BPF_F_ADJ_ROOM_FIXED_GSO = 0x1
BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2
BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4
BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10
BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff
BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38
BPF_F_SYSCTL_BASE_NAME = 0x1
BPF_SK_STORAGE_GET_F_CREATE = 0x1
BPF_F_GET_BRANCH_RECORDS_SIZE = 0x1
BPF_ADJ_ROOM_NET = 0x0
BPF_ADJ_ROOM_MAC = 0x1
BPF_HDR_START_MAC = 0x0
BPF_HDR_START_NET = 0x1
BPF_LWT_ENCAP_SEG6 = 0x0
BPF_LWT_ENCAP_SEG6_INLINE = 0x1
BPF_LWT_ENCAP_IP = 0x2
BPF_OK = 0x0
BPF_DROP = 0x2
BPF_REDIRECT = 0x7
BPF_LWT_REROUTE = 0x80
BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_SOCK_OPS_RTT_CB_FLAG = 0x8
BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf
BPF_SOCK_OPS_VOID = 0x0
BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
BPF_SOCK_OPS_RWND_INIT = 0x2
BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
BPF_SOCK_OPS_NEEDS_ECN = 0x6
BPF_SOCK_OPS_BASE_RTT = 0x7
BPF_SOCK_OPS_RTO_CB = 0x8
BPF_SOCK_OPS_RETRANS_CB = 0x9
BPF_SOCK_OPS_STATE_CB = 0xa
BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
BPF_SOCK_OPS_RTT_CB = 0xc
BPF_TCP_ESTABLISHED = 0x1
BPF_TCP_SYN_SENT = 0x2
BPF_TCP_SYN_RECV = 0x3
BPF_TCP_FIN_WAIT1 = 0x4
BPF_TCP_FIN_WAIT2 = 0x5
BPF_TCP_TIME_WAIT = 0x6
BPF_TCP_CLOSE = 0x7
BPF_TCP_CLOSE_WAIT = 0x8
BPF_TCP_LAST_ACK = 0x9
BPF_TCP_LISTEN = 0xa
BPF_TCP_CLOSING = 0xb
BPF_TCP_NEW_SYN_RECV = 0xc
BPF_TCP_MAX_STATES = 0xd
TCP_BPF_IW = 0x3e9
TCP_BPF_SNDCWND_CLAMP = 0x3ea
BPF_DEVCG_ACC_MKNOD = 0x1
BPF_DEVCG_ACC_READ = 0x2
BPF_DEVCG_ACC_WRITE = 0x4
BPF_DEVCG_DEV_BLOCK = 0x1
BPF_DEVCG_DEV_CHAR = 0x2
BPF_FIB_LOOKUP_DIRECT = 0x1
BPF_FIB_LOOKUP_OUTPUT = 0x2
BPF_FIB_LKUP_RET_SUCCESS = 0x0
BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
BPF_FIB_LKUP_RET_PROHIBIT = 0x3
BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
BPF_FD_TYPE_TRACEPOINT = 0x1
BPF_FD_TYPE_KPROBE = 0x2
BPF_FD_TYPE_KRETPROBE = 0x3
BPF_FD_TYPE_UPROBE = 0x4
BPF_FD_TYPE_URETPROBE = 0x5
BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1
BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2
BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4
)
const (
@ -2205,7 +2279,7 @@ const (
DEVLINK_CMD_DPIPE_ENTRIES_GET = 0x20
DEVLINK_CMD_DPIPE_HEADERS_GET = 0x21
DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET = 0x22
DEVLINK_CMD_MAX = 0x44
DEVLINK_CMD_MAX = 0x48
DEVLINK_PORT_TYPE_NOTSET = 0x0
DEVLINK_PORT_TYPE_AUTO = 0x1
DEVLINK_PORT_TYPE_ETH = 0x2
@ -2285,7 +2359,7 @@ const (
DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 0x3c
DEVLINK_ATTR_PAD = 0x3d
DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 0x3e
DEVLINK_ATTR_MAX = 0x8c
DEVLINK_ATTR_MAX = 0x90
DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0
DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1
DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0

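The large BPF hunk above is a regeneration that folds the UAPI constants into one block and picks up the 5.6-era additions (batch map commands, BPF_LINK_CREATE/BPF_LINK_UPDATE, the STRUCT_OPS/EXT/LSM program types, and the BPF_F_* flag values). x/sys still exposes no high-level wrapper for these, so using the command numbers means calling bpf(2) directly; the following is only a sketch under that assumption (the attribute layout mirrors the kernel's union bpf_attr for the *_GET_NEXT_ID commands, and the call needs sufficient privileges):

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

// progGetNextID issues bpf(BPF_PROG_GET_NEXT_ID) to walk the IDs of loaded BPF
// programs. The attribute struct mirrors the start_id/next_id fields of bpf_attr.
func progGetNextID(start uint32) (uint32, error) {
	attr := struct{ startID, nextID uint32 }{startID: start}
	_, _, errno := unix.Syscall(unix.SYS_BPF, unix.BPF_PROG_GET_NEXT_ID,
		uintptr(unsafe.Pointer(&attr)), unsafe.Sizeof(attr))
	if errno != 0 {
		return 0, errno
	}
	return attr.nextID, nil
}

func main() {
	for id := uint32(0); ; {
		next, err := progGetNextID(id)
		if err != nil { // unix.ENOENT signals the end of the ID list
			break
		}
		fmt.Println("loaded BPF program id:", next)
		id = next
	}
}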
View File

@ -23,4 +23,9 @@ const (
PAGE_EXECUTE_READ = 0x20
PAGE_EXECUTE_READWRITE = 0x40
PAGE_EXECUTE_WRITECOPY = 0x80
QUOTA_LIMITS_HARDWS_MIN_DISABLE = 0x00000002
QUOTA_LIMITS_HARDWS_MIN_ENABLE = 0x00000001
QUOTA_LIMITS_HARDWS_MAX_DISABLE = 0x00000008
QUOTA_LIMITS_HARDWS_MAX_ENABLE = 0x00000004
)

View File

@ -308,6 +308,8 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys GetProcessId(process Handle) (id uint32, err error)
//sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error)
//sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost
//sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32)
//sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error)
// Volume Management Functions
//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW

View File

@ -217,6 +217,8 @@ var (
procGetProcessId = modkernel32.NewProc("GetProcessId")
procOpenThread = modkernel32.NewProc("OpenThread")
procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost")
procGetProcessWorkingSetSizeEx = modkernel32.NewProc("GetProcessWorkingSetSizeEx")
procSetProcessWorkingSetSizeEx = modkernel32.NewProc("SetProcessWorkingSetSizeEx")
procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW")
procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW")
procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW")
@ -2414,6 +2416,23 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) {
return
}
func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) {
syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0)
return
}
func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) {
r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
if r1 == 0 {

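Taken together, the Windows hunks above add the QUOTA_LIMITS_HARDWS_* flag constants, the //sys declarations, and the generated kernel32 wrappers for GetProcessWorkingSetSizeEx and SetProcessWorkingSetSizeEx. A hedged usage sketch against the golang.org/x/sys/windows package these wrappers live in (the returned values and accepted limits depend on the process and on local policy):

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	var min, max uintptr
	var flags uint32

	// Read the current working-set bounds and quota flags of this process.
	windows.GetProcessWorkingSetSizeEx(windows.CurrentProcess(), &min, &max, &flags)
	fmt.Printf("working set: min=%d max=%d flags=%#x\n", min, max, flags)

	// Grow the upper bound while keeping both limits soft
	// (QUOTA_LIMITS_HARDWS_MIN_DISABLE | QUOTA_LIMITS_HARDWS_MAX_DISABLE).
	err := windows.SetProcessWorkingSetSizeEx(windows.CurrentProcess(),
		min, max*2,
		windows.QUOTA_LIMITS_HARDWS_MIN_DISABLE|windows.QUOTA_LIMITS_HARDWS_MAX_DISABLE)
	if err != nil {
		fmt.Println("SetProcessWorkingSetSizeEx:", err)
	}
}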
View File

@ -54,6 +54,13 @@ type UnmarshalOptions struct {
// Unmarshal reads the given []byte and populates the given proto.Message using options in
// UnmarshalOptions object.
func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
return o.unmarshal(b, m)
}
// unmarshal is a centralized function that all unmarshal operations go through.
// For profiling purposes, avoid changing the name of this function or
// introducing other code paths for unmarshal that do not go through this.
func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error {
proto.Reset(m)
if o.Resolver == nil {

View File

@ -102,6 +102,13 @@ func (o MarshalOptions) Format(m proto.Message) string {
// MarshalOptions object. Do not depend on the output being stable. It may
// change over time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
return o.marshal(m)
}
// marshal is a centralized function that all marshal operations go through.
// For profiling purposes, avoid changing the name of this function or
// introducing other code paths for marshal that do not go through this.
func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
var delims = [2]byte{'{', '}'}
if o.Multiline && o.Indent == "" {

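The two encoding hunks above only introduce unexported marshal/unmarshal helpers so that profiles show a single entry point; the exported API is unchanged. As a usage sketch (prototext chosen purely for illustration, with a well-known message type standing in for any generated message), Multiline output falls back to the default indent exactly as the guarded branch above shows:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	msg := &durationpb.Duration{Seconds: 90}

	// Multiline output with no explicit Indent uses the codec's default indentation.
	out, err := prototext.MarshalOptions{Multiline: true}.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))

	var round durationpb.Duration
	if err := prototext.Unmarshal(out, &round); err != nil {
		panic(err)
	}
	fmt.Println("round-tripped seconds:", round.Seconds)
}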
View File

@ -52,7 +52,7 @@ import (
// 10. Send out the CL for review and submit it.
const (
Major = 1
Minor = 23
Minor = 24
Patch = 0
PreRelease = ""
)

View File

@ -63,12 +63,15 @@ func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoifa
return o.unmarshal(in.Buf, in.Message)
}
// unmarshal is a centralized function that all unmarshal operations go through.
// For profiling purposes, avoid changing the name of this function or
// introducing other code paths for unmarshal that do not go through this.
func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out protoiface.UnmarshalOutput, err error) {
if o.Resolver == nil {
o.Resolver = protoregistry.GlobalTypes
}
if !o.Merge {
Reset(m.Interface()) // TODO
Reset(m.Interface())
}
allowPartial := o.AllowPartial
o.Merge = true
@ -105,7 +108,7 @@ func (o UnmarshalOptions) unmarshalMessage(b []byte, m protoreflect.Message) err
func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) error {
md := m.Descriptor()
if messageset.IsMessageSet(md) {
return unmarshalMessageSet(b, m, o)
return o.unmarshalMessageSet(b, m)
}
fields := md.Fields()
for len(b) > 0 {

View File

@ -134,6 +134,9 @@ func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.Mar
return o.marshal(in.Buf, in.Message)
}
// marshal is a centralized function that all marshal operations go through.
// For profiling purposes, avoid changing the name of this function or
// introducing other code paths for marshal that do not go through this.
func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoiface.MarshalOutput, err error) {
allowPartial := o.AllowPartial
o.AllowPartial = true
@ -206,7 +209,7 @@ func growcap(oldcap, wantcap int) (newcap int) {
func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([]byte, error) {
if messageset.IsMessageSet(m.Descriptor()) {
return marshalMessageSet(b, m, o)
return o.marshalMessageSet(b, m)
}
// There are many choices for what order we visit fields in. The default one here
// is chosen for reasonable efficiency and simplicity given the protoreflect API.

View File

@ -13,24 +13,24 @@ import (
"google.golang.org/protobuf/reflect/protoregistry"
)
func sizeMessageSet(m protoreflect.Message) (size int) {
func (o MarshalOptions) sizeMessageSet(m protoreflect.Message) (size int) {
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
size += messageset.SizeField(fd.Number())
size += protowire.SizeTag(messageset.FieldMessage)
size += protowire.SizeBytes(sizeMessage(v.Message()))
size += protowire.SizeBytes(o.size(v.Message()))
return true
})
size += messageset.SizeUnknown(m.GetUnknown())
return size
}
func marshalMessageSet(b []byte, m protoreflect.Message, o MarshalOptions) ([]byte, error) {
func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]byte, error) {
if !flags.ProtoLegacy {
return b, errors.New("no support for message_set_wire_format")
}
var err error
o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
b, err = marshalMessageSetField(b, fd, v, o)
b, err = o.marshalMessageSetField(b, fd, v)
return err == nil
})
if err != nil {
@ -39,7 +39,7 @@ func marshalMessageSet(b []byte, m protoreflect.Message, o MarshalOptions) ([]by
return messageset.AppendUnknown(b, m.GetUnknown())
}
func marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value, o MarshalOptions) ([]byte, error) {
func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) {
b = messageset.AppendFieldStart(b, fd.Number())
b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType)
b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface())))
@ -51,12 +51,12 @@ func marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value pro
return b, nil
}
func unmarshalMessageSet(b []byte, m protoreflect.Message, o UnmarshalOptions) error {
func (o UnmarshalOptions) unmarshalMessageSet(b []byte, m protoreflect.Message) error {
if !flags.ProtoLegacy {
return errors.New("no support for message_set_wire_format")
}
return messageset.Unmarshal(b, false, func(num protowire.Number, v []byte) error {
err := unmarshalMessageSetField(m, num, v, o)
err := o.unmarshalMessageSetField(m, num, v)
if err == errUnknown {
unknown := m.GetUnknown()
unknown = protowire.AppendTag(unknown, num, protowire.BytesType)
@ -68,7 +68,7 @@ func unmarshalMessageSet(b []byte, m protoreflect.Message, o UnmarshalOptions) e
})
}
func unmarshalMessageSetField(m protoreflect.Message, num protowire.Number, v []byte, o UnmarshalOptions) error {
func (o UnmarshalOptions) unmarshalMessageSetField(m protoreflect.Message, num protowire.Number, v []byte) error {
md := m.Descriptor()
if !md.ExtensionRanges().Has(num) {
return errUnknown

View File

@ -23,10 +23,13 @@ func (o MarshalOptions) Size(m Message) int {
return 0
}
return sizeMessage(m.ProtoReflect())
return o.size(m.ProtoReflect())
}
func sizeMessage(m protoreflect.Message) (size int) {
// size is a centralized function that all size operations go through.
// For profiling purposes, avoid changing the name of this function or
// introducing other code paths for size that do not go through this.
func (o MarshalOptions) size(m protoreflect.Message) (size int) {
methods := protoMethods(m)
if methods != nil && methods.Size != nil {
out := methods.Size(protoiface.SizeInput{
@ -42,52 +45,52 @@ func sizeMessage(m protoreflect.Message) (size int) {
})
return len(out.Buf)
}
return sizeMessageSlow(m)
return o.sizeMessageSlow(m)
}
func sizeMessageSlow(m protoreflect.Message) (size int) {
func (o MarshalOptions) sizeMessageSlow(m protoreflect.Message) (size int) {
if messageset.IsMessageSet(m.Descriptor()) {
return sizeMessageSet(m)
return o.sizeMessageSet(m)
}
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
size += sizeField(fd, v)
size += o.sizeField(fd, v)
return true
})
size += len(m.GetUnknown())
return size
}
func sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) {
func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) {
num := fd.Number()
switch {
case fd.IsList():
return sizeList(num, fd, value.List())
return o.sizeList(num, fd, value.List())
case fd.IsMap():
return sizeMap(num, fd, value.Map())
return o.sizeMap(num, fd, value.Map())
default:
return protowire.SizeTag(num) + sizeSingular(num, fd.Kind(), value)
return protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), value)
}
}
func sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) {
func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) {
if fd.IsPacked() && list.Len() > 0 {
content := 0
for i, llen := 0, list.Len(); i < llen; i++ {
content += sizeSingular(num, fd.Kind(), list.Get(i))
content += o.sizeSingular(num, fd.Kind(), list.Get(i))
}
return protowire.SizeTag(num) + protowire.SizeBytes(content)
}
for i, llen := 0, list.Len(); i < llen; i++ {
size += protowire.SizeTag(num) + sizeSingular(num, fd.Kind(), list.Get(i))
size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i))
}
return size
}
func sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) {
func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) {
mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool {
size += protowire.SizeTag(num)
size += protowire.SizeBytes(sizeField(fd.MapKey(), key.Value()) + sizeField(fd.MapValue(), value))
size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value))
return true
})
return size

View File

@ -11,7 +11,7 @@ import (
"google.golang.org/protobuf/reflect/protoreflect"
)
func sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int {
func (o MarshalOptions) sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int {
switch kind {
case protoreflect.BoolKind:
return protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
@ -46,9 +46,9 @@ func sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.V
case protoreflect.BytesKind:
return protowire.SizeBytes(len(v.Bytes()))
case protoreflect.MessageKind:
return protowire.SizeBytes(sizeMessage(v.Message()))
return protowire.SizeBytes(o.size(v.Message()))
case protoreflect.GroupKind:
return protowire.SizeGroup(num, sizeMessage(v.Message()))
return protowire.SizeGroup(num, o.size(v.Message()))
default:
return 0
}

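The proto wire-codec hunks above hang size computation off MarshalOptions so that marshal, unmarshal, and size all funnel through per-options methods; the change is internal and the exported helpers behave as before. A small sketch using a well-known message type:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	msg := &durationpb.Duration{Seconds: 90, Nanos: 500000000}

	// proto.Size and (MarshalOptions).Marshal both route through the
	// options-scoped size/marshal paths shown above.
	fmt.Println("encoded size:", proto.Size(msg))

	out, err := proto.MarshalOptions{Deterministic: true}.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("wire bytes (%d): %x\n", len(out), out)
}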
View File

@ -85,6 +85,8 @@ func ValueOf(v interface{}) Value {
return ValueOfEnum(v)
case Message, List, Map:
return valueOfIface(v)
case ProtoMessage:
panic(fmt.Sprintf("invalid proto.Message(%T) type, expected a protoreflect.Message type", v))
default:
panic(fmt.Sprintf("invalid type: %T", v))
}

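The ValueOf hunk above turns a common mistake, passing a concrete proto.Message where a protoreflect value is expected, into a panic with a pointed message instead of the generic "invalid type" one. The caller-side fix is to wrap the message's reflective view; a brief sketch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	msg := &durationpb.Duration{Seconds: 1}

	// protoreflect.ValueOf(msg) would now panic: msg is a proto.Message,
	// not a protoreflect.Message. Wrap the reflective view instead.
	v := protoreflect.ValueOfMessage(msg.ProtoReflect())
	fmt.Println(v.Message().Descriptor().FullName()) // google.protobuf.Duration
}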
View File

@ -96,6 +96,38 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error {
}
path := file.Path()
if prev := r.filesByPath[path]; prev != nil {
// TODO: Remove this after some soak-in period after moving these types.
var prevPath string
const prevModule = "google.golang.org/genproto"
const prevVersion = "cb27e3aa (May 26th, 2020)"
switch path {
case "google/protobuf/field_mask.proto":
prevPath = prevModule + "/protobuf/field_mask"
case "google/protobuf/api.proto":
prevPath = prevModule + "/protobuf/api"
case "google/protobuf/type.proto":
prevPath = prevModule + "/protobuf/ptype"
case "google/protobuf/source_context.proto":
prevPath = prevModule + "/protobuf/source_context"
}
if r == GlobalFiles && prevPath != "" {
pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto")
pkgName = strings.Replace(pkgName, "_", "", -1) + "pb"
currPath := "google.golang.org/protobuf/types/known/" + pkgName
panic(fmt.Sprintf(""+
"duplicate registration of %q\n"+
"\n"+
"The generated definition for this file has moved:\n"+
"\tfrom: %q\n"+
"\tto: %q\n"+
"A dependency on the %q module must\n"+
"be at version %v or higher.\n"+
"\n"+
"Upgrade the dependency by running:\n"+
"\tgo get -u %v\n",
path, prevPath, currPath, prevModule, prevVersion, prevPath))
}
err := errors.New("file %q is already registered", file.Path())
err = amendErrorWithCaller(err, prev, file)
if r == GlobalFiles && ignoreConflict(file, err) {

17 vendor/gopkg.in/ini.v1/struct.go generated vendored
View File

@ -278,7 +278,9 @@ func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bo
return rawName, omitEmpty, allowShadow, allowNonUnique
}
func (s *Section) mapToField(val reflect.Value, isStrict bool) error {
// mapToField maps the given value to the matching field of the given section.
// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added.
func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
@ -307,13 +309,16 @@ func (s *Section) mapToField(val reflect.Value, isStrict bool) error {
}
if isAnonymous || isStruct || isStructPtr {
if sec, err := s.f.GetSection(fieldName); err == nil {
if secs, err := s.f.SectionsByName(fieldName); err == nil {
if len(secs) <= sectionIndex {
return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName)
}
// Only set the field to non-nil struct value if we have a section for it.
// Otherwise, we end up with a non-nil struct ptr even though there is no data.
if isStructPtr && field.IsNil() {
field.Set(reflect.New(tpField.Type.Elem()))
}
if err = sec.mapToField(field, isStrict); err != nil {
if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex); err != nil {
return fmt.Errorf("map to field %q: %v", fieldName, err)
}
continue
@ -350,9 +355,9 @@ func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (
}
typ := val.Type().Elem()
for _, sec := range secs {
for i, sec := range secs {
elem := reflect.New(typ)
if err = sec.mapToField(elem, isStrict); err != nil {
if err = sec.mapToField(elem, isStrict, i); err != nil {
return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err)
}
@ -382,7 +387,7 @@ func (s *Section) mapTo(v interface{}, isStrict bool) error {
return nil
}
return s.mapToField(val, isStrict)
return s.mapToField(val, isStrict, 0)
}
// MapTo maps section to given struct.

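The ini.v1 hunk above threads a section index through mapToField so that repeated (non-unique) sections of the same name map onto successive elements of a slice field instead of all reading the first section. A hedged sketch of the feature this serves; the nonunique tag option and the AllowNonUniqueSections load option are assumptions based on the parseTagOptions/SectionsByName calls above and on go-ini's documented non-unique-section support:

package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

// Peer mirrors one repeated [peer] section.
type Peer struct {
	Endpoint string
}

type Config struct {
	// Each [peer] section maps to one element of the slice.
	Peer []Peer `ini:"peer,nonunique"`
}

func main() {
	src := []byte("[peer]\nEndpoint = 10.0.0.1\n\n[peer]\nEndpoint = 10.0.0.2\n")

	f, err := ini.LoadSources(ini.LoadOptions{AllowNonUniqueSections: true}, src)
	if err != nil {
		panic(err)
	}

	var cfg Config
	if err := f.MapTo(&cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Peer) // [{10.0.0.1} {10.0.0.2}]
}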
Some files were not shown because too many files have changed in this diff.