Persist, renew and delete sessions, refactor storage package, move reusable packages to pkg

Ken-Håvard Lieng 2018-05-31 23:24:59 +02:00
parent 121582f72a
commit 24f9553aa5
48 changed files with 1872 additions and 1171 deletions

View file

@@ -202,6 +202,7 @@ func (s *Scorch) introduceSegment(next *segmentIntroduction) error {
s.nextSnapshotEpoch++
rootPrev := s.root
s.root = newSnapshot
atomic.StoreUint64(&s.stats.CurRootEpoch, s.root.epoch)
// release lock
s.rootLock.Unlock()
@@ -265,6 +266,7 @@ func (s *Scorch) introducePersist(persist *persistIntroduction) {
s.rootLock.Lock()
rootPrev := s.root
s.root = newIndexSnapshot
atomic.StoreUint64(&s.stats.CurRootEpoch, s.root.epoch)
s.rootLock.Unlock()
if rootPrev != nil {
@@ -369,6 +371,7 @@ func (s *Scorch) introduceMerge(nextMerge *segmentMerge) {
s.nextSnapshotEpoch++
rootPrev := s.root
s.root = newSnapshot
atomic.StoreUint64(&s.stats.CurRootEpoch, s.root.epoch)
// release lock
s.rootLock.Unlock()
@@ -430,6 +433,8 @@ func (s *Scorch) revertToSnapshot(revertTo *snapshotReversion) error {
// swap in new snapshot
rootPrev := s.root
s.root = newSnapshot
atomic.StoreUint64(&s.stats.CurRootEpoch, s.root.epoch)
// release lock
s.rootLock.Unlock()
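
All four hunks above add the same line: while rootLock is still held, the epoch of the freshly installed root snapshot is published into the stats struct with an atomic store. That way stats consumers can read CurRootEpoch via atomic.LoadUint64 without ever taking rootLock. A minimal, self-contained sketch of the pattern (engine, snapshot and swapRoot are illustrative stand-ins, not bleve API):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// snapshot stands in for scorch's IndexSnapshot.
type snapshot struct{ epoch uint64 }

// engine stands in for Scorch; curRootEpoch mirrors Stats.CurRootEpoch.
type engine struct {
	rootLock     sync.RWMutex
	root         *snapshot
	curRootEpoch uint64
}

// swapRoot installs a new root under the lock and publishes its epoch
// atomically, so monitoring code never needs rootLock to observe it.
func (e *engine) swapRoot(next *snapshot) {
	e.rootLock.Lock()
	e.root = next
	atomic.StoreUint64(&e.curRootEpoch, next.epoch)
	e.rootLock.Unlock()
}

func main() {
	e := &engine{}
	e.swapRoot(&snapshot{epoch: 42})
	fmt.Println(atomic.LoadUint64(&e.curRootEpoch)) // 42, read lock-free
}
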

View file

@@ -72,6 +72,8 @@ OUTER:
}
lastEpochMergePlanned = ourSnapshot.epoch
atomic.StoreUint64(&s.stats.LastMergedEpoch, ourSnapshot.epoch)
s.fireEvent(EventKindMergerProgress, time.Since(startTime))
}
_ = ourSnapshot.DecRef()

View file

@@ -109,6 +109,8 @@ OUTER:
continue OUTER
}
atomic.StoreUint64(&s.stats.LastPersistedEpoch, ourSnapshot.epoch)
lastPersistedEpoch = ourSnapshot.epoch
for _, ew := range persistWatchers {
close(ew.notifyCh)
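
LastPersistedEpoch is recorded just before the persister closes each watcher's notifyCh. Closing a channel is Go's one-to-many broadcast: every goroutine blocked on that watcher wakes at once. A small sketch of the wake-up mechanism (the epochWatcher fields follow the diff; everything else is illustrative):

package main

import "fmt"

// epochWatcher mirrors the persister's watcher: a caller waiting for an
// epoch to be persisted blocks on notifyCh until the persister closes it.
type epochWatcher struct {
	epoch    uint64
	notifyCh chan struct{}
}

func main() {
	w := &epochWatcher{epoch: 7, notifyCh: make(chan struct{})}
	done := make(chan struct{})
	go func() {
		<-w.notifyCh // block until epoch 7 is persisted
		close(done)
	}()
	close(w.notifyCh) // persister reached the epoch: broadcast to waiters
	<-done
	fmt.Println("watcher released")
}
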

View file

@@ -424,7 +424,9 @@ func (s *Scorch) Reader() (index.IndexReader, error) {
func (s *Scorch) currentSnapshot() *IndexSnapshot {
s.rootLock.RLock()
rv := s.root
rv.AddRef()
if rv != nil {
rv.AddRef()
}
s.rootLock.RUnlock()
return rv
}
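
The nil guard around AddRef matters because s.root is only set once the first snapshot is introduced; before that, currentSnapshot would have dereferenced nil. A stand-in sketch of the fixed shape (refSnapshot and idx are illustrative types):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// refSnapshot stands in for the reference-counted IndexSnapshot.
type refSnapshot struct{ refs int64 }

func (s *refSnapshot) AddRef() { atomic.AddInt64(&s.refs, 1) }

type idx struct {
	rootLock sync.RWMutex
	root     *refSnapshot
}

// currentSnapshot mirrors the fix above: AddRef only when a root exists,
// so callers get nil rather than a panic while the index is still opening.
func (i *idx) currentSnapshot() *refSnapshot {
	i.rootLock.RLock()
	rv := i.root
	if rv != nil {
		rv.AddRef()
	}
	i.rootLock.RUnlock()
	return rv
}

func main() {
	i := &idx{}
	fmt.Println(i.currentSnapshot() == nil) // true, and no nil dereference
}
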
@@ -508,14 +510,18 @@ func (s *Scorch) AddEligibleForRemoval(epoch uint64) {
s.rootLock.Unlock()
}
func (s *Scorch) MemoryUsed() uint64 {
func (s *Scorch) MemoryUsed() (memUsed uint64) {
indexSnapshot := s.currentSnapshot()
if indexSnapshot == nil {
return
}
defer func() {
_ = indexSnapshot.Close()
}()
// Account for current root snapshot overhead
memUsed := uint64(indexSnapshot.Size())
memUsed += uint64(indexSnapshot.Size())
// Account for snapshot that the persister may be working on
persistEpoch := atomic.LoadUint64(&s.iStats.persistEpoch)

View file

@@ -28,11 +28,10 @@ import (
// Dictionary is the zap representation of the term dictionary
type Dictionary struct {
sb *SegmentBase
field string
fieldID uint16
fst *vellum.FST
fstReader *vellum.Reader
sb *SegmentBase
field string
fieldID uint16
fst *vellum.FST
}
// PostingsList returns the postings list for the specified term
@@ -47,14 +46,14 @@ func (d *Dictionary) PostingsList(term []byte, except *roaring.Bitmap,
}
func (d *Dictionary) postingsList(term []byte, except *roaring.Bitmap, rv *PostingsList) (*PostingsList, error) {
if d.fstReader == nil {
if d.fst == nil {
if rv == nil || rv == emptyPostingsList {
return emptyPostingsList, nil
}
return d.postingsListInit(rv, except), nil
}
postingsOffset, exists, err := d.fstReader.Get(term)
postingsOffset, exists, err := d.fst.Get(term)
if err != nil {
return nil, fmt.Errorf("vellum err: %v", err)
}

View file

@@ -69,9 +69,9 @@ func (di *docValueReader) cloneInto(rv *docValueReader) *docValueReader {
rv.curChunkNum = math.MaxUint64
rv.chunkOffsets = di.chunkOffsets // immutable, so it's sharable
rv.dvDataLoc = di.dvDataLoc
rv.curChunkHeader = nil
rv.curChunkHeader = rv.curChunkHeader[:0]
rv.curChunkData = nil
rv.uncompressed = nil
rv.uncompressed = rv.uncompressed[:0]
return rv
}
@@ -150,7 +150,11 @@ func (di *docValueReader) loadDvChunk(chunkNumber uint64, s *SegmentBase) error
chunkMetaLoc := destChunkDataLoc + uint64(read)
offset := uint64(0)
di.curChunkHeader = make([]MetaData, int(numDocs))
if cap(di.curChunkHeader) < int(numDocs) {
di.curChunkHeader = make([]MetaData, int(numDocs))
} else {
di.curChunkHeader = di.curChunkHeader[:int(numDocs)]
}
for i := 0; i < int(numDocs); i++ {
di.curChunkHeader[i].DocNum, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64])
offset += uint64(read)
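
Both changes in this file trade fresh allocations for buffer reuse: cloneInto resets slices with [:0] instead of nil so their backing arrays survive the clone, and loadDvChunk only allocates a new curChunkHeader when the old capacity is too small. The reslice-or-allocate idiom in isolation (an illustrative helper):

package main

import "fmt"

// reuseOrAlloc keeps the previous backing array when its capacity
// suffices and allocates only when it does not.
func reuseOrAlloc(buf []int, n int) []int {
	if cap(buf) < n {
		return make([]int, n)
	}
	return buf[:n]
}

func main() {
	a := reuseOrAlloc(nil, 4)           // allocates
	b := reuseOrAlloc(a, 3)             // reslices, no allocation
	fmt.Println(len(a), len(b), cap(b)) // 4 3 4
}
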
@@ -301,12 +305,5 @@ func (s *Segment) VisitDocumentFieldTerms(localDocNum uint64, fields []string,
// persisted doc value terms ready to be visitable using the
// VisitDocumentFieldTerms method.
func (s *Segment) VisitableDocValueFields() ([]string, error) {
rv := make([]string, 0, len(s.fieldDvReaders))
for fieldID, field := range s.fieldsInv {
if dvIter, ok := s.fieldDvReaders[uint16(fieldID)]; ok &&
dvIter != nil {
rv = append(rv, field)
}
}
return rv, nil
return s.fieldDvNames, nil
}
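
VisitableDocValueFields used to rebuild the visitable-field list on every call; it now returns fieldDvNames, which loadDvReaders fills once when the segment is opened (see the segment.go hunks below). A stand-in sketch of that load-time caching (seg and its fields are illustrative):

package main

import "fmt"

// seg stands in for a zap segment: dvNames is derived from the doc-value
// readers once, at load time, instead of on every query.
type seg struct {
	dvReaders map[uint16]struct{}
	fieldsInv []string
	dvNames   []string
}

func (s *seg) loadDvReaders() {
	for fieldID, field := range s.fieldsInv {
		if _, ok := s.dvReaders[uint16(fieldID)]; ok {
			s.dvNames = append(s.dvNames, field)
		}
	}
}

func (s *seg) visitableDocValueFields() []string { return s.dvNames }

func main() {
	s := &seg{
		dvReaders: map[uint16]struct{}{0: {}, 2: {}},
		fieldsInv: []string{"_id", "title", "body"},
	}
	s.loadDvReaders()
	fmt.Println(s.visitableDocValueFields()) // [_id body]
}
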

View file

@@ -599,8 +599,13 @@ func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap,
typs := make([][]byte, len(fieldsInv))
poss := make([][][]uint64, len(fieldsInv))
var posBuf []uint64
docNumOffsets := make([]uint64, newSegDocCount)
vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx)
defer visitDocumentCtxPool.Put(vdc)
// for each segment
for segI, segment := range segments {
segNewDocNums := make([]uint64, segment.numDocs)
@@ -639,17 +644,32 @@ func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap,
metaBuf.Reset()
data = data[:0]
posTemp := posBuf
// collect all the data
for i := 0; i < len(fieldsInv); i++ {
vals[i] = vals[i][:0]
typs[i] = typs[i][:0]
poss[i] = poss[i][:0]
}
err := segment.VisitDocument(docNum, func(field string, typ byte, value []byte, pos []uint64) bool {
err := segment.visitDocument(vdc, docNum, func(field string, typ byte, value []byte, pos []uint64) bool {
fieldID := int(fieldsMap[field]) - 1
vals[fieldID] = append(vals[fieldID], value)
typs[fieldID] = append(typs[fieldID], typ)
poss[fieldID] = append(poss[fieldID], pos)
// copy array positions to preserve them beyond the scope of this callback
var curPos []uint64
if len(pos) > 0 {
if cap(posTemp) < len(pos) {
posBuf = make([]uint64, len(pos)*len(fieldsInv))
posTemp = posBuf
}
curPos = posTemp[0:len(pos)]
copy(curPos, pos)
posTemp = posTemp[len(pos):]
}
poss[fieldID] = append(poss[fieldID], curPos)
return true
})
if err != nil {
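
The pos slices handed to the visitor alias buffers owned by the segment and recycled after each callback, so the merge loop now copies them into posBuf, a single arena that is consumed through posTemp and regrown when it runs short. An illustrative reduction of that copy-into-arena pattern:

package main

import "fmt"

func main() {
	var arena []uint64 // grows on demand, reused across documents
	keep := func(pos []uint64) []uint64 {
		if len(pos) == 0 {
			return nil
		}
		if cap(arena) < len(pos) {
			arena = make([]uint64, len(pos)*4) // over-allocate for reuse
		}
		cur := arena[:len(pos)]
		copy(cur, pos)
		arena = arena[len(pos):] // consume the arena
		return cur
	}

	scratch := []uint64{1, 2, 3}
	saved := keep(scratch)
	scratch[0] = 99                // caller recycles its buffer...
	fmt.Println(saved, scratch[0]) // ...the copy is unaffected: [1 2 3] 99
}
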

View file

@@ -99,6 +99,7 @@ type SegmentBase struct {
docValueOffset uint64
dictLocs []uint64
fieldDvReaders map[uint16]*docValueReader // naive chunk cache per field
fieldDvNames []string // field names cached in fieldDvReaders
size uint64
}
@@ -265,10 +266,6 @@ func (sb *SegmentBase) dictionary(field string) (rv *Dictionary, err error) {
if err != nil {
return nil, fmt.Errorf("dictionary field %s vellum err: %v", field, err)
}
rv.fstReader, err = rv.fst.Reader()
if err != nil {
return nil, fmt.Errorf("dictionary field %s vellum Reader err: %v", field, err)
}
}
}
}
@@ -294,10 +291,15 @@ var visitDocumentCtxPool = sync.Pool{
// VisitDocument invokes the DocumentFieldValueVisitor for each stored field
// for the specified doc number
func (s *SegmentBase) VisitDocument(num uint64, visitor segment.DocumentFieldValueVisitor) error {
vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx)
defer visitDocumentCtxPool.Put(vdc)
return s.visitDocument(vdc, num, visitor)
}
func (s *SegmentBase) visitDocument(vdc *visitDocumentCtx, num uint64,
visitor segment.DocumentFieldValueVisitor) error {
// first make sure this is a valid number in this segment
if num < s.numDocs {
vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx)
meta, compressed := s.getDocStoredMetaAndCompressed(num)
vdc.reader.Reset(meta)
@@ -367,7 +369,6 @@ func (s *SegmentBase) VisitDocument(num uint64, visitor segment.DocumentFieldVal
}
vdc.buf = uncompressed
visitDocumentCtxPool.Put(vdc)
}
return nil
}
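
This is the companion to the merge change above: the exported VisitDocument keeps the pool discipline (Get from visitDocumentCtxPool, deferred Put) at the API boundary, while the new unexported visitDocument accepts a caller-owned context, which is what lets a merge reuse one context across every document it visits. A self-contained sketch of that split (visitCtx is a stand-in type):

package main

import (
	"fmt"
	"sync"
)

// visitCtx stands in for zap's visitDocumentCtx: per-document scratch
// state that is worth pooling rather than reallocating for every doc.
type visitCtx struct{ buf []byte }

var ctxPool = sync.Pool{
	New: func() interface{} { return new(visitCtx) },
}

// VisitDocument owns the pool interaction; visitDocument does the work
// with whatever context the caller supplies.
func VisitDocument(num uint64) {
	vdc := ctxPool.Get().(*visitCtx)
	defer ctxPool.Put(vdc)
	visitDocument(vdc, num)
}

func visitDocument(vdc *visitCtx, num uint64) {
	vdc.buf = vdc.buf[:0] // reuse pooled scratch
	fmt.Println("doc", num, "scratch cap", cap(vdc.buf))
}

func main() {
	VisitDocument(1)
	VisitDocument(2) // typically reuses the pooled context
}
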
@@ -528,7 +529,12 @@ func (s *SegmentBase) loadDvReaders() error {
}
read += uint64(n)
s.fieldDvReaders[uint16(fieldID)], _ = s.loadFieldDocValueReader(field, fieldLocStart, fieldLocEnd)
fieldDvReader, _ := s.loadFieldDocValueReader(field, fieldLocStart, fieldLocEnd)
if fieldDvReader != nil {
s.fieldDvReaders[uint16(fieldID)] = fieldDvReader
s.fieldDvNames = append(s.fieldDvNames, field)
}
}
return nil
}

View file

@@ -15,7 +15,6 @@
package scorch
import (
"bytes"
"container/heap"
"encoding/binary"
"fmt"
@@ -314,21 +313,26 @@ func (i *IndexSnapshot) Document(id string) (rv *document.Document, err error) {
segmentIndex, localDocNum := i.segmentIndexAndLocalDocNumFromGlobal(docNum)
rv = document.NewDocument(id)
err = i.segment[segmentIndex].VisitDocument(localDocNum, func(name string, typ byte, value []byte, pos []uint64) bool {
err = i.segment[segmentIndex].VisitDocument(localDocNum, func(name string, typ byte, val []byte, pos []uint64) bool {
if name == "_id" {
return true
}
// copy value, array positions to preserve them beyond the scope of this callback
value := append([]byte(nil), val...)
arrayPos := append([]uint64(nil), pos...)
switch typ {
case 't':
rv.AddField(document.NewTextField(name, pos, value))
rv.AddField(document.NewTextField(name, arrayPos, value))
case 'n':
rv.AddField(document.NewNumericFieldFromBytes(name, pos, value))
rv.AddField(document.NewNumericFieldFromBytes(name, arrayPos, value))
case 'd':
rv.AddField(document.NewDateTimeFieldFromBytes(name, pos, value))
rv.AddField(document.NewDateTimeFieldFromBytes(name, arrayPos, value))
case 'b':
rv.AddField(document.NewBooleanFieldFromBytes(name, pos, value))
rv.AddField(document.NewBooleanFieldFromBytes(name, arrayPos, value))
case 'g':
rv.AddField(document.NewGeoPointFieldFromBytes(name, pos, value))
rv.AddField(document.NewGeoPointFieldFromBytes(name, arrayPos, value))
}
return true
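
The val and pos arguments passed to the visitor are only valid for the duration of the callback, since they alias the segment's internal buffers; append onto a nil slice is the idiomatic deep copy the fix uses. The idiom in isolation:

package main

import "fmt"

func main() {
	backing := []byte("hello")
	val := backing[:5] // what a callback might receive: an aliasing slice

	value := append([]byte(nil), val...) // deep copy, safe to retain
	backing[0] = 'X'                     // owner recycles its buffer

	fmt.Println(string(val), string(value)) // Xello hello
}
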
@@ -492,124 +496,117 @@ func (i *IndexSnapshot) DocumentVisitFieldTerms(id index.IndexInternalID,
}
func (i *IndexSnapshot) documentVisitFieldTerms(id index.IndexInternalID,
fields []string, visitor index.DocumentFieldTermVisitor, dvs segment.DocVisitState) (
segment.DocVisitState, error) {
fields []string, visitor index.DocumentFieldTermVisitor,
dvs segment.DocVisitState) (segment.DocVisitState, error) {
docNum, err := docInternalToNumber(id)
if err != nil {
return nil, err
}
segmentIndex, localDocNum := i.segmentIndexAndLocalDocNumFromGlobal(docNum)
if segmentIndex >= len(i.segment) {
return nil, nil
}
_, dvs, err = i.documentVisitFieldTermsOnSegment(
segmentIndex, localDocNum, fields, nil, visitor, dvs)
return dvs, err
}
func (i *IndexSnapshot) documentVisitFieldTermsOnSegment(
segmentIndex int, localDocNum uint64, fields []string, cFields []string,
visitor index.DocumentFieldTermVisitor, dvs segment.DocVisitState) (
cFieldsOut []string, dvsOut segment.DocVisitState, err error) {
ss := i.segment[segmentIndex]
if zaps, ok := ss.segment.(segment.DocumentFieldTermVisitable); ok {
// get the list of doc value persisted fields
pFields, err := zaps.VisitableDocValueFields()
var vFields []string // fields that are visitable via the segment
ssv, ssvOk := ss.segment.(segment.DocumentFieldTermVisitable)
if ssvOk && ssv != nil {
vFields, err = ssv.VisitableDocValueFields()
if err != nil {
return nil, err
return nil, nil, err
}
// assort the fields for which terms look up have to
// be performed runtime
dvPendingFields := extractDvPendingFields(fields, pFields)
// all fields are doc value persisted
if len(dvPendingFields) == 0 {
return zaps.VisitDocumentFieldTerms(localDocNum, fields, visitor, dvs)
}
var errCh chan error
// cFields represents the fields that we'll need from the
// cachedDocs, and might optionally be provided by the caller,
// if the caller happens to know we're on the same segmentIndex
// from a previous invocation
if cFields == nil {
cFields = subtractStrings(fields, vFields)
if !ss.cachedDocs.hasFields(cFields) {
errCh = make(chan error, 1)
go func() {
err := ss.cachedDocs.prepareFields(cFields, ss)
if err != nil {
errCh <- err
}
close(errCh)
}()
}
}
// concurrently trigger the runtime doc value preparations for
// pending fields as well as the visit of the persisted doc values
errCh := make(chan error, 1)
go func() {
defer close(errCh)
err := ss.cachedDocs.prepareFields(dvPendingFields, ss)
if err != nil {
errCh <- err
}
}()
// visit the requested persisted dv while the cache preparation is in progress
dvs, err = zaps.VisitDocumentFieldTerms(localDocNum, fields, visitor, dvs)
if ssvOk && ssv != nil && len(vFields) > 0 {
dvs, err = ssv.VisitDocumentFieldTerms(localDocNum, fields, visitor, dvs)
if err != nil {
return nil, err
return nil, nil, err
}
}
// err out if fieldCache preparation failed
if errCh != nil {
err = <-errCh
if err != nil {
return nil, err
}
visitDocumentFieldCacheTerms(localDocNum, dvPendingFields, ss, visitor)
return dvs, nil
}
return dvs, prepareCacheVisitDocumentFieldTerms(localDocNum, fields, ss, visitor)
}
func prepareCacheVisitDocumentFieldTerms(localDocNum uint64, fields []string,
ss *SegmentSnapshot, visitor index.DocumentFieldTermVisitor) error {
err := ss.cachedDocs.prepareFields(fields, ss)
if err != nil {
return err
}
visitDocumentFieldCacheTerms(localDocNum, fields, ss, visitor)
return nil
}
func visitDocumentFieldCacheTerms(localDocNum uint64, fields []string,
ss *SegmentSnapshot, visitor index.DocumentFieldTermVisitor) {
for _, field := range fields {
if cachedFieldDocs, exists := ss.cachedDocs.cache[field]; exists {
if tlist, exists := cachedFieldDocs.docs[localDocNum]; exists {
for {
i := bytes.Index(tlist, TermSeparatorSplitSlice)
if i < 0 {
break
}
visitor(field, tlist[0:i])
tlist = tlist[i+1:]
}
}
return nil, nil, err
}
}
}
func extractDvPendingFields(requestedFields, persistedFields []string) []string {
removeMap := make(map[string]struct{}, len(persistedFields))
for _, str := range persistedFields {
removeMap[str] = struct{}{}
if len(cFields) > 0 {
ss.cachedDocs.visitDoc(localDocNum, cFields, visitor)
}
rv := make([]string, 0, len(requestedFields))
for _, s := range requestedFields {
if _, ok := removeMap[s]; !ok {
rv = append(rv, s)
}
}
return rv
return cFields, dvs, nil
}
func (i *IndexSnapshot) DocValueReader(fields []string) (index.DocValueReader, error) {
return &DocValueReader{i: i, fields: fields}, nil
func (i *IndexSnapshot) DocValueReader(fields []string) (
index.DocValueReader, error) {
return &DocValueReader{i: i, fields: fields, currSegmentIndex: -1}, nil
}
type DocValueReader struct {
i *IndexSnapshot
fields []string
dvs segment.DocVisitState
currSegmentIndex int
currCachedFields []string
}
func (dvr *DocValueReader) VisitDocValues(id index.IndexInternalID,
visitor index.DocumentFieldTermVisitor) (err error) {
dvr.dvs, err = dvr.i.documentVisitFieldTerms(id, dvr.fields, visitor, dvr.dvs)
docNum, err := docInternalToNumber(id)
if err != nil {
return err
}
segmentIndex, localDocNum := dvr.i.segmentIndexAndLocalDocNumFromGlobal(docNum)
if segmentIndex >= len(dvr.i.segment) {
return nil
}
if dvr.currSegmentIndex != segmentIndex {
dvr.currSegmentIndex = segmentIndex
dvr.currCachedFields = nil
}
dvr.currCachedFields, dvr.dvs, err = dvr.i.documentVisitFieldTermsOnSegment(
dvr.currSegmentIndex, localDocNum, dvr.fields, dvr.currCachedFields, visitor, dvr.dvs)
return err
}
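
DocValueReader is now stateful: currSegmentIndex remembers which segment served the last document and currCachedFields carries the fields already prepared in that segment's cache, so visiting many hits from one segment skips the subtractStrings and hasFields work. A stand-in sketch of that per-segment memoization (the types and method here are illustrative, not the bleve interface):

package main

import "fmt"

type docValueReader struct {
	fields     []string
	currSeg    int      // last segment visited; -1 means none yet
	currFields []string // fields already prepared for currSeg
}

func newDocValueReader(fields []string) *docValueReader {
	return &docValueReader{fields: fields, currSeg: -1}
}

func (r *docValueReader) visit(segIdx int, docNum uint64) {
	if r.currSeg != segIdx {
		r.currSeg = segIdx
		r.currFields = nil // new segment: cached field state is invalid
	}
	warm := r.currFields != nil
	r.currFields = r.fields // pretend the segment cache is now prepared
	fmt.Printf("seg=%d doc=%d warm=%v\n", segIdx, docNum, warm)
}

func main() {
	r := newDocValueReader([]string{"title"})
	r.visit(0, 1) // cold: prepares fields for segment 0
	r.visit(0, 2) // warm: same segment, state reused
	r.visit(1, 9) // cold again: segment changed
}
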
@@ -636,3 +633,22 @@ func (i *IndexSnapshot) DumpFields() chan interface{} {
}()
return rv
}
// subtractStrings returns set a minus elements of set b.
func subtractStrings(a, b []string) []string {
if len(b) <= 0 {
return a
}
rv := make([]string, 0, len(a))
OUTER:
for _, as := range a {
for _, bs := range b {
if as == bs {
continue OUTER
}
}
rv = append(rv, as)
}
return rv
}
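
For reference, the new helper exercised on its own; the snapshot uses it to compute cFields, the requested fields that are not doc-value persisted and therefore must be served from cachedDocs (the inputs in main are illustrative):

package main

import "fmt"

// subtractStrings is copied verbatim from the hunk above.
func subtractStrings(a, b []string) []string {
	if len(b) <= 0 {
		return a
	}
	rv := make([]string, 0, len(a))
OUTER:
	for _, as := range a {
		for _, bs := range b {
			if as == bs {
				continue OUTER
			}
		}
		rv = append(rv, as)
	}
	return rv
}

func main() {
	requested := []string{"title", "tags", "price"}
	visitable := []string{"title", "price"} // doc-value persisted fields
	fmt.Println(subtractStrings(requested, visitable)) // [tags]
}
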

View file

@@ -15,10 +15,12 @@
package scorch
import (
"bytes"
"sync"
"sync/atomic"
"github.com/RoaringBitmap/roaring"
"github.com/blevesearch/bleve/index"
"github.com/blevesearch/bleve/index/scorch/segment"
"github.com/blevesearch/bleve/size"
)
@@ -106,7 +108,6 @@ func (s *SegmentSnapshot) DocID(num uint64) ([]byte, error) {
}
func (s *SegmentSnapshot) Count() uint64 {
rv := s.segment.Count()
if s.deleted != nil {
rv -= s.deleted.GetCardinality()
@@ -166,7 +167,7 @@ type cachedFieldDocs struct {
size uint64
}
func (cfd *cachedFieldDocs) prepareFields(field string, ss *SegmentSnapshot) {
func (cfd *cachedFieldDocs) prepareField(field string, ss *SegmentSnapshot) {
defer close(cfd.readyCh)
cfd.size += uint64(size.SizeOfUint64) /* size field */
@@ -222,6 +223,7 @@ type cachedDocs struct {
func (c *cachedDocs) prepareFields(wantedFields []string, ss *SegmentSnapshot) error {
c.m.Lock()
if c.cache == nil {
c.cache = make(map[string]*cachedFieldDocs, len(ss.Fields()))
}
@@ -234,7 +236,7 @@ func (c *cachedDocs) prepareFields(wantedFields []string, ss *SegmentSnapshot) e
docs: make(map[uint64][]byte),
}
go c.cache[field].prepareFields(field, ss)
go c.cache[field].prepareField(field, ss)
}
}
@@ -248,12 +250,26 @@ func (c *cachedDocs) prepareFields(wantedFields []string, ss *SegmentSnapshot) e
}
c.m.Lock()
}
c.updateSizeLOCKED()
c.m.Unlock()
return nil
}
// hasFields returns true if the cache has all the given fields
func (c *cachedDocs) hasFields(fields []string) bool {
c.m.Lock()
for _, field := range fields {
if _, exists := c.cache[field]; !exists {
c.m.Unlock()
return false // found a field not in cache
}
}
c.m.Unlock()
return true
}
func (c *cachedDocs) Size() int {
return int(atomic.LoadUint64(&c.size))
}
@@ -270,3 +286,29 @@ func (c *cachedDocs) updateSizeLOCKED() {
}
atomic.StoreUint64(&c.size, uint64(sizeInBytes))
}
func (c *cachedDocs) visitDoc(localDocNum uint64,
fields []string, visitor index.DocumentFieldTermVisitor) {
c.m.Lock()
for _, field := range fields {
if cachedFieldDocs, exists := c.cache[field]; exists {
c.m.Unlock()
<-cachedFieldDocs.readyCh
c.m.Lock()
if tlist, exists := cachedFieldDocs.docs[localDocNum]; exists {
for {
i := bytes.Index(tlist, TermSeparatorSplitSlice)
if i < 0 {
break
}
visitor(field, tlist[0:i])
tlist = tlist[i+1:]
}
}
}
}
c.m.Unlock()
}
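
Note the locking dance in visitDoc: the cache mutex is released before blocking on a field's readyCh and re-acquired afterwards, so a reader waiting on one field's prepareField goroutine does not stall every other cache user. A reduced sketch of that unlock-wait-relock shape (cache and fieldEntry are stand-ins):

package main

import (
	"fmt"
	"sync"
)

type fieldEntry struct {
	readyCh chan struct{} // closed once the entry is fully prepared
	data    string
}

type cache struct {
	m       sync.Mutex
	entries map[string]*fieldEntry
}

func (c *cache) read(field string) string {
	c.m.Lock()
	e, ok := c.entries[field]
	if !ok {
		c.m.Unlock()
		return ""
	}
	c.m.Unlock() // release before blocking...
	<-e.readyCh  // ...so other readers are not stalled behind us
	c.m.Lock()
	v := e.data
	c.m.Unlock()
	return v
}

func main() {
	e := &fieldEntry{readyCh: make(chan struct{})}
	c := &cache{entries: map[string]*fieldEntry{"title": e}}
	go func() { e.data = "ready"; close(e.readyCh) }() // prepareField stand-in
	fmt.Println(c.read("title"))
}
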

View file

@@ -33,6 +33,10 @@ type Stats struct {
TotBatchIntroTime uint64
MaxBatchIntroTime uint64
CurRootEpoch uint64
LastPersistedEpoch uint64
LastMergedEpoch uint64
TotOnErrors uint64
TotAnalysisTime uint64

View file

@@ -77,6 +77,10 @@ func (p PrefixCoded) Int64() (int64, error) {
}
func ValidPrefixCodedTerm(p string) (bool, int) {
return ValidPrefixCodedTermBytes([]byte(p))
}
func ValidPrefixCodedTermBytes(p []byte) (bool, int) {
if len(p) > 0 {
if p[0] < ShiftStartInt64 || p[0] > ShiftStartInt64+63 {
return false, 0

View file

@@ -15,6 +15,7 @@
package search
import (
"bytes"
"encoding/json"
"fmt"
"math"
@@ -342,14 +343,15 @@ type SortField struct {
Type SortFieldType
Mode SortFieldMode
Missing SortFieldMissing
values []string
values [][]byte
tmp [][]byte
}
// UpdateVisitor notifies this sort field that in this document
// this field has the specified term
func (s *SortField) UpdateVisitor(field string, term []byte) {
if field == s.Field {
s.values = append(s.values, string(term))
s.values = append(s.values, term)
}
}
@@ -359,7 +361,7 @@ func (s *SortField) UpdateVisitor(field string, term []byte) {
func (s *SortField) Value(i *DocumentMatch) string {
iTerms := s.filterTermsByType(s.values)
iTerm := s.filterTermsByMode(iTerms)
s.values = nil
s.values = s.values[:0]
return iTerm
}
@@ -368,17 +370,17 @@ func (s *SortField) Descending() bool {
return s.Desc
}
func (s *SortField) filterTermsByMode(terms []string) string {
func (s *SortField) filterTermsByMode(terms [][]byte) string {
if len(terms) == 1 || (len(terms) > 1 && s.Mode == SortFieldDefault) {
return terms[0]
return string(terms[0])
} else if len(terms) > 1 {
switch s.Mode {
case SortFieldMin:
sort.Strings(terms)
return terms[0]
sort.Sort(BytesSlice(terms))
return string(terms[0])
case SortFieldMax:
sort.Strings(terms)
return terms[len(terms)-1]
sort.Sort(BytesSlice(terms))
return string(terms[len(terms)-1])
}
}
@@ -400,13 +402,13 @@ func (s *SortField) filterTermsByMode(terms []string) string {
// return only the terms which had shift of 0
// if we are in explicit number or date mode, return only valid
// prefix coded numbers with shift of 0
func (s *SortField) filterTermsByType(terms []string) []string {
func (s *SortField) filterTermsByType(terms [][]byte) [][]byte {
stype := s.Type
if stype == SortFieldAuto {
allTermsPrefixCoded := true
var termsWithShiftZero []string
termsWithShiftZero := s.tmp[:0]
for _, term := range terms {
valid, shift := numeric.ValidPrefixCodedTerm(term)
valid, shift := numeric.ValidPrefixCodedTermBytes(term)
if valid && shift == 0 {
termsWithShiftZero = append(termsWithShiftZero, term)
} else if !valid {
@@ -415,16 +417,18 @@ func (s *SortField) filterTermsByType(terms []string) []string {
}
if allTermsPrefixCoded {
terms = termsWithShiftZero
s.tmp = termsWithShiftZero[:0]
}
} else if stype == SortFieldAsNumber || stype == SortFieldAsDate {
var termsWithShiftZero []string
termsWithShiftZero := s.tmp[:0]
for _, term := range terms {
valid, shift := numeric.ValidPrefixCodedTerm(term)
valid, shift := numeric.ValidPrefixCodedTermBytes(term)
if valid && shift == 0 {
termsWithShiftZero = append(termsWithShiftZero, term)
}
}
terms = termsWithShiftZero
s.tmp = termsWithShiftZero[:0]
}
return terms
}
@@ -619,7 +623,7 @@ func (s *SortGeoDistance) UpdateVisitor(field string, term []byte) {
func (s *SortGeoDistance) Value(i *DocumentMatch) string {
iTerms := s.filterTermsByType(s.values)
iTerm := s.filterTermsByMode(iTerms)
s.values = nil
s.values = s.values[:0]
if iTerm == "" {
return maxDistance
@@ -700,3 +704,9 @@ func (s *SortGeoDistance) Copy() SearchSort {
rv := *s
return &rv
}
type BytesSlice [][]byte
func (p BytesSlice) Len() int { return len(p) }
func (p BytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) < 0 }
func (p BytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
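
BytesSlice gives [][]byte a sort.Interface so filterTermsByMode can order terms without the per-hit string conversions the old sort.Strings path paid for. Exercised on its own (the type is copied from above; main is illustrative):

package main

import (
	"bytes"
	"fmt"
	"sort"
)

type BytesSlice [][]byte

func (p BytesSlice) Len() int           { return len(p) }
func (p BytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) < 0 }
func (p BytesSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func main() {
	terms := [][]byte{[]byte("pear"), []byte("apple"), []byte("fig")}
	sort.Sort(BytesSlice(terms))
	for _, t := range terms {
		fmt.Println(string(t)) // apple, fig, pear
	}
}
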

vendor/vendor.json (vendored): 166 changed lines
View file

@@ -11,230 +11,236 @@
{
"checksumSHA1": "W+LrvPPrjucuzGEmslEPztRDDOI=",
"path": "github.com/blevesearch/bleve",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "XX5+Amhdr+mxVY7iDzanrQrcNyI=",
"path": "github.com/blevesearch/bleve/analysis",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "OM2QW7G5DfzaUzCoe23282875TE=",
"path": "github.com/blevesearch/bleve/analysis/analyzer/keyword",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "IefDmVwLU3UiILeN35DA25gPFnc=",
"path": "github.com/blevesearch/bleve/analysis/analyzer/standard",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "P+ay5l3LO/xoWJXKfyK4Ma1hGvw=",
"path": "github.com/blevesearch/bleve/analysis/datetime/flexible",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "uIHCAnZoB7dKDPFc3SkiO1hN4BY=",
"path": "github.com/blevesearch/bleve/analysis/datetime/optional",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "AdhWAC/hkZLFXUcihmzhMspNk3w=",
"path": "github.com/blevesearch/bleve/analysis/lang/en",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "5rJgE+eR0dB+cjHkENWqTKfX0T8=",
"path": "github.com/blevesearch/bleve/analysis/token/keyword",
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "3VIPkl12t1ko4y6DkbPcz+MtQjY=",
"path": "github.com/blevesearch/bleve/analysis/token/lowercase",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "QOw3ypU4VTmFT8XYS/52P3RILZw=",
"path": "github.com/blevesearch/bleve/analysis/token/porter",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "8wCAW8E4SO7gGxt0tsr4NZ4APIg=",
"path": "github.com/blevesearch/bleve/analysis/token/stop",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "Lnopn2j55CFd15EBle12dzqQar8=",
"path": "github.com/blevesearch/bleve/analysis/tokenizer/single",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "q7C04nlJLxKmemXLop0oyJhfi5M=",
"path": "github.com/blevesearch/bleve/analysis/tokenizer/unicode",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "+vKBTffiCd1lsVOahRE1H3/eIuo=",
"path": "github.com/blevesearch/bleve/document",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "8+NkVEqldBSg13whAM0Fgk0aIQU=",
"path": "github.com/blevesearch/bleve/geo",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "BD1BDYaRaKBUHfeoXr7Om1G/h+k=",
"path": "github.com/blevesearch/bleve/index",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "lxqhrjo3SYry9yRCfuJmVqSHLAE=",
"checksumSHA1": "ksbZyEYxUW3IJzvHN+l5fDXzbH0=",
"path": "github.com/blevesearch/bleve/index/scorch",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "0Ef3ooWYliWUWCa9YdNJ1T3sJFk=",
"path": "github.com/blevesearch/bleve/index/scorch/mergeplan",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "gQgYsSMtCzm01zvuI52qGEPAio4=",
"path": "github.com/blevesearch/bleve/index/scorch/segment",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "ucFyMsvVO6Dw5kkmejVKVHDBA+I=",
"checksumSHA1": "0e/pIoPrfIu5tU511Dxv7WU3ZJk=",
"path": "github.com/blevesearch/bleve/index/scorch/segment/zap",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "3ttI5qH9k/gOBaW8FJFVmOh5oIA=",
"path": "github.com/blevesearch/bleve/index/store",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "9cJS6D7IAwrzK/opywK0ZgAmpTQ=",
"path": "github.com/blevesearch/bleve/index/store/boltdb",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "yeAX9ygUYTMbFpL20NJ0MjR7u6M=",
"path": "github.com/blevesearch/bleve/index/store/gtreap",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "9HX6569+W5I72PAtzoUkwi2s8xs=",
"path": "github.com/blevesearch/bleve/index/upsidedown",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "7/6MZFLZzfBAsuOWTFs79xomnBE=",
"path": "github.com/blevesearch/bleve/mapping",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "Qyi8BmpvHc83X9J06QB7GV7O+6M=",
"checksumSHA1": "UnotAMIXNVNwOZvPeJAYFhYp9vg=",
"path": "github.com/blevesearch/bleve/numeric",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "Qj1wH6TzvIl4OAiPQaFDpkWvwLM=",
"path": "github.com/blevesearch/bleve/registry",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "zkRYi4evy7/mBB0fGgpeT/F2lfw=",
"checksumSHA1": "1TjupJvROj0OOzdiL5OTe1JbJKg=",
"path": "github.com/blevesearch/bleve/search",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "DFJ6M+PN7kH10K9ZaRoO62uMHQU=",
"path": "github.com/blevesearch/bleve/search/collector",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "h38ir3/VB/uR5txN0sfk1hBrIaw=",
"path": "github.com/blevesearch/bleve/search/facet",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "J/bdoPp+OZ6vSqsXF10484C7asc=",
"path": "github.com/blevesearch/bleve/search/highlight",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "rAz4wfq/O/Tx5aYz/6BN09jm0io=",
"path": "github.com/blevesearch/bleve/search/highlight/format/html",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "JQCH82+IdGvTtmKn+rDxCDxISxI=",
"path": "github.com/blevesearch/bleve/search/highlight/fragmenter/simple",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "/4Q1eosaGj0eU+F4YWQRdaOS5XA=",
"path": "github.com/blevesearch/bleve/search/highlight/highlighter/html",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "m4s4+yGUKuSVYHDOQpzSZ8Jdeyg=",
"path": "github.com/blevesearch/bleve/search/highlight/highlighter/simple",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "3c9y+4nTwE5+iW4tdAPAk9M181U=",
"path": "github.com/blevesearch/bleve/search/query",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "WnfAv5lWULhk5H/DE7roBVQoJOU=",
"path": "github.com/blevesearch/bleve/search/scorer",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "Lu0Efd4WmYV5ildYZ88dExUV640=",
"path": "github.com/blevesearch/bleve/search/searcher",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "lycEaUs7grxzfMYWTt+p/IniQsE=",
"path": "github.com/blevesearch/bleve/size",
"revision": "ecf672f9bf46edfafa0262cbe05cc943b72ff48b",
"revisionTime": "2018-05-03T18:49:31Z"
"revision": "1d6d47ed3ad966075bf9162fee4caa5d8984733c",
"revisionTime": "2018-05-25T17:44:03Z"
},
{
"checksumSHA1": "F6iBQThfd04TIlxT49zaPRGvlqE=",