Use MessagePack on disk

Ken-Håvard Lieng 2016-01-17 23:33:52 +01:00
parent 1def24500a
commit 76f99c8332
55 changed files with 11993 additions and 85 deletions

Godeps/Godeps.json (generated)

@@ -79,6 +79,10 @@
"ImportPath": "github.com/mitchellh/mapstructure",
"Rev": "f7d28d5aeab42b9b95d2e6d6b956f73a290077fc"
},
{
"ImportPath": "github.com/philhofer/fwd",
"Rev": "f429d72be3997413f67cdb6e3c87bfc72475e717"
},
{
"ImportPath": "github.com/ryszard/goskiplist/skiplist",
"Rev": "2dfbae5fcf46374f166f8969cb07e167f1be6273"
@@ -124,6 +128,11 @@
"ImportPath": "github.com/syndtr/gosnappy/snappy",
"Rev": "156a073208e131d7d2e212cb749feae7c339e846"
},
{
"ImportPath": "github.com/tinylib/msgp/msgp",
"Comment": "v1.0-beta-4-g0d29f4b",
"Rev": "0d29f4bb1d9c5d1ad8ad54e0507ca54c0fa50482"
},
{
"ImportPath": "github.com/willf/bitset",
"Comment": "v1.0.0-17-g4b22041",

@@ -0,0 +1,7 @@
Copyright (c) 2014-2015, Philip Hofer
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@@ -0,0 +1,311 @@
# fwd
import "github.com/philhofer/fwd"
The `fwd` package provides a buffered reader
and writer. Each has methods that help improve
the encoding/decoding performance of some binary
protocols.
The `fwd.Writer` and `fwd.Reader` types provide similar
functionality to their counterparts in `bufio`, plus
a few extra utility methods that simplify read-ahead
and write-ahead. I wrote this package to improve serialization
performance for http://github.com/philhofer/msgp,
where it provided about a 2x speedup over `bufio`. However,
care must be taken to understand the semantics of the
extra methods provided by this package, as they allow
the user to access and manipulate the buffer memory
directly.
The extra methods for `fwd.Reader` are `Peek`, `Skip`
and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`,
will re-allocate the read buffer in order to accommodate arbitrarily
large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes
in the stream, and uses the `io.Seeker` interface if the underlying
stream implements it. `(*fwd.Reader).Next` returns a slice pointing
to the next `n` bytes in the read buffer (like `Peek`), but also
increments the read position. This allows users to process streams
in arbitrary block sizes without having to manage appropriately-sized
slices. Additionally, obviating the need to copy the data from the
buffer to another location in memory can improve performance dramatically
in CPU-bound applications.
`fwd.Writer` has only one extra method, `(*fwd.Writer).Next`, which
returns a slice pointing to the next `n` bytes of the writer, and increments
the write position by the length of the returned slice. This allows users
to write directly to the end of the buffer.
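A minimal usage sketch tying these methods together (error handling abbreviated; the data and variable names are illustrative only):
``` go
package main

import (
	"bytes"
	"fmt"

	"github.com/philhofer/fwd"
)

func main() {
	// Reading: peek without advancing, then skip and take bytes in place.
	r := fwd.NewReader(bytes.NewReader([]byte("hello, fwd")))

	head, _ := r.Peek(5) // "hello"; the read position is unchanged
	fmt.Printf("peek: %q\n", head)

	r.Skip(7)            // consume "hello, "
	rest, _ := r.Next(3) // "fwd"; only valid until the next reader call
	fmt.Printf("next: %q\n", rest)

	// Writing: reserve space directly in the write buffer and fill it.
	var out bytes.Buffer
	w := fwd.NewWriter(&out)
	if b, err := w.Next(3); err == nil {
		copy(b, "abc")
	}
	w.Flush()
	fmt.Println(out.String()) // abc
}
```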
## Constants
``` go
const (
// DefaultReaderSize is the default size of the read buffer
DefaultReaderSize = 2048
)
```
``` go
const (
// DefaultWriterSize is the
// default write buffer size.
DefaultWriterSize = 2048
)
```
## type Reader
``` go
type Reader struct {
// contains filtered or unexported fields
}
```
Reader is a buffered look-ahead reader
### func NewReader
``` go
func NewReader(r io.Reader) *Reader
```
NewReader returns a new *Reader that reads from 'r'
### func NewReaderSize
``` go
func NewReaderSize(r io.Reader, n int) *Reader
```
NewReaderSize returns a new *Reader that
reads from 'r' and has a buffer size 'n'
### func (\*Reader) BufferSize
``` go
func (r *Reader) BufferSize() int
```
BufferSize returns the total size of the buffer
### func (\*Reader) Buffered
``` go
func (r *Reader) Buffered() int
```
Buffered returns the number of bytes currently in the buffer
### func (\*Reader) Next
``` go
func (r *Reader) Next(n int) ([]byte, error)
```
Next returns the next 'n' bytes in the stream.
If the returned slice has a length less than 'n',
an error will also be returned.
Unlike Peek, Next advances the reader position.
The returned bytes point to the same
data as the buffer, so the slice is
only valid until the next reader method call.
An EOF is considered an unexpected error.
### func (\*Reader) Peek
``` go
func (r *Reader) Peek(n int) ([]byte, error)
```
Peek returns the next 'n' buffered bytes,
reading from the underlying reader if necessary.
It will only return a slice shorter than 'n' bytes
if it also returns an error. Peek does not advance
the reader. EOF errors are *not* returned as
io.ErrUnexpectedEOF.
### func (\*Reader) Read
``` go
func (r *Reader) Read(b []byte) (int, error)
```
Read implements `io.Reader`
### func (\*Reader) ReadByte
``` go
func (r *Reader) ReadByte() (byte, error)
```
ReadByte implements `io.ByteReader`
### func (\*Reader) ReadFull
``` go
func (r *Reader) ReadFull(b []byte) (int, error)
```
ReadFull attempts to read len(b) bytes into
'b'. It returns the number of bytes read into
'b', and an error if it does not return len(b).
### func (\*Reader) Reset
``` go
func (r *Reader) Reset(rd io.Reader)
```
Reset resets the underlying reader
and the read buffer.
### func (\*Reader) Skip
``` go
func (r *Reader) Skip(n int) (int, error)
```
Skip moves the reader forward 'n' bytes.
Returns the number of bytes skipped and any
errors encountered. It is analogous to Seek(n, 1).
If the underlying reader implements io.Seeker, then
that method will be used to skip forward.
If the reader encounters
an EOF before skipping 'n' bytes, it
returns io.ErrUnexpectedEOF. If the
underlying reader implements io.Seeker, then
those rules apply instead. (Many implementations
will not return `io.EOF` until the next call
to Read.)
### func (\*Reader) WriteTo
``` go
func (r *Reader) WriteTo(w io.Writer) (int64, error)
```
WriteTo implements `io.WriterTo`
## type Writer
``` go
type Writer struct {
// contains filtered or unexported fields
}
```
Writer is a buffered writer
### func NewWriter
``` go
func NewWriter(w io.Writer) *Writer
```
NewWriter returns a new writer
that writes to 'w' and has a buffer
that is `DefaultWriterSize` bytes.
### func NewWriterSize
``` go
func NewWriterSize(w io.Writer, size int) *Writer
```
NewWriterSize returns a new writer
that writes to 'w' and has a buffer
that is 'size' bytes.
### func (\*Writer) BufferSize
``` go
func (w *Writer) BufferSize() int
```
BufferSize returns the maximum size of the buffer.
### func (\*Writer) Buffered
``` go
func (w *Writer) Buffered() int
```
Buffered returns the number of buffered bytes
in the writer.
### func (\*Writer) Flush
``` go
func (w *Writer) Flush() error
```
Flush flushes any buffered bytes
to the underlying writer.
### func (\*Writer) Next
``` go
func (w *Writer) Next(n int) ([]byte, error)
```
Next returns the next 'n' free bytes
in the write buffer, flushing the writer
as necessary. Next will return `io.ErrShortBuffer`
if 'n' is greater than the size of the write buffer.
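For instance, a minimal sketch of using `Next` to reserve a length prefix and fill it in place (the `writeFrame` helper is hypothetical, and `encoding/binary` plus this package are assumed to be imported):
``` go
// writeFrame writes a 4-byte big-endian length prefix followed by body.
func writeFrame(w *fwd.Writer, body []byte) error {
	hdr, err := w.Next(4) // reserve the prefix directly in the write buffer
	if err != nil {
		return err
	}
	binary.BigEndian.PutUint32(hdr, uint32(len(body)))
	if _, err := w.Write(body); err != nil {
		return err
	}
	return w.Flush()
}
```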
### func (\*Writer) ReadFrom
``` go
func (w *Writer) ReadFrom(r io.Reader) (int64, error)
```
ReadFrom implements `io.ReaderFrom`
### func (\*Writer) Write
``` go
func (w *Writer) Write(p []byte) (int, error)
```
Write implements `io.Writer`
### func (\*Writer) WriteByte
``` go
func (w *Writer) WriteByte(b byte) error
```
WriteByte implements `io.ByteWriter`
### func (\*Writer) WriteString
``` go
func (w *Writer) WriteString(s string) (int, error)
```
WriteString is analogous to Write, but it takes a string.
- - -
Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)

@@ -0,0 +1,375 @@
// The `fwd` package provides a buffered reader
// and writer. Each has methods that help improve
// the encoding/decoding performance of some binary
// protocols.
//
// The `fwd.Writer` and `fwd.Reader` type provide similar
// functionality to their counterparts in `bufio`, plus
// a few extra utility methods that simplify read-ahead
// and write-ahead. I wrote this package to improve serialization
// performance for http://github.com/tinylib/msgp,
// where it provided about a 2x speedup over `bufio` for certain
// workloads. However, care must be taken to understand the semantics of the
// extra methods provided by this package, as they allow
// the user to access and manipulate the buffer memory
// directly.
//
// The extra methods for `fwd.Reader` are `Peek`, `Skip`
// and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`,
// will re-allocate the read buffer in order to accommodate arbitrarily
// large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes
// in the stream, and uses the `io.Seeker` interface if the underlying
// stream implements it. `(*fwd.Reader).Next` returns a slice pointing
// to the next `n` bytes in the read buffer (like `Peek`), but also
// increments the read position. This allows users to process streams
// in arbitrary block sizes without having to manage appropriately-sized
// slices. Additionally, obviating the need to copy the data from the
// buffer to another location in memory can improve performance dramatically
// in CPU-bound applications.
//
// `fwd.Writer` has only one extra method, `(*fwd.Writer).Next`, which
// returns a slice pointing to the next `n` bytes of the writer, and increments
// the write position by the length of the returned slice. This allows users
// to write directly to the end of the buffer.
//
package fwd
import "io"
const (
// DefaultReaderSize is the default size of the read buffer
DefaultReaderSize = 2048
// minimum read buffer; straight from bufio
minReaderSize = 16
)
// NewReader returns a new *Reader that reads from 'r'
func NewReader(r io.Reader) *Reader {
return NewReaderSize(r, DefaultReaderSize)
}
// NewReaderSize returns a new *Reader that
// reads from 'r' and has a buffer size 'n'
func NewReaderSize(r io.Reader, n int) *Reader {
rd := &Reader{
r: r,
data: make([]byte, 0, max(minReaderSize, n)),
}
if s, ok := r.(io.Seeker); ok {
rd.rs = s
}
return rd
}
// Reader is a buffered look-ahead reader
type Reader struct {
r io.Reader // underlying reader
// data[n:len(data)] is buffered data; data[len(data):cap(data)] is free buffer space
data []byte // data
n int // read offset
state error // last read error
// if the reader passed to NewReader was
// also an io.Seeker, this is non-nil
rs io.Seeker
}
// Reset resets the underlying reader
// and the read buffer.
func (r *Reader) Reset(rd io.Reader) {
r.r = rd
r.data = r.data[0:0]
r.n = 0
r.state = nil
if s, ok := rd.(io.Seeker); ok {
r.rs = s
} else {
r.rs = nil
}
}
// more() does one read on the underlying reader
func (r *Reader) more() {
// move data backwards so that
// the read offset is 0; this way
// we can supply the maximum number of
// bytes to the reader
if r.n != 0 {
r.data = r.data[:copy(r.data[0:], r.data[r.n:])]
r.n = 0
}
var a int
a, r.state = r.r.Read(r.data[len(r.data):cap(r.data)])
if a == 0 && r.state == nil {
r.state = io.ErrNoProgress
return
}
r.data = r.data[:len(r.data)+a]
}
// pop error
func (r *Reader) err() (e error) {
e, r.state = r.state, nil
return
}
// pop error; EOF -> io.ErrUnexpectedEOF
func (r *Reader) noEOF() (e error) {
e, r.state = r.state, nil
if e == io.EOF {
e = io.ErrUnexpectedEOF
}
return
}
// buffered bytes
func (r *Reader) buffered() int { return len(r.data) - r.n }
// Buffered returns the number of bytes currently in the buffer
func (r *Reader) Buffered() int { return len(r.data) - r.n }
// BufferSize returns the total size of the buffer
func (r *Reader) BufferSize() int { return cap(r.data) }
// Peek returns the next 'n' buffered bytes,
// reading from the underlying reader if necessary.
// It will only return a slice shorter than 'n' bytes
// if it also returns an error. Peek does not advance
// the reader. EOF errors are *not* returned as
// io.ErrUnexpectedEOF.
func (r *Reader) Peek(n int) ([]byte, error) {
// in the degenerate case,
// we may need to realloc
// (the caller asked for more
// bytes than the size of the buffer)
if cap(r.data) < n {
old := r.data[r.n:]
r.data = make([]byte, n+r.buffered())
r.data = r.data[:copy(r.data, old)]
r.n = 0
}
// keep filling until
// we hit an error or
// read enough bytes
for r.buffered() < n && r.state == nil {
r.more()
}
// we must have hit an error
if r.buffered() < n {
return r.data[r.n:], r.err()
}
return r.data[r.n : r.n+n], nil
}
// Skip moves the reader forward 'n' bytes.
// Returns the number of bytes skipped and any
// errors encountered. It is analogous to Seek(n, 1).
// If the underlying reader implements io.Seeker, then
// that method will be used to skip forward.
//
// If the reader encounters
// an EOF before skipping 'n' bytes, it
// returns io.ErrUnexpectedEOF. If the
// underlying reader implements io.Seeker, then
// those rules apply instead. (Many implementations
// will not return `io.EOF` until the next call
// to Read.)
func (r *Reader) Skip(n int) (int, error) {
// fast path
if r.buffered() >= n {
r.n += n
return n, nil
}
// use seeker implementation
// if we can
if r.rs != nil {
return r.skipSeek(n)
}
// loop on filling
// and then erasing
o := n
for r.buffered() < n && r.state == nil {
r.more()
// we can skip forward
// up to r.buffered() bytes
step := min(r.buffered(), n)
r.n += step
n -= step
}
// at this point, n should be
// 0 if everything went smoothly
return o - n, r.noEOF()
}
// Next returns the next 'n' bytes in the stream.
// Unlike Peek, Next advances the reader position.
// The returned bytes point to the same
// data as the buffer, so the slice is
// only valid until the next reader method call.
// An EOF is considered an unexpected error.
// If the returned slice is shorter than the
// length asked for, an error will be returned,
// and the reader position will not be incremented.
func (r *Reader) Next(n int) ([]byte, error) {
// in case the buffer is too small
if cap(r.data) < n {
old := r.data[r.n:]
r.data = make([]byte, n+r.buffered())
r.data = r.data[:copy(r.data, old)]
r.n = 0
}
// fill at least 'n' bytes
for r.buffered() < n && r.state == nil {
r.more()
}
if r.buffered() < n {
return r.data[r.n:], r.noEOF()
}
out := r.data[r.n : r.n+n]
r.n += n
return out, nil
}
// skipSeek uses the io.Seeker to seek forward.
// only call this function when n > r.buffered()
func (r *Reader) skipSeek(n int) (int, error) {
o := r.buffered()
// first, clear buffer
n -= o
r.n = 0
r.data = r.data[:0]
// then seek forward the remaining bytes
i, err := r.rs.Seek(int64(n), 1)
return int(i) + o, err
}
// Read implements `io.Reader`
func (r *Reader) Read(b []byte) (int, error) {
// if we have data in the buffer, just
// return that.
if r.buffered() != 0 {
x := copy(b, r.data[r.n:])
r.n += x
return x, nil
}
var n int
// we have no buffered data; determine
// whether or not to buffer or call
// the underlying reader directly
if len(b) >= cap(r.data) {
n, r.state = r.r.Read(b)
} else {
r.more()
n = copy(b, r.data)
r.n = n
}
if n == 0 {
return 0, r.err()
}
return n, nil
}
// ReadFull attempts to read len(b) bytes into
// 'b'. It returns the number of bytes read into
// 'b', and an error if it does not return len(b).
// EOF is considered an unexpected error.
func (r *Reader) ReadFull(b []byte) (int, error) {
var n int // read into b
var nn int // scratch
l := len(b)
// either read buffered data,
// or read directly from the underlying
// reader, or fetch more buffered data.
for n < l && r.state == nil {
if r.buffered() != 0 {
nn = copy(b[n:], r.data[r.n:])
n += nn
r.n += nn
} else if l-n > cap(r.data) {
nn, r.state = r.r.Read(b[n:])
n += nn
} else {
r.more()
}
}
if n < l {
return n, r.noEOF()
}
return n, nil
}
// ReadByte implements `io.ByteReader`
func (r *Reader) ReadByte() (byte, error) {
for r.buffered() < 1 && r.state == nil {
r.more()
}
if r.buffered() < 1 {
return 0, r.err()
}
b := r.data[r.n]
r.n++
return b, nil
}
// WriteTo implements `io.WriterTo`
func (r *Reader) WriteTo(w io.Writer) (int64, error) {
var (
i int64
ii int
err error
)
// first, clear buffer
if r.buffered() > 0 {
ii, err = w.Write(r.data[r.n:])
i += int64(ii)
if err != nil {
return i, err
}
r.data = r.data[0:0]
r.n = 0
}
for r.state == nil {
// here we just do
// 1:1 reads and writes
r.more()
if r.buffered() > 0 {
ii, err = w.Write(r.data)
i += int64(ii)
if err != nil {
return i, err
}
r.data = r.data[0:0]
r.n = 0
}
}
if r.state != io.EOF {
return i, r.err()
}
return i, nil
}
func min(a int, b int) int {
if a < b {
return a
}
return b
}
func max(a int, b int) int {
if a < b {
return b
}
return a
}

@@ -0,0 +1,354 @@
package fwd
import (
"bytes"
"io"
"io/ioutil"
"math/rand"
"testing"
"unsafe"
)
// partialReader forwards only a
// random-length prefix of the supplied
// byte slice to the underlying reader
type partialReader struct {
r io.Reader
}
func (p partialReader) Read(b []byte) (int, error) {
n := max(1, rand.Intn(len(b)))
return p.r.Read(b[:n])
}
func randomBts(sz int) []byte {
o := make([]byte, sz)
for i := 0; i < len(o); i += 8 {
j := (*int64)(unsafe.Pointer(&o[i]))
*j = rand.Int63()
}
return o
}
func TestRead(t *testing.T) {
bts := randomBts(512)
// make the buffer much
// smaller than the underlying
// bytes to incur multiple fills
rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 128)
if rd.BufferSize() != cap(rd.data) {
t.Errorf("BufferSize() returned %d; should return %d", rd.BufferSize(), cap(rd.data))
}
// starting Buffered() should be 0
if rd.Buffered() != 0 {
t.Errorf("Buffered() should return 0 at initialization; got %d", rd.Buffered())
}
some := make([]byte, 32)
n, err := rd.Read(some)
if err != nil {
t.Fatal(err)
}
if n == 0 {
t.Fatal("read 0 bytes w/ a non-nil error!")
}
some = some[:n]
more := make([]byte, 64)
j, err := rd.Read(more)
if err != nil {
t.Fatal(err)
}
if j == 0 {
t.Fatal("read 0 bytes w/ a non-nil error")
}
more = more[:j]
out, err := ioutil.ReadAll(rd)
if err != nil {
t.Fatal(err)
}
all := append(some, more...)
all = append(all, out...)
if !bytes.Equal(bts, all) {
t.Errorf("bytes not equal; %d bytes in and %d bytes out", len(bts), len(out))
}
// test filling out of the underlying reader
big := randomBts(1 << 21)
rd = NewReaderSize(partialReader{bytes.NewReader(big)}, 2048)
buf := make([]byte, 3100)
n, err = rd.ReadFull(buf)
if err != nil {
t.Fatal(err)
}
if n != 3100 {
t.Errorf("expected 3100 bytes read by ReadFull; got %d", n)
}
if !bytes.Equal(buf[:n], big[:n]) {
t.Error("data parity")
}
rest := make([]byte, (1<<21)-3100)
n, err = io.ReadFull(rd, rest)
if err != nil {
t.Fatal(err)
}
if n != len(rest) {
t.Errorf("expected %d bytes read by io.ReadFull; got %d", len(rest), n)
}
if !bytes.Equal(append(buf, rest...), big) {
t.Fatal("data parity")
}
}
func TestReadByte(t *testing.T) {
bts := randomBts(512)
rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 98)
var (
err error
i int
b byte
)
// scan through the whole
// array byte-by-byte
for err != io.EOF {
b, err = rd.ReadByte()
if err == nil {
if b != bts[i] {
t.Fatalf("offset %d: %d in; %d out", i, b, bts[i])
}
}
i++
}
if err != io.EOF {
t.Fatal(err)
}
}
func TestSkipNoSeek(t *testing.T) {
bts := randomBts(1024)
rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
n, err := rd.Skip(512)
if err != nil {
t.Fatal(err)
}
if n != 512 {
t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 512)
}
var b byte
b, err = rd.ReadByte()
if err != nil {
t.Fatal(err)
}
if b != bts[512] {
t.Fatalf("at index %d: %d in; %d out", 512, bts[512], b)
}
n, err = rd.Skip(10)
if err != nil {
t.Fatal(err)
}
if n != 10 {
t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 10)
}
// now try to skip past the end
rd = NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
n, err = rd.Skip(2000)
if err != io.ErrUnexpectedEOF {
t.Fatalf("expected error %q; got %q", io.EOF, err)
}
if n != 1024 {
t.Fatalf("expected to skip only 1024 bytes; skipped %d", n)
}
}
func TestSkipSeek(t *testing.T) {
bts := randomBts(1024)
// bytes.Reader implements io.Seeker
rd := NewReaderSize(bytes.NewReader(bts), 200)
n, err := rd.Skip(512)
if err != nil {
t.Fatal(err)
}
if n != 512 {
t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 512)
}
var b byte
b, err = rd.ReadByte()
if err != nil {
t.Fatal(err)
}
if b != bts[512] {
t.Fatalf("at index %d: %d in; %d out", 512, bts[512], b)
}
n, err = rd.Skip(10)
if err != nil {
t.Fatal(err)
}
if n != 10 {
t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 10)
}
// now try to skip past the end
rd.Reset(bytes.NewReader(bts))
// because of how bytes.Reader
// implements Seek, this should
// return (2000, nil)
n, err = rd.Skip(2000)
if err != nil {
t.Fatal(err)
}
if n != 2000 {
t.Fatalf("should have returned %d bytes; returned %d", 2000, n)
}
// the next call to Read()
// should return io.EOF
n, err = rd.Read([]byte{0, 0, 0})
if err != io.EOF {
t.Errorf("expected %q; got %q", io.EOF, err)
}
if n != 0 {
t.Errorf("expected 0 bytes read; got %d", n)
}
}
func TestPeek(t *testing.T) {
bts := randomBts(1024)
rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
// first, a peek < buffer size
var (
peek []byte
err error
)
peek, err = rd.Peek(100)
if err != nil {
t.Fatal(err)
}
if len(peek) != 100 {
t.Fatalf("asked for %d bytes; got %d", 100, len(peek))
}
if !bytes.Equal(peek, bts[:100]) {
t.Fatal("peeked bytes not equal")
}
// now, a peek > buffer size
peek, err = rd.Peek(256)
if err != nil {
t.Fatal(err)
}
if len(peek) != 256 {
t.Fatalf("asked for %d bytes; got %d", 100, len(peek))
}
if !bytes.Equal(peek, bts[:256]) {
t.Fatal("peeked bytes not equal")
}
// now try to peek past EOF
peek, err = rd.Peek(2048)
if err != io.EOF {
t.Fatalf("expected error %q; got %q", io.EOF, err)
}
if len(peek) != 1024 {
t.Fatalf("expected %d bytes peek-able; got %d", 1024, len(peek))
}
}
func TestNext(t *testing.T) {
size := 1024
bts := randomBts(size)
rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
chunksize := 256
chunks := size / chunksize
for i := 0; i < chunks; i++ {
out, err := rd.Next(chunksize)
if err != nil {
t.Fatal(err)
}
start := chunksize * i
if !bytes.Equal(bts[start:start+chunksize], out) {
t.Fatalf("chunk %d: chunks not equal", i+1)
}
}
}
func TestWriteTo(t *testing.T) {
bts := randomBts(2048)
rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
// cause the buffer
// to fill a little, just
// to complicate things
rd.Peek(25)
var out bytes.Buffer
n, err := rd.WriteTo(&out)
if err != nil {
t.Fatal(err)
}
if n != 2048 {
t.Fatalf("should have written %d bytes; wrote %d", 2048, n)
}
if !bytes.Equal(out.Bytes(), bts) {
t.Fatal("bytes not equal")
}
}
func TestReadFull(t *testing.T) {
bts := randomBts(1024)
rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 256)
// try to ReadFull() the whole thing
out := make([]byte, 1024)
n, err := rd.ReadFull(out)
if err != nil {
t.Fatal(err)
}
if n != 1024 {
t.Fatalf("expected to read %d bytes; read %d", 1024, n)
}
if !bytes.Equal(bts, out) {
t.Fatal("bytes not equal")
}
// we've read everything; this should EOF
n, err = rd.Read(out)
if err != io.EOF {
t.Fatalf("expected %q; got %q", io.EOF, err)
}
rd.Reset(partialReader{bytes.NewReader(bts)})
// now try to read *past* EOF
out = make([]byte, 1500)
n, err = rd.ReadFull(out)
if err != io.ErrUnexpectedEOF {
t.Fatalf("expected error %q; got %q", io.EOF, err)
}
if n != 1024 {
t.Fatalf("expected to read %d bytes; read %d", 1024, n)
}
}

@@ -0,0 +1,224 @@
package fwd
import "io"
const (
// DefaultWriterSize is the
// default write buffer size.
DefaultWriterSize = 2048
minWriterSize = minReaderSize
)
// Writer is a buffered writer
type Writer struct {
w io.Writer // writer
buf []byte // 0:len(buf) is buffered data
}
// NewWriter returns a new writer
// that writes to 'w' and has a buffer
// that is `DefaultWriterSize` bytes.
func NewWriter(w io.Writer) *Writer {
if wr, ok := w.(*Writer); ok {
return wr
}
return &Writer{
w: w,
buf: make([]byte, 0, DefaultWriterSize),
}
}
// NewWriterSize returns a new writer
// that writes to 'w' and has a buffer
// that is 'size' bytes.
func NewWriterSize(w io.Writer, size int) *Writer {
if wr, ok := w.(*Writer); ok && cap(wr.buf) >= size {
return wr
}
return &Writer{
w: w,
buf: make([]byte, 0, max(size, minWriterSize)),
}
}
// Buffered returns the number of buffered bytes
// in the writer.
func (w *Writer) Buffered() int { return len(w.buf) }
// BufferSize returns the maximum size of the buffer.
func (w *Writer) BufferSize() int { return cap(w.buf) }
// Flush flushes any buffered bytes
// to the underlying writer.
func (w *Writer) Flush() error {
l := len(w.buf)
if l > 0 {
n, err := w.w.Write(w.buf)
// if we didn't write the whole
// thing, copy the unwritten
// bytes to the beginning of the
// buffer.
if n < l && n > 0 {
w.pushback(n)
if err == nil {
err = io.ErrShortWrite
}
}
if err != nil {
return err
}
w.buf = w.buf[:0]
return nil
}
return nil
}
// Write implements `io.Writer`
func (w *Writer) Write(p []byte) (int, error) {
c, l, ln := cap(w.buf), len(w.buf), len(p)
avail := c - l
// requires flush
if avail < ln {
if err := w.Flush(); err != nil {
return 0, err
}
l = len(w.buf)
}
// too big to fit in buffer;
// write directly to w.w
if c < ln {
return w.w.Write(p)
}
// grow buf slice; copy; return
w.buf = w.buf[:l+ln]
return copy(w.buf[l:], p), nil
}
// WriteString is analogous to Write, but it takes a string.
func (w *Writer) WriteString(s string) (int, error) {
c, l, ln := cap(w.buf), len(w.buf), len(s)
avail := c - l
// requires flush
if avail < ln {
if err := w.Flush(); err != nil {
return 0, err
}
l = len(w.buf)
}
// too big to fit in buffer;
// write directly to w.w
//
// yes, this is unsafe. *but*
// io.Writer is not allowed
// to mutate its input or
// maintain a reference to it,
// per the spec in package io.
//
// plus, if the string is really
// too big to fit in the buffer, then
// creating a copy to write it is
// expensive (and, strictly speaking,
// unnecessary)
if c < ln {
return w.w.Write(unsafestr(s))
}
// grow buf slice; copy; return
w.buf = w.buf[:l+ln]
return copy(w.buf[l:], s), nil
}
// WriteByte implements `io.ByteWriter`
func (w *Writer) WriteByte(b byte) error {
if len(w.buf) == cap(w.buf) {
if err := w.Flush(); err != nil {
return err
}
}
w.buf = append(w.buf, b)
return nil
}
// Next returns the next 'n' free bytes
// in the write buffer, flushing the writer
// as necessary. Next will return `io.ErrShortBuffer`
// if 'n' is greater than the size of the write buffer.
// Calls to 'next' increment the write position by
// the size of the returned buffer.
func (w *Writer) Next(n int) ([]byte, error) {
c, l := cap(w.buf), len(w.buf)
if n > c {
return nil, io.ErrShortBuffer
}
avail := c - l
if avail < n {
if err := w.Flush(); err != nil {
return nil, err
}
l = len(w.buf)
}
w.buf = w.buf[:l+n]
return w.buf[l:], nil
}
// take the bytes from w.buf[n:len(w.buf)]
// and put them at the beginning of w.buf,
// and resize to the length of the copied segment.
func (w *Writer) pushback(n int) {
w.buf = w.buf[:copy(w.buf, w.buf[n:])]
}
// ReadFrom implements `io.ReaderFrom`
func (w *Writer) ReadFrom(r io.Reader) (int64, error) {
// anticipatory flush
if err := w.Flush(); err != nil {
return 0, err
}
w.buf = w.buf[0:cap(w.buf)] // expand buffer
var nn int64 // written
var err error // error
var x int // read
// 1:1 reads and writes
for err == nil {
x, err = r.Read(w.buf)
if x > 0 {
n, werr := w.w.Write(w.buf[:x])
nn += int64(n)
if err != nil {
if n < x && n > 0 {
w.pushback(n - x)
}
return nn, werr
}
if n < x {
w.pushback(n - x)
return nn, io.ErrShortWrite
}
} else if err == nil {
err = io.ErrNoProgress
break
}
}
if err != io.EOF {
return nn, err
}
// we only clear here
// because we are sure
// the writes have
// succeeded. otherwise,
// we retain the data in case
// future writes succeed.
w.buf = w.buf[0:0]
return nn, nil
}

@@ -0,0 +1,5 @@
// +build appengine
package fwd
func unsafestr(s string) []byte { return []byte(s) }

@@ -0,0 +1,239 @@
package fwd
import (
"bytes"
"io"
"math/rand"
"testing"
)
type chunkedWriter struct {
w *Writer
}
// writes 'p' in randomly-sized chunks
func (c chunkedWriter) Write(p []byte) (int, error) {
l := len(p)
n := 0
for n < l {
amt := max(rand.Intn(l-n), 1) // number of bytes to write; at least 1
nn, err := c.w.Write(p[n : n+amt]) //
n += nn
if err == nil && nn < amt {
err = io.ErrShortWrite
}
if err != nil {
return n, err
}
}
return n, nil
}
// analogous to Write(), but w/ str
func (c chunkedWriter) WriteString(s string) (int, error) {
l := len(s)
n := 0
for n < l {
amt := max(rand.Intn(l-n), 1) // number of bytes to write; at least 1
nn, err := c.w.WriteString(s[n : n+amt]) //
n += nn
if err == nil && nn < amt {
err = io.ErrShortWrite
}
if err != nil {
return n, err
}
}
return n, nil
}
// writes via random calls to Next()
type nextWriter struct {
wr *Writer
}
func (c nextWriter) Write(p []byte) (int, error) {
l := len(p)
n := 0
for n < l {
amt := max(rand.Intn(l-n), 1) // at least 1 byte
fwd, err := c.wr.Next(amt) // get next (amt) bytes
if err != nil {
// this may happen occasionally
if err == io.ErrShortBuffer {
if cap(c.wr.buf) >= amt {
panic("bad io.ErrShortBuffer")
}
continue
}
return n, err
}
if len(fwd) != amt {
panic("bad Next() len")
}
n += copy(fwd, p[n:])
}
return n, nil
}
func TestWrite(t *testing.T) {
nbts := 4096
bts := randomBts(nbts)
var buf bytes.Buffer
wr := NewWriterSize(&buf, 512)
if wr.BufferSize() != 512 {
t.Fatalf("expected BufferSize() to be %d; found %d", 512, wr.BufferSize())
}
cwr := chunkedWriter{wr}
nb, err := cwr.Write(bts)
if err != nil {
t.Fatal(err)
}
if nb != nbts {
t.Fatalf("expected to write %d bytes; wrote %d bytes", nbts, nb)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
if wr.Buffered() != 0 {
t.Fatalf("expected 0 buffered bytes; found %d", wr.Buffered())
}
if buf.Len() != nbts {
t.Fatalf("wrote %d bytes, but buffer is %d bytes long", nbts, buf.Len())
}
if !bytes.Equal(bts, buf.Bytes()) {
t.Fatal("buf.Bytes() is not the same as the input bytes")
}
}
func TestWriteString(t *testing.T) {
nbts := 3998
str := string(randomBts(nbts))
var buf bytes.Buffer
wr := NewWriterSize(&buf, 1137)
if wr.BufferSize() != 1137 {
t.Fatalf("expected BufferSize() to return %d; returned %d", 1137, wr.BufferSize())
}
cwr := chunkedWriter{wr}
nb, err := cwr.WriteString(str)
if err != nil {
t.Fatal(err)
}
if nb != nbts {
t.Fatalf("expected to write %d bytes; wrote %d bytes", nbts, nb)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
if wr.Buffered() != 0 {
t.Fatalf("expected 0 buffered bytes; found %d", wr.Buffered())
}
if buf.Len() != nbts {
t.Fatalf("wrote %d bytes, buf buffer is %d bytes long", nbts, buf.Len())
}
if buf.String() != str {
t.Fatal("buf.String() is not the same as input string")
}
}
func TestWriteByte(t *testing.T) {
nbts := 3200
bts := randomBts(nbts)
var buf bytes.Buffer
wr := NewWriter(&buf)
if wr.BufferSize() != DefaultWriterSize {
t.Fatalf("expected BufferSize() to return %d; returned %d", DefaultWriterSize, wr.BufferSize())
}
// write byte-by-byte
for _, b := range bts {
if err := wr.WriteByte(b); err != nil {
t.Fatal(err)
}
}
err := wr.Flush()
if err != nil {
t.Fatal(err)
}
if buf.Len() != nbts {
t.Fatalf("expected buf.Len() to be %d; got %d", nbts, buf.Len())
}
if !bytes.Equal(buf.Bytes(), bts) {
t.Fatal("buf.Bytes() and input are not equal")
}
}
func TestWriterNext(t *testing.T) {
nbts := 1871
bts := randomBts(nbts)
var buf bytes.Buffer
wr := NewWriterSize(&buf, 500)
nwr := nextWriter{wr}
nb, err := nwr.Write(bts)
if err != nil {
t.Fatal(err)
}
if nb != nbts {
t.Fatalf("expected to write %d bytes; wrote %d", nbts, nb)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
if buf.Len() != nbts {
t.Fatalf("expected buf.Len() to be %d; got %d", nbts, buf.Len())
}
if !bytes.Equal(buf.Bytes(), bts) {
t.Fatal("buf.Bytes() and input are not equal")
}
}
func TestReadFrom(t *testing.T) {
nbts := 2139
bts := randomBts(nbts)
var buf bytes.Buffer
wr := NewWriterSize(&buf, 987)
rd := partialReader{bytes.NewReader(bts)}
nb, err := wr.ReadFrom(rd)
if err != nil {
t.Fatal(err)
}
if nb != int64(nbts) {
t.Fatalf("expeted to write %d bytes; wrote %d", nbts, nb)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
if buf.Len() != nbts {
t.Fatalf("expected buf.Len() to be %d; got %d", nbts, buf.Len())
}
if !bytes.Equal(buf.Bytes(), bts) {
t.Fatal("buf.Bytes() and input are not equal")
}
}

@@ -0,0 +1,18 @@
// +build !appengine
package fwd
import (
"reflect"
"unsafe"
)
// unsafe cast string as []byte
func unsafestr(b string) []byte {
l := len(b)
return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
Len: l,
Cap: l,
Data: (*reflect.StringHeader)(unsafe.Pointer(&b)).Data,
}))
}

@@ -0,0 +1,24 @@
// +build linux,!appengine
package msgp
import (
"os"
"syscall"
)
func adviseRead(mem []byte) {
syscall.Madvise(mem, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED)
}
func adviseWrite(mem []byte) {
syscall.Madvise(mem, syscall.MADV_SEQUENTIAL)
}
func fallocate(f *os.File, sz int64) error {
err := syscall.Fallocate(int(f.Fd()), 0, 0, sz)
if err == syscall.ENOTSUP {
return f.Truncate(sz)
}
return err
}

@@ -0,0 +1,17 @@
// +build !linux appengine
package msgp
import (
"os"
)
// TODO: darwin, BSD support
func adviseRead(mem []byte) {}
func adviseWrite(mem []byte) {}
func fallocate(f *os.File, sz int64) error {
return f.Truncate(sz)
}

@@ -0,0 +1,15 @@
// +build appengine
package msgp
// let's just assume appengine
// uses 64-bit hardware...
const smallint = false
func UnsafeString(b []byte) string {
return string(b)
}
func UnsafeBytes(s string) []byte {
return []byte(s)
}

@@ -0,0 +1,39 @@
package msgp
type timer interface {
StartTimer()
StopTimer()
}
// EndlessReader is an io.Reader
// that loops over the same data
// endlessly. It is used for benchmarking.
type EndlessReader struct {
tb timer
data []byte
offset int
}
// NewEndlessReader returns a new endless reader
func NewEndlessReader(b []byte, tb timer) *EndlessReader {
return &EndlessReader{tb: tb, data: b, offset: 0}
}
// Read implements io.Reader. In practice, it
// always returns (len(p), nil), although it
// fills the supplied slice while the benchmark
// timer is stopped.
func (c *EndlessReader) Read(p []byte) (int, error) {
c.tb.StopTimer()
var n int
l := len(p)
m := len(c.data)
for n < l {
nn := copy(p[n:], c.data[c.offset:])
n += nn
c.offset += nn
c.offset %= m
}
c.tb.StartTimer()
return n, nil
}

@@ -0,0 +1,142 @@
// This package is the support library for the msgp code generator (http://github.com/tinylib/msgp).
//
// This package defines the utilities used by the msgp code generator for encoding and decoding MessagePack
// from []byte and io.Reader/io.Writer types. Much of this package is devoted to helping the msgp code
// generator implement the Marshaler/Unmarshaler and Encodable/Decodable interfaces.
//
// This package defines four "families" of functions:
// - AppendXxxx() appends an object to a []byte in MessagePack encoding.
// - ReadXxxxBytes() reads an object from a []byte and returns the remaining bytes.
// - (*Writer).WriteXxxx() writes an object to the buffered *Writer type.
// - (*Reader).ReadXxxx() reads an object from a buffered *Reader type.
//
// Once a type has satisfied the `Encodable` and `Decodable` interfaces,
// it can be written and read from arbitrary `io.Writer`s and `io.Reader`s using
// msgp.Encode(io.Writer, msgp.Encodable)
// and
// msgp.Decode(io.Reader, msgp.Decodable)
//
// There are also methods for converting MessagePack to JSON without
// an explicit de-serialization step.
//
// For additional tips, tricks, and gotchas, please visit
// the wiki at http://github.com/tinylib/msgp
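//
// As a small illustration (a sketch, not an exhaustive reference), the
// Append/Read families operate directly on byte slices:
//
//	var b []byte
//	b = msgp.AppendMapHeader(b, 1)
//	b = msgp.AppendString(b, "name")
//	b = msgp.AppendInt64(b, 7)
//
//	sz, rest, err := msgp.ReadMapHeaderBytes(b)
//	// sz == 1; 'rest' points just past the map header; err is nil
//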
package msgp
const last4 = 0x0f
const first4 = 0xf0
const last5 = 0x1f
const first3 = 0xe0
const last7 = 0x7f
func isfixint(b byte) bool {
return b>>7 == 0
}
func isnfixint(b byte) bool {
return b&first3 == mnfixint
}
func isfixmap(b byte) bool {
return b&first4 == mfixmap
}
func isfixarray(b byte) bool {
return b&first4 == mfixarray
}
func isfixstr(b byte) bool {
return b&first3 == mfixstr
}
func wfixint(u uint8) byte {
return u & last7
}
func rfixint(b byte) uint8 {
return b
}
func wnfixint(i int8) byte {
return byte(i) | mnfixint
}
func rnfixint(b byte) int8 {
return int8(b)
}
func rfixmap(b byte) uint8 {
return b & last4
}
func wfixmap(u uint8) byte {
return mfixmap | (u & last4)
}
func rfixstr(b byte) uint8 {
return b & last5
}
func wfixstr(u uint8) byte {
return (u & last5) | mfixstr
}
func rfixarray(b byte) uint8 {
return (b & last4)
}
func wfixarray(u uint8) byte {
return (u & last4) | mfixarray
}
// These are all the byte
// prefixes defined by the
// msgpack standard
const (
// 0XXXXXXX
mfixint uint8 = 0x00
// 111XXXXX
mnfixint uint8 = 0xe0
// 1000XXXX
mfixmap uint8 = 0x80
// 1001XXXX
mfixarray uint8 = 0x90
// 101XXXXX
mfixstr uint8 = 0xa0
mnil uint8 = 0xc0
mfalse uint8 = 0xc2
mtrue uint8 = 0xc3
mbin8 uint8 = 0xc4
mbin16 uint8 = 0xc5
mbin32 uint8 = 0xc6
mext8 uint8 = 0xc7
mext16 uint8 = 0xc8
mext32 uint8 = 0xc9
mfloat32 uint8 = 0xca
mfloat64 uint8 = 0xcb
muint8 uint8 = 0xcc
muint16 uint8 = 0xcd
muint32 uint8 = 0xce
muint64 uint8 = 0xcf
mint8 uint8 = 0xd0
mint16 uint8 = 0xd1
mint32 uint8 = 0xd2
mint64 uint8 = 0xd3
mfixext1 uint8 = 0xd4
mfixext2 uint8 = 0xd5
mfixext4 uint8 = 0xd6
mfixext8 uint8 = 0xd7
mfixext16 uint8 = 0xd8
mstr8 uint8 = 0xd9
mstr16 uint8 = 0xda
mstr32 uint8 = 0xdb
marray16 uint8 = 0xdc
marray32 uint8 = 0xdd
mmap16 uint8 = 0xde
mmap32 uint8 = 0xdf
)

@@ -0,0 +1,12 @@
package msgp_test
//go:generate msgp -o=defgen_test.go -tests=false
type Blobs []Blob
type Blob struct {
Name string `msg:"name"`
Float float64 `msg:"float"`
Bytes []byte `msg:"bytes"`
Amount int64 `msg:"amount"`
}

@@ -0,0 +1,241 @@
package msgp
import (
"math"
)
// Locate returns a []byte pointing to the field
// in a messagepack map with the provided key. (The returned []byte
// points to a sub-slice of 'raw'; Locate does no allocations.) If the
// key doesn't exist in the map, a zero-length []byte will be returned.
func Locate(key string, raw []byte) []byte {
s, n := locate(raw, key)
return raw[s:n]
}
// Replace takes a key ("key") in a messagepack map ("raw")
// and replaces its value with the one provided and returns
// the new []byte. The returned []byte may point to the same
// memory as "raw". Replace makes no effort to evaluate the validity
// of the contents of 'val'. It may use up to the full capacity of 'raw.'
// Replace returns 'nil' if the field doesn't exist or if the object in 'raw'
// is not a map.
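//
// For example (an illustrative sketch; 'raw' is assumed to hold an encoded
// map and 'newval' an encoded MessagePack value):
//
//	if out := Replace("count", raw, newval); out != nil {
//		raw = out
//	}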
func Replace(key string, raw []byte, val []byte) []byte {
start, end := locate(raw, key)
if start == end {
return nil
}
return replace(raw, start, end, val, true)
}
// CopyReplace works similarly to Replace except that the returned
// byte slice does not point to the same memory as 'raw'. CopyReplace
// returns 'nil' if the field doesn't exist or 'raw' isn't a map.
func CopyReplace(key string, raw []byte, val []byte) []byte {
start, end := locate(raw, key)
if start == end {
return nil
}
return replace(raw, start, end, val, false)
}
// Remove removes a key-value pair from 'raw'. It returns
// 'raw' unchanged if the key didn't exist.
func Remove(key string, raw []byte) []byte {
start, end := locateKV(raw, key)
if start == end {
return raw
}
raw = raw[:start+copy(raw[start:], raw[end:])]
return resizeMap(raw, -1)
}
// HasKey returns whether the map in 'raw' has
// a field with key 'key'
func HasKey(key string, raw []byte) bool {
sz, bts, err := ReadMapHeaderBytes(raw)
if err != nil {
return false
}
var field []byte
for i := uint32(0); i < sz; i++ {
field, bts, err = ReadStringZC(bts)
if err != nil {
return false
}
if UnsafeString(field) == key {
return true
}
}
return false
}
func replace(raw []byte, start int, end int, val []byte, inplace bool) []byte {
ll := end - start // length of segment to replace
lv := len(val)
if inplace {
extra := lv - ll
// fastest case: we're doing
// a 1:1 replacement
if extra == 0 {
copy(raw[start:], val)
return raw
} else if extra < 0 {
// 'val' smaller than replaced value
// copy in place and shift back
x := copy(raw[start:], val)
y := copy(raw[start+x:], raw[end:])
return raw[:start+x+y]
} else if extra < cap(raw)-len(raw) {
// 'val' less than (cap-len) extra bytes
// copy in place and shift forward
raw = raw[0 : len(raw)+extra]
// shift end forward
copy(raw[end+extra:], raw[end:])
copy(raw[start:], val)
return raw
}
}
// we have to allocate new space
out := make([]byte, len(raw)+len(val)-ll)
x := copy(out, raw[:start])
y := copy(out[x:], val)
copy(out[x+y:], raw[end:])
return out
}
// locate does a naive O(n) search for the map key; returns start, end
// (returns 0,0 on error)
func locate(raw []byte, key string) (start int, end int) {
var (
sz uint32
bts []byte
field []byte
err error
)
sz, bts, err = ReadMapHeaderBytes(raw)
if err != nil {
return
}
// loop and locate field
for i := uint32(0); i < sz; i++ {
field, bts, err = ReadStringZC(bts)
if err != nil {
return 0, 0
}
if UnsafeString(field) == key {
// start location
l := len(raw)
start = l - len(bts)
bts, err = Skip(bts)
if err != nil {
return 0, 0
}
end = l - len(bts)
return
}
bts, err = Skip(bts)
if err != nil {
return 0, 0
}
}
return 0, 0
}
// locate key AND value
func locateKV(raw []byte, key string) (start int, end int) {
var (
sz uint32
bts []byte
field []byte
err error
)
sz, bts, err = ReadMapHeaderBytes(raw)
if err != nil {
return 0, 0
}
for i := uint32(0); i < sz; i++ {
tmp := len(bts)
field, bts, err = ReadStringZC(bts)
if err != nil {
return 0, 0
}
if UnsafeString(field) == key {
start = len(raw) - tmp
bts, err = Skip(bts)
if err != nil {
return 0, 0
}
end = len(raw) - len(bts)
return
}
bts, err = Skip(bts)
if err != nil {
return 0, 0
}
}
return 0, 0
}
// resizeMap rewrites the map header in 'raw' to reflect a change of 'delta' elements
func resizeMap(raw []byte, delta int64) []byte {
var sz int64
switch raw[0] {
case mmap16:
sz = int64(big.Uint16(raw[1:]))
if sz+delta <= math.MaxUint16 {
big.PutUint16(raw[1:], uint16(sz+delta))
return raw
}
if cap(raw)-len(raw) >= 2 {
raw = raw[0 : len(raw)+2]
copy(raw[5:], raw[3:])
big.PutUint32(raw[1:], uint32(sz+delta))
return raw
}
n := make([]byte, 0, len(raw)+5)
n = AppendMapHeader(n, uint32(sz+delta))
return append(n, raw[3:]...)
case mmap32:
sz = int64(big.Uint32(raw[1:]))
big.PutUint32(raw[1:], uint32(sz+delta))
return raw
default:
sz = int64(rfixmap(raw[0]))
if sz+delta < 16 {
raw[0] = wfixmap(uint8(sz + delta))
return raw
} else if sz+delta <= math.MaxUint16 {
if cap(raw)-len(raw) >= 2 {
raw = raw[0 : len(raw)+2]
copy(raw[3:], raw[1:])
raw[0] = mmap16
big.PutUint16(raw[1:], uint16(sz+delta))
return raw
}
n := make([]byte, 0, len(raw)+5)
n = AppendMapHeader(n, uint32(sz+delta))
return append(n, raw[1:]...)
}
if cap(raw)-len(raw) >= 4 {
raw = raw[0 : len(raw)+4]
copy(raw[5:], raw[1:])
raw[0] = mmap32
big.PutUint32(raw[1:], uint32(sz+delta))
return raw
}
n := make([]byte, 0, len(raw)+5)
n = AppendMapHeader(n, uint32(sz+delta))
return append(n, raw[1:]...)
}
}

@@ -0,0 +1,200 @@
package msgp
import (
"bytes"
"reflect"
"testing"
)
func TestRemove(t *testing.T) {
var buf bytes.Buffer
w := NewWriter(&buf)
w.WriteMapHeader(3)
w.WriteString("first")
w.WriteFloat64(-3.1)
w.WriteString("second")
w.WriteString("DELETE ME!!!")
w.WriteString("third")
w.WriteBytes([]byte("blah"))
w.Flush()
raw := Remove("second", buf.Bytes())
m, _, err := ReadMapStrIntfBytes(raw, nil)
if err != nil {
t.Fatal(err)
}
if len(m) != 2 {
t.Errorf("expected %d fields; found %d", 2, len(m))
}
if _, ok := m["first"]; !ok {
t.Errorf("field %q not found", "first")
}
if _, ok := m["third"]; !ok {
t.Errorf("field %q not found", "third")
}
if _, ok := m["second"]; ok {
t.Errorf("field %q (deleted field) still present", "second")
}
}
func TestLocate(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
en.WriteMapHeader(2)
en.WriteString("thing_one")
en.WriteString("value_one")
en.WriteString("thing_two")
en.WriteFloat64(2.0)
en.Flush()
field := Locate("thing_one", buf.Bytes())
if len(field) == 0 {
t.Fatal("field not found")
}
if !HasKey("thing_one", buf.Bytes()) {
t.Fatal("field not found")
}
var zbuf bytes.Buffer
w := NewWriter(&zbuf)
w.WriteString("value_one")
w.Flush()
if !bytes.Equal(zbuf.Bytes(), field) {
t.Errorf("got %q; wanted %q", field, zbuf.Bytes())
}
zbuf.Reset()
w.WriteFloat64(2.0)
w.Flush()
field = Locate("thing_two", buf.Bytes())
if len(field) == 0 {
t.Fatal("field not found")
}
if !bytes.Equal(zbuf.Bytes(), field) {
t.Errorf("got %q; wanted %q", field, zbuf.Bytes())
}
field = Locate("nope", buf.Bytes())
if len(field) != 0 {
t.Fatalf("wanted a zero-length returned slice")
}
}
func TestReplace(t *testing.T) {
// there are 4 cases that need coverage:
// - new value is smaller than old value
// - new value is the same size as the old value
// - new value is larger than old, but fits within cap(b)
// - new value is larger than old, and doesn't fit within cap(b)
var buf bytes.Buffer
en := NewWriter(&buf)
en.WriteMapHeader(3)
en.WriteString("thing_one")
en.WriteString("value_one")
en.WriteString("thing_two")
en.WriteFloat64(2.0)
en.WriteString("some_bytes")
en.WriteBytes([]byte("here are some bytes"))
en.Flush()
// same-size replacement
var fbuf bytes.Buffer
w := NewWriter(&fbuf)
w.WriteFloat64(4.0)
w.Flush()
// replace 2.0 with 4.0 in field two
raw := Replace("thing_two", buf.Bytes(), fbuf.Bytes())
if len(raw) == 0 {
t.Fatal("field not found")
}
var err error
m := make(map[string]interface{})
m, _, err = ReadMapStrIntfBytes(raw, m)
if err != nil {
t.Logf("%q", raw)
t.Fatal(err)
}
if !reflect.DeepEqual(m["thing_two"], 4.0) {
t.Errorf("wanted %v; got %v", 4.0, m["thing_two"])
}
// smaller-size replacement
// replace 2.0 with []byte("hi!")
fbuf.Reset()
w.WriteBytes([]byte("hi!"))
w.Flush()
raw = Replace("thing_two", raw, fbuf.Bytes())
if len(raw) == 0 {
t.Fatal("field not found")
}
m, _, err = ReadMapStrIntfBytes(raw, m)
if err != nil {
t.Logf("%q", raw)
t.Fatal(err)
}
if !reflect.DeepEqual(m["thing_two"], []byte("hi!")) {
t.Errorf("wanted %v; got %v", []byte("hi!"), m["thing_two"])
}
// larger-size replacement
fbuf.Reset()
w.WriteBytes([]byte("some even larger bytes than before"))
w.Flush()
raw = Replace("some_bytes", raw, fbuf.Bytes())
if len(raw) == 0 {
t.Logf("%q", raw)
t.Fatal(err)
}
m, _, err = ReadMapStrIntfBytes(raw, m)
if err != nil {
t.Logf("%q", raw)
t.Fatal(err)
}
if !reflect.DeepEqual(m["some_bytes"], []byte("some even larger bytes than before")) {
t.Errorf("wanted %v; got %v", []byte("hello there!"), m["some_bytes"])
}
// identical in-place replacement
field := Locate("some_bytes", raw)
newraw := CopyReplace("some_bytes", raw, field)
if !bytes.Equal(newraw, raw) {
t.Logf("in: %q", raw)
t.Logf("out: %q", newraw)
t.Error("bytes not equal after copyreplace")
}
}
func BenchmarkLocate(b *testing.B) {
var buf bytes.Buffer
en := NewWriter(&buf)
en.WriteMapHeader(3)
en.WriteString("thing_one")
en.WriteString("value_one")
en.WriteString("thing_two")
en.WriteFloat64(2.0)
en.WriteString("thing_three")
en.WriteBytes([]byte("hello!"))
en.Flush()
raw := buf.Bytes()
// bytes/s will be the number of bytes traversed per unit of time
field := Locate("thing_three", raw)
b.SetBytes(int64(len(raw) - len(field)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
Locate("thing_three", raw)
}
}

@@ -0,0 +1,99 @@
package msgp
// size of every object on the wire,
// plus type information. gives us
// constant-time type information
// for traversing composite objects.
//
var sizes = [256]bytespec{
mnil: {size: 1, extra: constsize, typ: NilType},
mfalse: {size: 1, extra: constsize, typ: BoolType},
mtrue: {size: 1, extra: constsize, typ: BoolType},
mbin8: {size: 2, extra: extra8, typ: BinType},
mbin16: {size: 3, extra: extra16, typ: BinType},
mbin32: {size: 5, extra: extra32, typ: BinType},
mext8: {size: 3, extra: extra8, typ: ExtensionType},
mext16: {size: 4, extra: extra16, typ: ExtensionType},
mext32: {size: 6, extra: extra32, typ: ExtensionType},
mfloat32: {size: 5, extra: constsize, typ: Float32Type},
mfloat64: {size: 9, extra: constsize, typ: Float64Type},
muint8: {size: 2, extra: constsize, typ: UintType},
muint16: {size: 3, extra: constsize, typ: UintType},
muint32: {size: 5, extra: constsize, typ: UintType},
muint64: {size: 9, extra: constsize, typ: UintType},
mint8: {size: 2, extra: constsize, typ: IntType},
mint16: {size: 3, extra: constsize, typ: IntType},
mint32: {size: 5, extra: constsize, typ: IntType},
mint64: {size: 9, extra: constsize, typ: IntType},
mfixext1: {size: 3, extra: constsize, typ: ExtensionType},
mfixext2: {size: 4, extra: constsize, typ: ExtensionType},
mfixext4: {size: 6, extra: constsize, typ: ExtensionType},
mfixext8: {size: 10, extra: constsize, typ: ExtensionType},
mfixext16: {size: 18, extra: constsize, typ: ExtensionType},
mstr8: {size: 2, extra: extra8, typ: StrType},
mstr16: {size: 3, extra: extra16, typ: StrType},
mstr32: {size: 5, extra: extra32, typ: StrType},
marray16: {size: 3, extra: array16v, typ: ArrayType},
marray32: {size: 5, extra: array32v, typ: ArrayType},
mmap16: {size: 3, extra: map16v, typ: MapType},
mmap32: {size: 5, extra: map32v, typ: MapType},
}
func init() {
// set up fixed fields
// fixint
for i := mfixint; i < 0x80; i++ {
sizes[i] = bytespec{size: 1, extra: constsize, typ: IntType}
}
// nfixint
for i := uint16(mnfixint); i < 0x100; i++ {
sizes[uint8(i)] = bytespec{size: 1, extra: constsize, typ: IntType}
}
// fixstr gets constsize,
// since the prefix yields the size
for i := mfixstr; i < 0xc0; i++ {
sizes[i] = bytespec{size: 1 + rfixstr(i), extra: constsize, typ: StrType}
}
// fixmap
for i := mfixmap; i < 0x90; i++ {
sizes[i] = bytespec{size: 1, extra: varmode(2 * rfixmap(i)), typ: MapType}
}
// fixarray
for i := mfixarray; i < 0xa0; i++ {
sizes[i] = bytespec{size: 1, extra: varmode(rfixarray(i)), typ: ArrayType}
}
}
// a valid bytespec has
// non-zero 'size' and
// non-zero 'typ'
type bytespec struct {
size uint8 // prefix size information
extra varmode // extra size information
typ Type // type
_ byte // makes bytespec 4 bytes (yes, this matters)
}
// size mode
// if positive, # elements for composites
type varmode int8
const (
constsize varmode = 0 // constant size (size bytes + uint8(varmode) objects)
extra8 = -1 // has uint8(p[1]) extra bytes
extra16 = -2 // has be16(p[1:]) extra bytes
extra32 = -3 // has be32(p[1:]) extra bytes
map16v = -4 // use map16
map32v = -5 // use map32
array16v = -6 // use array16
array32v = -7 // use array32
)
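// For example (illustrative): an mstr8 prefix maps to bytespec{size: 2, extra: extra8},
// so the whole object occupies 2 + uint8(p[1]) bytes, whereas a fixarray prefix has
// extra = varmode(n), meaning the 1-byte prefix is followed by n more objects.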
func getType(v byte) Type {
return sizes[v].typ
}

@@ -0,0 +1,142 @@
package msgp
import (
"fmt"
"reflect"
)
var (
// ErrShortBytes is returned when the
// slice being decoded is too short to
// contain the contents of the message
ErrShortBytes error = errShort{}
// this error is only returned
// if we reach code that should
// be unreachable
fatal error = errFatal{}
)
// Error is the interface satisfied
// by all of the errors that originate
// from this package.
type Error interface {
error
// Resumable returns whether
// or not decoding can safely
// continue after this error;
// it is false when the stream
// of data is malformed and the
// information is unrecoverable.
Resumable() bool
}
type errShort struct{}
func (e errShort) Error() string { return "msgp: too few bytes left to read object" }
func (e errShort) Resumable() bool { return false }
type errFatal struct{}
func (f errFatal) Error() string { return "msgp: fatal decoding error (unreachable code)" }
func (f errFatal) Resumable() bool { return false }
// ArrayError is an error returned
// when decoding a fixed-size array
// of the wrong size
type ArrayError struct {
Wanted uint32
Got uint32
}
// Error implements the error interface
func (a ArrayError) Error() string {
return fmt.Sprintf("msgp: wanted array of size %d; got %d", a.Wanted, a.Got)
}
// Resumable is always 'true' for ArrayErrors
func (a ArrayError) Resumable() bool { return true }
// IntOverflow is returned when a call
// would downcast an integer to a type
// with too few bits to hold its value.
type IntOverflow struct {
Value int64 // the value of the integer
FailedBitsize int // the bit size that the int64 could not fit into
}
// Error implements the error interface
func (i IntOverflow) Error() string {
return fmt.Sprintf("msgp: %d overflows int%d", i.Value, i.FailedBitsize)
}
// Resumable is always 'true' for overflows
func (i IntOverflow) Resumable() bool { return true }
// UintOverflow is returned when a call
// would downcast an unsigned integer to a type
// with too few bits to hold its value
type UintOverflow struct {
Value uint64 // value of the uint
FailedBitsize int // the bit size that couldn't fit the value
}
// Error implements the error interface
func (u UintOverflow) Error() string {
return fmt.Sprintf("msgp: %d overflows uint%d", u.Value, u.FailedBitsize)
}
// Resumable is always 'true' for overflows
func (u UintOverflow) Resumable() bool { return true }
// A TypeError is returned when a particular
// decoding method is unsuitable for decoding
// a particular MessagePack value.
type TypeError struct {
Method Type // Type expected by method
Encoded Type // Type actually encoded
}
// Error implements the error interface
func (t TypeError) Error() string {
return fmt.Sprintf("msgp: attempted to decode type %q with method for %q", t.Encoded, t.Method)
}
// Resumable returns 'true' for TypeErrors
func (t TypeError) Resumable() bool { return true }
// returns either InvalidPrefixError or
// TypeError depending on whether or not
// the prefix is recognized
func badPrefix(want Type, lead byte) error {
t := sizes[lead].typ
if t == InvalidType {
return InvalidPrefixError(lead)
}
return TypeError{Method: want, Encoded: t}
}
// InvalidPrefixError is returned when a bad encoding
// uses a prefix that is not recognized in the MessagePack standard.
// This kind of error is unrecoverable.
type InvalidPrefixError byte
// Error implements the error interface
func (i InvalidPrefixError) Error() string {
return fmt.Sprintf("msgp: unrecognized type prefix 0x%x", byte(i))
}
// Resumable returns 'false' for InvalidPrefixErrors
func (i InvalidPrefixError) Resumable() bool { return false }
// ErrUnsupportedType is returned
// when a bad argument is supplied
// to a function that takes `interface{}`.
type ErrUnsupportedType struct {
T reflect.Type
}
// Error implements error
func (e *ErrUnsupportedType) Error() string { return fmt.Sprintf("msgp: type %q not supported", e.T) }
// Resumable returns 'true' for ErrUnsupportedType
func (e *ErrUnsupportedType) Resumable() bool { return true }

@@ -0,0 +1,548 @@
package msgp
import (
"fmt"
"math"
)
const (
// Complex64Extension is the extension number used for complex64
Complex64Extension = 3
// Complex128Extension is the extension number used for complex128
Complex128Extension = 4
// TimeExtension is the extension number used for time.Time
TimeExtension = 5
)
// our extensions live here
var extensionReg = make(map[int8]func() Extension)
// RegisterExtension registers extensions so that they
// can be initialized and returned by methods that
// decode `interface{}` values. This should only
// be called during initialization. f() should return
// a newly-initialized zero value of the extension. Keep in
// mind that extensions 3, 4, and 5 are reserved for
// complex64, complex128, and time.Time, respectively,
// and that MessagePack reserves extension types from -128 to -1.
//
// For example, if you wanted to register a user-defined struct:
//
// msgp.RegisterExtension(10, func() msgp.Extension { return &MyExtension{} })
//
// RegisterExtension will panic if you call it multiple times
// with the same 'typ' argument, or if you use a reserved
// type (3, 4, or 5).
func RegisterExtension(typ int8, f func() Extension) {
switch typ {
case Complex64Extension, Complex128Extension, TimeExtension:
panic(fmt.Sprint("msgp: forbidden extension type:", typ))
}
if _, ok := extensionReg[typ]; ok {
panic(fmt.Sprint("msgp: RegisterExtension() called with typ", typ, "more than once"))
}
extensionReg[typ] = f
}
// ExtensionTypeError is an error type returned
// when there is a mis-match between an extension type
// and the type encoded on the wire
type ExtensionTypeError struct {
Got int8
Want int8
}
// Error implements the error interface
func (e ExtensionTypeError) Error() string {
return fmt.Sprintf("msgp: error decoding extension: wanted type %d; got type %d", e.Want, e.Got)
}
// Resumable returns 'true' for ExtensionTypeErrors
func (e ExtensionTypeError) Resumable() bool { return true }
func errExt(got int8, wanted int8) error {
return ExtensionTypeError{Got: got, Want: wanted}
}
// Extension is the interface fulfilled
// by types that want to define their
// own binary encoding.
type Extension interface {
// ExtensionType should return
// a int8 that identifies the concrete
// type of the extension. (Types <0 are
// officially reserved by the MessagePack
// specifications.)
ExtensionType() int8
// Len should return the length
// of the data to be encoded
Len() int
// MarshalBinaryTo should copy
// the data into the supplied slice,
// assuming that the slice has length Len()
MarshalBinaryTo([]byte) error
UnmarshalBinary([]byte) error
}
// RawExtension implements the Extension interface
type RawExtension struct {
Data []byte
Type int8
}
// ExtensionType implements Extension.ExtensionType, and returns r.Type
func (r *RawExtension) ExtensionType() int8 { return r.Type }
// Len implements Extension.Len, and returns len(r.Data)
func (r *RawExtension) Len() int { return len(r.Data) }
// MarshalBinaryTo implements Extension.MarshalBinaryTo,
// and copies r.Data into the supplied slice
func (r *RawExtension) MarshalBinaryTo(d []byte) error {
copy(d, r.Data)
return nil
}
// UnmarshalBinary implements Extension.UnmarshalBinary,
// and sets r.Data to the contents of the provided slice
func (r *RawExtension) UnmarshalBinary(b []byte) error {
if cap(r.Data) >= len(b) {
r.Data = r.Data[0:len(b)]
} else {
r.Data = make([]byte, len(b))
}
copy(r.Data, b)
return nil
}
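// A minimal sketch of a user-defined extension, written from a caller's point
// of view; the UUID type and the extension number 8 are illustrative choices
// (any non-negative, non-reserved number works):
//
//	type UUID [16]byte
//
//	func (u *UUID) ExtensionType() int8 { return 8 }
//	func (u *UUID) Len() int            { return 16 }
//	func (u *UUID) MarshalBinaryTo(b []byte) error { copy(b, u[:]); return nil }
//	func (u *UUID) UnmarshalBinary(b []byte) error { copy(u[:], b); return nil }
//
//	func init() {
//		msgp.RegisterExtension(8, func() msgp.Extension { return &UUID{} })
//	}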
// WriteExtension writes an extension type to the writer
func (mw *Writer) WriteExtension(e Extension) error {
l := e.Len()
var err error
switch l {
case 0:
o, err := mw.require(3)
if err != nil {
return err
}
mw.buf[o] = mext8
mw.buf[o+1] = 0
mw.buf[o+2] = byte(e.ExtensionType())
case 1:
o, err := mw.require(2)
if err != nil {
return err
}
mw.buf[o] = mfixext1
mw.buf[o+1] = byte(e.ExtensionType())
case 2:
o, err := mw.require(2)
if err != nil {
return err
}
mw.buf[o] = mfixext2
mw.buf[o+1] = byte(e.ExtensionType())
case 4:
o, err := mw.require(2)
if err != nil {
return err
}
mw.buf[o] = mfixext4
mw.buf[o+1] = byte(e.ExtensionType())
case 8:
o, err := mw.require(2)
if err != nil {
return err
}
mw.buf[o] = mfixext8
mw.buf[o+1] = byte(e.ExtensionType())
case 16:
o, err := mw.require(2)
if err != nil {
return err
}
mw.buf[o] = mfixext16
mw.buf[o+1] = byte(e.ExtensionType())
default:
switch {
case l < math.MaxUint8:
o, err := mw.require(3)
if err != nil {
return err
}
mw.buf[o] = mext8
mw.buf[o+1] = byte(uint8(l))
mw.buf[o+2] = byte(e.ExtensionType())
case l < math.MaxUint16:
o, err := mw.require(4)
if err != nil {
return err
}
mw.buf[o] = mext16
big.PutUint16(mw.buf[o+1:], uint16(l))
mw.buf[o+3] = byte(e.ExtensionType())
default:
o, err := mw.require(6)
if err != nil {
return err
}
mw.buf[o] = mext32
big.PutUint32(mw.buf[o+1:], uint32(l))
mw.buf[o+5] = byte(e.ExtensionType())
}
}
// we can only write directly to the
// buffer if we're sure that it
// fits the object
if l <= mw.bufsize() {
o, err := mw.require(l)
if err != nil {
return err
}
return e.MarshalBinaryTo(mw.buf[o:])
}
// here we create a new buffer
// just large enough for the body
// and save it as the write buffer
err = mw.flush()
if err != nil {
return err
}
buf := make([]byte, l)
err = e.MarshalBinaryTo(buf)
if err != nil {
return err
}
mw.buf = buf
mw.wloc = l
return nil
}
// peek at the extension type, assuming the next
// kind to be read is Extension
func (m *Reader) peekExtensionType() (int8, error) {
p, err := m.R.Peek(2)
if err != nil {
return 0, err
}
spec := sizes[p[0]]
if spec.typ != ExtensionType {
return 0, badPrefix(ExtensionType, p[0])
}
if spec.extra == constsize {
return int8(p[1]), nil
}
size := spec.size
p, err = m.R.Peek(int(size))
if err != nil {
return 0, err
}
return int8(p[size-1]), nil
}
// peekExtension peeks at the extension encoding type
// (must guarantee at least 1 byte in 'b')
func peekExtension(b []byte) (int8, error) {
spec := sizes[b[0]]
size := spec.size
if spec.typ != ExtensionType {
return 0, badPrefix(ExtensionType, b[0])
}
if len(b) < int(size) {
return 0, ErrShortBytes
}
// for fixed extensions,
// the type information is in
// the second byte
if spec.extra == constsize {
return int8(b[1]), nil
}
// otherwise, it's in the last
// part of the prefix
return int8(b[size-1]), nil
}
// ReadExtension reads the next object from the reader
// as an extension. ReadExtension will fail if the next
// object in the stream is not an extension, or if
// e.ExtensionType() does not match the type on the wire.
func (m *Reader) ReadExtension(e Extension) (err error) {
var p []byte
p, err = m.R.Peek(2)
if err != nil {
return
}
lead := p[0]
var read int
var off int
switch lead {
case mfixext1:
if int8(p[1]) != e.ExtensionType() {
err = errExt(int8(p[1]), e.ExtensionType())
return
}
p, err = m.R.Peek(3)
if err != nil {
return
}
err = e.UnmarshalBinary(p[2:])
if err == nil {
_, err = m.R.Skip(3)
}
return
case mfixext2:
if int8(p[1]) != e.ExtensionType() {
err = errExt(int8(p[1]), e.ExtensionType())
return
}
p, err = m.R.Peek(4)
if err != nil {
return
}
err = e.UnmarshalBinary(p[2:])
if err == nil {
_, err = m.R.Skip(4)
}
return
case mfixext4:
if int8(p[1]) != e.ExtensionType() {
err = errExt(int8(p[1]), e.ExtensionType())
return
}
p, err = m.R.Peek(6)
if err != nil {
return
}
err = e.UnmarshalBinary(p[2:])
if err == nil {
_, err = m.R.Skip(6)
}
return
case mfixext8:
if int8(p[1]) != e.ExtensionType() {
err = errExt(int8(p[1]), e.ExtensionType())
return
}
p, err = m.R.Peek(10)
if err != nil {
return
}
err = e.UnmarshalBinary(p[2:])
if err == nil {
_, err = m.R.Skip(10)
}
return
case mfixext16:
if int8(p[1]) != e.ExtensionType() {
err = errExt(int8(p[1]), e.ExtensionType())
return
}
p, err = m.R.Peek(18)
if err != nil {
return
}
err = e.UnmarshalBinary(p[2:])
if err == nil {
_, err = m.R.Skip(18)
}
return
case mext8:
p, err = m.R.Peek(3)
if err != nil {
return
}
if int8(p[2]) != e.ExtensionType() {
err = errExt(int8(p[2]), e.ExtensionType())
return
}
read = int(uint8(p[1]))
off = 3
case mext16:
p, err = m.R.Peek(4)
if err != nil {
return
}
if int8(p[3]) != e.ExtensionType() {
err = errExt(int8(p[3]), e.ExtensionType())
return
}
read = int(big.Uint16(p[1:]))
off = 4
case mext32:
p, err = m.R.Peek(6)
if err != nil {
return
}
if int8(p[5]) != e.ExtensionType() {
err = errExt(int8(p[5]), e.ExtensionType())
return
}
read = int(big.Uint32(p[1:]))
off = 6
default:
err = badPrefix(ExtensionType, lead)
return
}
p, err = m.R.Peek(read + off)
if err != nil {
return
}
err = e.UnmarshalBinary(p[off:])
if err == nil {
_, err = m.R.Skip(read + off)
}
return
}
// AppendExtension appends a MessagePack extension to the provided slice
func AppendExtension(b []byte, e Extension) ([]byte, error) {
l := e.Len()
var o []byte
var n int
switch l {
case 0:
o, n = ensure(b, 3)
o[n] = mext8
o[n+1] = 0
o[n+2] = byte(e.ExtensionType())
return o[:n+3], nil
case 1:
o, n = ensure(b, 3)
o[n] = mfixext1
o[n+1] = byte(e.ExtensionType())
n += 2
case 2:
o, n = ensure(b, 4)
o[n] = mfixext2
o[n+1] = byte(e.ExtensionType())
n += 2
case 4:
o, n = ensure(b, 6)
o[n] = mfixext4
o[n+1] = byte(e.ExtensionType())
n += 2
case 8:
o, n = ensure(b, 10)
o[n] = mfixext8
o[n+1] = byte(e.ExtensionType())
n += 2
case 16:
o, n = ensure(b, 18)
o[n] = mfixext16
o[n+1] = byte(e.ExtensionType())
n += 2
default:
switch {
case l < math.MaxUint8:
o, n = ensure(b, l+3)
o[n] = mext8
o[n+1] = byte(uint8(l))
o[n+2] = byte(e.ExtensionType())
n += 3
case l < math.MaxUint16:
o, n = ensure(b, l+4)
o[n] = mext16
big.PutUint16(o[n+1:], uint16(l))
o[n+3] = byte(e.ExtensionType())
n += 4
default:
o, n = ensure(b, l+6)
o[n] = mext32
big.PutUint32(o[n+1:], uint32(l))
o[n+5] = byte(e.ExtensionType())
n += 6
}
}
return o, e.MarshalBinaryTo(o[n:])
}
// ReadExtensionBytes reads an extension from 'b' into 'e'
// and returns any remaining bytes.
// Possible errors:
// - ErrShortBytes ('b' not long enough)
// - ExtensionTypeError{} (wire type does not match e.ExtensionType())
// - TypeError{} (next object not an extension)
// - InvalidPrefixError
// - An unmarshal error returned from e.UnmarshalBinary
func ReadExtensionBytes(b []byte, e Extension) ([]byte, error) {
l := len(b)
if l < 3 {
return b, ErrShortBytes
}
lead := b[0]
var (
sz int // size of 'data'
off int // offset of 'data'
typ int8
)
switch lead {
case mfixext1:
typ = int8(b[1])
sz = 1
off = 2
case mfixext2:
typ = int8(b[1])
sz = 2
off = 2
case mfixext4:
typ = int8(b[1])
sz = 4
off = 2
case mfixext8:
typ = int8(b[1])
sz = 8
off = 2
case mfixext16:
typ = int8(b[1])
sz = 16
off = 2
case mext8:
sz = int(uint8(b[1]))
typ = int8(b[2])
off = 3
if sz == 0 {
return b[3:], e.UnmarshalBinary(b[3:3])
}
case mext16:
if l < 4 {
return b, ErrShortBytes
}
sz = int(big.Uint16(b[1:]))
typ = int8(b[3])
off = 4
case mext32:
if l < 6 {
return b, ErrShortBytes
}
sz = int(big.Uint32(b[1:]))
typ = int8(b[5])
off = 6
default:
return b, badPrefix(ExtensionType, lead)
}
if typ != e.ExtensionType() {
return b, errExt(typ, e.ExtensionType())
}
// the data of the extension starts
// at 'off' and is 'sz' bytes long
if len(b[off:]) < sz {
return b, ErrShortBytes
}
tot := off + sz
return b[tot:], e.UnmarshalBinary(b[off:tot])
}
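// A short sketch of a round trip through the []byte-oriented calls above,
// using RawExtension so the example is self-contained; the payload and the
// extension number 8 are illustrative:
//
//	in := &msgp.RawExtension{Type: 8, Data: []byte("payload")}
//	buf, err := msgp.AppendExtension(nil, in)
//	if err != nil {
//		return err
//	}
//	out := &msgp.RawExtension{Type: 8} // the Type must match what is on the wire
//	rest, err := msgp.ReadExtensionBytes(buf, out)
//	// on success, len(rest) == 0 and out.Data holds "payload"; a mismatched
//	// Type would yield an ExtensionTypeError instead.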

View File

@ -0,0 +1,49 @@
package msgp
import (
"bytes"
"math/rand"
"testing"
"time"
)
var extSizes = [...]int{0, 1, 2, 4, 8, 16, int(tint8), int(tuint16), int(tuint32)}
func randomExt() RawExtension {
e := RawExtension{}
e.Type = int8(rand.Int())
e.Data = RandBytes(extSizes[rand.Intn(len(extSizes))])
return e
}
func TestReadWriteExtension(t *testing.T) {
rand.Seed(time.Now().Unix())
var buf bytes.Buffer
en := NewWriter(&buf)
dc := NewReader(&buf)
for i := 0; i < 25; i++ {
buf.Reset()
e := randomExt()
en.WriteExtension(&e)
en.Flush()
err := dc.ReadExtension(&e)
if err != nil {
t.Errorf("error with extension (length %d): %s", len(buf.Bytes()), err)
}
}
}
func TestReadWriteExtensionBytes(t *testing.T) {
var bts []byte
rand.Seed(time.Now().Unix())
for i := 0; i < 24; i++ {
e := randomExt()
bts, _ = AppendExtension(bts[0:0], &e)
_, err := ReadExtensionBytes(bts, &e)
if err != nil {
t.Errorf("error with extension (length %d): %s", len(bts), err)
}
}
}

View File

@ -0,0 +1,91 @@
// +build linux,!appengine darwin dragonfly freebsd netbsd openbsd
package msgp
import (
"os"
"syscall"
)
// ReadFile reads a file into 'dst' using
// a read-only memory mapping. Consequently,
// the file must be mmap-able, and the
// Unmarshaler should never write to
// the source memory. (Methods generated
// by the msgp tool obey that constraint, but
// user-defined implementations may not.)
//
// Reading and writing through file mappings
// is only efficient for large files; small
// files are best read and written using
// the ordinary streaming interfaces.
//
func ReadFile(dst Unmarshaler, file *os.File) error {
stat, err := file.Stat()
if err != nil {
return err
}
data, err := syscall.Mmap(int(file.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED)
if err != nil {
return err
}
adviseRead(data)
_, err = dst.UnmarshalMsg(data)
uerr := syscall.Munmap(data)
if err == nil {
err = uerr
}
return err
}
// MarshalSizer is the combination
// of the Marshaler and Sizer
// interfaces.
type MarshalSizer interface {
Marshaler
Sizer
}
// WriteFile writes a file from 'src' using
// memory mapping. It overwrites the entire
// contents of the previous file.
// The mapping size is calculated
// using the `Msgsize()` method
// of 'src', so it must produce a result
// equal to or greater than the actual encoded
// size of the object. Otherwise,
// a fault (SIGBUS) will occur.
//
// Reading and writing through file mappings
// is only efficient for large files; small
// files are best read and written using
// the ordinary streaming interfaces.
//
// NOTE: The performance of this call
// is highly OS- and filesystem-dependent.
// Users should take care to test that this
// performs as expected in a production environment.
// (Linux users should run a kernel and filesystem
// that support fallocate(2) for the best results.)
func WriteFile(src MarshalSizer, file *os.File) error {
sz := src.Msgsize()
err := fallocate(file, int64(sz))
if err != nil {
return err
}
data, err := syscall.Mmap(int(file.Fd()), 0, sz, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
if err != nil {
return err
}
adviseWrite(data)
chunk := data[:0]
chunk, err = src.MarshalMsg(chunk)
if err != nil {
return err
}
uerr := syscall.Munmap(data)
if uerr != nil {
return uerr
}
return file.Truncate(int64(len(chunk)))
}
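// A minimal usage sketch for the two helpers above; "Thing" stands in for a
// type whose methods were generated by the msgp tool, so that it satisfies
// MarshalSizer and Unmarshaler, and the file name is illustrative:
//
//	var in, out Thing
//	f, err := os.Create("things.msgp")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := msgp.WriteFile(&in, f); err != nil { // Msgsize() sizes the mapping
//		return err
//	}
//	if _, err := f.Seek(0, 0); err != nil {
//		return err
//	}
//	return msgp.ReadFile(&out, f) // read-only mapping; do not write to the source bytes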

View File

@ -0,0 +1,47 @@
// +build windows appengine
package msgp
import (
"io/ioutil"
"os"
)
// MarshalSizer is the combination
// of the Marshaler and Sizer
// interfaces.
type MarshalSizer interface {
Marshaler
Sizer
}
func ReadFile(dst Unmarshaler, file *os.File) error {
if u, ok := dst.(Decodable); ok {
return u.DecodeMsg(NewReader(file))
}
data, err := ioutil.ReadAll(file)
if err != nil {
return err
}
_, err = dst.UnmarshalMsg(data)
return err
}
func WriteFile(src MarshalSizer, file *os.File) error {
if e, ok := src.(Encodable); ok {
w := NewWriter(file)
err := e.EncodeMsg(w)
if err == nil {
err = w.Flush()
}
return err
}
raw, err := src.MarshalMsg(nil)
if err != nil {
return err
}
_, err = file.Write(raw)
return err
}

View File

@ -0,0 +1,103 @@
// +build linux darwin dragonfly freebsd netbsd openbsd
package msgp_test
import (
"bytes"
"crypto/rand"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/tinylib/msgp/msgp"
prand "math/rand"
"os"
"testing"
)
type rawBytes []byte
func (r rawBytes) MarshalMsg(b []byte) ([]byte, error) {
return msgp.AppendBytes(b, []byte(r)), nil
}
func (r rawBytes) Msgsize() int {
return msgp.BytesPrefixSize + len(r)
}
func (r *rawBytes) UnmarshalMsg(b []byte) ([]byte, error) {
tmp, out, err := msgp.ReadBytesBytes(b, (*(*[]byte)(r))[:0])
*r = rawBytes(tmp)
return out, err
}
func TestReadWriteFile(t *testing.T) {
t.Parallel()
f, err := os.Create("tmpfile")
if err != nil {
t.Fatal(err)
}
defer func() {
f.Close()
os.Remove("tmpfile")
}()
data := make([]byte, 1024*1024)
rand.Read(data)
err = msgp.WriteFile(rawBytes(data), f)
if err != nil {
t.Fatal(err)
}
var out rawBytes
f.Seek(0, os.SEEK_SET)
err = msgp.ReadFile(&out, f)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal([]byte(out), []byte(data)) {
t.Fatal("Input and output not equal.")
}
}
var blobstrings = []string{"", "a string", "a longer string here!"}
var blobfloats = []float64{0.0, -1.0, 1.0, 3.1415926535}
var blobints = []int64{0, 1, -1, 80000, 1 << 30}
var blobbytes = [][]byte{[]byte{}, []byte("hello"), []byte("{\"is_json\":true,\"is_compact\":\"unable to determine\"}")}
func BenchmarkWriteReadFile(b *testing.B) {
// let's not run out of disk space...
if b.N > 10000000 {
b.N = 10000000
}
fname := "bench-tmpfile"
f, err := os.Create(fname)
if err != nil {
b.Fatal(err)
}
defer func(f *os.File, name string) {
f.Close()
os.Remove(name)
}(f, fname)
data := make(Blobs, b.N)
for i := range data {
data[i].Name = blobstrings[prand.Intn(len(blobstrings))]
data[i].Float = blobfloats[prand.Intn(len(blobfloats))]
data[i].Amount = blobints[prand.Intn(len(blobints))]
data[i].Bytes = blobbytes[prand.Intn(len(blobbytes))]
}
b.SetBytes(int64(data.Msgsize() / b.N))
b.ResetTimer()
err = msgp.WriteFile(data, f)
if err != nil {
b.Fatal(err)
}
err = msgp.ReadFile(&data, f)
if err != nil {
b.Fatal(err)
}
}

View File

@ -0,0 +1,25 @@
package msgp
import (
"testing"
)
func BenchmarkReadWriteFloat32(b *testing.B) {
var f float32 = 3.9081
bts := AppendFloat32([]byte{}, f)
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts = AppendFloat32(bts[0:0], f)
f, bts, _ = ReadFloat32Bytes(bts)
}
}
func BenchmarkReadWriteFloat64(b *testing.B) {
var f float64 = 3.9081
bts := AppendFloat64([]byte{}, f)
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts = AppendFloat64(bts[0:0], f)
f, bts, _ = ReadFloat64Bytes(bts)
}
}

View File

@ -0,0 +1,174 @@
package msgp
/* ----------------------------------
integer encoding utilities
(inline-able)
TODO(tinylib): there are faster,
albeit non-portable solutions
to the code below. implement
byteswap?
---------------------------------- */
func putMint64(b []byte, i int64) {
b[0] = mint64
b[1] = byte(i >> 56)
b[2] = byte(i >> 48)
b[3] = byte(i >> 40)
b[4] = byte(i >> 32)
b[5] = byte(i >> 24)
b[6] = byte(i >> 16)
b[7] = byte(i >> 8)
b[8] = byte(i)
}
func getMint64(b []byte) int64 {
return (int64(b[1]) << 56) | (int64(b[2]) << 48) |
(int64(b[3]) << 40) | (int64(b[4]) << 32) |
(int64(b[5]) << 24) | (int64(b[6]) << 16) |
(int64(b[7]) << 8) | (int64(b[8]))
}
func putMint32(b []byte, i int32) {
b[0] = mint32
b[1] = byte(i >> 24)
b[2] = byte(i >> 16)
b[3] = byte(i >> 8)
b[4] = byte(i)
}
func getMint32(b []byte) int32 {
return (int32(b[1]) << 24) | (int32(b[2]) << 16) | (int32(b[3]) << 8) | (int32(b[4]))
}
func putMint16(b []byte, i int16) {
b[0] = mint16
b[1] = byte(i >> 8)
b[2] = byte(i)
}
func getMint16(b []byte) (i int16) {
return (int16(b[1]) << 8) | int16(b[2])
}
func putMint8(b []byte, i int8) {
b[0] = mint8
b[1] = byte(i)
}
func getMint8(b []byte) (i int8) {
return int8(b[1])
}
func putMuint64(b []byte, u uint64) {
b[0] = muint64
b[1] = byte(u >> 56)
b[2] = byte(u >> 48)
b[3] = byte(u >> 40)
b[4] = byte(u >> 32)
b[5] = byte(u >> 24)
b[6] = byte(u >> 16)
b[7] = byte(u >> 8)
b[8] = byte(u)
}
func getMuint64(b []byte) uint64 {
return (uint64(b[1]) << 56) | (uint64(b[2]) << 48) |
(uint64(b[3]) << 40) | (uint64(b[4]) << 32) |
(uint64(b[5]) << 24) | (uint64(b[6]) << 16) |
(uint64(b[7]) << 8) | (uint64(b[8]))
}
func putMuint32(b []byte, u uint32) {
b[0] = muint32
b[1] = byte(u >> 24)
b[2] = byte(u >> 16)
b[3] = byte(u >> 8)
b[4] = byte(u)
}
func getMuint32(b []byte) uint32 {
return (uint32(b[1]) << 24) | (uint32(b[2]) << 16) | (uint32(b[3]) << 8) | (uint32(b[4]))
}
func putMuint16(b []byte, u uint16) {
b[0] = muint16
b[1] = byte(u >> 8)
b[2] = byte(u)
}
func getMuint16(b []byte) uint16 {
return (uint16(b[1]) << 8) | uint16(b[2])
}
func putMuint8(b []byte, u uint8) {
b[0] = muint8
b[1] = byte(u)
}
func getMuint8(b []byte) uint8 {
return uint8(b[1])
}
func getUnix(b []byte) (sec int64, nsec int32) {
sec = (int64(b[0]) << 56) | (int64(b[1]) << 48) |
(int64(b[2]) << 40) | (int64(b[3]) << 32) |
(int64(b[4]) << 24) | (int64(b[5]) << 16) |
(int64(b[6]) << 8) | (int64(b[7]))
nsec = (int32(b[8]) << 24) | (int32(b[9]) << 16) | (int32(b[10]) << 8) | (int32(b[11]))
return
}
func putUnix(b []byte, sec int64, nsec int32) {
b[0] = byte(sec >> 56)
b[1] = byte(sec >> 48)
b[2] = byte(sec >> 40)
b[3] = byte(sec >> 32)
b[4] = byte(sec >> 24)
b[5] = byte(sec >> 16)
b[6] = byte(sec >> 8)
b[7] = byte(sec)
b[8] = byte(nsec >> 24)
b[9] = byte(nsec >> 16)
b[10] = byte(nsec >> 8)
b[11] = byte(nsec)
}
/* -----------------------------
prefix utilities
----------------------------- */
// write prefix and uint8
func prefixu8(b []byte, pre byte, sz uint8) {
b[0] = pre
b[1] = byte(sz)
}
// write prefix and big-endian uint16
func prefixu16(b []byte, pre byte, sz uint16) {
b[0] = pre
b[1] = byte(sz >> 8)
b[2] = byte(sz)
}
// write prefix and big-endian uint32
func prefixu32(b []byte, pre byte, sz uint32) {
b[0] = pre
b[1] = byte(sz >> 24)
b[2] = byte(sz >> 16)
b[3] = byte(sz >> 8)
b[4] = byte(sz)
}
func prefixu64(b []byte, pre byte, sz uint64) {
b[0] = pre
b[1] = byte(sz >> 56)
b[2] = byte(sz >> 48)
b[3] = byte(sz >> 40)
b[4] = byte(sz >> 32)
b[5] = byte(sz >> 24)
b[6] = byte(sz >> 16)
b[7] = byte(sz >> 8)
b[8] = byte(sz)
}
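// The helpers above are hand-unrolled big-endian stores and loads behind a
// one-byte type prefix. As a sketch, putMuint32/getMuint32 behave like the
// equivalent encoding/binary calls (the unrolled form just avoids the import
// and inlines well):
//
//	b[0] = muint32
//	binary.BigEndian.PutUint32(b[1:], u) // same bytes as putMuint32(b, u)
//	u = binary.BigEndian.Uint32(b[1:])   // same value as getMuint32(b)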

View File

@ -0,0 +1,542 @@
package msgp
import (
"bufio"
"encoding/base64"
"encoding/json"
"io"
"strconv"
"unicode/utf8"
)
var (
null = []byte("null")
hex = []byte("0123456789abcdef")
)
var defuns [_maxtype]func(jsWriter, *Reader) (int, error)
// note: there is an initialization loop if
// this isn't set up during init()
func init() {
// since none of these functions are inline-able,
// there is not much of a penalty to the indirect
// call. however, this is best expressed as a jump-table...
defuns = [_maxtype]func(jsWriter, *Reader) (int, error){
StrType: rwString,
BinType: rwBytes,
MapType: rwMap,
ArrayType: rwArray,
Float64Type: rwFloat64,
Float32Type: rwFloat32,
BoolType: rwBool,
IntType: rwInt,
UintType: rwUint,
NilType: rwNil,
ExtensionType: rwExtension,
Complex64Type: rwExtension,
Complex128Type: rwExtension,
TimeType: rwTime,
}
}
// this is the interface
// used to write json
type jsWriter interface {
io.Writer
io.ByteWriter
WriteString(string) (int, error)
}
// CopyToJSON reads MessagePack from 'src' and copies it
// as JSON to 'dst' until EOF.
func CopyToJSON(dst io.Writer, src io.Reader) (n int64, err error) {
r := NewReader(src)
n, err = r.WriteToJSON(dst)
freeR(r)
return
}
// WriteToJSON translates MessagePack from 'r' and writes it as
// JSON to 'w' until the underlying reader returns io.EOF. It returns
// the number of bytes written, and an error if it stopped before EOF.
func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) {
var j jsWriter
var bf *bufio.Writer
if jsw, ok := w.(jsWriter); ok {
j = jsw
} else {
bf = bufio.NewWriter(w)
j = bf
}
var nn int
for err == nil {
nn, err = rwNext(j, r)
n += int64(nn)
}
if err != io.EOF {
if bf != nil {
bf.Flush()
}
return
}
err = nil
if bf != nil {
err = bf.Flush()
}
return
}
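// A minimal usage sketch for the two entry points above; the file name is
// illustrative:
//
//	src, err := os.Open("data.msgp")
//	if err != nil {
//		return err
//	}
//	defer src.Close()
//	var js bytes.Buffer
//	n, err := msgp.CopyToJSON(&js, src)
//	// n is the number of JSON bytes written; io.EOF from 'src' is treated
//	// as a clean stop and reported as a nil error.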
func rwNext(w jsWriter, src *Reader) (int, error) {
t, err := src.NextType()
if err != nil {
return 0, err
}
return defuns[t](w, src)
}
func rwMap(dst jsWriter, src *Reader) (n int, err error) {
var comma bool
var sz uint32
var field []byte
sz, err = src.ReadMapHeader()
if err != nil {
return
}
if sz == 0 {
return dst.WriteString("{}")
}
err = dst.WriteByte('{')
if err != nil {
return
}
n++
var nn int
for i := uint32(0); i < sz; i++ {
if comma {
err = dst.WriteByte(',')
if err != nil {
return
}
n++
}
field, err = src.ReadMapKeyPtr()
if err != nil {
return
}
nn, err = rwquoted(dst, field)
n += nn
if err != nil {
return
}
err = dst.WriteByte(':')
if err != nil {
return
}
n++
nn, err = rwNext(dst, src)
n += nn
if err != nil {
return
}
if !comma {
comma = true
}
}
err = dst.WriteByte('}')
if err != nil {
return
}
n++
return
}
func rwArray(dst jsWriter, src *Reader) (n int, err error) {
err = dst.WriteByte('[')
if err != nil {
return
}
var sz uint32
var nn int
sz, err = src.ReadArrayHeader()
if err != nil {
return
}
comma := false
for i := uint32(0); i < sz; i++ {
if comma {
err = dst.WriteByte(',')
if err != nil {
return
}
n++
}
nn, err = rwNext(dst, src)
n += nn
if err != nil {
return
}
comma = true
}
err = dst.WriteByte(']')
if err != nil {
return
}
n++
return
}
func rwNil(dst jsWriter, src *Reader) (int, error) {
err := src.ReadNil()
if err != nil {
return 0, err
}
return dst.Write(null)
}
func rwFloat32(dst jsWriter, src *Reader) (int, error) {
f, err := src.ReadFloat32()
if err != nil {
return 0, err
}
src.scratch = strconv.AppendFloat(src.scratch[:0], float64(f), 'f', -1, 32)
return dst.Write(src.scratch)
}
func rwFloat64(dst jsWriter, src *Reader) (int, error) {
f, err := src.ReadFloat64()
if err != nil {
return 0, err
}
src.scratch = strconv.AppendFloat(src.scratch[:0], f, 'f', -1, 64)
return dst.Write(src.scratch)
}
func rwInt(dst jsWriter, src *Reader) (int, error) {
i, err := src.ReadInt64()
if err != nil {
return 0, err
}
src.scratch = strconv.AppendInt(src.scratch[:0], i, 10)
return dst.Write(src.scratch)
}
func rwUint(dst jsWriter, src *Reader) (int, error) {
u, err := src.ReadUint64()
if err != nil {
return 0, err
}
src.scratch = strconv.AppendUint(src.scratch[:0], u, 10)
return dst.Write(src.scratch)
}
func rwBool(dst jsWriter, src *Reader) (int, error) {
b, err := src.ReadBool()
if err != nil {
return 0, err
}
if b {
return dst.WriteString("true")
}
return dst.WriteString("false")
}
func rwTime(dst jsWriter, src *Reader) (int, error) {
t, err := src.ReadTime()
if err != nil {
return 0, err
}
bts, err := t.MarshalJSON()
if err != nil {
return 0, err
}
return dst.Write(bts)
}
func rwExtension(dst jsWriter, src *Reader) (n int, err error) {
et, err := src.peekExtensionType()
if err != nil {
return 0, err
}
// registered extensions can override
// the JSON encoding
if j, ok := extensionReg[et]; ok {
var bts []byte
e := j()
err = src.ReadExtension(e)
if err != nil {
return
}
bts, err = json.Marshal(e)
if err != nil {
return
}
return dst.Write(bts)
}
e := RawExtension{}
e.Type = et
err = src.ReadExtension(&e)
if err != nil {
return
}
var nn int
err = dst.WriteByte('{')
if err != nil {
return
}
n++
nn, err = dst.WriteString(`"type":`)
n += nn
if err != nil {
return
}
src.scratch = strconv.AppendInt(src.scratch[0:0], int64(e.Type), 10)
nn, err = dst.Write(src.scratch)
n += nn
if err != nil {
return
}
nn, err = dst.WriteString(`,"data":"`)
n += nn
if err != nil {
return
}
enc := base64.NewEncoder(base64.StdEncoding, dst)
nn, err = enc.Write(e.Data)
n += nn
if err != nil {
return
}
err = enc.Close()
if err != nil {
return
}
nn, err = dst.WriteString(`"}`)
n += nn
return
}
func rwString(dst jsWriter, src *Reader) (n int, err error) {
var p []byte
p, err = src.R.Peek(1)
if err != nil {
return
}
lead := p[0]
var read int
if isfixstr(lead) {
read = int(rfixstr(lead))
src.R.Skip(1)
goto write
}
switch lead {
case mstr8:
p, err = src.R.Next(2)
if err != nil {
return
}
read = int(uint8(p[1]))
case mstr16:
p, err = src.R.Next(3)
if err != nil {
return
}
read = int(big.Uint16(p[1:]))
case mstr32:
p, err = src.R.Next(5)
if err != nil {
return
}
read = int(big.Uint32(p[1:]))
default:
err = badPrefix(StrType, lead)
return
}
write:
p, err = src.R.Next(read)
if err != nil {
return
}
n, err = rwquoted(dst, p)
return
}
func rwBytes(dst jsWriter, src *Reader) (n int, err error) {
var nn int
err = dst.WriteByte('"')
if err != nil {
return
}
n++
src.scratch, err = src.ReadBytes(src.scratch[:0])
if err != nil {
return
}
enc := base64.NewEncoder(base64.StdEncoding, dst)
nn, err = enc.Write(src.scratch)
n += nn
if err != nil {
return
}
err = enc.Close()
if err != nil {
return
}
err = dst.WriteByte('"')
if err != nil {
return
}
n++
return
}
// Below (c) The Go Authors, 2009-2014
// Subject to the BSD-style license found at http://golang.org
//
// see: encoding/json/encode.go:(*encodeState).stringbytes()
func rwquoted(dst jsWriter, s []byte) (n int, err error) {
var nn int
err = dst.WriteByte('"')
if err != nil {
return
}
n++
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
i++
continue
}
if start < i {
nn, err = dst.Write(s[start:i])
n += nn
if err != nil {
return
}
}
switch b {
case '\\', '"':
err = dst.WriteByte('\\')
if err != nil {
return
}
n++
err = dst.WriteByte(b)
if err != nil {
return
}
n++
case '\n':
err = dst.WriteByte('\\')
if err != nil {
return
}
n++
err = dst.WriteByte('n')
if err != nil {
return
}
n++
case '\r':
err = dst.WriteByte('\\')
if err != nil {
return
}
n++
err = dst.WriteByte('r')
if err != nil {
return
}
n++
default:
nn, err = dst.WriteString(`\u00`)
n += nn
if err != nil {
return
}
err = dst.WriteByte(hex[b>>4])
if err != nil {
return
}
n++
err = dst.WriteByte(hex[b&0xF])
if err != nil {
return
}
n++
}
i++
start = i
continue
}
c, size := utf8.DecodeRune(s[i:])
if c == utf8.RuneError && size == 1 {
if start < i {
nn, err = dst.Write(s[start:i])
n += nn
if err != nil {
return
}
}
nn, err = dst.WriteString(`\ufffd`)
n += nn
if err != nil {
return
}
i += size
start = i
continue
}
if c == '\u2028' || c == '\u2029' {
if start < i {
nn, err = dst.Write(s[start:i])
n += nn
if err != nil {
return
}
}
nn, err = dst.WriteString(`\u202`)
n += nn
if err != nil {
return
}
err = dst.WriteByte(hex[c&0xF])
if err != nil {
return
}
n++
i += size
start = i
continue
}
i += size
}
if start < len(s) {
nn, err = dst.Write(s[start:])
n += nn
if err != nil {
return
}
}
err = dst.WriteByte('"')
if err != nil {
return
}
n++
return
}

View File

@ -0,0 +1,363 @@
package msgp
import (
"bufio"
"encoding/base64"
"encoding/json"
"io"
"strconv"
"time"
)
var unfuns [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error)
func init() {
// NOTE(pmh): this is best expressed as a jump table,
// but gc doesn't do that yet. revisit post-go1.5.
unfuns = [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error){
StrType: rwStringBytes,
BinType: rwBytesBytes,
MapType: rwMapBytes,
ArrayType: rwArrayBytes,
Float64Type: rwFloat64Bytes,
Float32Type: rwFloat32Bytes,
BoolType: rwBoolBytes,
IntType: rwIntBytes,
UintType: rwUintBytes,
NilType: rwNullBytes,
ExtensionType: rwExtensionBytes,
Complex64Type: rwExtensionBytes,
Complex128Type: rwExtensionBytes,
TimeType: rwTimeBytes,
}
}
// UnmarshalAsJSON takes raw messagepack and writes
// it as JSON to 'w'. If an error is returned, the
// bytes not translated will also be returned. If
// no errors are encountered, the length of the returned
// slice will be zero.
func UnmarshalAsJSON(w io.Writer, msg []byte) ([]byte, error) {
var (
scratch []byte
cast bool
dst jsWriter
err error
)
if jsw, ok := w.(jsWriter); ok {
dst = jsw
cast = true
} else {
dst = bufio.NewWriterSize(w, 512)
}
for len(msg) > 0 && err == nil {
msg, scratch, err = writeNext(dst, msg, scratch)
}
if !cast && err == nil {
err = dst.(*bufio.Writer).Flush()
}
return msg, err
}
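// A minimal usage sketch, assuming 'raw' holds one or more complete
// MessagePack objects:
//
//	var js bytes.Buffer
//	left, err := msgp.UnmarshalAsJSON(&js, raw)
//	if err != nil {
//		// 'left' holds the bytes that could not be translated
//		return err
//	}
//	fmt.Println(js.String())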
func writeNext(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
if len(msg) < 1 {
return msg, scratch, ErrShortBytes
}
t := getType(msg[0])
if t == InvalidType {
return msg, scratch, InvalidPrefixError(msg[0])
}
if t == ExtensionType {
et, err := peekExtension(msg)
if err != nil {
return nil, scratch, err
}
if et == TimeExtension {
t = TimeType
}
}
return unfuns[t](w, msg, scratch)
}
func rwArrayBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
sz, msg, err := ReadArrayHeaderBytes(msg)
if err != nil {
return msg, scratch, err
}
err = w.WriteByte('[')
if err != nil {
return msg, scratch, err
}
for i := uint32(0); i < sz; i++ {
if i != 0 {
err = w.WriteByte(',')
if err != nil {
return msg, scratch, err
}
}
msg, scratch, err = writeNext(w, msg, scratch)
if err != nil {
return msg, scratch, err
}
}
err = w.WriteByte(']')
return msg, scratch, err
}
func rwMapBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
sz, msg, err := ReadMapHeaderBytes(msg)
if err != nil {
return msg, scratch, err
}
err = w.WriteByte('{')
if err != nil {
return msg, scratch, err
}
for i := uint32(0); i < sz; i++ {
if i != 0 {
err = w.WriteByte(',')
if err != nil {
return msg, scratch, err
}
}
msg, scratch, err = rwMapKeyBytes(w, msg, scratch)
if err != nil {
return msg, scratch, err
}
err = w.WriteByte(':')
if err != nil {
return msg, scratch, err
}
msg, scratch, err = writeNext(w, msg, scratch)
if err != nil {
return msg, scratch, err
}
}
err = w.WriteByte('}')
return msg, scratch, err
}
func rwMapKeyBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
msg, scratch, err := rwStringBytes(w, msg, scratch)
if err != nil {
if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType {
return rwBytesBytes(w, msg, scratch)
}
}
return msg, scratch, err
}
func rwStringBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
str, msg, err := ReadStringZC(msg)
if err != nil {
return msg, scratch, err
}
_, err = rwquoted(w, str)
return msg, scratch, err
}
func rwBytesBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
bts, msg, err := ReadBytesZC(msg)
if err != nil {
return msg, scratch, err
}
l := base64.StdEncoding.EncodedLen(len(bts))
if cap(scratch) >= l {
scratch = scratch[0:l]
} else {
scratch = make([]byte, l)
}
base64.StdEncoding.Encode(scratch, bts)
err = w.WriteByte('"')
if err != nil {
return msg, scratch, err
}
_, err = w.Write(scratch)
if err != nil {
return msg, scratch, err
}
err = w.WriteByte('"')
return msg, scratch, err
}
func rwNullBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
msg, err := ReadNilBytes(msg)
if err != nil {
return msg, scratch, err
}
_, err = w.Write(null)
return msg, scratch, err
}
func rwBoolBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
b, msg, err := ReadBoolBytes(msg)
if err != nil {
return msg, scratch, err
}
if b {
_, err = w.WriteString("true")
return msg, scratch, err
}
_, err = w.WriteString("false")
return msg, scratch, err
}
func rwIntBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
i, msg, err := ReadInt64Bytes(msg)
if err != nil {
return msg, scratch, err
}
scratch = strconv.AppendInt(scratch[0:0], i, 10)
_, err = w.Write(scratch)
return msg, scratch, err
}
func rwUintBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
u, msg, err := ReadUint64Bytes(msg)
if err != nil {
return msg, scratch, err
}
scratch = strconv.AppendUint(scratch[0:0], u, 10)
_, err = w.Write(scratch)
return msg, scratch, err
}
func rwFloatBytes(w jsWriter, msg []byte, f64 bool, scratch []byte) ([]byte, []byte, error) {
var f float64
var err error
var sz int
if f64 {
sz = 64
f, msg, err = ReadFloat64Bytes(msg)
} else {
sz = 32
var v float32
v, msg, err = ReadFloat32Bytes(msg)
f = float64(v)
}
if err != nil {
return msg, scratch, err
}
scratch = strconv.AppendFloat(scratch, f, 'f', -1, sz)
_, err = w.Write(scratch)
return msg, scratch, err
}
func rwFloat32Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
var f float32
var err error
f, msg, err = ReadFloat32Bytes(msg)
if err != nil {
return msg, scratch, err
}
scratch = strconv.AppendFloat(scratch[:0], float64(f), 'f', -1, 32)
_, err = w.Write(scratch)
return msg, scratch, err
}
func rwFloat64Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
var f float64
var err error
f, msg, err = ReadFloat64Bytes(msg)
if err != nil {
return msg, scratch, err
}
scratch = strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)
_, err = w.Write(scratch)
return msg, scratch, err
}
func rwTimeBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
var t time.Time
var err error
t, msg, err = ReadTimeBytes(msg)
if err != nil {
return msg, scratch, err
}
bts, err := t.MarshalJSON()
if err != nil {
return msg, scratch, err
}
_, err = w.Write(bts)
return msg, scratch, err
}
func rwExtensionBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
var err error
var et int8
et, err = peekExtension(msg)
if err != nil {
return msg, scratch, err
}
// if it's time.Time
if et == TimeExtension {
var tm time.Time
tm, msg, err = ReadTimeBytes(msg)
if err != nil {
return msg, scratch, err
}
bts, err := tm.MarshalJSON()
if err != nil {
return msg, scratch, err
}
_, err = w.Write(bts)
return msg, scratch, err
}
// if the extension is registered,
// use its canonical JSON form
if f, ok := extensionReg[et]; ok {
e := f()
msg, err = ReadExtensionBytes(msg, e)
if err != nil {
return msg, scratch, err
}
bts, err := json.Marshal(e)
if err != nil {
return msg, scratch, err
}
_, err = w.Write(bts)
return msg, scratch, err
}
// otherwise, write `{"type": <num>, "data": "<base64data>"}`
r := RawExtension{}
r.Type = et
msg, err = ReadExtensionBytes(msg, &r)
if err != nil {
return msg, scratch, err
}
scratch, err = writeExt(w, r, scratch)
return msg, scratch, err
}
func writeExt(w jsWriter, r RawExtension, scratch []byte) ([]byte, error) {
_, err := w.WriteString(`{"type":`)
if err != nil {
return scratch, err
}
scratch = strconv.AppendInt(scratch[0:0], int64(r.Type), 10)
_, err = w.Write(scratch)
if err != nil {
return scratch, err
}
_, err = w.WriteString(`,"data":"`)
if err != nil {
return scratch, err
}
l := base64.StdEncoding.EncodedLen(len(r.Data))
if cap(scratch) >= l {
scratch = scratch[0:l]
} else {
scratch = make([]byte, l)
}
base64.StdEncoding.Encode(scratch, r.Data)
_, err = w.Write(scratch)
if err != nil {
return scratch, err
}
_, err = w.WriteString(`"}`)
return scratch, err
}

View File

@ -0,0 +1,121 @@
package msgp
import (
"bytes"
"encoding/json"
"testing"
"time"
)
func TestUnmarshalJSON(t *testing.T) {
var buf bytes.Buffer
enc := NewWriter(&buf)
enc.WriteMapHeader(5)
enc.WriteString("thing_1")
enc.WriteString("a string object")
enc.WriteString("a_map")
enc.WriteMapHeader(2)
// INNER
enc.WriteString("cmplx")
enc.WriteComplex64(complex(1.0, 1.0))
enc.WriteString("int_b")
enc.WriteInt64(-100)
enc.WriteString("an extension")
enc.WriteExtension(&RawExtension{Type: 1, Data: []byte("blaaahhh")})
enc.WriteString("some bytes")
enc.WriteBytes([]byte("here are some bytes"))
enc.WriteString("now")
enc.WriteTime(time.Now())
enc.Flush()
var js bytes.Buffer
_, err := UnmarshalAsJSON(&js, buf.Bytes())
if err != nil {
t.Logf("%s", js.Bytes())
t.Fatal(err)
}
mp := make(map[string]interface{})
err = json.Unmarshal(js.Bytes(), &mp)
if err != nil {
t.Log(js.String())
t.Fatalf("Error unmarshaling: %s", err)
}
if len(mp) != 5 {
t.Errorf("map length should be %d, not %d", 5, len(mp))
}
so, ok := mp["thing_1"]
if !ok || so != "a string object" {
t.Errorf("expected %q; got %q", "a string object", so)
}
if _, ok := mp["now"]; !ok {
t.Error(`"now" field doesn't exist`)
}
c, ok := mp["a_map"]
if !ok {
t.Error(`"a_map" field doesn't exist`)
} else {
if m, ok := c.(map[string]interface{}); ok {
if _, ok := m["cmplx"]; !ok {
t.Error(`"a_map.cmplx" doesn't exist`)
}
} else {
t.Error(`can't type-assert "c" to map[string]interface{}`)
}
}
t.Logf("JSON: %s", js.Bytes())
}
func BenchmarkUnmarshalAsJSON(b *testing.B) {
var buf bytes.Buffer
enc := NewWriter(&buf)
enc.WriteMapHeader(4)
enc.WriteString("thing_1")
enc.WriteString("a string object")
enc.WriteString("a_first_map")
enc.WriteMapHeader(2)
enc.WriteString("float_a")
enc.WriteFloat32(1.0)
enc.WriteString("int_b")
enc.WriteInt64(-100)
enc.WriteString("an array")
enc.WriteArrayHeader(2)
enc.WriteBool(true)
enc.WriteUint(2089)
enc.WriteString("a_second_map")
enc.WriteMapStrStr(map[string]string{
"internal_one": "blah",
"internal_two": "blahhh...",
})
enc.Flush()
var js bytes.Buffer
bts := buf.Bytes()
_, err := UnmarshalAsJSON(&js, bts)
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(len(js.Bytes())))
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
js.Reset()
UnmarshalAsJSON(&js, bts)
}
}

View File

@ -0,0 +1,142 @@
package msgp
import (
"bytes"
"encoding/json"
"reflect"
"testing"
)
func TestCopyJSON(t *testing.T) {
var buf bytes.Buffer
enc := NewWriter(&buf)
enc.WriteMapHeader(5)
enc.WriteString("thing_1")
enc.WriteString("a string object")
enc.WriteString("a_map")
enc.WriteMapHeader(2)
enc.WriteString("float_a")
enc.WriteFloat32(1.0)
enc.WriteString("int_b")
enc.WriteInt64(-100)
enc.WriteString("some bytes")
enc.WriteBytes([]byte("here are some bytes"))
enc.WriteString("a bool")
enc.WriteBool(true)
enc.WriteString("a map")
enc.WriteMapStrStr(map[string]string{
"internal_one": "blah",
"internal_two": "blahhh...",
})
enc.Flush()
var js bytes.Buffer
_, err := CopyToJSON(&js, &buf)
if err != nil {
t.Fatal(err)
}
mp := make(map[string]interface{})
err = json.Unmarshal(js.Bytes(), &mp)
if err != nil {
t.Log(js.String())
t.Fatalf("Error unmarshaling: %s", err)
}
if len(mp) != 5 {
t.Errorf("map length should be %d, not %d", 5, len(mp))
}
so, ok := mp["thing_1"]
if !ok || so != "a string object" {
t.Errorf("expected %q; got %q", "a string object", so)
}
in, ok := mp["a map"]
if !ok {
t.Error("no key 'a map'")
}
if inm, ok := in.(map[string]interface{}); !ok {
t.Error("inner map not type-assertable to map[string]interface{}")
} else {
inm1, ok := inm["internal_one"]
if !ok || !reflect.DeepEqual(inm1, "blah") {
t.Errorf("inner map field %q should be %q, not %q", "internal_one", "blah", inm1)
}
}
}
func BenchmarkCopyToJSON(b *testing.B) {
var buf bytes.Buffer
enc := NewWriter(&buf)
enc.WriteMapHeader(4)
enc.WriteString("thing_1")
enc.WriteString("a string object")
enc.WriteString("a_first_map")
enc.WriteMapHeader(2)
enc.WriteString("float_a")
enc.WriteFloat32(1.0)
enc.WriteString("int_b")
enc.WriteInt64(-100)
enc.WriteString("an array")
enc.WriteArrayHeader(2)
enc.WriteBool(true)
enc.WriteUint(2089)
enc.WriteString("a_second_map")
enc.WriteMapStrStr(map[string]string{
"internal_one": "blah",
"internal_two": "blahhh...",
})
enc.Flush()
var js bytes.Buffer
bts := buf.Bytes()
_, err := CopyToJSON(&js, &buf)
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(len(js.Bytes())))
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
js.Reset()
CopyToJSON(&js, bytes.NewReader(bts))
}
}
func BenchmarkStdlibJSON(b *testing.B) {
obj := map[string]interface{}{
"thing_1": "a string object",
"a_first_map": map[string]interface{}{
"float_a": float32(1.0),
"float_b": -100,
},
"an array": []interface{}{
"part_A",
"part_B",
},
"a_second_map": map[string]interface{}{
"internal_one": "blah",
"internal_two": "blahhh...",
},
}
var js bytes.Buffer
err := json.NewEncoder(&js).Encode(&obj)
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(len(js.Bytes())))
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
js.Reset()
json.NewEncoder(&js).Encode(&obj)
}
}

View File

@ -0,0 +1,267 @@
package msgp
import (
"math"
"strconv"
)
// The portable parts of the Number implementation
// Number can be
// an int64, uint64, float32,
// or float64 internally.
// It can decode itself
// from any of the native
// messagepack number types.
// The zero-value of Number
// is Int(0). Using the equality
// operator with Number compares
// both the type and the value
// of the number.
type Number struct {
// internally, this
// is just a tagged union.
// the raw bits of the number
// are stored the same way regardless.
bits uint64
typ Type
}
// AsInt sets the number to an int64.
func (n *Number) AsInt(i int64) {
// we always store int(0)
// as {0, InvalidType} in
// order to preserve
// the behavior of the == operator
if i == 0 {
n.typ = InvalidType
n.bits = 0
return
}
n.typ = IntType
n.bits = uint64(i)
}
// AsUint sets the number to a uint64.
func (n *Number) AsUint(u uint64) {
n.typ = UintType
n.bits = u
}
// AsFloat32 sets the value of the number
// to a float32.
func (n *Number) AsFloat32(f float32) {
n.typ = Float32Type
n.bits = uint64(math.Float32bits(f))
}
// AsFloat64 sets the value of the
// number to a float64.
func (n *Number) AsFloat64(f float64) {
n.typ = Float64Type
n.bits = math.Float64bits(f)
}
// Int casts the number as an int64, and
// returns whether or not that was the
// underlying type.
func (n *Number) Int() (int64, bool) {
return int64(n.bits), n.typ == IntType || n.typ == InvalidType
}
// Uint casts the number as a uint64, and returns
// whether or not that was the underlying type.
func (n *Number) Uint() (uint64, bool) {
return n.bits, n.typ == UintType
}
// Float casts the number to a float64, and
// returns whether or not that was the underlying
// type (either a float64 or a float32).
func (n *Number) Float() (float64, bool) {
switch n.typ {
case Float32Type:
return float64(math.Float32frombits(uint32(n.bits))), true
case Float64Type:
return math.Float64frombits(n.bits), true
default:
return 0.0, false
}
}
// Type will return one of:
// Float64Type, Float32Type, UintType, or IntType.
func (n *Number) Type() Type {
if n.typ == InvalidType {
return IntType
}
return n.typ
}
// DecodeMsg implements msgp.Decodable
func (n *Number) DecodeMsg(r *Reader) error {
typ, err := r.NextType()
if err != nil {
return err
}
switch typ {
case Float32Type:
f, err := r.ReadFloat32()
if err != nil {
return err
}
n.AsFloat32(f)
return nil
case Float64Type:
f, err := r.ReadFloat64()
if err != nil {
return err
}
n.AsFloat64(f)
return nil
case IntType:
i, err := r.ReadInt64()
if err != nil {
return err
}
n.AsInt(i)
return nil
case UintType:
u, err := r.ReadUint64()
if err != nil {
return err
}
n.AsUint(u)
return nil
default:
return TypeError{Encoded: typ, Method: IntType}
}
}
// UnmarshalMsg implements msgp.Unmarshaler
func (n *Number) UnmarshalMsg(b []byte) ([]byte, error) {
typ := NextType(b)
switch typ {
case IntType:
i, o, err := ReadInt64Bytes(b)
if err != nil {
return b, err
}
n.AsInt(i)
return o, nil
case UintType:
u, o, err := ReadUint64Bytes(b)
if err != nil {
return b, err
}
n.AsUint(u)
return o, nil
case Float64Type:
f, o, err := ReadFloat64Bytes(b)
if err != nil {
return b, err
}
n.AsFloat64(f)
return o, nil
case Float32Type:
f, o, err := ReadFloat32Bytes(b)
if err != nil {
return b, err
}
n.AsFloat32(f)
return o, nil
default:
return b, TypeError{Method: IntType, Encoded: typ}
}
}
// MarshalMsg implements msgp.Marshaler
func (n *Number) MarshalMsg(b []byte) ([]byte, error) {
switch n.typ {
case IntType:
return AppendInt64(b, int64(n.bits)), nil
case UintType:
return AppendUint64(b, uint64(n.bits)), nil
case Float64Type:
return AppendFloat64(b, math.Float64frombits(n.bits)), nil
case Float32Type:
return AppendFloat32(b, math.Float32frombits(uint32(n.bits))), nil
default:
return AppendInt64(b, 0), nil
}
}
// EncodeMsg implements msgp.Encodable
func (n *Number) EncodeMsg(w *Writer) error {
switch n.typ {
case IntType:
return w.WriteInt64(int64(n.bits))
case UintType:
return w.WriteUint64(n.bits)
case Float64Type:
return w.WriteFloat64(math.Float64frombits(n.bits))
case Float32Type:
return w.WriteFloat32(math.Float32frombits(uint32(n.bits)))
default:
return w.WriteInt64(0)
}
}
// Msgsize implements msgp.Sizer
func (n *Number) Msgsize() int {
switch n.typ {
case Float32Type:
return Float32Size
case Float64Type:
return Float64Size
case IntType:
return Int64Size
case UintType:
return Uint64Size
default:
return 1 // fixint(0)
}
}
// MarshalJSON implements json.Marshaler
func (n *Number) MarshalJSON() ([]byte, error) {
t := n.Type()
if t == InvalidType {
return []byte{'0'}, nil
}
out := make([]byte, 0, 32)
switch t {
case Float32Type, Float64Type:
f, _ := n.Float()
return strconv.AppendFloat(out, f, 'f', -1, 64), nil
case IntType:
i, _ := n.Int()
return strconv.AppendInt(out, i, 10), nil
case UintType:
u, _ := n.Uint()
return strconv.AppendUint(out, u, 10), nil
default:
panic("(*Number).typ is invalid")
}
}
// String implements fmt.Stringer
func (n *Number) String() string {
switch n.typ {
case InvalidType:
return "0"
case Float32Type, Float64Type:
f, _ := n.Float()
return strconv.FormatFloat(f, 'f', -1, 64)
case IntType:
i, _ := n.Int()
return strconv.FormatInt(i, 10)
case UintType:
u, _ := n.Uint()
return strconv.FormatUint(u, 10)
default:
panic("(*Number).typ is invalid")
}
}
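// A minimal usage sketch: Number is useful when a field may arrive as any of
// the MessagePack numeric types. Here 'raw' is assumed to hold one encoded
// number:
//
//	var n msgp.Number
//	if _, err := n.UnmarshalMsg(raw); err != nil {
//		return err
//	}
//	switch n.Type() {
//	case msgp.IntType:
//		i, _ := n.Int()
//		fmt.Println("int:", i)
//	case msgp.UintType:
//		u, _ := n.Uint()
//		fmt.Println("uint:", u)
//	default: // Float32Type or Float64Type
//		f, _ := n.Float()
//		fmt.Println("float:", f)
//	}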

View File

@ -0,0 +1,94 @@
package msgp
import (
"bytes"
"testing"
)
func TestNumber(t *testing.T) {
n := Number{}
if n.Type() != IntType {
t.Errorf("expected zero-value type to be %s; got %s", IntType, n.Type())
}
if n.String() != "0" {
t.Errorf("expected Number{}.String() to be \"0\" but got %q", n.String())
}
n.AsInt(248)
i, ok := n.Int()
if !ok || i != 248 || n.Type() != IntType || n.String() != "248" {
t.Errorf("%d in; %d out!", 248, i)
}
n.AsFloat64(3.141)
f, ok := n.Float()
if !ok || f != 3.141 || n.Type() != Float64Type || n.String() != "3.141" {
t.Errorf("%f in; %f out!", 3.141, f)
}
n.AsUint(40000)
u, ok := n.Uint()
if !ok || u != 40000 || n.Type() != UintType || n.String() != "40000" {
t.Errorf("%d in; %d out!", 40000, u)
}
nums := []interface{}{
float64(3.14159),
int64(-29081),
uint64(90821983),
float32(3.141),
}
var dat []byte
var buf bytes.Buffer
wr := NewWriter(&buf)
for _, n := range nums {
dat, _ = AppendIntf(dat, n)
wr.WriteIntf(n)
}
wr.Flush()
mout := make([]Number, len(nums))
dout := make([]Number, len(nums))
rd := NewReader(&buf)
unm := dat
for i := range nums {
var err error
unm, err = mout[i].UnmarshalMsg(unm)
if err != nil {
t.Fatal("unmarshal error:", err)
}
err = dout[i].DecodeMsg(rd)
if err != nil {
t.Fatal("decode error:", err)
}
if mout[i] != dout[i] {
t.Errorf("for %#v, got %#v from unmarshal and %#v from decode", nums[i], mout[i], dout[i])
}
}
buf.Reset()
var odat []byte
for i := range nums {
var err error
odat, err = mout[i].MarshalMsg(odat)
if err != nil {
t.Fatal("marshal error:", err)
}
err = dout[i].EncodeMsg(wr)
if err != nil {
t.Fatal("encode error:", err)
}
}
wr.Flush()
if !bytes.Equal(dat, odat) {
t.Errorf("marshal: expected output %#v; got %#v", dat, odat)
}
if !bytes.Equal(dat, buf.Bytes()) {
t.Errorf("encode: expected output %#v; got %#v", dat, buf.Bytes())
}
}

View File

@ -0,0 +1,85 @@
package msgp
import (
"bytes"
"testing"
"time"
)
// all standard interfaces
type allifaces interface {
Encodable
Decodable
Marshaler
Unmarshaler
Sizer
}
func TestRaw(t *testing.T) {
bts := make([]byte, 0, 512)
bts = AppendMapHeader(bts, 3)
bts = AppendString(bts, "key_one")
bts = AppendFloat64(bts, -1.0)
bts = AppendString(bts, "key_two")
bts = AppendString(bts, "value_two")
bts = AppendString(bts, "key_three")
bts = AppendTime(bts, time.Now())
var r Raw
// verify that Raw satisfies
// the interfaces we want it to
var _ allifaces = &r
// READ TESTS
extra, err := r.UnmarshalMsg(bts)
if err != nil {
t.Fatal("error from UnmarshalMsg:", err)
}
if len(extra) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(extra))
}
if !bytes.Equal([]byte(r), bts) {
t.Fatal("value of raw and input slice are not equal after UnmarshalMsg")
}
r = r[:0]
var buf bytes.Buffer
buf.Write(bts)
rd := NewReader(&buf)
err = r.DecodeMsg(rd)
if err != nil {
t.Fatal("error from DecodeMsg:", err)
}
if !bytes.Equal([]byte(r), bts) {
t.Fatal("value of raw and input slice are not equal after DecodeMsg")
}
// WRITE TESTS
buf.Reset()
wr := NewWriter(&buf)
err = r.EncodeMsg(wr)
if err != nil {
t.Fatal("error from EncodeMsg:", err)
}
wr.Flush()
if !bytes.Equal(buf.Bytes(), bts) {
t.Fatal("value of buf.Bytes() and input slice are not equal after EncodeMsg")
}
var outsl []byte
outsl, err = r.MarshalMsg(outsl)
if err != nil {
t.Fatal("error from MarshalMsg:", err)
}
if !bytes.Equal(outsl, bts) {
t.Fatal("value of output and input of MarshalMsg are not equal.")
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,518 @@
package msgp
import (
"bytes"
"reflect"
"testing"
"time"
)
func TestReadMapHeaderBytes(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
tests := []uint32{0, 1, 5, 49082}
for i, v := range tests {
buf.Reset()
en.WriteMapHeader(v)
en.Flush()
out, left, err := ReadMapHeaderBytes(buf.Bytes())
if err != nil {
t.Errorf("test case %d: %s", i, err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
if out != v {
t.Errorf("%d in; %d out", v, out)
}
}
}
func BenchmarkReadMapHeaderBytes(b *testing.B) {
sizes := []uint32{1, 100, tuint16, tuint32}
buf := make([]byte, 0, 5*len(sizes))
for _, sz := range sizes {
buf = AppendMapHeader(buf, sz)
}
b.SetBytes(int64(len(buf) / len(sizes)))
b.ReportAllocs()
b.ResetTimer()
o := buf
for i := 0; i < b.N; i++ {
_, buf, _ = ReadMapHeaderBytes(buf)
if len(buf) == 0 {
buf = o
}
}
}
func TestReadArrayHeaderBytes(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
tests := []uint32{0, 1, 5, 49082}
for i, v := range tests {
buf.Reset()
en.WriteArrayHeader(v)
en.Flush()
out, left, err := ReadArrayHeaderBytes(buf.Bytes())
if err != nil {
t.Errorf("test case %d: %s", i, err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
if out != v {
t.Errorf("%d in; %d out", v, out)
}
}
}
func BenchmarkReadArrayHeaderBytes(b *testing.B) {
sizes := []uint32{1, 100, tuint16, tuint32}
buf := make([]byte, 0, 5*len(sizes))
for _, sz := range sizes {
buf = AppendArrayHeader(buf, sz)
}
b.SetBytes(int64(len(buf) / len(sizes)))
b.ReportAllocs()
b.ResetTimer()
o := buf
for i := 0; i < b.N; i++ {
_, buf, _ = ReadArrayHeaderBytes(buf)
if len(buf) == 0 {
buf = o
}
}
}
func TestReadNilBytes(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
en.WriteNil()
en.Flush()
left, err := ReadNilBytes(buf.Bytes())
if err != nil {
t.Fatal(err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
}
func BenchmarkReadNilByte(b *testing.B) {
buf := []byte{mnil}
b.SetBytes(1)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
ReadNilBytes(buf)
}
}
func TestReadFloat64Bytes(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
en.WriteFloat64(3.14159)
en.Flush()
out, left, err := ReadFloat64Bytes(buf.Bytes())
if err != nil {
t.Fatal(err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
if out != 3.14159 {
t.Errorf("%f in; %f out", 3.14159, out)
}
}
func BenchmarkReadFloat64Bytes(b *testing.B) {
f := float64(3.14159)
buf := make([]byte, 0, 9)
buf = AppendFloat64(buf, f)
b.SetBytes(int64(len(buf)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
ReadFloat64Bytes(buf)
}
}
func TestReadFloat32Bytes(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
en.WriteFloat32(3.1)
en.Flush()
out, left, err := ReadFloat32Bytes(buf.Bytes())
if err != nil {
t.Fatal(err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
if out != 3.1 {
t.Errorf("%f in; %f out", 3.1, out)
}
}
func BenchmarkReadFloat32Bytes(b *testing.B) {
f := float32(3.14159)
buf := make([]byte, 0, 5)
buf = AppendFloat32(buf, f)
b.SetBytes(int64(len(buf)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
ReadFloat32Bytes(buf)
}
}
func TestReadBoolBytes(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
tests := []bool{true, false}
for i, v := range tests {
buf.Reset()
en.WriteBool(v)
en.Flush()
out, left, err := ReadBoolBytes(buf.Bytes())
if err != nil {
t.Errorf("test case %d: %s", i, err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
if out != v {
t.Errorf("%t in; %t out", v, out)
}
}
}
func BenchmarkReadBoolBytes(b *testing.B) {
buf := []byte{mtrue, mfalse, mtrue, mfalse}
b.SetBytes(1)
b.ReportAllocs()
b.ResetTimer()
o := buf
for i := 0; i < b.N; i++ {
_, buf, _ = ReadBoolBytes(buf)
if len(buf) == 0 {
buf = o
}
}
}
func TestReadInt64Bytes(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
tests := []int64{-5, -30, 0, 1, 127, 300, 40921, 34908219}
for i, v := range tests {
buf.Reset()
en.WriteInt64(v)
en.Flush()
out, left, err := ReadInt64Bytes(buf.Bytes())
if err != nil {
t.Errorf("test case %d: %s", i, err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
if out != v {
t.Errorf("%d in; %d out", v, out)
}
}
}
func TestReadUint64Bytes(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
tests := []uint64{0, 1, 127, 300, 40921, 34908219}
for i, v := range tests {
buf.Reset()
en.WriteUint64(v)
en.Flush()
out, left, err := ReadUint64Bytes(buf.Bytes())
if err != nil {
t.Errorf("test case %d: %s", i, err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
if out != v {
t.Errorf("%d in; %d out", v, out)
}
}
}
func TestReadBytesBytes(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
tests := [][]byte{[]byte{}, []byte("some bytes"), []byte("some more bytes")}
var scratch []byte
for i, v := range tests {
buf.Reset()
en.WriteBytes(v)
en.Flush()
out, left, err := ReadBytesBytes(buf.Bytes(), scratch)
if err != nil {
t.Errorf("test case %d: %s", i, err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
if !bytes.Equal(out, v) {
t.Errorf("%q in; %q out", v, out)
}
}
}
func TestReadZCBytes(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
tests := [][]byte{[]byte{}, []byte("some bytes"), []byte("some more bytes")}
for i, v := range tests {
buf.Reset()
en.WriteBytes(v)
en.Flush()
out, left, err := ReadBytesZC(buf.Bytes())
if err != nil {
t.Errorf("test case %d: %s", i, err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
if !bytes.Equal(out, v) {
t.Errorf("%q in; %q out", v, out)
}
}
}
func TestReadZCString(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
tests := []string{"", "hello", "here's another string......"}
for i, v := range tests {
buf.Reset()
en.WriteString(v)
en.Flush()
out, left, err := ReadStringZC(buf.Bytes())
if err != nil {
t.Errorf("test case %d: %s", i, err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
if string(out) != v {
t.Errorf("%q in; %q out", v, out)
}
}
}
func TestReadStringBytes(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
tests := []string{"", "hello", "here's another string......"}
for i, v := range tests {
buf.Reset()
en.WriteString(v)
en.Flush()
out, left, err := ReadStringBytes(buf.Bytes())
if err != nil {
t.Errorf("test case %d: %s", i, err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
if out != v {
t.Errorf("%q in; %q out", v, out)
}
}
}
func TestReadComplex128Bytes(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
tests := []complex128{complex(0, 0), complex(12.8, 32.0)}
for i, v := range tests {
buf.Reset()
en.WriteComplex128(v)
en.Flush()
out, left, err := ReadComplex128Bytes(buf.Bytes())
if err != nil {
t.Errorf("test case %d: %s", i, err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
if out != v {
t.Errorf("%f in; %f out", v, out)
}
}
}
func TestReadComplex64Bytes(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
tests := []complex64{complex(0, 0), complex(12.8, 32.0)}
for i, v := range tests {
buf.Reset()
en.WriteComplex64(v)
en.Flush()
out, left, err := ReadComplex64Bytes(buf.Bytes())
if err != nil {
t.Errorf("test case %d: %s", i, err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
if out != v {
t.Errorf("%f in; %f out", v, out)
}
}
}
func TestReadTimeBytes(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
now := time.Now()
en.WriteTime(now)
en.Flush()
out, left, err := ReadTimeBytes(buf.Bytes())
if err != nil {
t.Fatal(err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
if !now.Equal(out) {
t.Errorf("%s in; %s out", now, out)
}
}
func BenchmarkReadTimeBytes(b *testing.B) {
data := AppendTime(nil, time.Now())
b.SetBytes(15)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
ReadTimeBytes(data)
}
}
func TestReadIntfBytes(t *testing.T) {
var buf bytes.Buffer
en := NewWriter(&buf)
tests := make([]interface{}, 0, 10)
tests = append(tests, float64(3.5))
tests = append(tests, int64(-49082))
tests = append(tests, uint64(34908))
tests = append(tests, string("hello!"))
tests = append(tests, []byte("blah."))
tests = append(tests, map[string]interface{}{
"key_one": 3.5,
"key_two": "hi.",
})
for i, v := range tests {
buf.Reset()
if err := en.WriteIntf(v); err != nil {
t.Fatal(err)
}
en.Flush()
out, left, err := ReadIntfBytes(buf.Bytes())
if err != nil {
t.Errorf("test case %d: %s", i, err)
}
if len(left) != 0 {
t.Errorf("expected 0 bytes left; found %d", len(left))
}
if !reflect.DeepEqual(v, out) {
t.Errorf("ReadIntf(): %v in; %v out", v, out)
}
}
}
func BenchmarkSkipBytes(b *testing.B) {
var buf bytes.Buffer
en := NewWriter(&buf)
en.WriteMapHeader(6)
en.WriteString("thing_one")
en.WriteString("value_one")
en.WriteString("thing_two")
en.WriteFloat64(3.14159)
en.WriteString("some_bytes")
en.WriteBytes([]byte("nkl4321rqw908vxzpojnlk2314rqew098-s09123rdscasd"))
en.WriteString("the_time")
en.WriteTime(time.Now())
en.WriteString("what?")
en.WriteBool(true)
en.WriteString("ext")
en.WriteExtension(&RawExtension{Type: 55, Data: []byte("raw data!!!")})
en.Flush()
bts := buf.Bytes()
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := Skip(bts)
if err != nil {
b.Fatal(err)
}
}
}

View File

@ -0,0 +1,724 @@
package msgp
import (
"bytes"
"io"
"math"
"math/rand"
"reflect"
"testing"
"time"
)
func TestSanity(t *testing.T) {
if !isfixint(0) {
t.Fatal("WUT.")
}
}
func TestReadIntf(t *testing.T) {
// NOTE: if you include cases
// with, say, int32s, the test
// will fail, b/c integers are
// always read out as int64, and
// unsigned integers as uint64
var testCases = []interface{}{
float64(128.032),
float32(9082.092),
int64(-40),
uint64(9082981),
time.Now(),
"hello!",
[]byte("hello!"),
map[string]interface{}{
"thing-1": "thing-1-value",
"thing-2": int64(800),
"thing-3": []byte("some inner bytes..."),
"thing-4": false,
},
}
var buf bytes.Buffer
var v interface{}
dec := NewReader(&buf)
enc := NewWriter(&buf)
for i, ts := range testCases {
buf.Reset()
err := enc.WriteIntf(ts)
if err != nil {
t.Errorf("Test case %d: %s", i, err)
continue
}
err = enc.Flush()
if err != nil {
t.Fatal(err)
}
v, err = dec.ReadIntf()
if err != nil {
t.Errorf("Test case: %d: %s", i, err)
}
if !reflect.DeepEqual(v, ts) {
t.Errorf("%v in; %v out", ts, v)
}
}
}
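// As the note at the top of TestReadIntf says, concrete integer widths are not
// preserved by ReadIntf: anything written as a signed integer is read back as
// int64, and unsigned values come back as uint64. The test below is an
// illustrative sketch of that behavior (added for exposition; it uses only
// identifiers already defined in this package).
func TestReadIntfIntegerWidening(t *testing.T) {
	var buf bytes.Buffer
	wr := NewWriter(&buf)
	if err := wr.WriteIntf(int32(7)); err != nil { // encoded as a MessagePack int
		t.Fatal(err)
	}
	if err := wr.Flush(); err != nil {
		t.Fatal(err)
	}
	v, err := NewReader(&buf).ReadIntf()
	if err != nil {
		t.Fatal(err)
	}
	if _, ok := v.(int64); !ok {
		t.Errorf("int32 in; expected int64 out, got %T", v)
	}
}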
func TestReadMapHeader(t *testing.T) {
tests := []struct {
Sz uint32
}{
{0},
{1},
{tuint16},
{tuint32},
}
var buf bytes.Buffer
var sz uint32
var err error
wr := NewWriter(&buf)
rd := NewReader(&buf)
for i, test := range tests {
buf.Reset()
err = wr.WriteMapHeader(test.Sz)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
sz, err = rd.ReadMapHeader()
if err != nil {
t.Errorf("Test case %d: got error %s", i, err)
}
if sz != test.Sz {
t.Errorf("Test case %d: wrote size %d; got size %d", i, test.Sz, sz)
}
}
}
func BenchmarkReadMapHeader(b *testing.B) {
sizes := []uint32{0, 1, tuint16, tuint32}
data := make([]byte, 0, len(sizes)*5)
for _, d := range sizes {
data = AppendMapHeader(data, d)
}
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data) / len(sizes)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
rd.ReadMapHeader()
}
}
func TestReadArrayHeader(t *testing.T) {
tests := []struct {
Sz uint32
}{
{0},
{1},
{tuint16},
{tuint32},
}
var buf bytes.Buffer
var sz uint32
var err error
wr := NewWriter(&buf)
rd := NewReader(&buf)
for i, test := range tests {
buf.Reset()
err = wr.WriteArrayHeader(test.Sz)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
sz, err = rd.ReadArrayHeader()
if err != nil {
t.Errorf("Test case %d: got error %s", i, err)
}
if sz != test.Sz {
t.Errorf("Test case %d: wrote size %d; got size %d", i, test.Sz, sz)
}
}
}
func BenchmarkReadArrayHeader(b *testing.B) {
sizes := []uint32{0, 1, tuint16, tuint32}
data := make([]byte, 0, len(sizes)*5)
for _, d := range sizes {
data = AppendArrayHeader(data, d)
}
rd := NewReader(NewEndlessReader(data, b))
b.ReportAllocs()
b.SetBytes(int64(len(data) / len(sizes)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
rd.ReadArrayHeader()
}
}
func TestReadNil(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
wr.WriteNil()
wr.Flush()
err := rd.ReadNil()
if err != nil {
t.Fatal(err)
}
}
func BenchmarkReadNil(b *testing.B) {
data := AppendNil(nil)
rd := NewReader(NewEndlessReader(data, b))
b.ReportAllocs()
b.SetBytes(1)
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := rd.ReadNil()
if err != nil {
b.Fatal(err)
}
}
}
func TestReadFloat64(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
for i := 0; i < 100; i++ {
buf.Reset()
flt := (rand.Float64() - 0.5) * math.MaxFloat64
err := wr.WriteFloat64(flt)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
out, err := rd.ReadFloat64()
if err != nil {
t.Errorf("Error reading %f: %s", flt, err)
continue
}
if out != flt {
t.Errorf("Put in %f but got out %f", flt, out)
}
}
}
func BenchmarkReadFloat64(b *testing.B) {
fs := []float64{rand.Float64(), rand.Float64(), rand.Float64(), rand.Float64()}
data := make([]byte, 0, 9*len(fs))
for _, f := range fs {
data = AppendFloat64(data, f)
}
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(9)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadFloat64()
if err != nil {
b.Fatal(err)
}
}
}
func TestReadFloat32(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
for i := 0; i < 10000; i++ {
buf.Reset()
flt := (rand.Float32() - 0.5) * math.MaxFloat32
err := wr.WriteFloat32(flt)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
out, err := rd.ReadFloat32()
if err != nil {
t.Errorf("Error reading %f: %s", flt, err)
continue
}
if out != flt {
t.Errorf("Put in %f but got out %f", flt, out)
}
}
}
func BenchmarkReadFloat32(b *testing.B) {
fs := []float32{rand.Float32(), rand.Float32(), rand.Float32(), rand.Float32()}
data := make([]byte, 0, 5*len(fs))
for _, f := range fs {
data = AppendFloat32(data, f)
}
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(5)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadFloat32()
if err != nil {
b.Fatal(err)
}
}
}
func TestReadInt64(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
ints := []int64{-100000, -5000, -5, 0, 8, 240, int64(tuint16), int64(tuint32), int64(tuint64)}
for i, num := range ints {
buf.Reset()
err := wr.WriteInt64(num)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
out, err := rd.ReadInt64()
if err != nil {
t.Fatal(err)
}
if out != num {
t.Errorf("Test case %d: put %d in and got %d out", i, num, out)
}
}
}
func BenchmarkReadInt64(b *testing.B) {
is := []int64{0, 1, 65000, rand.Int63()}
data := make([]byte, 0, 9*len(is))
for _, n := range is {
data = AppendInt64(data, n)
}
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data) / len(is)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadInt64()
if err != nil {
b.Fatal(err)
}
}
}
func TestReadUint64(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
ints := []uint64{0, 8, 240, uint64(tuint16), uint64(tuint32), uint64(tuint64)}
for i, num := range ints {
buf.Reset()
err := wr.WriteUint64(num)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
out, err := rd.ReadUint64()
if err != nil {
t.Fatal(err)
}
if out != num {
t.Errorf("Test case %d: put %d in and got %d out", i, num, out)
}
}
}
func BenchmarkReadUint64(b *testing.B) {
us := []uint64{0, 1, 10000, uint64(rand.Uint32() * 4)}
data := make([]byte, 0, 9*len(us))
for _, n := range us {
data = AppendUint64(data, n)
}
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data) / len(us)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadUint64()
if err != nil {
b.Fatal(err)
}
}
}
func TestReadBytes(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
sizes := []int{0, 1, 225, int(tuint32)}
var scratch []byte
for i, size := range sizes {
buf.Reset()
bts := RandBytes(size)
err := wr.WriteBytes(bts)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
out, err := rd.ReadBytes(scratch)
if err != nil {
t.Errorf("test case %d: %s", i, err)
continue
}
if !bytes.Equal(bts, out) {
t.Errorf("test case %d: Bytes not equal.", i)
}
}
}
func benchBytes(size uint32, b *testing.B) {
data := make([]byte, 0, size+5)
data = AppendBytes(data, RandBytes(int(size)))
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data)))
b.ReportAllocs()
b.ResetTimer()
var scratch []byte
var err error
for i := 0; i < b.N; i++ {
scratch, err = rd.ReadBytes(scratch)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkRead16Bytes(b *testing.B) {
benchBytes(16, b)
}
func BenchmarkRead256Bytes(b *testing.B) {
benchBytes(256, b)
}
// This particular case creates
// an object larger than the default
// read buffer size, so it's a decent
// indicator of worst-case performance.
func BenchmarkRead2048Bytes(b *testing.B) {
benchBytes(2048, b)
}
func TestReadString(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
sizes := []int{0, 1, 225, int(math.MaxUint16 + 5)}
for i, size := range sizes {
buf.Reset()
in := string(RandBytes(size))
err := wr.WriteString(in)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
out, err := rd.ReadString()
if err != nil {
t.Errorf("test case %d: %s", i, err)
}
if out != in {
t.Errorf("test case %d: strings not equal.", i)
t.Errorf("string (len = %d) in; string (len = %d) out", size, len(out))
}
}
}
func benchString(size uint32, b *testing.B) {
str := string(RandBytes(int(size)))
data := make([]byte, 0, len(str)+5)
data = AppendString(data, str)
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadString()
if err != nil {
b.Fatal(err)
}
}
}
func benchStringAsBytes(size uint32, b *testing.B) {
str := string(RandBytes(int(size)))
data := make([]byte, 0, len(str)+5)
data = AppendString(data, str)
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data)))
b.ReportAllocs()
b.ResetTimer()
var scratch []byte
var err error
for i := 0; i < b.N; i++ {
scratch, err = rd.ReadStringAsBytes(scratch)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkRead16StringAsBytes(b *testing.B) {
benchStringAsBytes(16, b)
}
func BenchmarkRead256StringAsBytes(b *testing.B) {
benchStringAsBytes(256, b)
}
func BenchmarkRead16String(b *testing.B) {
benchString(16, b)
}
func BenchmarkRead256String(b *testing.B) {
benchString(256, b)
}
func TestReadComplex64(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
for i := 0; i < 100; i++ {
buf.Reset()
f := complex(rand.Float32()*math.MaxFloat32, rand.Float32()*math.MaxFloat32)
wr.WriteComplex64(f)
err := wr.Flush()
if err != nil {
t.Fatal(err)
}
out, err := rd.ReadComplex64()
if err != nil {
t.Error(err)
continue
}
if out != f {
t.Errorf("Wrote %f; read %f", f, out)
}
}
}
func BenchmarkReadComplex64(b *testing.B) {
f := complex(rand.Float32(), rand.Float32())
data := AppendComplex64(nil, f)
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadComplex64()
if err != nil {
b.Fatal(err)
}
}
}
func TestReadComplex128(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
for i := 0; i < 10; i++ {
buf.Reset()
f := complex(rand.Float64()*math.MaxFloat64, rand.Float64()*math.MaxFloat64)
wr.WriteComplex128(f)
err := wr.Flush()
if err != nil {
t.Fatal(err)
}
out, err := rd.ReadComplex128()
if err != nil {
t.Error(err)
continue
}
if out != f {
t.Errorf("Wrote %f; read %f", f, out)
}
}
}
func BenchmarkReadComplex128(b *testing.B) {
f := complex(rand.Float64(), rand.Float64())
data := AppendComplex128(nil, f)
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadComplex128()
if err != nil {
b.Fatal(err)
}
}
}
func TestTime(t *testing.T) {
var buf bytes.Buffer
now := time.Now()
en := NewWriter(&buf)
dc := NewReader(&buf)
err := en.WriteTime(now)
if err != nil {
t.Fatal(err)
}
err = en.Flush()
if err != nil {
t.Fatal(err)
}
out, err := dc.ReadTime()
if err != nil {
t.Fatal(err)
}
// check for equivalence
if !now.Equal(out) {
t.Fatalf("%s in; %s out", now, out)
}
// check for time.Local zone
if now != out {
t.Error("returned time.Time not set to time.Local")
}
}
func BenchmarkReadTime(b *testing.B) {
t := time.Now()
data := AppendTime(nil, t)
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadTime()
if err != nil {
b.Fatal(err)
}
}
}
func TestSkip(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
wr.WriteMapHeader(4)
wr.WriteString("key_1")
wr.WriteBytes([]byte("value_1"))
wr.WriteString("key_2")
wr.WriteFloat64(2.0)
wr.WriteString("key_3")
wr.WriteComplex128(3.0i)
wr.WriteString("key_4")
wr.WriteInt64(49080432189)
wr.Flush()
// this should skip the whole map
err := rd.Skip()
if err != nil {
t.Fatal(err)
}
tp, err := rd.NextType()
if err != io.EOF {
t.Errorf("expected %q; got %q", io.EOF, err)
t.Errorf("returned type %q", tp)
}
}
func BenchmarkSkip(b *testing.B) {
var buf bytes.Buffer
en := NewWriter(&buf)
en.WriteMapHeader(6)
en.WriteString("thing_one")
en.WriteString("value_one")
en.WriteString("thing_two")
en.WriteFloat64(3.14159)
en.WriteString("some_bytes")
en.WriteBytes([]byte("nkl4321rqw908vxzpojnlk2314rqew098-s09123rdscasd"))
en.WriteString("the_time")
en.WriteTime(time.Now())
en.WriteString("what?")
en.WriteBool(true)
en.WriteString("ext")
en.WriteExtension(&RawExtension{Type: 55, Data: []byte("raw data!!!")})
en.Flush()
bts := buf.Bytes()
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
rd := NewReader(NewEndlessReader(bts, b))
for i := 0; i < b.N; i++ {
err := rd.Skip()
if err != nil {
b.Fatal(err)
}
}
}

View File

@ -0,0 +1,38 @@
package msgp
// The sizes provided
// are the worst-case
// encoded sizes for
// each type. For variable-
// length types ([]byte, string),
// the total encoded size is
// the prefix size plus the
// length of the object.
const (
Int64Size = 9
IntSize = Int64Size
UintSize = Int64Size
Int8Size = 2
Int16Size = 3
Int32Size = 5
Uint8Size = 2
ByteSize = Uint8Size
Uint16Size = 3
Uint32Size = 5
Uint64Size = Int64Size
Float64Size = 9
Float32Size = 5
Complex64Size = 10
Complex128Size = 18
TimeSize = 15
BoolSize = 1
NilSize = 1
MapHeaderSize = 5
ArrayHeaderSize = 5
BytesPrefixSize = 5
StringPrefixSize = 5
ExtensionPrefixSize = 6
)
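// As a sketch of how these constants compose (illustrative only, not part of
// the original file): the worst-case size of a two-field map with fixed string
// keys is just the sum of the header, prefix, and fixed-width sizes, which is
// the kind of arithmetic a generated Msgsize method performs.
func worstCaseUserSize(name string) int {
	return MapHeaderSize +
		StringPrefixSize + len("name") + StringPrefixSize + len(name) +
		StringPrefixSize + len("logins") + Int64Size
}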

View File

@ -0,0 +1,40 @@
// +build !appengine
package msgp
import (
"reflect"
"unsafe"
)
// NOTE:
// all of the definition in this file
// should be repeated in appengine.go,
// but without using unsafe
const (
// spec says int and uint are always
// the same size, but that int/uint
// size may not be machine word size
smallint = unsafe.Sizeof(int(0)) == 4
)
// UnsafeString returns the byte slice as a volatile string
// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
// THIS IS EVIL CODE.
// YOU HAVE BEEN WARNED.
func UnsafeString(b []byte) string {
return *(*string)(unsafe.Pointer(&reflect.StringHeader{Data: uintptr(unsafe.Pointer(&b[0])), Len: len(b)}))
}
// UnsafeBytes returns the string as a byte slice
// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
// THIS IS EVIL CODE.
// YOU HAVE BEEN WARNED.
func UnsafeBytes(s string) []byte {
return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
Len: len(s),
Cap: len(s),
Data: (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data,
}))
}
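// Because the returned string aliases the slice's memory, mutating the slice
// afterwards silently changes the string as well; that is the hazard the
// warnings above refer to. An illustrative sketch (not part of the original
// file):
func unsafeAliasHazard() string {
	b := []byte("abc")
	s := UnsafeString(b)
	b[0] = 'x' // s now reads "xbc": string and slice share backing memory
	return s
}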

View File

@ -0,0 +1,845 @@
package msgp
import (
"errors"
"fmt"
"io"
"math"
"reflect"
"sync"
"time"
)
// Sizer is an interface implemented
// by types that can estimate their
// size when MessagePack encoded.
// This interface is optional, but
// encoding/marshaling implementations
// may use this as a way to pre-allocate
// memory for serialization.
type Sizer interface {
Msgsize() int
}
var (
// Nowhere is an io.Writer to nowhere
Nowhere io.Writer = nwhere{}
btsType = reflect.TypeOf(([]byte)(nil))
writerPool = sync.Pool{
New: func() interface{} {
return &Writer{buf: make([]byte, 2048)}
},
}
)
func popWriter(w io.Writer) *Writer {
wr := writerPool.Get().(*Writer)
wr.Reset(w)
return wr
}
func pushWriter(wr *Writer) {
wr.w = nil
wr.wloc = 0
writerPool.Put(wr)
}
// freeW frees a writer for use
// by other processes. It is not necessary
// to call freeW on a writer. However, maintaining
// a reference to a *Writer after calling freeW on
// it will cause undefined behavior.
func freeW(w *Writer) { pushWriter(w) }
// Require ensures that cap(old)-len(old) >= extra.
func Require(old []byte, extra int) []byte {
l := len(old)
c := cap(old)
r := l + extra
if c >= r {
return old
} else if l == 0 {
return make([]byte, 0, extra)
}
// the new size is the greater
// of double the old capacity
// and the sum of the old length
// and the number of new bytes
// necessary.
c <<= 1
if c < r {
c = r
}
n := make([]byte, l, c)
copy(n, old)
return n
}
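// A sketch of a typical Require call site (illustrative only): guarantee room
// for a raw payload so the append that follows cannot reallocate.
func appendRaw(dst, raw []byte) []byte {
	dst = Require(dst, len(raw)) // now cap(dst)-len(dst) >= len(raw)
	return append(dst, raw...)
}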
// nowhere writer
type nwhere struct{}
func (n nwhere) Write(p []byte) (int, error) { return len(p), nil }
// Marshaler is the interface implemented
// by types that know how to marshal themselves
// as MessagePack. MarshalMsg appends the marshalled
// form of the object to the provided
// byte slice, returning the extended
// slice and any errors encountered.
type Marshaler interface {
MarshalMsg([]byte) ([]byte, error)
}
// Encodable is the interface implemented
// by types that know how to write themselves
// as MessagePack using a *msgp.Writer.
type Encodable interface {
EncodeMsg(*Writer) error
}
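// A sketch of a hand-written Encodable implementation of the kind the code
// generator emits (illustrative only; the point type is hypothetical):
type point struct{ X, Y int64 }

func (p point) EncodeMsg(w *Writer) error {
	if err := w.WriteArrayHeader(2); err != nil {
		return err
	}
	if err := w.WriteInt64(p.X); err != nil {
		return err
	}
	return w.WriteInt64(p.Y)
}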
// Writer is a buffered writer
// that can be used to write
// MessagePack objects to an io.Writer.
// You must call *Writer.Flush() in order
// to flush all of the buffered data
// to the underlying writer.
type Writer struct {
w io.Writer
buf []byte
wloc int
}
// NewWriter returns a new *Writer.
func NewWriter(w io.Writer) *Writer {
if wr, ok := w.(*Writer); ok {
return wr
}
return popWriter(w)
}
// NewWriterSize returns a writer with a custom buffer size.
func NewWriterSize(w io.Writer, sz int) *Writer {
// we must be able to require() 18
// contiguous bytes, so that is the
// practical minimum buffer size
if sz < 18 {
sz = 18
}
return &Writer{
w: w,
buf: make([]byte, sz),
}
}
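// A minimal sketch of the buffered-writer contract described above: nothing is
// guaranteed to reach the underlying io.Writer until Flush returns
// (illustrative only).
func writeAnswer(w io.Writer) error {
	mw := NewWriter(w)
	if err := mw.WriteMapHeader(1); err != nil {
		return err
	}
	if err := mw.WriteString("answer"); err != nil {
		return err
	}
	if err := mw.WriteInt64(42); err != nil {
		return err
	}
	return mw.Flush() // flush the buffered bytes to w
}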
// Encode encodes an Encodable to an io.Writer.
func Encode(w io.Writer, e Encodable) error {
wr := NewWriter(w)
err := e.EncodeMsg(wr)
if err == nil {
err = wr.Flush()
}
freeW(wr)
return err
}
func (mw *Writer) flush() error {
if mw.wloc == 0 {
return nil
}
n, err := mw.w.Write(mw.buf[:mw.wloc])
if err != nil {
if n > 0 {
mw.wloc = copy(mw.buf, mw.buf[n:mw.wloc])
}
return err
}
mw.wloc = 0
return nil
}
// Flush flushes all of the buffered
// data to the underlying writer.
func (mw *Writer) Flush() error { return mw.flush() }
// Buffered returns the number of bytes in the write buffer
func (mw *Writer) Buffered() int { return len(mw.buf) - mw.wloc }
func (mw *Writer) avail() int { return len(mw.buf) - mw.wloc }
func (mw *Writer) bufsize() int { return len(mw.buf) }
// NOTE: this should only be called with
// a number that is guaranteed to be less than
// len(mw.buf). typically, it is called with a constant.
//
// NOTE: this is a hot code path
func (mw *Writer) require(n int) (int, error) {
c := len(mw.buf)
wl := mw.wloc
if c-wl < n {
if err := mw.flush(); err != nil {
return 0, err
}
wl = mw.wloc
}
mw.wloc += n
return wl, nil
}
func (mw *Writer) Append(b ...byte) error {
if mw.avail() < len(b) {
err := mw.flush()
if err != nil {
return err
}
}
mw.wloc += copy(mw.buf[mw.wloc:], b)
return nil
}
// push one byte onto the buffer
//
// NOTE: this is a hot code path
func (mw *Writer) push(b byte) error {
if mw.wloc == len(mw.buf) {
if err := mw.flush(); err != nil {
return err
}
}
mw.buf[mw.wloc] = b
mw.wloc++
return nil
}
func (mw *Writer) prefix8(b byte, u uint8) error {
const need = 2
if len(mw.buf)-mw.wloc < need {
if err := mw.flush(); err != nil {
return err
}
}
prefixu8(mw.buf[mw.wloc:], b, u)
mw.wloc += need
return nil
}
func (mw *Writer) prefix16(b byte, u uint16) error {
const need = 3
if len(mw.buf)-mw.wloc < need {
if err := mw.flush(); err != nil {
return err
}
}
prefixu16(mw.buf[mw.wloc:], b, u)
mw.wloc += need
return nil
}
func (mw *Writer) prefix32(b byte, u uint32) error {
const need = 5
if len(mw.buf)-mw.wloc < need {
if err := mw.flush(); err != nil {
return err
}
}
prefixu32(mw.buf[mw.wloc:], b, u)
mw.wloc += need
return nil
}
func (mw *Writer) prefix64(b byte, u uint64) error {
const need = 9
if len(mw.buf)-mw.wloc < need {
if err := mw.flush(); err != nil {
return err
}
}
prefixu64(mw.buf[mw.wloc:], b, u)
mw.wloc += need
return nil
}
// Write implements io.Writer, and writes
// data directly to the buffer.
func (mw *Writer) Write(p []byte) (int, error) {
l := len(p)
if mw.avail() < l {
if err := mw.flush(); err != nil {
return 0, err
}
if l > len(mw.buf) {
return mw.w.Write(p)
}
}
mw.wloc += copy(mw.buf[mw.wloc:], p)
return l, nil
}
// implements io.WriteString
func (mw *Writer) writeString(s string) error {
l := len(s)
if mw.avail() < l {
if err := mw.flush(); err != nil {
return err
}
if l > len(mw.buf) {
_, err := io.WriteString(mw.w, s)
return err
}
}
mw.wloc += copy(mw.buf[mw.wloc:], s)
return nil
}
// Reset changes the underlying writer used by the Writer
func (mw *Writer) Reset(w io.Writer) {
mw.buf = mw.buf[:cap(mw.buf)]
mw.w = w
mw.wloc = 0
}
// WriteMapHeader writes a map header of the given
// size to the writer
func (mw *Writer) WriteMapHeader(sz uint32) error {
switch {
case sz <= 15:
return mw.push(wfixmap(uint8(sz)))
case sz <= math.MaxUint16:
return mw.prefix16(mmap16, uint16(sz))
default:
return mw.prefix32(mmap32, sz)
}
}
// WriteArrayHeader writes an array header of the
// given size to the writer
func (mw *Writer) WriteArrayHeader(sz uint32) error {
switch {
case sz <= 15:
return mw.push(wfixarray(uint8(sz)))
case sz <= math.MaxUint16:
return mw.prefix16(marray16, uint16(sz))
default:
return mw.prefix32(marray32, sz)
}
}
// WriteNil writes a nil byte to the buffer
func (mw *Writer) WriteNil() error {
return mw.push(mnil)
}
// WriteFloat64 writes a float64 to the writer
func (mw *Writer) WriteFloat64(f float64) error {
return mw.prefix64(mfloat64, math.Float64bits(f))
}
// WriteFloat32 writes a float32 to the writer
func (mw *Writer) WriteFloat32(f float32) error {
return mw.prefix32(mfloat32, math.Float32bits(f))
}
// WriteInt64 writes an int64 to the writer
func (mw *Writer) WriteInt64(i int64) error {
if i >= 0 {
switch {
case i <= math.MaxInt8:
return mw.push(wfixint(uint8(i)))
case i <= math.MaxInt16:
return mw.prefix16(mint16, uint16(i))
case i <= math.MaxInt32:
return mw.prefix32(mint32, uint32(i))
default:
return mw.prefix64(mint64, uint64(i))
}
}
switch {
case i >= -32:
return mw.push(wnfixint(int8(i)))
case i >= math.MinInt8:
return mw.prefix8(mint8, uint8(i))
case i >= math.MinInt16:
return mw.prefix16(mint16, uint16(i))
case i >= math.MinInt32:
return mw.prefix32(mint32, uint32(i))
default:
return mw.prefix64(mint64, uint64(i))
}
}
// WriteInt8 writes an int8 to the writer
func (mw *Writer) WriteInt8(i int8) error { return mw.WriteInt64(int64(i)) }
// WriteInt16 writes an int16 to the writer
func (mw *Writer) WriteInt16(i int16) error { return mw.WriteInt64(int64(i)) }
// WriteInt32 writes an int32 to the writer
func (mw *Writer) WriteInt32(i int32) error { return mw.WriteInt64(int64(i)) }
// WriteInt writes an int to the writer
func (mw *Writer) WriteInt(i int) error { return mw.WriteInt64(int64(i)) }
// WriteUint64 writes a uint64 to the writer
func (mw *Writer) WriteUint64(u uint64) error {
switch {
case u <= (1<<7)-1:
return mw.push(wfixint(uint8(u)))
case u <= math.MaxUint8:
return mw.prefix8(muint8, uint8(u))
case u <= math.MaxUint16:
return mw.prefix16(muint16, uint16(u))
case u <= math.MaxUint32:
return mw.prefix32(muint32, uint32(u))
default:
return mw.prefix64(muint64, u)
}
}
// WriteByte is analogous to WriteUint8
func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) }
// WriteUint8 writes a uint8 to the writer
func (mw *Writer) WriteUint8(u uint8) error { return mw.WriteUint64(uint64(u)) }
// WriteUint16 writes a uint16 to the writer
func (mw *Writer) WriteUint16(u uint16) error { return mw.WriteUint64(uint64(u)) }
// WriteUint32 writes a uint32 to the writer
func (mw *Writer) WriteUint32(u uint32) error { return mw.WriteUint64(uint64(u)) }
// WriteUint writes a uint to the writer
func (mw *Writer) WriteUint(u uint) error { return mw.WriteUint64(uint64(u)) }
// WriteBytes writes binary as 'bin' to the writer
func (mw *Writer) WriteBytes(b []byte) error {
sz := uint32(len(b))
var err error
switch {
case sz <= math.MaxUint8:
err = mw.prefix8(mbin8, uint8(sz))
case sz <= math.MaxUint16:
err = mw.prefix16(mbin16, uint16(sz))
default:
err = mw.prefix32(mbin32, sz)
}
if err != nil {
return err
}
_, err = mw.Write(b)
return err
}
// WriteBytesHeader writes just the size header
// of a MessagePack 'bin' object. The user is responsible
// for then writing 'sz' more bytes into the stream.
func (mw *Writer) WriteBytesHeader(sz uint32) error {
switch {
case sz <= math.MaxUint8:
return mw.prefix8(mbin8, uint8(sz))
case sz <= math.MaxUint16:
return mw.prefix16(mbin16, uint16(sz))
default:
return mw.prefix32(mbin32, sz)
}
}
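// A sketch of the streaming pattern WriteBytesHeader is meant for: write the
// size header, then copy the payload through the Writer's io.Writer
// implementation (illustrative only).
func writeLargeBlob(mw *Writer, sz uint32, src io.Reader) error {
	if err := mw.WriteBytesHeader(sz); err != nil {
		return err
	}
	_, err := io.CopyN(mw, src, int64(sz)) // the caller supplies exactly sz bytes
	return err
}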
// WriteBool writes a bool to the writer
func (mw *Writer) WriteBool(b bool) error {
if b {
return mw.push(mtrue)
}
return mw.push(mfalse)
}
// WriteString writes a messagepack string to the writer.
// (This is NOT an implementation of io.StringWriter)
func (mw *Writer) WriteString(s string) error {
sz := uint32(len(s))
var err error
switch {
case sz <= 31:
err = mw.push(wfixstr(uint8(sz)))
case sz <= math.MaxUint8:
err = mw.prefix8(mstr8, uint8(sz))
case sz <= math.MaxUint16:
err = mw.prefix16(mstr16, uint16(sz))
default:
err = mw.prefix32(mstr32, sz)
}
if err != nil {
return err
}
return mw.writeString(s)
}
// WriteStringHeader writes just the string size
// header of a MessagePack 'str' object. The user
// is responsible for writing 'sz' more valid UTF-8
// bytes to the stream.
func (mw *Writer) WriteStringHeader(sz uint32) error {
switch {
case sz <= 31:
return mw.push(wfixstr(uint8(sz)))
case sz <= math.MaxUint8:
return mw.prefix8(mstr8, uint8(sz))
case sz <= math.MaxUint16:
return mw.prefix16(mstr16, uint16(sz))
default:
return mw.prefix32(mstr32, sz)
}
}
// WriteStringFromBytes writes a 'str' object
// from a []byte.
func (mw *Writer) WriteStringFromBytes(str []byte) error {
sz := uint32(len(str))
var err error
switch {
case sz <= 31:
err = mw.push(wfixstr(uint8(sz)))
case sz <= math.MaxUint8:
err = mw.prefix8(mstr8, uint8(sz))
case sz <= math.MaxUint16:
err = mw.prefix16(mstr16, uint16(sz))
default:
err = mw.prefix32(mstr32, sz)
}
if err != nil {
return err
}
_, err = mw.Write(str)
return err
}
// WriteComplex64 writes a complex64 to the writer
func (mw *Writer) WriteComplex64(f complex64) error {
o, err := mw.require(10)
if err != nil {
return err
}
mw.buf[o] = mfixext8
mw.buf[o+1] = Complex64Extension
big.PutUint32(mw.buf[o+2:], math.Float32bits(real(f)))
big.PutUint32(mw.buf[o+6:], math.Float32bits(imag(f)))
return nil
}
// WriteComplex128 writes a complex128 to the writer
func (mw *Writer) WriteComplex128(f complex128) error {
o, err := mw.require(18)
if err != nil {
return err
}
mw.buf[o] = mfixext16
mw.buf[o+1] = Complex128Extension
big.PutUint64(mw.buf[o+2:], math.Float64bits(real(f)))
big.PutUint64(mw.buf[o+10:], math.Float64bits(imag(f)))
return nil
}
// WriteMapStrStr writes a map[string]string to the writer
func (mw *Writer) WriteMapStrStr(mp map[string]string) (err error) {
err = mw.WriteMapHeader(uint32(len(mp)))
if err != nil {
return
}
for key, val := range mp {
err = mw.WriteString(key)
if err != nil {
return
}
err = mw.WriteString(val)
if err != nil {
return
}
}
return nil
}
// WriteMapStrIntf writes a map[string]interface to the writer
func (mw *Writer) WriteMapStrIntf(mp map[string]interface{}) (err error) {
err = mw.WriteMapHeader(uint32(len(mp)))
if err != nil {
return
}
for key, val := range mp {
err = mw.WriteString(key)
if err != nil {
return
}
err = mw.WriteIntf(val)
if err != nil {
return
}
}
return
}
// WriteTime writes a time.Time object to the wire.
//
// Time is encoded as Unix time, which means that
// location (time zone) data is removed from the object.
// The encoded object itself is 12 bytes: 8 bytes for
// a big-endian 64-bit integer denoting seconds
// elapsed since "zero" Unix time, followed by 4 bytes
// for a big-endian 32-bit signed integer denoting
// the nanosecond offset of the time. This encoding
// is intended to ease portability across languages.
// (Note that this is *not* the standard time.Time
// binary encoding, because its implementation relies
// heavily on the internal representation used by the
// time package.)
func (mw *Writer) WriteTime(t time.Time) error {
t = t.UTC()
o, err := mw.require(15)
if err != nil {
return err
}
mw.buf[o] = mext8
mw.buf[o+1] = 12
mw.buf[o+2] = TimeExtension
putUnix(mw.buf[o+3:], t.Unix(), int32(t.Nanosecond()))
return nil
}
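// A sketch of the round trip implied by the comment above: the instant is
// preserved but the zone is not, so callers should compare with time.Time.Equal
// rather than == (illustrative only).
func timeRoundTripEqual(mw *Writer, mr *Reader, t time.Time) (bool, error) {
	if err := mw.WriteTime(t); err != nil {
		return false, err
	}
	if err := mw.Flush(); err != nil {
		return false, err
	}
	out, err := mr.ReadTime()
	if err != nil {
		return false, err
	}
	return t.Equal(out), nil // true even when t == out is false
}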
// WriteIntf writes the concrete type of 'v'.
// WriteIntf will error if 'v' is not one of the following:
// - A bool, float, string, []byte, int, uint, or complex
// - A map of supported types (with string keys)
// - An array or slice of supported types
// - A pointer to a supported type
// - A type that satisfies the msgp.Encodable interface
// - A type that satisfies the msgp.Extension interface
func (mw *Writer) WriteIntf(v interface{}) error {
if v == nil {
return mw.WriteNil()
}
switch v := v.(type) {
// preferred interfaces
case Encodable:
return v.EncodeMsg(mw)
case Extension:
return mw.WriteExtension(v)
// concrete types
case bool:
return mw.WriteBool(v)
case float32:
return mw.WriteFloat32(v)
case float64:
return mw.WriteFloat64(v)
case complex64:
return mw.WriteComplex64(v)
case complex128:
return mw.WriteComplex128(v)
case uint8:
return mw.WriteUint8(v)
case uint16:
return mw.WriteUint16(v)
case uint32:
return mw.WriteUint32(v)
case uint64:
return mw.WriteUint64(v)
case uint:
return mw.WriteUint(v)
case int8:
return mw.WriteInt8(v)
case int16:
return mw.WriteInt16(v)
case int32:
return mw.WriteInt32(v)
case int64:
return mw.WriteInt64(v)
case int:
return mw.WriteInt(v)
case string:
return mw.WriteString(v)
case []byte:
return mw.WriteBytes(v)
case map[string]string:
return mw.WriteMapStrStr(v)
case map[string]interface{}:
return mw.WriteMapStrIntf(v)
case time.Time:
return mw.WriteTime(v)
}
val := reflect.ValueOf(v)
if !isSupported(val.Kind()) || !val.IsValid() {
return fmt.Errorf("msgp: type %s not supported", val)
}
switch val.Kind() {
case reflect.Ptr:
if val.IsNil() {
return mw.WriteNil()
}
return mw.WriteIntf(val.Elem().Interface())
case reflect.Slice:
return mw.writeSlice(val)
case reflect.Map:
return mw.writeMap(val)
}
return &ErrUnsupportedType{val.Type()}
}
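// A sketch of direct WriteIntf use with a heterogeneous map, which takes the
// concrete map[string]interface{} fast path above rather than the reflection
// fallback (illustrative only).
func writeConfig(mw *Writer, debug bool) error {
	return mw.WriteIntf(map[string]interface{}{
		"debug":   debug,
		"retries": int64(3),
		"token":   []byte{0x01, 0x02},
	})
}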
func (mw *Writer) writeMap(v reflect.Value) (err error) {
if v.Type().Key().Kind() != reflect.String {
return errors.New("msgp: map keys must be strings")
}
ks := v.MapKeys()
err = mw.WriteMapHeader(uint32(len(ks)))
if err != nil {
return
}
for _, key := range ks {
val := v.MapIndex(key)
err = mw.WriteString(key.String())
if err != nil {
return
}
err = mw.WriteIntf(val.Interface())
if err != nil {
return
}
}
return
}
func (mw *Writer) writeSlice(v reflect.Value) (err error) {
// is []byte
if v.Type().ConvertibleTo(btsType) {
return mw.WriteBytes(v.Bytes())
}
sz := uint32(v.Len())
err = mw.WriteArrayHeader(sz)
if err != nil {
return
}
for i := uint32(0); i < sz; i++ {
err = mw.WriteIntf(v.Index(int(i)).Interface())
if err != nil {
return
}
}
return
}
func (mw *Writer) writeStruct(v reflect.Value) error {
if enc, ok := v.Interface().(Encodable); ok {
return enc.EncodeMsg(mw)
}
return fmt.Errorf("msgp: unsupported type: %s", v.Type())
}
func (mw *Writer) writeVal(v reflect.Value) error {
if !isSupported(v.Kind()) {
return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type())
}
// shortcut for nil values
if v.IsNil() {
return mw.WriteNil()
}
switch v.Kind() {
case reflect.Bool:
return mw.WriteBool(v.Bool())
case reflect.Float32, reflect.Float64:
return mw.WriteFloat64(v.Float())
case reflect.Complex64, reflect.Complex128:
return mw.WriteComplex128(v.Complex())
case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8:
return mw.WriteInt64(v.Int())
case reflect.Interface, reflect.Ptr:
if v.IsNil() {
return mw.WriteNil()
}
return mw.writeVal(v.Elem())
case reflect.Map:
return mw.writeMap(v)
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8:
return mw.WriteUint64(v.Uint())
case reflect.String:
return mw.WriteString(v.String())
case reflect.Slice, reflect.Array:
return mw.writeSlice(v)
case reflect.Struct:
return mw.writeStruct(v)
}
return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type())
}
// is the reflect.Kind encodable?
func isSupported(k reflect.Kind) bool {
switch k {
case reflect.Func, reflect.Chan, reflect.Invalid, reflect.UnsafePointer:
return false
default:
return true
}
}
// GuessSize guesses the size of the underlying
// value of 'i'. If the underlying value is not
// a simple builtin (or []byte), GuessSize defaults
// to 512.
func GuessSize(i interface{}) int {
if i == nil {
return NilSize
}
switch i := i.(type) {
case Sizer:
return i.Msgsize()
case Extension:
return ExtensionPrefixSize + i.Len()
case float64:
return Float64Size
case float32:
return Float32Size
case uint8, uint16, uint32, uint64, uint:
return UintSize
case int8, int16, int32, int64, int:
return IntSize
case []byte:
return BytesPrefixSize + len(i)
case string:
return StringPrefixSize + len(i)
case complex64:
return Complex64Size
case complex128:
return Complex128Size
case bool:
return BoolSize
case map[string]interface{}:
s := MapHeaderSize
for key, val := range i {
s += StringPrefixSize + len(key) + GuessSize(val)
}
return s
case map[string]string:
s := MapHeaderSize
for key, val := range i {
s += 2*StringPrefixSize + len(key) + len(val)
}
return s
default:
return 512
}
}
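// A sketch of the pattern GuessSize enables: pre-size the destination slice so
// the append-style encoder rarely reallocates (illustrative only; AppendIntf is
// defined in this package's []byte-oriented API).
func marshalIntf(v interface{}) ([]byte, error) {
	return AppendIntf(make([]byte, 0, GuessSize(v)), v)
}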

View File

@ -0,0 +1,411 @@
package msgp
import (
"math"
"reflect"
"time"
)
// ensure 'sz' extra bytes in 'b' between len(b) and cap(b)
func ensure(b []byte, sz int) ([]byte, int) {
l := len(b)
c := cap(b)
if c-l < sz {
o := make([]byte, (2*c)+sz) // exponential growth
n := copy(o, b)
return o[:n+sz], n
}
return b[:l+sz], l
}
// AppendMapHeader appends a map header with the
// given size to the slice
func AppendMapHeader(b []byte, sz uint32) []byte {
switch {
case sz <= 15:
return append(b, wfixmap(uint8(sz)))
case sz <= math.MaxUint16:
o, n := ensure(b, 3)
prefixu16(o[n:], mmap16, uint16(sz))
return o
default:
o, n := ensure(b, 5)
prefixu32(o[n:], mmap32, sz)
return o
}
}
// AppendArrayHeader appends an array header with
// the given size to the slice
func AppendArrayHeader(b []byte, sz uint32) []byte {
switch {
case sz <= 15:
return append(b, wfixarray(uint8(sz)))
case sz <= math.MaxUint16:
o, n := ensure(b, 3)
prefixu16(o[n:], marray16, uint16(sz))
return o
default:
o, n := ensure(b, 5)
prefixu32(o[n:], marray32, sz)
return o
}
}
// AppendNil appends a 'nil' byte to the slice
func AppendNil(b []byte) []byte { return append(b, mnil) }
// AppendFloat64 appends a float64 to the slice
func AppendFloat64(b []byte, f float64) []byte {
o, n := ensure(b, Float64Size)
prefixu64(o[n:], mfloat64, math.Float64bits(f))
return o
}
// AppendFloat32 appends a float32 to the slice
func AppendFloat32(b []byte, f float32) []byte {
o, n := ensure(b, Float32Size)
prefixu32(o[n:], mfloat32, math.Float32bits(f))
return o
}
// AppendInt64 appends an int64 to the slice
func AppendInt64(b []byte, i int64) []byte {
if i >= 0 {
switch {
case i <= math.MaxInt8:
return append(b, wfixint(uint8(i)))
case i <= math.MaxInt16:
o, n := ensure(b, 3)
putMint16(o[n:], int16(i))
return o
case i <= math.MaxInt32:
o, n := ensure(b, 5)
putMint32(o[n:], int32(i))
return o
default:
o, n := ensure(b, 9)
putMint64(o[n:], i)
return o
}
}
switch {
case i >= -32:
return append(b, wnfixint(int8(i)))
case i >= math.MinInt8:
o, n := ensure(b, 2)
putMint8(o[n:], int8(i))
return o
case i >= math.MinInt16:
o, n := ensure(b, 3)
putMint16(o[n:], int16(i))
return o
case i >= math.MinInt32:
o, n := ensure(b, 5)
putMint32(o[n:], int32(i))
return o
default:
o, n := ensure(b, 9)
putMint64(o[n:], i)
return o
}
}
// AppendInt appends an int to the slice
func AppendInt(b []byte, i int) []byte { return AppendInt64(b, int64(i)) }
// AppendInt8 appends an int8 to the slice
func AppendInt8(b []byte, i int8) []byte { return AppendInt64(b, int64(i)) }
// AppendInt16 appends an int16 to the slice
func AppendInt16(b []byte, i int16) []byte { return AppendInt64(b, int64(i)) }
// AppendInt32 appends an int32 to the slice
func AppendInt32(b []byte, i int32) []byte { return AppendInt64(b, int64(i)) }
// AppendUint64 appends a uint64 to the slice
func AppendUint64(b []byte, u uint64) []byte {
switch {
case u <= (1<<7)-1:
return append(b, wfixint(uint8(u)))
case u <= math.MaxUint8:
o, n := ensure(b, 2)
putMuint8(o[n:], uint8(u))
return o
case u <= math.MaxUint16:
o, n := ensure(b, 3)
putMuint16(o[n:], uint16(u))
return o
case u <= math.MaxUint32:
o, n := ensure(b, 5)
putMuint32(o[n:], uint32(u))
return o
default:
o, n := ensure(b, 9)
putMuint64(o[n:], u)
return o
}
}
// AppendUint appends a uint to the slice
func AppendUint(b []byte, u uint) []byte { return AppendUint64(b, uint64(u)) }
// AppendUint8 appends a uint8 to the slice
func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) }
// AppendByte is analogous to AppendUint8
func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) }
// AppendUint16 appends a uint16 to the slice
func AppendUint16(b []byte, u uint16) []byte { return AppendUint64(b, uint64(u)) }
// AppendUint32 appends a uint32 to the slice
func AppendUint32(b []byte, u uint32) []byte { return AppendUint64(b, uint64(u)) }
// AppendBytes appends bytes to the slice as MessagePack 'bin' data
func AppendBytes(b []byte, bts []byte) []byte {
sz := len(bts)
var o []byte
var n int
switch {
case sz <= math.MaxUint8:
o, n = ensure(b, 2+sz)
prefixu8(o[n:], mbin8, uint8(sz))
n += 2
case sz <= math.MaxUint16:
o, n = ensure(b, 3+sz)
prefixu16(o[n:], mbin16, uint16(sz))
n += 3
default:
o, n = ensure(b, 5+sz)
prefixu32(o[n:], mbin32, uint32(sz))
n += 5
}
return o[:n+copy(o[n:], bts)]
}
// AppendBool appends a bool to the slice
func AppendBool(b []byte, t bool) []byte {
if t {
return append(b, mtrue)
}
return append(b, mfalse)
}
// AppendString appends a string as a MessagePack 'str' to the slice
func AppendString(b []byte, s string) []byte {
sz := len(s)
var n int
var o []byte
switch {
case sz <= 31:
o, n = ensure(b, 1+sz)
o[n] = wfixstr(uint8(sz))
n++
case sz <= math.MaxUint8:
o, n = ensure(b, 2+sz)
prefixu8(o[n:], mstr8, uint8(sz))
n += 2
case sz <= math.MaxUint16:
o, n = ensure(b, 3+sz)
prefixu16(o[n:], mstr16, uint16(sz))
n += 3
default:
o, n = ensure(b, 5+sz)
prefixu32(o[n:], mstr32, uint32(sz))
n += 5
}
return o[:n+copy(o[n:], s)]
}
// AppendStringFromBytes appends a []byte
// as a MessagePack 'str' to the slice 'b'.
func AppendStringFromBytes(b []byte, str []byte) []byte {
sz := len(str)
var n int
var o []byte
switch {
case sz <= 31:
o, n = ensure(b, 1+sz)
o[n] = wfixstr(uint8(sz))
n++
case sz <= math.MaxUint8:
o, n = ensure(b, 2+sz)
prefixu8(o[n:], mstr8, uint8(sz))
n += 2
case sz <= math.MaxUint16:
o, n = ensure(b, 3+sz)
prefixu16(o[n:], mstr16, uint16(sz))
n += 3
default:
o, n = ensure(b, 5+sz)
prefixu32(o[n:], mstr32, uint32(sz))
n += 5
}
return o[:n+copy(o[n:], str)]
}
// AppendComplex64 appends a complex64 to the slice as a MessagePack extension
func AppendComplex64(b []byte, c complex64) []byte {
o, n := ensure(b, Complex64Size)
o[n] = mfixext8
o[n+1] = Complex64Extension
big.PutUint32(o[n+2:], math.Float32bits(real(c)))
big.PutUint32(o[n+6:], math.Float32bits(imag(c)))
return o
}
// AppendComplex128 appends a complex128 to the slice as a MessagePack extension
func AppendComplex128(b []byte, c complex128) []byte {
o, n := ensure(b, Complex128Size)
o[n] = mfixext16
o[n+1] = Complex128Extension
big.PutUint64(o[n+2:], math.Float64bits(real(c)))
big.PutUint64(o[n+10:], math.Float64bits(imag(c)))
return o
}
// AppendTime appends a time.Time to the slice as a MessagePack extension
func AppendTime(b []byte, t time.Time) []byte {
o, n := ensure(b, TimeSize)
t = t.UTC()
o[n] = mext8
o[n+1] = 12
o[n+2] = TimeExtension
putUnix(o[n+3:], t.Unix(), int32(t.Nanosecond()))
return o
}
// AppendMapStrStr appends a map[string]string to the slice
// as a MessagePack map with 'str'-type keys and values
func AppendMapStrStr(b []byte, m map[string]string) []byte {
sz := uint32(len(m))
b = AppendMapHeader(b, sz)
for key, val := range m {
b = AppendString(b, key)
b = AppendString(b, val)
}
return b
}
// AppendMapStrIntf appends a map[string]interface{} to the slice
// as a MessagePack map with 'str'-type keys.
func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) {
sz := uint32(len(m))
b = AppendMapHeader(b, sz)
var err error
for key, val := range m {
b = AppendString(b, key)
b, err = AppendIntf(b, val)
if err != nil {
return b, err
}
}
return b, nil
}
// AppendIntf appends the concrete type of 'i' to the
// provided []byte. 'i' must be one of the following:
// - 'nil'
// - A bool, float, string, []byte, int, uint, or complex
// - A map[string]interface{} or map[string]string
// - A []T, where T is another supported type
// - A *T, where T is another supported type
// - A type that satisfies the msgp.Marshaler interface
// - A type that satisfies the msgp.Extension interface
func AppendIntf(b []byte, i interface{}) ([]byte, error) {
if i == nil {
return AppendNil(b), nil
}
// all the concrete types
// for which we have methods
switch i := i.(type) {
case Marshaler:
return i.MarshalMsg(b)
case Extension:
return AppendExtension(b, i)
case bool:
return AppendBool(b, i), nil
case float32:
return AppendFloat32(b, i), nil
case float64:
return AppendFloat64(b, i), nil
case complex64:
return AppendComplex64(b, i), nil
case complex128:
return AppendComplex128(b, i), nil
case string:
return AppendString(b, i), nil
case []byte:
return AppendBytes(b, i), nil
case int8:
return AppendInt8(b, i), nil
case int16:
return AppendInt16(b, i), nil
case int32:
return AppendInt32(b, i), nil
case int64:
return AppendInt64(b, i), nil
case int:
return AppendInt64(b, int64(i)), nil
case uint:
return AppendUint64(b, uint64(i)), nil
case uint8:
return AppendUint8(b, i), nil
case uint16:
return AppendUint16(b, i), nil
case uint32:
return AppendUint32(b, i), nil
case uint64:
return AppendUint64(b, i), nil
case time.Time:
return AppendTime(b, i), nil
case map[string]interface{}:
return AppendMapStrIntf(b, i)
case map[string]string:
return AppendMapStrStr(b, i), nil
case []interface{}:
b = AppendArrayHeader(b, uint32(len(i)))
var err error
for _, k := range i {
b, err = AppendIntf(b, k)
if err != nil {
return b, err
}
}
return b, nil
}
var err error
v := reflect.ValueOf(i)
switch v.Kind() {
case reflect.Array, reflect.Slice:
l := v.Len()
b = AppendArrayHeader(b, uint32(l))
for i := 0; i < l; i++ {
b, err = AppendIntf(b, v.Index(i).Interface())
if err != nil {
return b, err
}
}
return b, nil
case reflect.Ptr:
if v.IsNil() {
return AppendNil(b), err
}
b, err = AppendIntf(b, v.Elem().Interface())
return b, err
default:
return b, &ErrUnsupportedType{T: v.Type()}
}
}
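// A sketch of composing the Append* calls directly, which is essentially what
// a generated MarshalMsg method does (illustrative only).
func appendUser(b []byte, name string, logins int64) []byte {
	b = AppendMapHeader(b, 2)
	b = AppendString(b, "name")
	b = AppendString(b, name)
	b = AppendString(b, "logins")
	b = AppendInt64(b, logins)
	return b
}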

View File

@ -0,0 +1,319 @@
package msgp
import (
"bytes"
"math"
"testing"
"time"
)
func TestIssue116(t *testing.T) {
data := AppendInt64(nil, math.MinInt64)
i, _, err := ReadInt64Bytes(data)
if err != nil {
t.Fatal(err)
}
if i != math.MinInt64 {
t.Errorf("put %d in and got %d out", int64(math.MinInt64), i)
}
var buf bytes.Buffer
w := NewWriter(&buf)
w.WriteInt64(math.MinInt64)
w.Flush()
i, err = NewReader(&buf).ReadInt64()
if err != nil {
t.Fatal(err)
}
if i != math.MinInt64 {
t.Errorf("put %d in and got %d out", int64(math.MinInt64), i)
}
}
func TestAppendMapHeader(t *testing.T) {
szs := []uint32{0, 1, uint32(tint8), uint32(tint16), tuint32}
var buf bytes.Buffer
en := NewWriter(&buf)
var bts []byte
for _, sz := range szs {
buf.Reset()
en.WriteMapHeader(sz)
en.Flush()
bts = AppendMapHeader(bts[0:0], sz)
if !bytes.Equal(buf.Bytes(), bts) {
t.Errorf("for size %d, encoder wrote %q and append wrote %q", sz, buf.Bytes(), bts)
}
}
}
func BenchmarkAppendMapHeader(b *testing.B) {
buf := make([]byte, 0, 9)
N := b.N / 4
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < N; i++ {
AppendMapHeader(buf[:0], 0)
AppendMapHeader(buf[:0], uint32(tint8))
AppendMapHeader(buf[:0], tuint16)
AppendMapHeader(buf[:0], tuint32)
}
}
func TestAppendArrayHeader(t *testing.T) {
szs := []uint32{0, 1, uint32(tint8), uint32(tint16), tuint32}
var buf bytes.Buffer
en := NewWriter(&buf)
var bts []byte
for _, sz := range szs {
buf.Reset()
en.WriteArrayHeader(sz)
en.Flush()
bts = AppendArrayHeader(bts[0:0], sz)
if !bytes.Equal(buf.Bytes(), bts) {
t.Errorf("for size %d, encoder wrote %q and append wrote %q", sz, buf.Bytes(), bts)
}
}
}
func BenchmarkAppendArrayHeader(b *testing.B) {
buf := make([]byte, 0, 9)
N := b.N / 4
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < N; i++ {
AppendArrayHeader(buf[:0], 0)
AppendArrayHeader(buf[:0], uint32(tint8))
AppendArrayHeader(buf[:0], tuint16)
AppendArrayHeader(buf[:0], tuint32)
}
}
func TestAppendNil(t *testing.T) {
var bts []byte
bts = AppendNil(bts[0:0])
if bts[0] != mnil {
t.Fatal("bts[0] is not 'nil'")
}
}
func TestAppendFloat64(t *testing.T) {
f := float64(3.14159)
var buf bytes.Buffer
en := NewWriter(&buf)
var bts []byte
en.WriteFloat64(f)
en.Flush()
bts = AppendFloat64(bts[0:0], f)
if !bytes.Equal(buf.Bytes(), bts) {
t.Errorf("for float %f, encoder wrote %q; append wrote %q", f, buf.Bytes(), bts)
}
}
func BenchmarkAppendFloat64(b *testing.B) {
f := float64(3.14159)
buf := make([]byte, 0, 9)
b.SetBytes(9)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
AppendFloat64(buf[0:0], f)
}
}
func TestAppendFloat32(t *testing.T) {
f := float32(3.14159)
var buf bytes.Buffer
en := NewWriter(&buf)
var bts []byte
en.WriteFloat32(f)
en.Flush()
bts = AppendFloat32(bts[0:0], f)
if !bytes.Equal(buf.Bytes(), bts) {
t.Errorf("for float %f, encoder wrote %q; append wrote %q", f, buf.Bytes(), bts)
}
}
func BenchmarkAppendFloat32(b *testing.B) {
f := float32(3.14159)
buf := make([]byte, 0, 5)
b.SetBytes(5)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
AppendFloat32(buf[0:0], f)
}
}
func TestAppendInt64(t *testing.T) {
is := []int64{0, 1, -5, -50, int64(tint16), int64(tint32), int64(tint64)}
var buf bytes.Buffer
en := NewWriter(&buf)
var bts []byte
for _, i := range is {
buf.Reset()
en.WriteInt64(i)
en.Flush()
bts = AppendInt64(bts[0:0], i)
if !bytes.Equal(buf.Bytes(), bts) {
t.Errorf("for int64 %d, encoder wrote %q; append wrote %q", i, buf.Bytes(), bts)
}
}
}
func BenchmarkAppendInt64(b *testing.B) {
is := []int64{0, 1, -5, -50, int64(tint16), int64(tint32), int64(tint64)}
l := len(is)
buf := make([]byte, 0, 9)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
AppendInt64(buf[0:0], is[i%l])
}
}
func TestAppendUint64(t *testing.T) {
us := []uint64{0, 1, uint64(tuint16), uint64(tuint32), tuint64}
var buf bytes.Buffer
en := NewWriter(&buf)
var bts []byte
for _, u := range us {
buf.Reset()
en.WriteUint64(u)
en.Flush()
bts = AppendUint64(bts[0:0], u)
if !bytes.Equal(buf.Bytes(), bts) {
t.Errorf("for uint64 %d, encoder wrote %q; append wrote %q", u, buf.Bytes(), bts)
}
}
}
func BenchmarkAppendUint64(b *testing.B) {
us := []uint64{0, 1, 15, uint64(tuint16), uint64(tuint32), tuint64}
buf := make([]byte, 0, 9)
b.ReportAllocs()
b.ResetTimer()
l := len(us)
for i := 0; i < b.N; i++ {
AppendUint64(buf[0:0], us[i%l])
}
}
func TestAppendBytes(t *testing.T) {
sizes := []int{0, 1, 225, int(tuint32)}
var buf bytes.Buffer
en := NewWriter(&buf)
var bts []byte
for _, sz := range sizes {
buf.Reset()
b := RandBytes(sz)
en.WriteBytes(b)
en.Flush()
bts = AppendBytes(b[0:0], b)
if !bytes.Equal(buf.Bytes(), bts) {
t.Errorf("for bytes of length %d, encoder wrote %d bytes and append wrote %d bytes", sz, buf.Len(), len(bts))
}
}
}
func benchappendBytes(size uint32, b *testing.B) {
bts := RandBytes(int(size))
buf := make([]byte, 0, len(bts)+5)
b.SetBytes(int64(len(bts) + 5))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
AppendBytes(buf[0:0], bts)
}
}
func BenchmarkAppend16Bytes(b *testing.B) { benchappendBytes(16, b) }
func BenchmarkAppend256Bytes(b *testing.B) { benchappendBytes(256, b) }
func BenchmarkAppend2048Bytes(b *testing.B) { benchappendBytes(2048, b) }
func TestAppendString(t *testing.T) {
sizes := []int{0, 1, 225, int(tuint32)}
var buf bytes.Buffer
en := NewWriter(&buf)
var bts []byte
for _, sz := range sizes {
buf.Reset()
s := string(RandBytes(sz))
en.WriteString(s)
en.Flush()
bts = AppendString(bts[0:0], s)
if !bytes.Equal(buf.Bytes(), bts) {
t.Errorf("for string of length %d, encoder wrote %d bytes and append wrote %d bytes", sz, buf.Len(), len(bts))
t.Errorf("WriteString prefix: %x", buf.Bytes()[0:5])
t.Errorf("Appendstring prefix: %x", bts[0:5])
}
}
}
func benchappendString(size uint32, b *testing.B) {
str := string(RandBytes(int(size)))
buf := make([]byte, 0, len(str)+5)
b.SetBytes(int64(len(str) + 5))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
AppendString(buf[0:0], str)
}
}
func BenchmarkAppend16String(b *testing.B) { benchappendString(16, b) }
func BenchmarkAppend256String(b *testing.B) { benchappendString(256, b) }
func BenchmarkAppend2048String(b *testing.B) { benchappendString(2048, b) }
func TestAppendBool(t *testing.T) {
vs := []bool{true, false}
var buf bytes.Buffer
en := NewWriter(&buf)
var bts []byte
for _, v := range vs {
buf.Reset()
en.WriteBool(v)
en.Flush()
bts = AppendBool(bts[0:0], v)
if !bytes.Equal(buf.Bytes(), bts) {
t.Errorf("for %t, encoder wrote %q and append wrote %q", v, buf.Bytes(), bts)
}
}
}
func BenchmarkAppendBool(b *testing.B) {
vs := []bool{true, false}
buf := make([]byte, 0, 1)
b.SetBytes(1)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
AppendBool(buf[0:0], vs[i%2])
}
}
func BenchmarkAppendTime(b *testing.B) {
t := time.Now()
b.SetBytes(15)
buf := make([]byte, 0, 15)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
AppendTime(buf[0:0], t)
}
}

View File

@ -0,0 +1,405 @@
package msgp
import (
"bytes"
"math"
"math/rand"
"testing"
"time"
)
var (
tint8 int8 = 126 // cannot be most fix* types
tint16 int16 = 150 // cannot be int8
tint32 int32 = math.MaxInt16 + 100 // cannot be int16
tint64 int64 = math.MaxInt32 + 100 // cannot be int32
tuint16 uint32 = 300 // cannot be uint8
tuint32 uint32 = math.MaxUint16 + 100 // cannot be uint16
tuint64 uint64 = math.MaxUint32 + 100 // cannot be uint32
)
func RandBytes(sz int) []byte {
out := make([]byte, sz)
for i := range out {
out[i] = byte(rand.Int63n(math.MaxInt64) % 256)
}
return out
}
func TestWriteMapHeader(t *testing.T) {
tests := []struct {
Sz uint32
Outbytes []byte
}{
{0, []byte{mfixmap}},
{1, []byte{mfixmap | byte(1)}},
{100, []byte{mmap16, byte(uint16(100) >> 8), byte(uint16(100))}},
{tuint32,
[]byte{mmap32,
byte(tuint32 >> 24),
byte(tuint32 >> 16),
byte(tuint32 >> 8),
byte(tuint32),
},
},
}
var buf bytes.Buffer
var err error
wr := NewWriter(&buf)
for _, test := range tests {
buf.Reset()
err = wr.WriteMapHeader(test.Sz)
if err != nil {
t.Error(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf.Bytes(), test.Outbytes) {
t.Errorf("Expected bytes %x; got %x", test.Outbytes, buf.Bytes())
}
}
}
func BenchmarkWriteMapHeader(b *testing.B) {
wr := NewWriter(Nowhere)
N := b.N / 4
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < N; i++ {
wr.WriteMapHeader(0)
wr.WriteMapHeader(8)
wr.WriteMapHeader(tuint16)
wr.WriteMapHeader(tuint32)
}
}
func TestWriteArrayHeader(t *testing.T) {
tests := []struct {
Sz uint32
Outbytes []byte
}{
{0, []byte{mfixarray}},
{1, []byte{mfixarray | byte(1)}},
{tuint16, []byte{marray16, byte(tuint16 >> 8), byte(tuint16)}},
{tuint32, []byte{marray32, byte(tuint32 >> 24), byte(tuint32 >> 16), byte(tuint32 >> 8), byte(tuint32)}},
}
var buf bytes.Buffer
var err error
wr := NewWriter(&buf)
for _, test := range tests {
buf.Reset()
err = wr.WriteArrayHeader(test.Sz)
if err != nil {
t.Error(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf.Bytes(), test.Outbytes) {
t.Errorf("Expected bytes %x; got %x", test.Outbytes, buf.Bytes())
}
}
}
func TestReadWriteStringHeader(t *testing.T) {
sizes := []uint32{0, 5, 8, 19, 150, tuint16, tuint32}
var buf bytes.Buffer
var err error
wr := NewWriter(&buf)
for _, sz := range sizes {
buf.Reset()
err = wr.WriteStringHeader(sz)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
var nsz uint32
nsz, err = NewReader(&buf).ReadStringHeader()
if err != nil {
t.Fatal(err)
}
if nsz != sz {
t.Errorf("put in size %d but got out size %d", sz, nsz)
}
}
}
func TestReadWriteBytesHeader(t *testing.T) {
sizes := []uint32{0, 5, 8, 19, 150, tuint16, tuint32}
var buf bytes.Buffer
var err error
wr := NewWriter(&buf)
for _, sz := range sizes {
buf.Reset()
err = wr.WriteBytesHeader(sz)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
var nsz uint32
nsz, err = NewReader(&buf).ReadBytesHeader()
if err != nil {
t.Fatal(err)
}
if nsz != sz {
t.Errorf("put in size %d but got out size %d", sz, nsz)
}
}
}
func BenchmarkWriteArrayHeader(b *testing.B) {
wr := NewWriter(Nowhere)
N := b.N / 4
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < N; i++ {
wr.WriteArrayHeader(0)
wr.WriteArrayHeader(16)
wr.WriteArrayHeader(tuint16)
wr.WriteArrayHeader(tuint32)
}
}
func TestWriteNil(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
err := wr.WriteNil()
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
bts := buf.Bytes()
if bts[0] != mnil {
t.Errorf("Expected %x; wrote %x", mnil, bts[0])
}
}
func TestWriteFloat64(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
for i := 0; i < 10000; i++ {
buf.Reset()
flt := (rand.Float64() - 0.5) * math.MaxFloat64
err := wr.WriteFloat64(flt)
if err != nil {
t.Errorf("Error with %f: %s", flt, err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
bts := buf.Bytes()
if bts[0] != mfloat64 {
t.Errorf("Leading byte was %x and not %x", bts[0], mfloat64)
}
}
}
func BenchmarkWriteFloat64(b *testing.B) {
f := rand.Float64()
wr := NewWriter(Nowhere)
b.SetBytes(9)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
wr.WriteFloat64(f)
}
}
func TestWriteFloat32(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
for i := 0; i < 10000; i++ {
buf.Reset()
flt := (rand.Float32() - 0.5) * math.MaxFloat32
err := wr.WriteFloat32(flt)
if err != nil {
t.Errorf("Error with %f: %s", flt, err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
bts := buf.Bytes()
if bts[0] != mfloat32 {
t.Errorf("Leading byte was %x and not %x", bts[0], mfloat64)
}
}
}
func BenchmarkWriteFloat32(b *testing.B) {
f := rand.Float32()
wr := NewWriter(Nowhere)
b.SetBytes(5)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
wr.WriteFloat32(f)
}
}
func TestWriteInt64(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
for i := 0; i < 10000; i++ {
buf.Reset()
num := (rand.Int63n(math.MaxInt64)) - (math.MaxInt64 / 2)
err := wr.WriteInt64(num)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
if buf.Len() > 9 {
t.Errorf("buffer length should be <= 9; it's %d", buf.Len())
}
}
}
func BenchmarkWriteInt64(b *testing.B) {
wr := NewWriter(Nowhere)
b.SetBytes(9)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
wr.WriteInt64(int64(tint64))
}
}
func TestWriteUint64(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
for i := 0; i < 10000; i++ {
buf.Reset()
num := uint64(rand.Int63n(math.MaxInt64))
err := wr.WriteUint64(num)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
if buf.Len() > 9 {
t.Errorf("buffer length should be <= 9; it's %d", buf.Len())
}
}
}
func BenchmarkWriteUint64(b *testing.B) {
wr := NewWriter(Nowhere)
b.SetBytes(9)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
wr.WriteUint64(uint64(tuint64))
}
}
func TestWriteBytes(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
sizes := []int{0, 1, 225, int(tuint32)}
for _, size := range sizes {
buf.Reset()
bts := RandBytes(size)
err := wr.WriteBytes(bts)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
if buf.Len() < len(bts) {
t.Errorf("somehow, %d bytes were encoded in %d bytes", len(bts), buf.Len())
}
}
}
func benchwrBytes(size uint32, b *testing.B) {
bts := RandBytes(int(size))
wr := NewWriter(Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
wr.WriteBytes(bts)
}
}
func BenchmarkWrite16Bytes(b *testing.B) { benchwrBytes(16, b) }
func BenchmarkWrite256Bytes(b *testing.B) { benchwrBytes(256, b) }
func BenchmarkWrite2048Bytes(b *testing.B) { benchwrBytes(2048, b) }
func TestWriteTime(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
tm := time.Now()
err := wr.WriteTime(tm)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
if buf.Len() != 15 {
t.Errorf("expected time.Time to be %d bytes; got %d", 15, buf.Len())
}
newt, err := NewReader(&buf).ReadTime()
if err != nil {
t.Fatal(err)
}
if !newt.Equal(tm) {
t.Errorf("in/out not equal; %s in and %s out", tm, newt)
}
}
func BenchmarkWriteTime(b *testing.B) {
t := time.Now()
wr := NewWriter(Nowhere)
b.SetBytes(15)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
wr.WriteTime(t)
}
}

File diff suppressed because one or more lines are too long

View File

@@ -1,5 +1,5 @@
import { Record, List } from 'immutable';
import { UPDATE_PATH } from 'redux-simple-router';
import { UPDATE_LOCATION } from 'redux-simple-router';
import createReducer from '../util/createReducer';
import * as actions from '../actions';
@@ -38,8 +38,8 @@ export default createReducer(new State(), {
return state.set('history', state.history.filter(tab => tab.server !== action.server));
},
[UPDATE_PATH](state, action) {
if (action.payload.path.indexOf('.') === -1 && state.selected.server) {
[UPDATE_LOCATION](state, action) {
if (action.location.pathname.indexOf('.') === -1 && state.selected.server) {
return state.set('selected', new Tab());
}

View File

@@ -1,4 +1,4 @@
import { padLeft } from 'lodash';
import padStart from 'lodash/padStart';
export wrapMessages from './wrapMessages';
@@ -11,8 +11,8 @@ export function normalizeChannel(channel) {
}
export function timestamp(date = new Date()) {
const h = padLeft(date.getHours(), 2, '0');
const m = padLeft(date.getMinutes(), 2, '0');
const h = padStart(date.getHours(), 2, '0');
const m = padStart(date.getMinutes(), 2, '0');
return h + ':' + m;
}

View File

@@ -76,7 +76,7 @@ func (i *ircHandler) join(msg *irc.Message) {
channelStore.AddUser(msg.Nick, i.client.Host, msg.Params[0])
if msg.Nick == i.client.GetNick() {
i.session.user.AddChannel(storage.Channel{
go i.session.user.AddChannel(storage.Channel{
Server: i.client.Host,
Name: msg.Params[0],
})
@@ -96,7 +96,7 @@ func (i *ircHandler) part(msg *irc.Message) {
channelStore.RemoveUser(msg.Nick, i.client.Host, msg.Params[0])
if msg.Nick == i.client.GetNick() {
i.session.user.RemoveChannel(i.client.Host, msg.Params[0])
go i.session.user.RemoveChannel(i.client.Host, msg.Params[0])
}
}

View File

@@ -108,7 +108,7 @@ func (h *wsHandler) connect(b []byte) {
i.Connect(data.Server)
go newIRCHandler(i, h.session).run()
h.session.user.AddServer(storage.Server{
go h.session.user.AddServer(storage.Server{
Name: data.Name,
Host: host,
Port: port,
@@ -151,7 +151,7 @@ func (h *wsHandler) quit(b []byte) {
i.Quit()
h.session.deleteIRC(data.Server)
channelStore.RemoveUserAll(i.GetNick(), data.Server)
h.session.user.RemoveServer(data.Server)
go h.session.user.RemoveServer(data.Server)
}
}
@@ -170,7 +170,7 @@ func (h *wsHandler) nick(b []byte) {
if i, ok := h.session.getIRC(data.Server); ok {
i.Nick(data.New)
h.session.user.SetNick(data.New, data.Server)
go h.session.user.SetNick(data.New, data.Server)
}
}

View File

@@ -17,7 +17,7 @@ func TestAddRemoveUser(t *testing.T) {
channelStore := NewChannelStore()
channelStore.AddUser("user", "srv", "#chan")
channelStore.AddUser("user2", "srv", "#chan")
assert.Equal(t, channelStore.GetUsers("srv", "#chan"), []string{"user", "user2"})
assert.Equal(t, []string{"user", "user2"}, channelStore.GetUsers("srv", "#chan"))
channelStore.RemoveUser("user", "srv", "#chan")
assert.Equal(t, []string{"user2"}, channelStore.GetUsers("srv", "#chan"))
}

48
storage/schema.go Normal file
View File

@@ -0,0 +1,48 @@
package storage
import (
"crypto/tls"
"sync"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/boltdb/bolt"
)
//go:generate msgp
type User struct {
ID uint64
Username string
id []byte
messageLog *bolt.DB
messageIndex bleve.Index
certificate *tls.Certificate
lock sync.Mutex
}
type Server struct {
Name string `json:"name"`
Host string `json:"host"`
Port string `json:"port,omitempty"`
TLS bool `json:"tls"`
Password string `json:"password,omitempty"`
Nick string `json:"nick"`
Username string `json:"username"`
Realname string `json:"realname"`
}
type Channel struct {
Server string `json:"server"`
Name string `json:"name"`
Topic string `json:"topic,omitempty"`
}
type Message struct {
ID uint64 `json:"id"`
Server string `json:"server"`
From string `json:"from"`
To string `json:"to,omitempty"`
Content string `json:"content"`
Time int64 `json:"time"`
}
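The `//go:generate msgp` directive in this file is what produces the `storage/schema_gen.go` file that follows: running `go generate` over the package (with the msgp code generation tool installed) emits MarshalMsg/UnmarshalMsg and EncodeMsg/DecodeMsg methods for each of these structs. A minimal round-trip sketch, not part of the commit and assuming the repository import path `github.com/khlieng/dispatch/storage`:

```go
package main

import (
	"fmt"

	"github.com/khlieng/dispatch/storage"
)

func main() {
	ch := storage.Channel{Server: "irc.freenode.net", Name: "#go-nuts", Topic: "Go"}

	// MarshalMsg appends the MessagePack encoding to the supplied slice;
	// passing nil lets it allocate a buffer sized by Msgsize().
	data, err := ch.MarshalMsg(nil)
	if err != nil {
		panic(err)
	}

	// UnmarshalMsg decodes the struct back out and returns any leftover bytes.
	var decoded storage.Channel
	if _, err := decoded.UnmarshalMsg(data); err != nil {
		panic(err)
	}

	fmt.Println(decoded.Server, decoded.Name, decoded.Topic)
}
```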

711
storage/schema_gen.go Normal file
View File

@@ -0,0 +1,711 @@
package storage
// NOTE: THIS FILE WAS PRODUCED BY THE
// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
// DO NOT EDIT
import (
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *Channel) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var isz uint32
isz, err = dc.ReadMapHeader()
if err != nil {
return
}
for isz > 0 {
isz--
field, err = dc.ReadMapKeyPtr()
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "Server":
z.Server, err = dc.ReadString()
if err != nil {
return
}
case "Name":
z.Name, err = dc.ReadString()
if err != nil {
return
}
case "Topic":
z.Topic, err = dc.ReadString()
if err != nil {
return
}
default:
err = dc.Skip()
if err != nil {
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z Channel) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "Server"
err = en.Append(0x83, 0xa6, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72)
if err != nil {
return err
}
err = en.WriteString(z.Server)
if err != nil {
return
}
// write "Name"
err = en.Append(0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return err
}
err = en.WriteString(z.Name)
if err != nil {
return
}
// write "Topic"
err = en.Append(0xa5, 0x54, 0x6f, 0x70, 0x69, 0x63)
if err != nil {
return err
}
err = en.WriteString(z.Topic)
if err != nil {
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z Channel) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "Server"
o = append(o, 0x83, 0xa6, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72)
o = msgp.AppendString(o, z.Server)
// string "Name"
o = append(o, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "Topic"
o = append(o, 0xa5, 0x54, 0x6f, 0x70, 0x69, 0x63)
o = msgp.AppendString(o, z.Topic)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Channel) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var isz uint32
isz, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
return
}
for isz > 0 {
isz--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "Server":
z.Server, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
case "Name":
z.Name, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
case "Topic":
z.Topic, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
return
}
}
}
o = bts
return
}
func (z Channel) Msgsize() (s int) {
s = 1 + 7 + msgp.StringPrefixSize + len(z.Server) + 5 + msgp.StringPrefixSize + len(z.Name) + 6 + msgp.StringPrefixSize + len(z.Topic)
return
}
// DecodeMsg implements msgp.Decodable
func (z *Message) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var isz uint32
isz, err = dc.ReadMapHeader()
if err != nil {
return
}
for isz > 0 {
isz--
field, err = dc.ReadMapKeyPtr()
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, err = dc.ReadUint64()
if err != nil {
return
}
case "Server":
z.Server, err = dc.ReadString()
if err != nil {
return
}
case "From":
z.From, err = dc.ReadString()
if err != nil {
return
}
case "To":
z.To, err = dc.ReadString()
if err != nil {
return
}
case "Content":
z.Content, err = dc.ReadString()
if err != nil {
return
}
case "Time":
z.Time, err = dc.ReadInt64()
if err != nil {
return
}
default:
err = dc.Skip()
if err != nil {
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *Message) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 6
// write "ID"
err = en.Append(0x86, 0xa2, 0x49, 0x44)
if err != nil {
return err
}
err = en.WriteUint64(z.ID)
if err != nil {
return
}
// write "Server"
err = en.Append(0xa6, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72)
if err != nil {
return err
}
err = en.WriteString(z.Server)
if err != nil {
return
}
// write "From"
err = en.Append(0xa4, 0x46, 0x72, 0x6f, 0x6d)
if err != nil {
return err
}
err = en.WriteString(z.From)
if err != nil {
return
}
// write "To"
err = en.Append(0xa2, 0x54, 0x6f)
if err != nil {
return err
}
err = en.WriteString(z.To)
if err != nil {
return
}
// write "Content"
err = en.Append(0xa7, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74)
if err != nil {
return err
}
err = en.WriteString(z.Content)
if err != nil {
return
}
// write "Time"
err = en.Append(0xa4, 0x54, 0x69, 0x6d, 0x65)
if err != nil {
return err
}
err = en.WriteInt64(z.Time)
if err != nil {
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *Message) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 6
// string "ID"
o = append(o, 0x86, 0xa2, 0x49, 0x44)
o = msgp.AppendUint64(o, z.ID)
// string "Server"
o = append(o, 0xa6, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72)
o = msgp.AppendString(o, z.Server)
// string "From"
o = append(o, 0xa4, 0x46, 0x72, 0x6f, 0x6d)
o = msgp.AppendString(o, z.From)
// string "To"
o = append(o, 0xa2, 0x54, 0x6f)
o = msgp.AppendString(o, z.To)
// string "Content"
o = append(o, 0xa7, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74)
o = msgp.AppendString(o, z.Content)
// string "Time"
o = append(o, 0xa4, 0x54, 0x69, 0x6d, 0x65)
o = msgp.AppendInt64(o, z.Time)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Message) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var isz uint32
isz, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
return
}
for isz > 0 {
isz--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
return
}
case "Server":
z.Server, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
case "From":
z.From, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
case "To":
z.To, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
case "Content":
z.Content, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
case "Time":
z.Time, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
return
}
}
}
o = bts
return
}
func (z *Message) Msgsize() (s int) {
s = 1 + 3 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Server) + 5 + msgp.StringPrefixSize + len(z.From) + 3 + msgp.StringPrefixSize + len(z.To) + 8 + msgp.StringPrefixSize + len(z.Content) + 5 + msgp.Int64Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *Server) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var isz uint32
isz, err = dc.ReadMapHeader()
if err != nil {
return
}
for isz > 0 {
isz--
field, err = dc.ReadMapKeyPtr()
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, err = dc.ReadString()
if err != nil {
return
}
case "Host":
z.Host, err = dc.ReadString()
if err != nil {
return
}
case "Port":
z.Port, err = dc.ReadString()
if err != nil {
return
}
case "TLS":
z.TLS, err = dc.ReadBool()
if err != nil {
return
}
case "Password":
z.Password, err = dc.ReadString()
if err != nil {
return
}
case "Nick":
z.Nick, err = dc.ReadString()
if err != nil {
return
}
case "Username":
z.Username, err = dc.ReadString()
if err != nil {
return
}
case "Realname":
z.Realname, err = dc.ReadString()
if err != nil {
return
}
default:
err = dc.Skip()
if err != nil {
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *Server) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 8
// write "Name"
err = en.Append(0x88, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return err
}
err = en.WriteString(z.Name)
if err != nil {
return
}
// write "Host"
err = en.Append(0xa4, 0x48, 0x6f, 0x73, 0x74)
if err != nil {
return err
}
err = en.WriteString(z.Host)
if err != nil {
return
}
// write "Port"
err = en.Append(0xa4, 0x50, 0x6f, 0x72, 0x74)
if err != nil {
return err
}
err = en.WriteString(z.Port)
if err != nil {
return
}
// write "TLS"
err = en.Append(0xa3, 0x54, 0x4c, 0x53)
if err != nil {
return err
}
err = en.WriteBool(z.TLS)
if err != nil {
return
}
// write "Password"
err = en.Append(0xa8, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64)
if err != nil {
return err
}
err = en.WriteString(z.Password)
if err != nil {
return
}
// write "Nick"
err = en.Append(0xa4, 0x4e, 0x69, 0x63, 0x6b)
if err != nil {
return err
}
err = en.WriteString(z.Nick)
if err != nil {
return
}
// write "Username"
err = en.Append(0xa8, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65)
if err != nil {
return err
}
err = en.WriteString(z.Username)
if err != nil {
return
}
// write "Realname"
err = en.Append(0xa8, 0x52, 0x65, 0x61, 0x6c, 0x6e, 0x61, 0x6d, 0x65)
if err != nil {
return err
}
err = en.WriteString(z.Realname)
if err != nil {
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *Server) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 8
// string "Name"
o = append(o, 0x88, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "Host"
o = append(o, 0xa4, 0x48, 0x6f, 0x73, 0x74)
o = msgp.AppendString(o, z.Host)
// string "Port"
o = append(o, 0xa4, 0x50, 0x6f, 0x72, 0x74)
o = msgp.AppendString(o, z.Port)
// string "TLS"
o = append(o, 0xa3, 0x54, 0x4c, 0x53)
o = msgp.AppendBool(o, z.TLS)
// string "Password"
o = append(o, 0xa8, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64)
o = msgp.AppendString(o, z.Password)
// string "Nick"
o = append(o, 0xa4, 0x4e, 0x69, 0x63, 0x6b)
o = msgp.AppendString(o, z.Nick)
// string "Username"
o = append(o, 0xa8, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Username)
// string "Realname"
o = append(o, 0xa8, 0x52, 0x65, 0x61, 0x6c, 0x6e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Realname)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Server) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var isz uint32
isz, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
return
}
for isz > 0 {
isz--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
case "Host":
z.Host, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
case "Port":
z.Port, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
case "TLS":
z.TLS, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
return
}
case "Password":
z.Password, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
case "Nick":
z.Nick, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
case "Username":
z.Username, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
case "Realname":
z.Realname, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
return
}
}
}
o = bts
return
}
func (z *Server) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 5 + msgp.StringPrefixSize + len(z.Host) + 5 + msgp.StringPrefixSize + len(z.Port) + 4 + msgp.BoolSize + 9 + msgp.StringPrefixSize + len(z.Password) + 5 + msgp.StringPrefixSize + len(z.Nick) + 9 + msgp.StringPrefixSize + len(z.Username) + 9 + msgp.StringPrefixSize + len(z.Realname)
return
}
// DecodeMsg implements msgp.Decodable
func (z *User) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var isz uint32
isz, err = dc.ReadMapHeader()
if err != nil {
return
}
for isz > 0 {
isz--
field, err = dc.ReadMapKeyPtr()
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, err = dc.ReadUint64()
if err != nil {
return
}
case "Username":
z.Username, err = dc.ReadString()
if err != nil {
return
}
default:
err = dc.Skip()
if err != nil {
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z User) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "ID"
err = en.Append(0x82, 0xa2, 0x49, 0x44)
if err != nil {
return err
}
err = en.WriteUint64(z.ID)
if err != nil {
return
}
// write "Username"
err = en.Append(0xa8, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65)
if err != nil {
return err
}
err = en.WriteString(z.Username)
if err != nil {
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z User) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "ID"
o = append(o, 0x82, 0xa2, 0x49, 0x44)
o = msgp.AppendUint64(o, z.ID)
// string "Username"
o = append(o, 0xa8, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Username)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *User) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var isz uint32
isz, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
return
}
for isz > 0 {
isz--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
return
}
case "Username":
z.Username, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
return
}
}
}
o = bts
return
}
func (z User) Msgsize() (s int) {
s = 1 + 3 + msgp.Uint64Size + 9 + msgp.StringPrefixSize + len(z.Username)
return
}
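The generated types also satisfy `msgp.Encodable` and `msgp.Decodable`, so they can be streamed through `msgp.Writer`/`msgp.Reader` instead of being marshalled into standalone byte slices; the generated tests below exercise both paths. A small streaming sketch, illustration only and using the vendored import path from this commit:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/tinylib/msgp/msgp"
	"github.com/khlieng/dispatch/storage"
)

func main() {
	var buf bytes.Buffer

	// msgp.Encode wraps the writer in a msgp.Writer, calls EncodeMsg, and flushes.
	srv := storage.Server{Name: "freenode", Host: "irc.freenode.net", Port: "6697", TLS: true}
	if err := msgp.Encode(&buf, &srv); err != nil {
		panic(err)
	}

	// msgp.Decode reads the stream back through DecodeMsg.
	var out storage.Server
	if err := msgp.Decode(&buf, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Name, out.Host, out.Port, out.TLS)
}
```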

464
storage/schema_gen_test.go Normal file
View File

@@ -0,0 +1,464 @@
package storage
// NOTE: THIS FILE WAS PRODUCED BY THE
// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
// DO NOT EDIT
import (
"bytes"
"testing"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalChannel(t *testing.T) {
v := Channel{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgChannel(b *testing.B) {
v := Channel{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgChannel(b *testing.B) {
v := Channel{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalChannel(b *testing.B) {
v := Channel{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeChannel(t *testing.T) {
v := Channel{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
}
vn := Channel{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeChannel(b *testing.B) {
v := Channel{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeChannel(b *testing.B) {
v := Channel{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalMessage(t *testing.T) {
v := Message{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMessage(b *testing.B) {
v := Message{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMessage(b *testing.B) {
v := Message{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMessage(b *testing.B) {
v := Message{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeMessage(t *testing.T) {
v := Message{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
}
vn := Message{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeMessage(b *testing.B) {
v := Message{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeMessage(b *testing.B) {
v := Message{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalServer(t *testing.T) {
v := Server{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgServer(b *testing.B) {
v := Server{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgServer(b *testing.B) {
v := Server{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalServer(b *testing.B) {
v := Server{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeServer(t *testing.T) {
v := Server{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
}
vn := Server{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeServer(b *testing.B) {
v := Server{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeServer(b *testing.B) {
v := Server{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalUser(t *testing.T) {
v := User{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgUser(b *testing.B) {
v := User{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgUser(b *testing.B) {
v := User{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalUser(b *testing.B) {
v := User{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeUser(t *testing.T) {
v := User{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
}
vn := User{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeUser(b *testing.B) {
v := User{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeUser(b *testing.B) {
v := User{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

View File

@@ -2,54 +2,21 @@ package storage
import (
"bytes"
"crypto/tls"
"encoding/json"
"strconv"
"sync"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/blevesearch/bleve"
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/boltdb/bolt"
)
type User struct {
ID uint64
Username string
id []byte
messageLog *bolt.DB
messageIndex bleve.Index
certificate *tls.Certificate
lock sync.Mutex
}
type Server struct {
Name string `json:"name"`
Host string `json:"host"`
Port string `json:"port,omitempty"`
TLS bool `json:"tls"`
Password string `json:"password,omitempty"`
Nick string `json:"nick"`
Username string `json:"username"`
Realname string `json:"realname"`
}
type Channel struct {
Server string `json:"server"`
Name string `json:"name"`
Users []string `json:"users,omitempty"`
Topic string `json:"topic,omitempty"`
}
func NewUser() (*User, error) {
user := &User{}
err := db.Update(func(tx *bolt.Tx) error {
err := db.Batch(func(tx *bolt.Tx) error {
b := tx.Bucket(bucketUsers)
user.ID, _ = b.NextSequence()
user.Username = strconv.FormatUint(user.ID, 10)
data, err := json.Marshal(user)
data, err := user.MarshalMsg(nil)
if err != nil {
return err
}
@@ -108,8 +75,8 @@ func (u *User) GetServers() []Server {
c := tx.Bucket(bucketServers).Cursor()
for k, v := c.Seek(u.id); bytes.HasPrefix(k, u.id); k, v = c.Next() {
var server Server
json.Unmarshal(v, &server)
server := Server{}
server.UnmarshalMsg(v)
servers = append(servers, server)
}
@@ -126,8 +93,8 @@ func (u *User) GetChannels() []Channel {
c := tx.Bucket(bucketChannels).Cursor()
for k, v := c.Seek(u.id); bytes.HasPrefix(k, u.id); k, v = c.Next() {
var channel Channel
json.Unmarshal(v, &channel)
channel := Channel{}
channel.UnmarshalMsg(v)
channels = append(channels, channel)
}
@@ -138,9 +105,9 @@ func (u *User) GetChannels() []Channel {
}
func (u *User) AddServer(server Server) {
db.Update(func(tx *bolt.Tx) error {
db.Batch(func(tx *bolt.Tx) error {
b := tx.Bucket(bucketServers)
data, _ := json.Marshal(server)
data, _ := server.MarshalMsg(nil)
b.Put(u.serverID(server.Host), data)
@@ -149,9 +116,9 @@ func (u *User) AddServer(server Server) {
}
func (u *User) AddChannel(channel Channel) {
db.Update(func(tx *bolt.Tx) error {
db.Batch(func(tx *bolt.Tx) error {
b := tx.Bucket(bucketChannels)
data, _ := json.Marshal(channel)
data, _ := channel.MarshalMsg(nil)
b.Put(u.channelID(channel.Server, channel.Name), data)
@@ -160,15 +127,15 @@ func (u *User) AddChannel(channel Channel) {
}
func (u *User) SetNick(nick, address string) {
db.Update(func(tx *bolt.Tx) error {
db.Batch(func(tx *bolt.Tx) error {
b := tx.Bucket(bucketServers)
id := u.serverID(address)
var server Server
json.Unmarshal(b.Get(id), &server)
server := Server{}
server.UnmarshalMsg(b.Get(id))
server.Nick = nick
data, _ := json.Marshal(server)
data, _ := server.MarshalMsg(nil)
b.Put(id, data)
return nil
@@ -176,7 +143,7 @@ func (u *User) SetNick(nick, address string) {
}
func (u *User) RemoveServer(address string) {
db.Update(func(tx *bolt.Tx) error {
db.Batch(func(tx *bolt.Tx) error {
serverID := u.serverID(address)
tx.Bucket(bucketServers).Delete(serverID)
@@ -192,7 +159,7 @@ func (u *User) RemoveServer(address string) {
}
func (u *User) RemoveChannel(server, channel string) {
db.Update(func(tx *bolt.Tx) error {
db.Batch(func(tx *bolt.Tx) error {
b := tx.Bucket(bucketChannels)
id := u.channelID(server, channel)
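
Two changes run through this whole file: the encoding/json round trips are replaced by the generated MessagePack methods, and every write moves from `db.Update` to `db.Batch`. The second change matters because the IRC and WebSocket handlers above now call these storage methods from goroutines (`go h.session.user.AddServer(...)` and friends), and bolt can coalesce concurrent `Batch` calls into a single transaction. A hypothetical caller, not part of the commit, that leans on that behaviour:

```go
package example

import (
	"sync"

	"github.com/khlieng/dispatch/storage"
)

// saveServers is a hypothetical helper showing why the new `go user.AddServer(...)`
// call sites pair well with db.Batch: concurrent Batch calls can be merged by
// bolt into fewer transactions, and therefore fewer disk syncs.
func saveServers(u *storage.User, servers []storage.Server) {
	var wg sync.WaitGroup
	for _, srv := range servers {
		wg.Add(1)
		go func(s storage.Server) {
			defer wg.Done()
			u.AddServer(s) // runs inside db.Batch in storage/user.go
		}(srv)
	}
	wg.Wait()
}
```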

View File

@@ -1,7 +1,6 @@
package storage
import (
"encoding/json"
"os"
"strconv"
"strings"
@@ -11,15 +10,6 @@ import (
"github.com/khlieng/dispatch/Godeps/_workspace/src/github.com/boltdb/bolt"
)
type Message struct {
ID uint64 `json:"id"`
Server string `json:"server"`
From string `json:"from"`
To string `json:"to,omitempty"`
Content string `json:"content"`
Time int64 `json:"time"`
}
func (u *User) LogMessage(server, from, to, content string) error {
message := Message{
Server: server,
@@ -38,7 +28,7 @@ func (u *User) LogMessage(server, from, to, content string) error {
message.ID, _ = b.NextSequence()
data, err := json.Marshal(message)
data, err := message.MarshalMsg(nil)
if err != nil {
return err
}
@@ -66,7 +56,7 @@ func (u *User) GetLastMessages(server, channel string, count int) ([]Message, er
for _, v := c.Last(); count > 0 && v != nil; _, v = c.Prev() {
count--
json.Unmarshal(v, &messages[count])
messages[count].UnmarshalMsg(v)
}
return nil
@@ -93,7 +83,7 @@ func (u *User) GetMessages(server, channel string, count int, fromID uint64) ([]
for k, v := c.Prev(); count > 0 && k != nil; k, v = c.Prev() {
count--
json.Unmarshal(v, &messages[count])
messages[count].UnmarshalMsg(v)
}
return nil
@@ -131,8 +121,8 @@ func (u *User) SearchMessages(server, channel, phrase string) ([]Message, error)
bc := b.Bucket([]byte(hit.ID[:idx]))
id, _ := strconv.ParseUint(hit.ID[idx+1:], 10, 64)
var message Message
json.Unmarshal(bc.Get(idToBytes(id)), &message)
message := Message{}
message.UnmarshalMsg(bc.Get(idToBytes(id)))
messages = append(messages, message)
}

View File

@@ -102,6 +102,8 @@ func TestMessages(t *testing.T) {
assert.Len(t, messages, 5)
messages, err = user.GetMessages("irc.freenode.net", "#go-nuts", 10, 100)
assert.Equal(t, "message0", messages[0].Content)
assert.Equal(t, "message4", messages[4].Content)
assert.Nil(t, err)
assert.Len(t, messages, 5)