Use easyjson

Ken-Håvard Lieng 2018-05-25 23:54:36 +02:00
parent e7cff1686e
commit 09d57b7023
25 changed files with 6167 additions and 122 deletions

File diff suppressed because one or more lines are too long

View File

@ -74,7 +74,11 @@
"prettier:all": "prettier --write {.*,*.js,src/**/*.js,src/css/*.css}",
"test": "jest",
"test:verbose": "jest --verbose",
"test:watch": "jest --watch"
"test:watch": "jest --watch",
"gen:install": "go get -u github.com/andyleap/gencode github.com/mailru/easyjson/... github.com/SlinSo/egon/cmd/egon",
"gen:binary": "gencode go -package storage -schema ../storage/storage.schema -unsafe",
"gen:json": "easyjson -all -lower_camel_case -omit_empty ../server/json.go ../server/index_data.go",
"gen:template": "egon -s -m ../server"
},
"jest": {
"moduleNameMapper": {

View File

@ -170,12 +170,17 @@ export default createReducer(
[actions.socket.MODE](state, { server, channel, user, remove, add }) {
const u = find(state[server][channel].users, v => v.nick === user);
if (u) {
let j = remove.length;
while (j--) {
u.mode = u.mode.replace(remove[j], '');
if (remove) {
let j = remove.length;
while (j--) {
u.mode = u.mode.replace(remove[j], '');
}
}
if (add) {
u.mode += add;
}
u.mode += add;
u.renderName = getRenderName(u);
}
},

View File

@ -88,6 +88,14 @@ export default createReducer(
})
)
);
},
[actions.socket.SERVERS](state, { data }) {
if (data) {
data.forEach(({ host }) => {
state[host] = {};
});
}
}
}
);

View File

@ -10,7 +10,7 @@ export const getSearch = state => state.search;
export default createReducer(initialState, {
[actions.socket.SEARCH](state, { results }) {
state.results = results;
state.results = results || [];
},
[actions.TOGGLE_SEARCH](state) {

View File

@ -48,8 +48,6 @@ export default createReducer(
}
};
}
return state;
},
[actions.DISCONNECT](state, { server }) {

View File

@ -1,6 +1,6 @@
<%! data *indexData, cssPath, jsPath string %>
<%% import "encoding/json" %%>
<%% import "github.com/mailru/easyjson" %%>
<!DOCTYPE html>
<html lang="en">
@ -15,7 +15,7 @@
</head>
<body>
<div id="root"></div>
<script id="env" type="application/json"><% json.NewEncoder(w).Encode(data) %></script>
<script id="env" type="application/json"><% easyjson.MarshalToWriter(data, w) %></script>
<script src="/<%== jsPath %>"></script>
</body>
</html>

View File

@ -4,14 +4,14 @@
package server
import (
"io"
"encoding/json"
"github.com/mailru/easyjson"
)
func IndexTemplate(w io.Writer, data *indexData, cssPath, jsPath string) error {
io.WriteString(w, "<!DOCTYPE html><html lang=\"en\"><head><meta charset=\"UTF-8\"><meta name=\"viewport\" content=\"width=device-width, initial-scale=1\"><title>Dispatch</title><link href=\"/")
io.WriteString(w, cssPath )
io.WriteString(w, "\" rel=\"stylesheet\"><link rel=\"icon\" href=\"data:;base64,=\"></head><body><div id=\"root\"></div><script id=\"env\" type=\"application/json\">")
json.NewEncoder(w).Encode(data)
easyjson.MarshalToWriter(data, w)
io.WriteString(w, "</script><script src=\"/")
io.WriteString(w, jsPath )
io.WriteString(w, "\"></script></body></html>")

View File

@ -0,0 +1,479 @@
// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
package server
import (
json "encoding/json"
storage "github.com/khlieng/dispatch/storage"
easyjson "github.com/mailru/easyjson"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
)
// suppress unused package warning
var (
_ *json.RawMessage
_ *jlexer.Lexer
_ *jwriter.Writer
_ easyjson.Marshaler
)
func easyjson7e607aefDecodeGithubComKhliengDispatchServer(in *jlexer.Lexer, out *indexData) {
isTopLevel := in.IsStart()
if in.IsNull() {
if isTopLevel {
in.Consumed()
}
in.Skip()
return
}
in.Delim('{')
for !in.IsDelim('}') {
key := in.UnsafeString()
in.WantColon()
if in.IsNull() {
in.Skip()
in.WantComma()
continue
}
switch key {
case "defaults":
if data := in.Raw(); in.Ok() {
in.AddError((out.Defaults).UnmarshalJSON(data))
}
case "servers":
if in.IsNull() {
in.Skip()
out.Servers = nil
} else {
in.Delim('[')
if out.Servers == nil {
if !in.IsDelim(']') {
out.Servers = make([]Server, 0, 1)
} else {
out.Servers = []Server{}
}
} else {
out.Servers = (out.Servers)[:0]
}
for !in.IsDelim(']') {
var v1 Server
if data := in.Raw(); in.Ok() {
in.AddError((v1).UnmarshalJSON(data))
}
out.Servers = append(out.Servers, v1)
in.WantComma()
}
in.Delim(']')
}
case "channels":
if in.IsNull() {
in.Skip()
out.Channels = nil
} else {
in.Delim('[')
if out.Channels == nil {
if !in.IsDelim(']') {
out.Channels = make([]storage.Channel, 0, 1)
} else {
out.Channels = []storage.Channel{}
}
} else {
out.Channels = (out.Channels)[:0]
}
for !in.IsDelim(']') {
var v2 storage.Channel
easyjson7e607aefDecodeGithubComKhliengDispatchStorage(in, &v2)
out.Channels = append(out.Channels, v2)
in.WantComma()
}
in.Delim(']')
}
case "users":
if in.IsNull() {
in.Skip()
out.Users = nil
} else {
if out.Users == nil {
out.Users = new(Userlist)
}
if data := in.Raw(); in.Ok() {
in.AddError((*out.Users).UnmarshalJSON(data))
}
}
case "messages":
if in.IsNull() {
in.Skip()
out.Messages = nil
} else {
if out.Messages == nil {
out.Messages = new(Messages)
}
if data := in.Raw(); in.Ok() {
in.AddError((*out.Messages).UnmarshalJSON(data))
}
}
default:
in.SkipRecursive()
}
in.WantComma()
}
in.Delim('}')
if isTopLevel {
in.Consumed()
}
}
func easyjson7e607aefEncodeGithubComKhliengDispatchServer(out *jwriter.Writer, in indexData) {
out.RawByte('{')
first := true
_ = first
if true {
const prefix string = ",\"defaults\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Raw((in.Defaults).MarshalJSON())
}
if len(in.Servers) != 0 {
const prefix string = ",\"servers\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
{
out.RawByte('[')
for v3, v4 := range in.Servers {
if v3 > 0 {
out.RawByte(',')
}
out.Raw((v4).MarshalJSON())
}
out.RawByte(']')
}
}
if len(in.Channels) != 0 {
const prefix string = ",\"channels\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
{
out.RawByte('[')
for v5, v6 := range in.Channels {
if v5 > 0 {
out.RawByte(',')
}
easyjson7e607aefEncodeGithubComKhliengDispatchStorage(out, v6)
}
out.RawByte(']')
}
}
if in.Users != nil {
const prefix string = ",\"users\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Raw((*in.Users).MarshalJSON())
}
if in.Messages != nil {
const prefix string = ",\"messages\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Raw((*in.Messages).MarshalJSON())
}
out.RawByte('}')
}
// MarshalJSON supports json.Marshaler interface
func (v indexData) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjson7e607aefEncodeGithubComKhliengDispatchServer(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
// MarshalEasyJSON supports easyjson.Marshaler interface
func (v indexData) MarshalEasyJSON(w *jwriter.Writer) {
easyjson7e607aefEncodeGithubComKhliengDispatchServer(w, v)
}
// UnmarshalJSON supports json.Unmarshaler interface
func (v *indexData) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjson7e607aefDecodeGithubComKhliengDispatchServer(&r, v)
return r.Error()
}
// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *indexData) UnmarshalEasyJSON(l *jlexer.Lexer) {
easyjson7e607aefDecodeGithubComKhliengDispatchServer(l, v)
}
func easyjson7e607aefDecodeGithubComKhliengDispatchStorage(in *jlexer.Lexer, out *storage.Channel) {
isTopLevel := in.IsStart()
if in.IsNull() {
if isTopLevel {
in.Consumed()
}
in.Skip()
return
}
in.Delim('{')
for !in.IsDelim('}') {
key := in.UnsafeString()
in.WantColon()
if in.IsNull() {
in.Skip()
in.WantComma()
continue
}
switch key {
case "server":
out.Server = string(in.String())
case "name":
out.Name = string(in.String())
case "topic":
out.Topic = string(in.String())
default:
in.SkipRecursive()
}
in.WantComma()
}
in.Delim('}')
if isTopLevel {
in.Consumed()
}
}
func easyjson7e607aefEncodeGithubComKhliengDispatchStorage(out *jwriter.Writer, in storage.Channel) {
out.RawByte('{')
first := true
_ = first
if in.Server != "" {
const prefix string = ",\"server\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(in.Server))
}
if in.Name != "" {
const prefix string = ",\"name\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(in.Name))
}
if in.Topic != "" {
const prefix string = ",\"topic\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(in.Topic))
}
out.RawByte('}')
}
func easyjson7e607aefDecodeGithubComKhliengDispatchServer1(in *jlexer.Lexer, out *connectDefaults) {
isTopLevel := in.IsStart()
if in.IsNull() {
if isTopLevel {
in.Consumed()
}
in.Skip()
return
}
in.Delim('{')
for !in.IsDelim('}') {
key := in.UnsafeString()
in.WantColon()
if in.IsNull() {
in.Skip()
in.WantComma()
continue
}
switch key {
case "name":
out.Name = string(in.String())
case "host":
out.Host = string(in.String())
case "port":
out.Port = int(in.Int())
case "channels":
if in.IsNull() {
in.Skip()
out.Channels = nil
} else {
in.Delim('[')
if out.Channels == nil {
if !in.IsDelim(']') {
out.Channels = make([]string, 0, 4)
} else {
out.Channels = []string{}
}
} else {
out.Channels = (out.Channels)[:0]
}
for !in.IsDelim(']') {
var v7 string
v7 = string(in.String())
out.Channels = append(out.Channels, v7)
in.WantComma()
}
in.Delim(']')
}
case "password":
out.Password = bool(in.Bool())
case "ssl":
out.SSL = bool(in.Bool())
case "readonly":
out.ReadOnly = bool(in.Bool())
case "showDetails":
out.ShowDetails = bool(in.Bool())
default:
in.SkipRecursive()
}
in.WantComma()
}
in.Delim('}')
if isTopLevel {
in.Consumed()
}
}
func easyjson7e607aefEncodeGithubComKhliengDispatchServer1(out *jwriter.Writer, in connectDefaults) {
out.RawByte('{')
first := true
_ = first
if in.Name != "" {
const prefix string = ",\"name\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(in.Name))
}
if in.Host != "" {
const prefix string = ",\"host\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(in.Host))
}
if in.Port != 0 {
const prefix string = ",\"port\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Int(int(in.Port))
}
if len(in.Channels) != 0 {
const prefix string = ",\"channels\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
{
out.RawByte('[')
for v8, v9 := range in.Channels {
if v8 > 0 {
out.RawByte(',')
}
out.String(string(v9))
}
out.RawByte(']')
}
}
if in.Password {
const prefix string = ",\"password\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Bool(bool(in.Password))
}
if in.SSL {
const prefix string = ",\"ssl\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Bool(bool(in.SSL))
}
if in.ReadOnly {
const prefix string = ",\"readonly\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Bool(bool(in.ReadOnly))
}
if in.ShowDetails {
const prefix string = ",\"showDetails\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Bool(bool(in.ShowDetails))
}
out.RawByte('}')
}
// MarshalJSON supports json.Marshaler interface
func (v connectDefaults) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjson7e607aefEncodeGithubComKhliengDispatchServer1(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
// MarshalEasyJSON supports easyjson.Marshaler interface
func (v connectDefaults) MarshalEasyJSON(w *jwriter.Writer) {
easyjson7e607aefEncodeGithubComKhliengDispatchServer1(w, v)
}
// UnmarshalJSON supports json.Unmarshaler interface
func (v *connectDefaults) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjson7e607aefDecodeGithubComKhliengDispatchServer1(&r, v)
return r.Error()
}
// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *connectDefaults) UnmarshalEasyJSON(l *jlexer.Lexer) {
easyjson7e607aefDecodeGithubComKhliengDispatchServer1(l, v)
}

View File

@ -2,42 +2,43 @@ package server
import (
"crypto/x509"
"encoding/json"
"github.com/mailru/easyjson"
"github.com/khlieng/dispatch/irc"
"github.com/khlieng/dispatch/storage"
)
type WSRequest struct {
Type string `json:"type"`
Data json.RawMessage `json:"data"`
Type string
Data easyjson.RawMessage
}
type WSResponse struct {
Type string `json:"type"`
Data interface{} `json:"data"`
Type string
Data interface{}
}
type Server struct {
storage.Server
Status ConnectionUpdate `json:"status"`
Status ConnectionUpdate
}
type ServerName struct {
Server string `json:"server"`
Name string `json:"name"`
Server string
Name string
}
type ReconnectSettings struct {
Server string `json:"server"`
SkipVerify bool `json:"skipVerify"`
Server string
SkipVerify bool
}
type ConnectionUpdate struct {
Server string `json:"server"`
Connected bool `json:"connected"`
Error string `json:"error,omitempty"`
ErrorType string `json:"errorType,omitempty"`
Server string
Connected bool
Error string
ErrorType string
}
func newConnectionUpdate(server string, state irc.ConnectionState) ConnectionUpdate {
@ -55,139 +56,139 @@ func newConnectionUpdate(server string, state irc.ConnectionState) ConnectionUpd
}
type Nick struct {
Server string `json:"server"`
Old string `json:"oldNick"`
New string `json:"newNick"`
Server string
Old string `json:"oldNick,omitempty"`
New string `json:"newNick,omitempty"`
}
type NickFail struct {
Server string `json:"server"`
Server string
}
type Join struct {
Server string `json:"server"`
User string `json:"user"`
Channels []string `json:"channels"`
Server string
User string
Channels []string
}
type Part struct {
Server string `json:"server"`
User string `json:"user"`
Channel string `json:"channel,omitempty"`
Channels []string `json:"channels,omitempty"`
Reason string `json:"reason,omitempty"`
Server string
User string
Channel string
Channels []string
Reason string
}
type Mode struct {
Server string `json:"server"`
Channel string `json:"channel"`
User string `json:"user"`
Add string `json:"add"`
Remove string `json:"remove"`
Server string
Channel string
User string
Add string
Remove string
}
type Quit struct {
Server string `json:"server"`
User string `json:"user"`
Reason string `json:"reason,omitempty"`
Server string
User string
Reason string
}
type Message struct {
ID string `json:"id,omitempty"`
Server string `json:"server,omitempty"`
From string `json:"from,omitempty"`
To string `json:"to,omitempty"`
Content string `json:"content"`
Type string `json:"type,omitempty"`
ID string
Server string
From string
To string
Content string
Type string
}
type Messages struct {
Server string `json:"server"`
To string `json:"to"`
Messages []storage.Message `json:"messages"`
Prepend bool `json:"prepend,omitempty"`
Next string `json:"next,omitempty"`
Server string
To string
Messages []storage.Message
Prepend bool
Next string
}
type Topic struct {
Server string `json:"server"`
Channel string `json:"channel"`
Topic string `json:"topic,omitempty"`
Nick string `json:"nick,omitempty"`
Server string
Channel string
Topic string
Nick string
}
type Userlist struct {
Server string `json:"server"`
Channel string `json:"channel"`
Users []string `json:"users"`
Server string
Channel string
Users []string
}
type MOTD struct {
Server string `json:"server"`
Title string `json:"title"`
Content []string `json:"content"`
Server string
Title string
Content []string
}
type Invite struct {
Server string `json:"server"`
Channel string `json:"channel"`
User string `json:"user"`
Server string
Channel string
User string
}
type Kick struct {
Server string `json:"server"`
Channel string `json:"channel"`
User string `json:"user"`
Server string
Channel string
User string
}
type Whois struct {
Server string `json:"server"`
User string `json:"user"`
Server string
User string
}
type WhoisReply struct {
Nick string `json:"nick"`
Username string `json:"username"`
Host string `json:"host"`
Realname string `json:"realname"`
Server string `json:"server"`
Channels []string `json:"channels"`
Nick string
Username string
Host string
Realname string
Server string
Channels []string
}
type Away struct {
Server string `json:"server"`
Message string `json:"message"`
Server string
Message string
}
type Raw struct {
Server string `json:"server"`
Message string `json:"message"`
Server string
Message string
}
type SearchRequest struct {
Server string `json:"server"`
Channel string `json:"channel"`
Phrase string `json:"phrase"`
Server string
Channel string
Phrase string
}
type SearchResult struct {
Server string `json:"server"`
Channel string `json:"channel"`
Results []storage.Message `json:"results"`
Server string
Channel string
Results []storage.Message
}
type ClientCert struct {
Cert []byte `json:"cert"`
Key []byte `json:"key"`
Cert []byte
Key []byte
}
type FetchMessages struct {
Server string `json:"server"`
Channel string `json:"channel"`
Next string `json:"next"`
Server string
Channel string
Next string
}
type Error struct {
Server string `json:"server"`
Message string `json:"message"`
Server string
Message string
}

3095
server/json_easyjson.go Normal file

File diff suppressed because it is too large

View File

@ -4,6 +4,7 @@ import (
"time"
"github.com/gorilla/websocket"
"github.com/mailru/easyjson"
)
type wsConn struct {
@ -31,10 +32,10 @@ func (c *wsConn) send() {
return
}
err = c.conn.WriteJSON(res)
err = c.writeJSON(res)
case <-ping:
err = c.conn.WriteJSON(WSResponse{Type: "ping"})
err = c.writeJSON(WSResponse{Type: "ping"})
}
if err != nil {
@ -47,7 +48,7 @@ func (c *wsConn) recv() {
var req WSRequest
for {
err := c.conn.ReadJSON(&req)
err := c.readJSON(&req)
if err != nil {
close(c.in)
return
@ -61,3 +62,25 @@ func (c *wsConn) close() {
close(c.out)
c.conn.Close()
}
func (c *wsConn) readJSON(v easyjson.Unmarshaler) error {
_, r, err := c.conn.NextReader()
if err != nil {
return err
}
return easyjson.UnmarshalFromReader(r, v)
}
func (c *wsConn) writeJSON(v easyjson.Marshaler) error {
w, err := c.conn.NextWriter(websocket.TextMessage)
if err != nil {
return err
}
_, err1 := easyjson.MarshalToWriter(v, w)
err2 := w.Close()
if err1 != nil {
return err1
}
return err2
}

View File

@ -1,7 +1,6 @@
package server
import (
"encoding/json"
"log"
"net/http"
"strings"
@ -82,7 +81,7 @@ func (h *wsHandler) init(r *http.Request) {
func (h *wsHandler) connect(b []byte) {
var data Server
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
if _, ok := h.session.getIRC(data.Host); !ok {
log.Println(h.addr, "[IRC] Add server", data.Host)
@ -97,7 +96,7 @@ func (h *wsHandler) connect(b []byte) {
func (h *wsHandler) reconnect(b []byte) {
var data ReconnectSettings
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
if i, ok := h.session.getIRC(data.Server); ok && !i.Connected() {
if i.TLS {
@ -109,7 +108,7 @@ func (h *wsHandler) reconnect(b []byte) {
func (h *wsHandler) join(b []byte) {
var data Join
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
if i, ok := h.session.getIRC(data.Server); ok {
i.Join(data.Channels...)
@ -118,7 +117,7 @@ func (h *wsHandler) join(b []byte) {
func (h *wsHandler) part(b []byte) {
var data Part
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
if i, ok := h.session.getIRC(data.Server); ok {
i.Part(data.Channels...)
@ -127,7 +126,7 @@ func (h *wsHandler) part(b []byte) {
func (h *wsHandler) quit(b []byte) {
var data Quit
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
log.Println(h.addr, "[IRC] Remove server", data.Server)
if i, ok := h.session.getIRC(data.Server); ok {
@ -140,7 +139,7 @@ func (h *wsHandler) quit(b []byte) {
func (h *wsHandler) message(b []byte) {
var data Message
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
if i, ok := h.session.getIRC(data.Server); ok {
i.Privmsg(data.To, data.Content)
@ -152,7 +151,7 @@ func (h *wsHandler) message(b []byte) {
func (h *wsHandler) nick(b []byte) {
var data Nick
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
if i, ok := h.session.getIRC(data.Server); ok {
i.Nick(data.New)
@ -161,7 +160,7 @@ func (h *wsHandler) nick(b []byte) {
func (h *wsHandler) topic(b []byte) {
var data Topic
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
if i, ok := h.session.getIRC(data.Server); ok {
i.Topic(data.Channel, data.Topic)
@ -170,7 +169,7 @@ func (h *wsHandler) topic(b []byte) {
func (h *wsHandler) invite(b []byte) {
var data Invite
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
if i, ok := h.session.getIRC(data.Server); ok {
i.Invite(data.User, data.Channel)
@ -179,7 +178,7 @@ func (h *wsHandler) invite(b []byte) {
func (h *wsHandler) kick(b []byte) {
var data Invite
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
if i, ok := h.session.getIRC(data.Server); ok {
i.Kick(data.Channel, data.User)
@ -188,7 +187,7 @@ func (h *wsHandler) kick(b []byte) {
func (h *wsHandler) whois(b []byte) {
var data Whois
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
if i, ok := h.session.getIRC(data.Server); ok {
i.Whois(data.User)
@ -197,7 +196,7 @@ func (h *wsHandler) whois(b []byte) {
func (h *wsHandler) away(b []byte) {
var data Away
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
if i, ok := h.session.getIRC(data.Server); ok {
i.Away(data.Message)
@ -206,7 +205,7 @@ func (h *wsHandler) away(b []byte) {
func (h *wsHandler) raw(b []byte) {
var data Raw
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
if i, ok := h.session.getIRC(data.Server); ok {
i.Write(data.Message)
@ -216,7 +215,7 @@ func (h *wsHandler) raw(b []byte) {
func (h *wsHandler) search(b []byte) {
go func() {
var data SearchRequest
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
results, err := h.session.user.SearchMessages(data.Server, data.Channel, data.Phrase)
if err != nil {
@ -234,7 +233,7 @@ func (h *wsHandler) search(b []byte) {
func (h *wsHandler) cert(b []byte) {
var data ClientCert
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
err := h.session.user.SetCertificate(data.Cert, data.Key)
if err != nil {
@ -247,14 +246,14 @@ func (h *wsHandler) cert(b []byte) {
func (h *wsHandler) fetchMessages(b []byte) {
var data FetchMessages
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
h.session.sendMessages(data.Server, data.Channel, 200, data.Next)
}
func (h *wsHandler) setServerName(b []byte) {
var data ServerName
json.Unmarshal(b, &data)
data.UnmarshalJSON(b)
if isValidServerName(data.Name) {
h.session.user.SetServerName(data.Name, data.Server)

7
vendor/github.com/mailru/easyjson/LICENSE generated vendored Normal file
View File

@ -0,0 +1,7 @@
Copyright (c) 2016 Mail.Ru Group
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

60
vendor/github.com/mailru/easyjson/Makefile generated vendored Normal file
View File

@ -0,0 +1,60 @@
PKG=github.com/mailru/easyjson
GOPATH:=$(PWD)/.root:$(GOPATH)
export GOPATH
all: test
.root/src/$(PKG):
mkdir -p $@
for i in $$PWD/* ; do ln -s $$i $@/`basename $$i` ; done
root: .root/src/$(PKG)
clean:
rm -rf .root
rm -rf tests/*_easyjson.go
build:
go build -i -o .root/bin/easyjson $(PKG)/easyjson
generate: root build
.root/bin/easyjson -stubs \
.root/src/$(PKG)/tests/snake.go \
.root/src/$(PKG)/tests/data.go \
.root/src/$(PKG)/tests/omitempty.go \
.root/src/$(PKG)/tests/nothing.go \
.root/src/$(PKG)/tests/named_type.go \
.root/src/$(PKG)/tests/custom_map_key_type.go \
.root/src/$(PKG)/tests/embedded_type.go
.root/bin/easyjson -all .root/src/$(PKG)/tests/data.go
.root/bin/easyjson -all .root/src/$(PKG)/tests/nothing.go
.root/bin/easyjson -all .root/src/$(PKG)/tests/errors.go
.root/bin/easyjson -snake_case .root/src/$(PKG)/tests/snake.go
.root/bin/easyjson -omit_empty .root/src/$(PKG)/tests/omitempty.go
.root/bin/easyjson -build_tags=use_easyjson .root/src/$(PKG)/benchmark/data.go
.root/bin/easyjson .root/src/$(PKG)/tests/nested_easy.go
.root/bin/easyjson .root/src/$(PKG)/tests/named_type.go
.root/bin/easyjson .root/src/$(PKG)/tests/custom_map_key_type.go
.root/bin/easyjson .root/src/$(PKG)/tests/embedded_type.go
test: generate root
go test \
$(PKG)/tests \
$(PKG)/jlexer \
$(PKG)/gen \
$(PKG)/buffer
go test -benchmem -tags use_easyjson -bench . $(PKG)/benchmark
golint -set_exit_status .root/src/$(PKG)/tests/*_easyjson.go
bench-other: generate root
@go test -benchmem -bench . $(PKG)/benchmark
@go test -benchmem -tags use_ffjson -bench . $(PKG)/benchmark
@go test -benchmem -tags use_jsoniter -bench . $(PKG)/benchmark
@go test -benchmem -tags use_codec -bench . $(PKG)/benchmark
bench-python:
benchmark/ujson.sh
.PHONY: root clean generate test build

331
vendor/github.com/mailru/easyjson/README.md generated vendored Normal file
View File

@ -0,0 +1,331 @@
# easyjson [![Build Status](https://travis-ci.org/mailru/easyjson.svg?branch=master)](https://travis-ci.org/mailru/easyjson) [![Go Report Card](https://goreportcard.com/badge/github.com/mailru/easyjson)](https://goreportcard.com/report/github.com/mailru/easyjson)
Package easyjson provides a fast and easy way to marshal/unmarshal Go structs
to/from JSON without the use of reflection. In performance tests, easyjson
outperforms the standard `encoding/json` package by a factor of 4-5x, and other
JSON encoding packages by a factor of 2-3x.
easyjson aims to keep generated Go code simple enough so that it can be easily
optimized or fixed. Another goal is to provide users with the ability to
customize the generated code by providing options not available with the
standard `encoding/json` package, such as generating "snake_case" names or
enabling `omitempty` behavior by default.
## Usage
```sh
# install
go get -u github.com/mailru/easyjson/...
# run
easyjson -all <file>.go
```
The above will generate `<file>_easyjson.go` containing the appropriate marshaler and
unmarshaler funcs for all structs contained in `<file>.go`.
Please note that easyjson requires a full Go build environment and the `GOPATH`
environment variable to be set. This is because easyjson code generation
invokes `go run` on a temporary file (an approach to code generation borrowed
from [ffjson](https://github.com/pquerna/ffjson)).
## Options
```txt
Usage of easyjson:
-all
generate marshaler/unmarshalers for all structs in a file
-build_tags string
build tags to add to generated file
-leave_temps
do not delete temporary files
-no_std_marshalers
don't generate MarshalJSON/UnmarshalJSON funcs
-noformat
do not run 'gofmt -w' on output file
-omit_empty
omit empty fields by default
-output_filename string
specify the filename of the output
-pkg
process the whole package instead of just the given file
-snake_case
use snake_case names instead of CamelCase by default
-lower_camel_case
use lowerCamelCase instead of CamelCase by default
-stubs
only generate stubs for marshaler/unmarshaler funcs
```
Using `-all` will generate marshalers/unmarshalers for all Go structs in the
file. If `-all` is not provided, then only those structs whose preceding
comment starts with `easyjson:json` will have marshalers/unmarshalers
generated. For example:
```go
//easyjson:json
type A struct {}
```
Additional option notes:
* `-snake_case` tells easyjson to generate snake\_case field names by default
(unless overridden by a field tag; see the tag-precedence sketch after these notes). The CamelCase to snake\_case conversion
algorithm should work in most cases (i.e., HTTPVersion will be converted to
"http_version").
* `-build_tags` will add the specified build tags to generated Go sources.
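As a concrete illustration of tag precedence, here is a sketch mirroring the `Nick` struct from this repository's `server/json.go` diff above and the `-lower_camel_case -omit_empty` flags its `gen:json` script passes to easyjson:

```go
//easyjson:json
type Nick struct {
	Server string                            // -lower_camel_case derives the key "server"
	Old    string `json:"oldNick,omitempty"` // an explicit tag overrides the derived name "old"
	New    string `json:"newNick,omitempty"`
}
```

This is why most explicit tags could be dropped in this commit: the flag-derived lowerCamelCase names already match the old tags, and only fields whose JSON key differs from the field name keep a tag.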
## Generated Marshaler/Unmarshaler Funcs
For Go struct types, easyjson generates the funcs `MarshalEasyJSON` /
`UnmarshalEasyJSON` for marshaling/unmarshaling JSON. In turn, these satisfy
the `easyjson.Marshaler` and `easyjson.Unmarshaler` interfaces and when used in
conjunction with `easyjson.Marshal` / `easyjson.Unmarshal` avoid unnecessary
reflection / type assertions during marshaling/unmarshaling to/from JSON for Go
structs.
easyjson also generates `MarshalJSON` and `UnmarshalJSON` funcs for Go struct
types compatible with the standard `json.Marshaler` and `json.Unmarshaler`
interfaces. Please be aware that using the standard `json.Marshal` /
`json.Unmarshal` for marshaling/unmarshaling will incur a significant
performance penalty when compared to using `easyjson.Marshal` /
`easyjson.Unmarshal`.
Additionally, easyjson exposes utility funcs that use the `MarshalEasyJSON` and
`UnmarshalEasyJSON` for marshaling/unmarshaling to and from standard readers
and writers. For example, easyjson provides `easyjson.MarshalToHTTPResponseWriter`
which marshals to the standard `http.ResponseWriter`. Please see the [GoDoc
listing](https://godoc.org/github.com/mailru/easyjson) for the full listing of
utility funcs that are available.
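A minimal usage sketch (it assumes easyjson has already been run on the file so the generated `*_easyjson.go` methods exist; the type `A` is illustrative):

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson"
)

//easyjson:json
type A struct {
	Name string
}

func main() {
	// easyjson.Marshal dispatches to the generated MarshalEasyJSON,
	// avoiding the reflection used by encoding/json.
	data, err := easyjson.Marshal(A{Name: "example"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))

	var a A
	// easyjson.Unmarshal likewise calls the generated UnmarshalEasyJSON.
	if err := easyjson.Unmarshal(data, &a); err != nil {
		panic(err)
	}
	fmt.Println(a.Name)
}
```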
## Controlling easyjson Marshaling and Unmarshaling Behavior
Go types can provide their own `MarshalEasyJSON` and `UnmarshalEasyJSON` funcs
that satisfy the `easyjson.Marshaler` / `easyjson.Unmarshaler` interfaces.
These will be used by `easyjson.Marshal` and `easyjson.Unmarshal` when defined
for a Go type.
Go types can also satisify the `easyjson.Optional` interface, which allows the
type to define its own `omitempty` logic.
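A hedged sketch of such a type (the `jwriter`/`jlexer` calls are assumed from the vendored packages in this commit; the `Timestamp` type itself is illustrative):

```go
package model

import (
	"github.com/mailru/easyjson/jlexer"
	"github.com/mailru/easyjson/jwriter"
)

// Timestamp marshals as a bare JSON number instead of an object.
type Timestamp struct {
	Unix int64
}

// MarshalEasyJSON satisfies easyjson.Marshaler.
func (t Timestamp) MarshalEasyJSON(w *jwriter.Writer) {
	w.Int64(t.Unix)
}

// UnmarshalEasyJSON satisfies easyjson.Unmarshaler.
func (t *Timestamp) UnmarshalEasyJSON(l *jlexer.Lexer) {
	t.Unix = l.Int64()
}

// IsDefined satisfies easyjson.Optional, so a zero Timestamp is treated
// as empty by omitempty handling.
func (t Timestamp) IsDefined() bool {
	return t.Unix != 0
}
```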
## Type Wrappers
easyjson provides additional type wrappers defined in the `easyjson/opt`
package. These wrap the standard Go primitives and in turn satisfy the
easyjson interfaces.
The `easyjson/opt` type wrappers are useful when you need to distinguish between
a missing value and a zero value, or when you need to specify a default value. Type
wrappers allow easyjson to avoid additional pointers and heap allocations and
can significantly increase performance when used properly.
## Memory Pooling
easyjson uses a buffer pool that allocates data in increasing chunks from 128
to 32768 bytes. Chunks of 512 bytes and larger will be reused with the help of
`sync.Pool`. The maximum size of a chunk is bounded to reduce redundant memory
allocation and to allow larger reusable buffers.
easyjson's custom allocation buffer pool is defined in the `easyjson/buffer`
package, and the default pool behavior can be modified (if necessary)
through a call to `buffer.Init()` prior to any marshaling or unmarshaling.
Please see the [GoDoc listing](https://godoc.org/github.com/mailru/easyjson/buffer)
for more information.
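A minimal sketch of such a call, using the `PoolConfig` fields from the vendored `buffer` package (the sizes are purely illustrative):

```go
package main

import "github.com/mailru/easyjson/buffer"

func init() {
	// Must run before any marshaling or unmarshaling takes place.
	buffer.Init(buffer.PoolConfig{
		StartSize:  256,   // smallest chunk that gets allocated
		PooledSize: 1024,  // smallest chunk that is returned to the pool for reuse
		MaxSize:    65536, // largest chunk that gets allocated
	})
}
```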
## Issues, Notes, and Limitations
* easyjson is still early in its development. As such, there are likely to be
bugs and missing features when compared to `encoding/json`. In the case of a
missing feature or bug, please create a GitHub issue. Pull requests are
welcome!
* Unlike `encoding/json`, object keys are case-sensitive. Case-insensitive
matching is not currently provided due to the significant performance hit
when doing case-insensitive key matching. In the future, case-insensitive
object key matching may be provided via an option to the generator.
* easyjson makes use of `unsafe`, which simplifies the code and
provides significant performance benefits by allowing no-copy
conversion from `[]byte` to `string`. That said, `unsafe` is used
only when unmarshaling and parsing JSON, and any `unsafe` operations
/ memory allocations done will be safely deallocated by
easyjson. Set the build tag `easyjson_nounsafe` to compile it
without `unsafe`.
* easyjson is compatible with Google App Engine. The `appengine` build
tag (set by App Engine's environment) will automatically disable the
use of `unsafe`, which is not allowed in App Engine's Standard
Environment. Note that the use with App Engine is still experimental.
* Floats are formatted using the default precision from Go's `strconv` package.
As such, easyjson will not correctly handle high precision floats when
marshaling/unmarshaling JSON. Note, however, that there are very few/limited
uses where this behavior is not sufficient for general use. That said, a
different package may be needed if precise marshaling/unmarshaling of high
precision floats to/from JSON is required.
* While unmarshaling, the JSON parser does the minimal amount of work needed to
skip over unmatching parens, and as such full validation is not done for the
entire JSON value being unmarshaled/parsed.
* Currently there is no true streaming support for encoding/decoding as
typically for many uses/protocols the final, marshaled length of the JSON
needs to be known prior to sending the data. Currently this is not possible
with easyjson's architecture.
## Benchmarks
Most benchmarks were done using the
[13kB example JSON](https://dev.twitter.com/rest/reference/get/search/tweets)
(9k after eliminating whitespace). This example is similar to real-world data,
is well-structured, and contains a healthy variety of different types, making
it ideal for JSON serialization benchmarks.
Note:
* For small request benchmarks, an 80 byte portion of the above example was
used.
* For large request marshaling benchmarks, a struct containing 50 regular
samples was used, making a ~500kB output JSON.
* Benchmarks show the results of easyjson's default behavior,
which makes use of `unsafe`.
Benchmarks are available in the repository and can be run by invoking `make`.
### easyjson vs. encoding/json
easyjson is roughly 5-6 times faster than the standard `encoding/json` for
unmarshaling, and 3-4 times faster for non-concurrent marshaling. Concurrent
marshaling is 6-7x faster if marshaling to a writer.
### easyjson vs. ffjson
easyjson uses the same approach for JSON marshaling as
[ffjson](https://github.com/pquerna/ffjson), but takes a significantly
different approach to lexing and parsing JSON during unmarshaling. This means
easyjson is roughly 2-3x faster for unmarshaling and 1.5-2x faster for
non-concurrent marshaling.
As of this writing, `ffjson` seems to have issues when used concurrently:
specifically, large request pooling hurts `ffjson`'s performance and causes
scalability issues. These issues with `ffjson` can likely be fixed, but as of
writing remain outstanding/known issues with `ffjson`.
easyjson and `ffjson` have similar performance for small requests; however,
easyjson outperforms `ffjson` by roughly 2-5x for large requests when
used with a writer.
### easyjson vs. go/codec
[go/codec](https://github.com/ugorji/go) provides
compile-time helpers for JSON generation. In this case, helpers do not work
like marshalers as they are encoding-independent.
easyjson is generally 2x faster than `go/codec` for non-concurrent benchmarks
and about 3x faster for concurrent encoding (without marshaling to a writer).
In an attempt to measure marshaling performance of `go/codec` (as opposed to
allocations/memcpy/writer interface invocations), a benchmark was done with
resetting length of a byte slice rather than resetting the whole slice to nil.
However, the optimization in this exact form may not be applicable in practice,
since the memory is not freed between marshaling operations.
### easyjson vs 'ujson' python module
[ujson](https://github.com/esnme/ultrajson) uses C code for parsing, so it
is interesting to see how plain Go compares to that. It is important to note
that the resulting object is slower to access in Python, since the library
parses JSON objects into dictionaries.
easyjson is slightly faster for unmarshaling and 2-3x faster than `ujson` for
marshaling.
### Benchmark Results
`ffjson` results are from February 4th, 2016, using the latest `ffjson` and go1.6.
`go/codec` results are from March 4th, 2016, using the latest `go/codec` and go1.6.
#### Unmarshaling
| lib | json size | MB/s | allocs/op | B/op |
|:---------|:----------|-----:|----------:|------:|
| standard | regular | 22 | 218 | 10229 |
| standard | small | 9.7 | 14 | 720 |
| | | | | |
| easyjson | regular | 125 | 128 | 9794 |
| easyjson | small | 67 | 3 | 128 |
| | | | | |
| ffjson | regular | 66 | 141 | 9985 |
| ffjson | small | 17.6 | 10 | 488 |
| | | | | |
| codec | regular | 55 | 434 | 19299 |
| codec | small | 29 | 7 | 336 |
| | | | | |
| ujson | regular | 103 | N/A | N/A |
#### Marshaling, one goroutine.
| lib | json size | MB/s | allocs/op | B/op |
|:----------|:----------|-----:|----------:|------:|
| standard | regular | 75 | 9 | 23256 |
| standard | small | 32 | 3 | 328 |
| standard | large | 80 | 17 | 1.2M |
| | | | | |
| easyjson | regular | 213 | 9 | 10260 |
| easyjson* | regular | 263 | 8 | 742 |
| easyjson | small | 125 | 1 | 128 |
| easyjson | large | 212 | 33 | 490k |
| easyjson* | large | 262 | 25 | 2879 |
| | | | | |
| ffjson | regular | 122 | 153 | 21340 |
| ffjson** | regular | 146 | 152 | 4897 |
| ffjson | small | 36 | 5 | 384 |
| ffjson** | small | 64 | 4 | 128 |
| ffjson | large | 134 | 7317 | 818k |
| ffjson** | large | 125 | 7320 | 827k |
| | | | | |
| codec | regular | 80 | 17 | 33601 |
| codec*** | regular | 108 | 9 | 1153 |
| codec | small | 42 | 3 | 304 |
| codec*** | small | 56 | 1 | 48 |
| codec | large | 73 | 483 | 2.5M |
| codec*** | large | 103 | 451 | 66007 |
| | | | | |
| ujson | regular | 92 | N/A | N/A |
\* marshaling to a writer,
\*\* using `ffjson.Pool()`,
\*\*\* reusing output slice instead of resetting it to nil
#### Marshaling, concurrent.
| lib | json size | MB/s | allocs/op | B/op |
|:----------|:----------|-----:|----------:|------:|
| standard | regular | 252 | 9 | 23257 |
| standard | small | 124 | 3 | 328 |
| standard | large | 289 | 17 | 1.2M |
| | | | | |
| easyjson | regular | 792 | 9 | 10597 |
| easyjson* | regular | 1748 | 8 | 779 |
| easyjson | small | 333 | 1 | 128 |
| easyjson | large | 718 | 36 | 548k |
| easyjson* | large | 2134 | 25 | 4957 |
| | | | | |
| ffjson | regular | 301 | 153 | 21629 |
| ffjson** | regular | 707 | 152 | 5148 |
| ffjson | small | 62 | 5 | 384 |
| ffjson** | small | 282 | 4 | 128 |
| ffjson | large | 438 | 7330 | 1.0M |
| ffjson** | large | 131 | 7319 | 820k |
| | | | | |
| codec | regular | 183 | 17 | 33603 |
| codec*** | regular | 671 | 9 | 1157 |
| codec | small | 147 | 3 | 304 |
| codec*** | small | 299 | 1 | 48 |
| codec | large | 190 | 483 | 2.5M |
| codec*** | large | 752 | 451 | 77574 |
\* marshaling to a writer,
\*\* using `ffjson.Pool()`,
\*\*\* reusing output slice instead of resetting it to nil

270
vendor/github.com/mailru/easyjson/buffer/pool.go generated vendored Normal file
View File

@ -0,0 +1,270 @@
// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to
// reduce copying and to allow reuse of individual chunks.
package buffer
import (
"io"
"sync"
)
// PoolConfig contains configuration for the allocation and reuse strategy.
type PoolConfig struct {
StartSize int // Minimum chunk size that is allocated.
PooledSize int // Minimum chunk size that is reused; reusing chunks that are too small results in overhead.
MaxSize int // Maximum chunk size that will be allocated.
}
var config = PoolConfig{
StartSize: 128,
PooledSize: 512,
MaxSize: 32768,
}
// Reuse pool: chunk size -> pool.
var buffers = map[int]*sync.Pool{}
func initBuffers() {
for l := config.PooledSize; l <= config.MaxSize; l *= 2 {
buffers[l] = new(sync.Pool)
}
}
func init() {
initBuffers()
}
// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done.
func Init(cfg PoolConfig) {
config = cfg
initBuffers()
}
// putBuf puts a chunk to reuse pool if it can be reused.
func putBuf(buf []byte) {
size := cap(buf)
if size < config.PooledSize {
return
}
if c := buffers[size]; c != nil {
c.Put(buf[:0])
}
}
// getBuf gets a chunk from reuse pool or creates a new one if reuse failed.
func getBuf(size int) []byte {
if size < config.PooledSize {
return make([]byte, 0, size)
}
if c := buffers[size]; c != nil {
v := c.Get()
if v != nil {
return v.([]byte)
}
}
return make([]byte, 0, size)
}
// Buffer is a buffer optimized for serialization without extra copying.
type Buffer struct {
// Buf is the current chunk that can be used for serialization.
Buf []byte
toPool []byte
bufs [][]byte
}
// EnsureSpace makes sure that the current chunk contains at least s free bytes,
// possibly creating a new chunk.
func (b *Buffer) EnsureSpace(s int) {
if cap(b.Buf)-len(b.Buf) >= s {
return
}
l := len(b.Buf)
if l > 0 {
if cap(b.toPool) != cap(b.Buf) {
// Chunk was reallocated, toPool can be pooled.
putBuf(b.toPool)
}
if cap(b.bufs) == 0 {
b.bufs = make([][]byte, 0, 8)
}
b.bufs = append(b.bufs, b.Buf)
l = cap(b.toPool) * 2
} else {
l = config.StartSize
}
if l > config.MaxSize {
l = config.MaxSize
}
b.Buf = getBuf(l)
b.toPool = b.Buf
}
// AppendByte appends a single byte to buffer.
func (b *Buffer) AppendByte(data byte) {
if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
b.EnsureSpace(1)
}
b.Buf = append(b.Buf, data)
}
// AppendBytes appends a byte slice to buffer.
func (b *Buffer) AppendBytes(data []byte) {
for len(data) > 0 {
if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
b.EnsureSpace(1)
}
sz := cap(b.Buf) - len(b.Buf)
if sz > len(data) {
sz = len(data)
}
b.Buf = append(b.Buf, data[:sz]...)
data = data[sz:]
}
}
// AppendString appends a string to the buffer.
func (b *Buffer) AppendString(data string) {
for len(data) > 0 {
if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
b.EnsureSpace(1)
}
sz := cap(b.Buf) - len(b.Buf)
if sz > len(data) {
sz = len(data)
}
b.Buf = append(b.Buf, data[:sz]...)
data = data[sz:]
}
}
// Size computes the size of a buffer by adding sizes of every chunk.
func (b *Buffer) Size() int {
size := len(b.Buf)
for _, buf := range b.bufs {
size += len(buf)
}
return size
}
// DumpTo outputs the contents of a buffer to a writer and resets the buffer.
func (b *Buffer) DumpTo(w io.Writer) (written int, err error) {
var n int
for _, buf := range b.bufs {
if err == nil {
n, err = w.Write(buf)
written += n
}
putBuf(buf)
}
if err == nil {
n, err = w.Write(b.Buf)
written += n
}
putBuf(b.toPool)
b.bufs = nil
b.Buf = nil
b.toPool = nil
return
}
// BuildBytes creates a single byte slice with all the contents of the buffer. Data is
// copied if it does not fit in a single chunk. You can optionally provide one byte
// slice as argument that it will try to reuse.
func (b *Buffer) BuildBytes(reuse ...[]byte) []byte {
if len(b.bufs) == 0 {
ret := b.Buf
b.toPool = nil
b.Buf = nil
return ret
}
var ret []byte
size := b.Size()
// If we got a buffer as argument and it is big enough, reuse it.
if len(reuse) == 1 && cap(reuse[0]) >= size {
ret = reuse[0][:0]
} else {
ret = make([]byte, 0, size)
}
for _, buf := range b.bufs {
ret = append(ret, buf...)
putBuf(buf)
}
ret = append(ret, b.Buf...)
putBuf(b.toPool)
b.bufs = nil
b.toPool = nil
b.Buf = nil
return ret
}
type readCloser struct {
offset int
bufs [][]byte
}
func (r *readCloser) Read(p []byte) (n int, err error) {
for _, buf := range r.bufs {
// Copy as much as we can.
x := copy(p[n:], buf[r.offset:])
n += x // Increment how much we filled.
// Did we empty the whole buffer?
if r.offset+x == len(buf) {
// On to the next buffer.
r.offset = 0
r.bufs = r.bufs[1:]
// We can release this buffer.
putBuf(buf)
} else {
r.offset += x
}
if n == len(p) {
break
}
}
// No buffers left or nothing read?
if len(r.bufs) == 0 {
err = io.EOF
}
return
}
func (r *readCloser) Close() error {
// Release all remaining buffers.
for _, buf := range r.bufs {
putBuf(buf)
}
// In case Close gets called multiple times.
r.bufs = nil
return nil
}
// ReadCloser creates an io.ReadCloser with all the contents of the buffer.
func (b *Buffer) ReadCloser() io.ReadCloser {
ret := &readCloser{0, append(b.bufs, b.Buf)}
b.bufs = nil
b.toPool = nil
b.Buf = nil
return ret
}

78
vendor/github.com/mailru/easyjson/helpers.go generated vendored Normal file
View File

@ -0,0 +1,78 @@
// Package easyjson contains marshaler/unmarshaler interfaces and helper functions.
package easyjson
import (
"io"
"io/ioutil"
"net/http"
"strconv"
"github.com/mailru/easyjson/jlexer"
"github.com/mailru/easyjson/jwriter"
)
// Marshaler is an easyjson-compatible marshaler interface.
type Marshaler interface {
MarshalEasyJSON(w *jwriter.Writer)
}
// Unmarshaler is an easyjson-compatible unmarshaler interface.
type Unmarshaler interface {
UnmarshalEasyJSON(w *jlexer.Lexer)
}
// Optional defines an undefined-test method for a type to integrate with 'omitempty' logic.
type Optional interface {
IsDefined() bool
}
// Marshal returns data as a single byte slice. Method is suboptimal as the data is likely to be copied
// from a chain of smaller chunks.
func Marshal(v Marshaler) ([]byte, error) {
w := jwriter.Writer{}
v.MarshalEasyJSON(&w)
return w.BuildBytes()
}
// MarshalToWriter marshals the data to an io.Writer.
func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) {
jw := jwriter.Writer{}
v.MarshalEasyJSON(&jw)
return jw.DumpTo(w)
}
// MarshalToHTTPResponseWriter sets Content-Length and Content-Type headers for the
// http.ResponseWriter, and sends the data to the writer. started will be equal to
// false if an error occurred before any http.ResponseWriter methods were actually
// invoked (in this case a 500 reply is possible).
func MarshalToHTTPResponseWriter(v Marshaler, w http.ResponseWriter) (started bool, written int, err error) {
jw := jwriter.Writer{}
v.MarshalEasyJSON(&jw)
if jw.Error != nil {
return false, 0, jw.Error
}
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Length", strconv.Itoa(jw.Size()))
started = true
written, err = jw.DumpTo(w)
return
}
// Unmarshal decodes the JSON in data into the object.
func Unmarshal(data []byte, v Unmarshaler) error {
l := jlexer.Lexer{Data: data}
v.UnmarshalEasyJSON(&l)
return l.Error()
}
// UnmarshalFromReader reads all the data in the reader and decodes as JSON into the object.
func UnmarshalFromReader(r io.Reader, v Unmarshaler) error {
data, err := ioutil.ReadAll(r)
if err != nil {
return err
}
l := jlexer.Lexer{Data: data}
v.UnmarshalEasyJSON(&l)
return l.Error()
}

24
vendor/github.com/mailru/easyjson/jlexer/bytestostr.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
// This file will only be included in the build if neither
// easyjson_nounsafe nor appengine build tag is set. See README notes
// for more details.
//+build !easyjson_nounsafe
//+build !appengine
package jlexer
import (
"reflect"
"unsafe"
)
// bytesToStr creates a string pointing at the slice to avoid copying.
//
// Warning: the string returned by the function should be used with care, as the whole input data
// chunk may be blocked from being freed by the GC because of a single string, or the
// underlying buffer data may be garbage-collected even while the string still exists.
func bytesToStr(data []byte) string {
h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
shdr := reflect.StringHeader{Data: h.Data, Len: h.Len}
return *(*string)(unsafe.Pointer(&shdr))
}

View File

@ -0,0 +1,13 @@
// This file is included in the build if any of the build tags below
// are defined. Refer to README notes for more details.
//+build easyjson_nounsafe appengine
package jlexer
// bytesToStr creates a string normally from []byte
//
// Note that this method is roughly 1.5x slower than using the 'unsafe' method.
func bytesToStr(data []byte) string {
return string(data)
}

15
vendor/github.com/mailru/easyjson/jlexer/error.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
package jlexer
import "fmt"
// LexerError implements the error interface and represents all possible errors that can be
// generated during parsing the JSON data.
type LexerError struct {
Reason string
Offset int
Data string
}
func (l *LexerError) Error() string {
return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data)
}

1176
vendor/github.com/mailru/easyjson/jlexer/lexer.go generated vendored Normal file

File diff suppressed because it is too large

390
vendor/github.com/mailru/easyjson/jwriter/writer.go generated vendored Normal file
View File

@ -0,0 +1,390 @@
// Package jwriter contains a JSON writer.
package jwriter
import (
"io"
"strconv"
"unicode/utf8"
"github.com/mailru/easyjson/buffer"
)
// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but
// the Flags field in Writer is used to set and pass them around.
type Flags int
const (
NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'.
NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'.
)
// Writer is a JSON writer.
type Writer struct {
Flags Flags
Error error
Buffer buffer.Buffer
NoEscapeHTML bool
}
// Size returns the size of the data that was written out.
func (w *Writer) Size() int {
return w.Buffer.Size()
}
// DumpTo outputs the data to given io.Writer, resetting the buffer.
func (w *Writer) DumpTo(out io.Writer) (written int, err error) {
return w.Buffer.DumpTo(out)
}
// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice
// as argument that it will try to reuse.
func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) {
if w.Error != nil {
return nil, w.Error
}
return w.Buffer.BuildBytes(reuse...), nil
}
// ReadCloser returns an io.ReadCloser that can be used to read the data.
// ReadCloser also resets the buffer.
func (w *Writer) ReadCloser() (io.ReadCloser, error) {
if w.Error != nil {
return nil, w.Error
}
return w.Buffer.ReadCloser(), nil
}
// RawByte appends raw binary data to the buffer.
func (w *Writer) RawByte(c byte) {
w.Buffer.AppendByte(c)
}
// RawString appends a raw string to the buffer.
func (w *Writer) RawString(s string) {
w.Buffer.AppendString(s)
}
// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for
// calling with results of MarshalJSON-like functions.
func (w *Writer) Raw(data []byte, err error) {
switch {
case w.Error != nil:
return
case err != nil:
w.Error = err
case len(data) > 0:
w.Buffer.AppendBytes(data)
default:
w.RawString("null")
}
}
// RawText encloses raw binary data in quotes and appends it to the buffer.
// Useful for calling with results of MarshalText-like functions.
func (w *Writer) RawText(data []byte, err error) {
switch {
case w.Error != nil:
return
case err != nil:
w.Error = err
case len(data) > 0:
w.String(string(data))
default:
w.RawString("null")
}
}
// Base64Bytes appends data to the buffer after base64 encoding it
func (w *Writer) Base64Bytes(data []byte) {
if data == nil {
w.Buffer.AppendString("null")
return
}
w.Buffer.AppendByte('"')
w.base64(data)
w.Buffer.AppendByte('"')
}
func (w *Writer) Uint8(n uint8) {
w.Buffer.EnsureSpace(3)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}
func (w *Writer) Uint16(n uint16) {
w.Buffer.EnsureSpace(5)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}
func (w *Writer) Uint32(n uint32) {
w.Buffer.EnsureSpace(10)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}
func (w *Writer) Uint(n uint) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}
func (w *Writer) Uint64(n uint64) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
}
func (w *Writer) Int8(n int8) {
w.Buffer.EnsureSpace(4)
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}
func (w *Writer) Int16(n int16) {
w.Buffer.EnsureSpace(6)
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}
func (w *Writer) Int32(n int32) {
w.Buffer.EnsureSpace(11)
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}
func (w *Writer) Int(n int) {
w.Buffer.EnsureSpace(21)
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}
func (w *Writer) Int64(n int64) {
w.Buffer.EnsureSpace(21)
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
}
func (w *Writer) Uint8Str(n uint8) {
w.Buffer.EnsureSpace(3)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Uint16Str(n uint16) {
w.Buffer.EnsureSpace(5)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Uint32Str(n uint32) {
w.Buffer.EnsureSpace(10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) UintStr(n uint) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Uint64Str(n uint64) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) UintptrStr(n uintptr) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Int8Str(n int8) {
w.Buffer.EnsureSpace(4)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Int16Str(n int16) {
w.Buffer.EnsureSpace(6)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Int32Str(n int32) {
w.Buffer.EnsureSpace(11)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) IntStr(n int) {
w.Buffer.EnsureSpace(21)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Int64Str(n int64) {
w.Buffer.EnsureSpace(21)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Float32(n float32) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
}
func (w *Writer) Float32Str(n float32) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Float64(n float64) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64)
}
func (w *Writer) Float64Str(n float64) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Bool(v bool) {
w.Buffer.EnsureSpace(5)
if v {
w.Buffer.Buf = append(w.Buffer.Buf, "true"...)
} else {
w.Buffer.Buf = append(w.Buffer.Buf, "false"...)
}
}
const chars = "0123456789abcdef"
func isNotEscapedSingleChar(c byte, escapeHTML bool) bool {
// Note: might make sense to use a table if there are more chars to escape. With 4 chars
// it benchmarks the same.
if escapeHTML {
return c != '<' && c != '>' && c != '&' && c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
} else {
return c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
}
}
func (w *Writer) String(s string) {
w.Buffer.AppendByte('"')
// Portions of the string that contain no escapes are appended as
// byte slices.
p := 0 // last non-escape symbol
for i := 0; i < len(s); {
c := s[i]
if isNotEscapedSingleChar(c, !w.NoEscapeHTML) {
// single-width character, no escaping is required
i++
continue
} else if c < utf8.RuneSelf {
// single-width character, needs escaping
w.Buffer.AppendString(s[p:i])
switch c {
case '\t':
w.Buffer.AppendString(`\t`)
case '\r':
w.Buffer.AppendString(`\r`)
case '\n':
w.Buffer.AppendString(`\n`)
case '\\':
w.Buffer.AppendString(`\\`)
case '"':
w.Buffer.AppendString(`\"`)
default:
w.Buffer.AppendString(`\u00`)
w.Buffer.AppendByte(chars[c>>4])
w.Buffer.AppendByte(chars[c&0xf])
}
i++
p = i
continue
}
// broken utf
runeValue, runeWidth := utf8.DecodeRuneInString(s[i:])
if runeValue == utf8.RuneError && runeWidth == 1 {
w.Buffer.AppendString(s[p:i])
w.Buffer.AppendString(`\ufffd`)
i++
p = i
continue
}
// jsonp stuff - tab separator and line separator
if runeValue == '\u2028' || runeValue == '\u2029' {
w.Buffer.AppendString(s[p:i])
w.Buffer.AppendString(`\u202`)
w.Buffer.AppendByte(chars[runeValue&0xf])
i += runeWidth
p = i
continue
}
i += runeWidth
}
w.Buffer.AppendString(s[p:])
w.Buffer.AppendByte('"')
}
const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
const padChar = '='
func (w *Writer) base64(in []byte) {
if len(in) == 0 {
return
}
w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4)
si := 0
n := (len(in) / 3) * 3
for si < n {
// Convert 3x 8bit source bytes into 4 bytes
val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2])
w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F])
si += 3
}
remain := len(in) - si
if remain == 0 {
return
}
// Add the remaining small block
val := uint(in[si+0]) << 16
if remain == 2 {
val |= uint(in[si+1]) << 8
}
w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F])
switch remain {
case 2:
w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar))
case 1:
w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar))
}
}

45
vendor/github.com/mailru/easyjson/raw.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
package easyjson
import (
"github.com/mailru/easyjson/jlexer"
"github.com/mailru/easyjson/jwriter"
)
// RawMessage is a raw piece of JSON (number, string, bool, object, array or
// null) that is extracted without parsing and output as is during marshaling.
type RawMessage []byte
// MarshalEasyJSON does JSON marshaling using easyjson interface.
func (v *RawMessage) MarshalEasyJSON(w *jwriter.Writer) {
if len(*v) == 0 {
w.RawString("null")
} else {
w.Raw(*v, nil)
}
}
// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
func (v *RawMessage) UnmarshalEasyJSON(l *jlexer.Lexer) {
*v = RawMessage(l.Raw())
}
// UnmarshalJSON implements encoding/json.Unmarshaler interface.
func (v *RawMessage) UnmarshalJSON(data []byte) error {
*v = data
return nil
}
var nullBytes = []byte("null")
// MarshalJSON implements encoding/json.Marshaler interface.
func (v RawMessage) MarshalJSON() ([]byte, error) {
if len(v) == 0 {
return nullBytes, nil
}
return v, nil
}
// IsDefined is required for integration with omitempty easyjson logic.
func (v *RawMessage) IsDefined() bool {
return len(*v) > 0
}

24
vendor/vendor.json vendored
View File

@ -410,6 +410,30 @@
"revision": "2c9e9502788518c97fe44e8955cd069417ee89df",
"revisionTime": "2018-02-17T13:45:45Z"
},
{
"checksumSHA1": "HIMDeysKGqDKh9a3N5lFLZRNHaU=",
"path": "github.com/mailru/easyjson",
"revision": "8b799c424f57fa123fc63a99d6383bc6e4c02578",
"revisionTime": "2018-03-23T15:44:45Z"
},
{
"checksumSHA1": "T8soMJArSZrYnhmdpAnq1bVxQ6Q=",
"path": "github.com/mailru/easyjson/buffer",
"revision": "8b799c424f57fa123fc63a99d6383bc6e4c02578",
"revisionTime": "2018-03-23T15:44:45Z"
},
{
"checksumSHA1": "r2z0mW+SLfGn93Psm3tvEs3WQDM=",
"path": "github.com/mailru/easyjson/jlexer",
"revision": "8b799c424f57fa123fc63a99d6383bc6e4c02578",
"revisionTime": "2018-03-23T15:44:45Z"
},
{
"checksumSHA1": "4BAeeJ7JywEQyR6GqRrxfSfN2/Q=",
"path": "github.com/mailru/easyjson/jwriter",
"revision": "8b799c424f57fa123fc63a99d6383bc6e4c02578",
"revisionTime": "2018-03-23T15:44:45Z"
},
{
"checksumSHA1": "n/zX9TwtVztVCRPou/4CLmZAeLw=",
"path": "github.com/miekg/dns",