forked from forgejo/forgejo
Use vendored go-swagger (#8087)
* Use vendored go-swagger
* vendor go-swagger
* revert unwanted change
* remove unneeded GO111MODULE
* Update Makefile

Co-Authored-By: techknowlogick <matti@mdranta.net>
parent 4cb1bdddc8
commit 9fe4437bda

686 changed files with 143,379 additions and 17 deletions
vendor/github.com/mailru/easyjson/LICENSE (generated, vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
Copyright (c) 2016 Mail.Ru Group

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
vendor/github.com/mailru/easyjson/buffer/pool.go (generated, vendored, new file, 270 lines)
@@ -0,0 +1,270 @@
// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to
// reduce copying and to allow reuse of individual chunks.
package buffer

import (
	"io"
	"sync"
)

// PoolConfig contains configuration for the allocation and reuse strategy.
type PoolConfig struct {
	StartSize  int // Minimum chunk size that is allocated.
	PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead.
	MaxSize    int // Maximum chunk size that will be allocated.
}

var config = PoolConfig{
	StartSize:  128,
	PooledSize: 512,
	MaxSize:    32768,
}

// Reuse pool: chunk size -> pool.
var buffers = map[int]*sync.Pool{}

func initBuffers() {
	for l := config.PooledSize; l <= config.MaxSize; l *= 2 {
		buffers[l] = new(sync.Pool)
	}
}

func init() {
	initBuffers()
}

// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done.
func Init(cfg PoolConfig) {
	config = cfg
	initBuffers()
}

// putBuf puts a chunk to reuse pool if it can be reused.
func putBuf(buf []byte) {
	size := cap(buf)
	if size < config.PooledSize {
		return
	}
	if c := buffers[size]; c != nil {
		c.Put(buf[:0])
	}
}

// getBuf gets a chunk from reuse pool or creates a new one if reuse failed.
func getBuf(size int) []byte {
	if size < config.PooledSize {
		return make([]byte, 0, size)
	}

	if c := buffers[size]; c != nil {
		v := c.Get()
		if v != nil {
			return v.([]byte)
		}
	}
	return make([]byte, 0, size)
}

// Buffer is a buffer optimized for serialization without extra copying.
type Buffer struct {

	// Buf is the current chunk that can be used for serialization.
	Buf []byte

	toPool []byte
	bufs   [][]byte
}

// EnsureSpace makes sure that the current chunk contains at least s free bytes,
// possibly creating a new chunk.
func (b *Buffer) EnsureSpace(s int) {
	if cap(b.Buf)-len(b.Buf) >= s {
		return
	}
	l := len(b.Buf)
	if l > 0 {
		if cap(b.toPool) != cap(b.Buf) {
			// Chunk was reallocated, toPool can be pooled.
			putBuf(b.toPool)
		}
		if cap(b.bufs) == 0 {
			b.bufs = make([][]byte, 0, 8)
		}
		b.bufs = append(b.bufs, b.Buf)
		l = cap(b.toPool) * 2
	} else {
		l = config.StartSize
	}

	if l > config.MaxSize {
		l = config.MaxSize
	}
	b.Buf = getBuf(l)
	b.toPool = b.Buf
}

// AppendByte appends a single byte to buffer.
func (b *Buffer) AppendByte(data byte) {
	if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
		b.EnsureSpace(1)
	}
	b.Buf = append(b.Buf, data)
}

// AppendBytes appends a byte slice to buffer.
func (b *Buffer) AppendBytes(data []byte) {
	for len(data) > 0 {
		if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
			b.EnsureSpace(1)
		}

		sz := cap(b.Buf) - len(b.Buf)
		if sz > len(data) {
			sz = len(data)
		}

		b.Buf = append(b.Buf, data[:sz]...)
		data = data[sz:]
	}
}

// AppendString appends a string to buffer.
func (b *Buffer) AppendString(data string) {
	for len(data) > 0 {
		if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
			b.EnsureSpace(1)
		}

		sz := cap(b.Buf) - len(b.Buf)
		if sz > len(data) {
			sz = len(data)
		}

		b.Buf = append(b.Buf, data[:sz]...)
		data = data[sz:]
	}
}

// Size computes the size of a buffer by adding sizes of every chunk.
func (b *Buffer) Size() int {
	size := len(b.Buf)
	for _, buf := range b.bufs {
		size += len(buf)
	}
	return size
}

// DumpTo outputs the contents of a buffer to a writer and resets the buffer.
func (b *Buffer) DumpTo(w io.Writer) (written int, err error) {
	var n int
	for _, buf := range b.bufs {
		if err == nil {
			n, err = w.Write(buf)
			written += n
		}
		putBuf(buf)
	}

	if err == nil {
		n, err = w.Write(b.Buf)
		written += n
	}
	putBuf(b.toPool)

	b.bufs = nil
	b.Buf = nil
	b.toPool = nil

	return
}

// BuildBytes creates a single byte slice with all the contents of the buffer. Data is
// copied if it does not fit in a single chunk. You can optionally provide one byte
// slice as argument that it will try to reuse.
func (b *Buffer) BuildBytes(reuse ...[]byte) []byte {
	if len(b.bufs) == 0 {
		ret := b.Buf
		b.toPool = nil
		b.Buf = nil
		return ret
	}

	var ret []byte
	size := b.Size()

	// If we got a buffer as argument and it is big enough, reuse it.
	if len(reuse) == 1 && cap(reuse[0]) >= size {
		ret = reuse[0][:0]
	} else {
		ret = make([]byte, 0, size)
	}
	for _, buf := range b.bufs {
		ret = append(ret, buf...)
		putBuf(buf)
	}

	ret = append(ret, b.Buf...)
	putBuf(b.toPool)

	b.bufs = nil
	b.toPool = nil
	b.Buf = nil

	return ret
}

type readCloser struct {
	offset int
	bufs   [][]byte
}

func (r *readCloser) Read(p []byte) (n int, err error) {
	for _, buf := range r.bufs {
		// Copy as much as we can.
		x := copy(p[n:], buf[r.offset:])
		n += x // Increment how much we filled.

		// Did we empty the whole buffer?
		if r.offset+x == len(buf) {
			// On to the next buffer.
			r.offset = 0
			r.bufs = r.bufs[1:]

			// We can release this buffer.
			putBuf(buf)
		} else {
			r.offset += x
		}

		if n == len(p) {
			break
		}
	}
	// No buffers left or nothing read?
	if len(r.bufs) == 0 {
		err = io.EOF
	}
	return
}

func (r *readCloser) Close() error {
	// Release all remaining buffers.
	for _, buf := range r.bufs {
		putBuf(buf)
	}
	// In case Close gets called multiple times.
	r.bufs = nil

	return nil
}

// ReadCloser creates an io.ReadCloser with all the contents of the buffer.
func (b *Buffer) ReadCloser() io.ReadCloser {
	ret := &readCloser{0, append(b.bufs, b.Buf)}

	b.bufs = nil
	b.toPool = nil
	b.Buf = nil

	return ret
}
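For context, a minimal sketch of how this chunked buffer is typically driven (a hypothetical standalone program, not part of the commit): data is accumulated through the Append* methods, and then either collapsed into one slice with BuildBytes or streamed to an io.Writer with DumpTo, which also returns the chunks to the pool and resets the buffer.

package main

import (
	"fmt"
	"os"

	"github.com/mailru/easyjson/buffer"
)

func main() {
	var b buffer.Buffer

	// Appends may span several pooled chunks internally.
	b.AppendString(`{"answer":`)
	b.AppendString("42")
	b.AppendByte('}')

	fmt.Println(b.Size()) // total bytes across all chunks (13 here)

	// DumpTo writes every chunk to the writer, releases the chunks
	// back to the pool, and resets the buffer.
	if _, err := b.DumpTo(os.Stdout); err != nil {
		panic(err)
	}
}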
vendor/github.com/mailru/easyjson/jlexer/bytestostr.go (generated, vendored, new file, 24 lines)
@@ -0,0 +1,24 @@
// This file will only be included in the build if neither
// easyjson_nounsafe nor appengine build tag is set. See README notes
// for more details.

//+build !easyjson_nounsafe
//+build !appengine

package jlexer

import (
	"reflect"
	"unsafe"
)

// bytesToStr creates a string pointing at the slice to avoid copying.
//
// Warning: the string returned by the function should be used with care, as the whole input data
// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data
// may be garbage-collected even when the string exists.
func bytesToStr(data []byte) string {
	h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
	shdr := reflect.StringHeader{Data: h.Data, Len: h.Len}
	return *(*string)(unsafe.Pointer(&shdr))
}
vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go (generated, vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
// This file is included in the build if any of the buildtags below
// are defined. Refer to README notes for more details.

//+build easyjson_nounsafe appengine

package jlexer

// bytesToStr creates a string normally from []byte
//
// Note that this method is roughly 1.5x slower than using the 'unsafe' method.
func bytesToStr(data []byte) string {
	return string(data)
}
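To illustrate the trade-off between the two variants above, here is a small self-contained sketch (standard library only, not part of the vendored code; the helper name zeroCopyString is invented) that contrasts the safe copying conversion with a zero-copy one built the same way as bytesToStr. The zero-copy string aliases the slice's backing memory, so later mutations of the slice show through, which is exactly the hazard the Warning comment describes.

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// zeroCopyString mirrors the unsafe bytesToStr technique: the returned
// string shares its backing array with data instead of copying it.
func zeroCopyString(data []byte) string {
	h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
	shdr := reflect.StringHeader{Data: h.Data, Len: h.Len}
	return *(*string)(unsafe.Pointer(&shdr))
}

func main() {
	buf := []byte("hello")

	copied := string(buf)          // safe: copies the bytes
	aliased := zeroCopyString(buf) // unsafe: points at buf's memory

	buf[0] = 'H' // mutate the original slice

	fmt.Println(copied)  // "hello": unaffected by the mutation
	fmt.Println(aliased) // "Hello": sees the mutation, although strings are meant to be immutable
}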
vendor/github.com/mailru/easyjson/jlexer/error.go (generated, vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
package jlexer

import "fmt"

// LexerError implements the error interface and represents all possible errors that can be
// generated during parsing the JSON data.
type LexerError struct {
	Reason string
	Offset int
	Data   string
}

func (l *LexerError) Error() string {
	return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data)
}
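A quick illustration of what this error type looks like when it reaches caller code (hypothetical example; in practice the lexer fills in the fields when parsing fails):

package main

import (
	"fmt"

	"github.com/mailru/easyjson/jlexer"
)

func main() {
	// Construct the error directly just to show its formatting.
	var err error = &jlexer.LexerError{
		Reason: "expected string",
		Offset: 9,
		Data:   "42",
	}
	fmt.Println(err)
	// Output: parse error: expected string near offset 9 of '42'
}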
vendor/github.com/mailru/easyjson/jlexer/lexer.go (generated, vendored, new file, 1182 lines)
File diff suppressed because it is too large.
vendor/github.com/mailru/easyjson/jwriter/writer.go (generated, vendored, new file, 390 lines)
@@ -0,0 +1,390 @@
// Package jwriter contains a JSON writer.
package jwriter

import (
	"io"
	"strconv"
	"unicode/utf8"

	"github.com/mailru/easyjson/buffer"
)

// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but
// Flags field in Writer is used to set and pass them around.
type Flags int

const (
	NilMapAsEmpty   Flags = 1 << iota // Encode nil map as '{}' rather than 'null'.
	NilSliceAsEmpty                   // Encode nil slice as '[]' rather than 'null'.
)

// Writer is a JSON writer.
type Writer struct {
	Flags Flags

	Error        error
	Buffer       buffer.Buffer
	NoEscapeHTML bool
}

// Size returns the size of the data that was written out.
func (w *Writer) Size() int {
	return w.Buffer.Size()
}

// DumpTo outputs the data to given io.Writer, resetting the buffer.
func (w *Writer) DumpTo(out io.Writer) (written int, err error) {
	return w.Buffer.DumpTo(out)
}

// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice
// as argument that it will try to reuse.
func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) {
	if w.Error != nil {
		return nil, w.Error
	}

	return w.Buffer.BuildBytes(reuse...), nil
}

// ReadCloser returns an io.ReadCloser that can be used to read the data.
// ReadCloser also resets the buffer.
func (w *Writer) ReadCloser() (io.ReadCloser, error) {
	if w.Error != nil {
		return nil, w.Error
	}

	return w.Buffer.ReadCloser(), nil
}

// RawByte appends raw binary data to the buffer.
func (w *Writer) RawByte(c byte) {
	w.Buffer.AppendByte(c)
}

// RawString appends raw binary data to the buffer.
func (w *Writer) RawString(s string) {
	w.Buffer.AppendString(s)
}

// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for
// calling with results of MarshalJSON-like functions.
func (w *Writer) Raw(data []byte, err error) {
	switch {
	case w.Error != nil:
		return
	case err != nil:
		w.Error = err
	case len(data) > 0:
		w.Buffer.AppendBytes(data)
	default:
		w.RawString("null")
	}
}

// RawText encloses raw binary data in quotes and appends it to the buffer.
// Useful for calling with results of MarshalText-like functions.
func (w *Writer) RawText(data []byte, err error) {
	switch {
	case w.Error != nil:
		return
	case err != nil:
		w.Error = err
	case len(data) > 0:
		w.String(string(data))
	default:
		w.RawString("null")
	}
}

// Base64Bytes appends data to the buffer after base64 encoding it
func (w *Writer) Base64Bytes(data []byte) {
	if data == nil {
		w.Buffer.AppendString("null")
		return
	}
	w.Buffer.AppendByte('"')
	w.base64(data)
	w.Buffer.AppendByte('"')
}

func (w *Writer) Uint8(n uint8) {
	w.Buffer.EnsureSpace(3)
	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}

func (w *Writer) Uint16(n uint16) {
	w.Buffer.EnsureSpace(5)
	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}

func (w *Writer) Uint32(n uint32) {
	w.Buffer.EnsureSpace(10)
	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}

func (w *Writer) Uint(n uint) {
	w.Buffer.EnsureSpace(20)
	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}

func (w *Writer) Uint64(n uint64) {
	w.Buffer.EnsureSpace(20)
	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
}

func (w *Writer) Int8(n int8) {
	w.Buffer.EnsureSpace(4)
	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}

func (w *Writer) Int16(n int16) {
	w.Buffer.EnsureSpace(6)
	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}

func (w *Writer) Int32(n int32) {
	w.Buffer.EnsureSpace(11)
	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}

func (w *Writer) Int(n int) {
	w.Buffer.EnsureSpace(21)
	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}

func (w *Writer) Int64(n int64) {
	w.Buffer.EnsureSpace(21)
	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
}

func (w *Writer) Uint8Str(n uint8) {
	w.Buffer.EnsureSpace(3)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
}

func (w *Writer) Uint16Str(n uint16) {
	w.Buffer.EnsureSpace(5)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
}

func (w *Writer) Uint32Str(n uint32) {
	w.Buffer.EnsureSpace(10)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
}

func (w *Writer) UintStr(n uint) {
	w.Buffer.EnsureSpace(20)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
}

func (w *Writer) Uint64Str(n uint64) {
	w.Buffer.EnsureSpace(20)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
}

func (w *Writer) UintptrStr(n uintptr) {
	w.Buffer.EnsureSpace(20)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
}

func (w *Writer) Int8Str(n int8) {
	w.Buffer.EnsureSpace(4)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
}

func (w *Writer) Int16Str(n int16) {
	w.Buffer.EnsureSpace(6)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
}

func (w *Writer) Int32Str(n int32) {
	w.Buffer.EnsureSpace(11)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
}

func (w *Writer) IntStr(n int) {
	w.Buffer.EnsureSpace(21)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
}

func (w *Writer) Int64Str(n int64) {
	w.Buffer.EnsureSpace(21)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
}

func (w *Writer) Float32(n float32) {
	w.Buffer.EnsureSpace(20)
	w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
}

func (w *Writer) Float32Str(n float32) {
	w.Buffer.EnsureSpace(20)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
	w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
}

func (w *Writer) Float64(n float64) {
	w.Buffer.EnsureSpace(20)
	w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64)
}

func (w *Writer) Float64Str(n float64) {
	w.Buffer.EnsureSpace(20)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
	w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64)
	w.Buffer.Buf = append(w.Buffer.Buf, '"')
}

func (w *Writer) Bool(v bool) {
	w.Buffer.EnsureSpace(5)
	if v {
		w.Buffer.Buf = append(w.Buffer.Buf, "true"...)
	} else {
		w.Buffer.Buf = append(w.Buffer.Buf, "false"...)
	}
}

const chars = "0123456789abcdef"

func isNotEscapedSingleChar(c byte, escapeHTML bool) bool {
	// Note: might make sense to use a table if there are more chars to escape. With 4 chars
	// it benchmarks the same.
	if escapeHTML {
		return c != '<' && c != '>' && c != '&' && c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
	} else {
		return c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
	}
}

func (w *Writer) String(s string) {
	w.Buffer.AppendByte('"')

	// Portions of the string that contain no escapes are appended as
	// byte slices.

	p := 0 // last non-escape symbol

	for i := 0; i < len(s); {
		c := s[i]

		if isNotEscapedSingleChar(c, !w.NoEscapeHTML) {
			// single-width character, no escaping is required
			i++
			continue
		} else if c < utf8.RuneSelf {
			// single-width character, need to escape
			w.Buffer.AppendString(s[p:i])
			switch c {
			case '\t':
				w.Buffer.AppendString(`\t`)
			case '\r':
				w.Buffer.AppendString(`\r`)
			case '\n':
				w.Buffer.AppendString(`\n`)
			case '\\':
				w.Buffer.AppendString(`\\`)
			case '"':
				w.Buffer.AppendString(`\"`)
			default:
				w.Buffer.AppendString(`\u00`)
				w.Buffer.AppendByte(chars[c>>4])
				w.Buffer.AppendByte(chars[c&0xf])
			}

			i++
			p = i
			continue
		}

		// broken utf
		runeValue, runeWidth := utf8.DecodeRuneInString(s[i:])
		if runeValue == utf8.RuneError && runeWidth == 1 {
			w.Buffer.AppendString(s[p:i])
			w.Buffer.AppendString(`\ufffd`)
			i++
			p = i
			continue
		}

		// jsonp stuff - tab separator and line separator
		if runeValue == '\u2028' || runeValue == '\u2029' {
			w.Buffer.AppendString(s[p:i])
			w.Buffer.AppendString(`\u202`)
			w.Buffer.AppendByte(chars[runeValue&0xf])
			i += runeWidth
			p = i
			continue
		}
		i += runeWidth
	}
	w.Buffer.AppendString(s[p:])
	w.Buffer.AppendByte('"')
}

const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
const padChar = '='

func (w *Writer) base64(in []byte) {

	if len(in) == 0 {
		return
	}

	w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4)

	si := 0
	n := (len(in) / 3) * 3

	for si < n {
		// Convert 3x 8bit source bytes into 4 bytes
		val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2])

		w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F])

		si += 3
	}

	remain := len(in) - si
	if remain == 0 {
		return
	}

	// Add the remaining small block
	val := uint(in[si+0]) << 16
	if remain == 2 {
		val |= uint(in[si+1]) << 8
	}

	w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F])

	switch remain {
	case 2:
		w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar))
	case 1:
		w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar))
	}
}
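To show how these pieces fit together, here is a minimal hand-written sketch of driving jwriter directly (easyjson-generated marshalers normally emit this kind of code for you; the JSON shape and values here are invented for illustration):

package main

import (
	"fmt"

	"github.com/mailru/easyjson/jwriter"
)

func main() {
	var w jwriter.Writer

	// Emit {"name":"easyjson","stars":42,"tags":["json","codegen"]} token by token.
	w.RawByte('{')
	w.String("name")
	w.RawByte(':')
	w.String("easyjson")
	w.RawString(`,"stars":`)
	w.Int(42)
	w.RawString(`,"tags":`)
	w.RawByte('[')
	w.String("json")
	w.RawByte(',')
	w.String("codegen")
	w.RawByte(']')
	w.RawByte('}')

	out, err := w.BuildBytes() // err is non-nil only if w.Error was set along the way
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}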