
Added all required dependencies

Thomas Boerger 2016-11-03 23:16:01 +01:00
parent 78f86abba4
commit 1ebb35b988
No known key found for this signature in database
GPG key ID: 5A388F55283960B6
660 changed files with 502447 additions and 0 deletions

vendor/golang.org/x/crypto/LICENSE generated vendored Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/golang.org/x/crypto/PATENTS generated vendored Normal file

@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

vendor/golang.org/x/crypto/curve25519/const_amd64.s generated vendored Normal file

@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
DATA ·REDMASK51(SB)/8, $0x0007FFFFFFFFFFFF
GLOBL ·REDMASK51(SB), 8, $8
DATA ·_121666_213(SB)/8, $996687872
GLOBL ·_121666_213(SB), 8, $8
DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
GLOBL ·_2P0(SB), 8, $8
DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
GLOBL ·_2P1234(SB), 8, $8

vendor/golang.org/x/crypto/curve25519/cswap_amd64.s generated vendored Normal file

@@ -0,0 +1,88 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
// func cswap(inout *[5]uint64, v uint64)
TEXT ·cswap(SB),7,$0
MOVQ inout+0(FP),DI
MOVQ v+8(FP),SI
CMPQ SI,$1
MOVQ 0(DI),SI
MOVQ 80(DI),DX
MOVQ 8(DI),CX
MOVQ 88(DI),R8
MOVQ SI,R9
CMOVQEQ DX,SI
CMOVQEQ R9,DX
MOVQ CX,R9
CMOVQEQ R8,CX
CMOVQEQ R9,R8
MOVQ SI,0(DI)
MOVQ DX,80(DI)
MOVQ CX,8(DI)
MOVQ R8,88(DI)
MOVQ 16(DI),SI
MOVQ 96(DI),DX
MOVQ 24(DI),CX
MOVQ 104(DI),R8
MOVQ SI,R9
CMOVQEQ DX,SI
CMOVQEQ R9,DX
MOVQ CX,R9
CMOVQEQ R8,CX
CMOVQEQ R9,R8
MOVQ SI,16(DI)
MOVQ DX,96(DI)
MOVQ CX,24(DI)
MOVQ R8,104(DI)
MOVQ 32(DI),SI
MOVQ 112(DI),DX
MOVQ 40(DI),CX
MOVQ 120(DI),R8
MOVQ SI,R9
CMOVQEQ DX,SI
CMOVQEQ R9,DX
MOVQ CX,R9
CMOVQEQ R8,CX
CMOVQEQ R9,R8
MOVQ SI,32(DI)
MOVQ DX,112(DI)
MOVQ CX,40(DI)
MOVQ R8,120(DI)
MOVQ 48(DI),SI
MOVQ 128(DI),DX
MOVQ 56(DI),CX
MOVQ 136(DI),R8
MOVQ SI,R9
CMOVQEQ DX,SI
CMOVQEQ R9,DX
MOVQ CX,R9
CMOVQEQ R8,CX
CMOVQEQ R9,R8
MOVQ SI,48(DI)
MOVQ DX,128(DI)
MOVQ CX,56(DI)
MOVQ R8,136(DI)
MOVQ 64(DI),SI
MOVQ 144(DI),DX
MOVQ 72(DI),CX
MOVQ 152(DI),R8
MOVQ SI,R9
CMOVQEQ DX,SI
CMOVQEQ R9,DX
MOVQ CX,R9
CMOVQEQ R8,CX
CMOVQEQ R9,R8
MOVQ SI,64(DI)
MOVQ DX,144(DI)
MOVQ CX,72(DI)
MOVQ R8,152(DI)
MOVQ DI,AX
MOVQ SI,DX
RET

vendor/golang.org/x/crypto/curve25519/curve25519.go generated vendored Normal file

@@ -0,0 +1,841 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// We have an implementation in amd64 assembly so this code is only run on
// non-amd64 platforms. The amd64 assembly does not support gccgo.
// +build !amd64 gccgo appengine
package curve25519
// This code is a port of the public domain, "ref10" implementation of
// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
// fieldElement represents an element of the field GF(2^255 - 19). An element
// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
// context.
type fieldElement [10]int32
func feZero(fe *fieldElement) {
for i := range fe {
fe[i] = 0
}
}
func feOne(fe *fieldElement) {
feZero(fe)
fe[0] = 1
}
func feAdd(dst, a, b *fieldElement) {
for i := range dst {
dst[i] = a[i] + b[i]
}
}
func feSub(dst, a, b *fieldElement) {
for i := range dst {
dst[i] = a[i] - b[i]
}
}
func feCopy(dst, src *fieldElement) {
for i := range dst {
dst[i] = src[i]
}
}
// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
//
// Preconditions: b in {0,1}.
func feCSwap(f, g *fieldElement, b int32) {
var x fieldElement
b = -b
for i := range x {
x[i] = b & (f[i] ^ g[i])
}
for i := range f {
f[i] ^= x[i]
}
for i := range g {
g[i] ^= x[i]
}
}
// load3 reads a 24-bit, little-endian value from in.
func load3(in []byte) int64 {
var r int64
r = int64(in[0])
r |= int64(in[1]) << 8
r |= int64(in[2]) << 16
return r
}
// load4 reads a 32-bit, little-endian value from in.
func load4(in []byte) int64 {
var r int64
r = int64(in[0])
r |= int64(in[1]) << 8
r |= int64(in[2]) << 16
r |= int64(in[3]) << 24
return r
}
func feFromBytes(dst *fieldElement, src *[32]byte) {
h0 := load4(src[:])
h1 := load3(src[4:]) << 6
h2 := load3(src[7:]) << 5
h3 := load3(src[10:]) << 3
h4 := load3(src[13:]) << 2
h5 := load4(src[16:])
h6 := load3(src[20:]) << 7
h7 := load3(src[23:]) << 5
h8 := load3(src[26:]) << 4
h9 := load3(src[29:]) << 2
var carry [10]int64
carry[9] = (h9 + 1<<24) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
carry[1] = (h1 + 1<<24) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[3] = (h3 + 1<<24) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[5] = (h5 + 1<<24) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
carry[7] = (h7 + 1<<24) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
carry[0] = (h0 + 1<<25) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[2] = (h2 + 1<<25) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[4] = (h4 + 1<<25) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[6] = (h6 + 1<<25) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
carry[8] = (h8 + 1<<25) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
dst[0] = int32(h0)
dst[1] = int32(h1)
dst[2] = int32(h2)
dst[3] = int32(h3)
dst[4] = int32(h4)
dst[5] = int32(h5)
dst[6] = int32(h6)
dst[7] = int32(h7)
dst[8] = int32(h8)
dst[9] = int32(h9)
}
// feToBytes marshals h to s.
// Preconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
//
// Write p=2^255-19; q=floor(h/p).
// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
//
// Proof:
// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
//
// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
// Then 0<y<1.
//
// Write r=h-pq.
// Have 0<=r<=p-1=2^255-20.
// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
//
// Write x=r+19(2^-255)r+y.
// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
//
// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
func feToBytes(s *[32]byte, h *fieldElement) {
var carry [10]int32
q := (19*h[9] + (1 << 24)) >> 25
q = (h[0] + q) >> 26
q = (h[1] + q) >> 25
q = (h[2] + q) >> 26
q = (h[3] + q) >> 25
q = (h[4] + q) >> 26
q = (h[5] + q) >> 25
q = (h[6] + q) >> 26
q = (h[7] + q) >> 25
q = (h[8] + q) >> 26
q = (h[9] + q) >> 25
// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
h[0] += 19 * q
// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
carry[0] = h[0] >> 26
h[1] += carry[0]
h[0] -= carry[0] << 26
carry[1] = h[1] >> 25
h[2] += carry[1]
h[1] -= carry[1] << 25
carry[2] = h[2] >> 26
h[3] += carry[2]
h[2] -= carry[2] << 26
carry[3] = h[3] >> 25
h[4] += carry[3]
h[3] -= carry[3] << 25
carry[4] = h[4] >> 26
h[5] += carry[4]
h[4] -= carry[4] << 26
carry[5] = h[5] >> 25
h[6] += carry[5]
h[5] -= carry[5] << 25
carry[6] = h[6] >> 26
h[7] += carry[6]
h[6] -= carry[6] << 26
carry[7] = h[7] >> 25
h[8] += carry[7]
h[7] -= carry[7] << 25
carry[8] = h[8] >> 26
h[9] += carry[8]
h[8] -= carry[8] << 26
carry[9] = h[9] >> 25
h[9] -= carry[9] << 25
// h10 = carry9
// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
// evidently 2^255 h10-2^255 q = 0.
// Goal: Output h[0]+...+2^230 h[9].
s[0] = byte(h[0] >> 0)
s[1] = byte(h[0] >> 8)
s[2] = byte(h[0] >> 16)
s[3] = byte((h[0] >> 24) | (h[1] << 2))
s[4] = byte(h[1] >> 6)
s[5] = byte(h[1] >> 14)
s[6] = byte((h[1] >> 22) | (h[2] << 3))
s[7] = byte(h[2] >> 5)
s[8] = byte(h[2] >> 13)
s[9] = byte((h[2] >> 21) | (h[3] << 5))
s[10] = byte(h[3] >> 3)
s[11] = byte(h[3] >> 11)
s[12] = byte((h[3] >> 19) | (h[4] << 6))
s[13] = byte(h[4] >> 2)
s[14] = byte(h[4] >> 10)
s[15] = byte(h[4] >> 18)
s[16] = byte(h[5] >> 0)
s[17] = byte(h[5] >> 8)
s[18] = byte(h[5] >> 16)
s[19] = byte((h[5] >> 24) | (h[6] << 1))
s[20] = byte(h[6] >> 7)
s[21] = byte(h[6] >> 15)
s[22] = byte((h[6] >> 23) | (h[7] << 3))
s[23] = byte(h[7] >> 5)
s[24] = byte(h[7] >> 13)
s[25] = byte((h[7] >> 21) | (h[8] << 4))
s[26] = byte(h[8] >> 4)
s[27] = byte(h[8] >> 12)
s[28] = byte((h[8] >> 20) | (h[9] << 6))
s[29] = byte(h[9] >> 2)
s[30] = byte(h[9] >> 10)
s[31] = byte(h[9] >> 18)
}
// feMul calculates h = f * g
// Can overlap h with f or g.
//
// Preconditions:
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
//
// Notes on implementation strategy:
//
// Using schoolbook multiplication.
// Karatsuba would save a little in some cost models.
//
// Most multiplications by 2 and 19 are 32-bit precomputations;
// cheaper than 64-bit postcomputations.
//
// There is one remaining multiplication by 19 in the carry chain;
// one *19 precomputation can be merged into this,
// but the resulting data flow is considerably less clean.
//
// There are 12 carries below.
// 10 of them are 2-way parallelizable and vectorizable.
// Can get away with 11 carries, but then data flow is much deeper.
//
// With tighter constraints on inputs can squeeze carries into int32.
func feMul(h, f, g *fieldElement) {
f0 := f[0]
f1 := f[1]
f2 := f[2]
f3 := f[3]
f4 := f[4]
f5 := f[5]
f6 := f[6]
f7 := f[7]
f8 := f[8]
f9 := f[9]
g0 := g[0]
g1 := g[1]
g2 := g[2]
g3 := g[3]
g4 := g[4]
g5 := g[5]
g6 := g[6]
g7 := g[7]
g8 := g[8]
g9 := g[9]
g1_19 := 19 * g1 // 1.4*2^29
g2_19 := 19 * g2 // 1.4*2^30; still ok
g3_19 := 19 * g3
g4_19 := 19 * g4
g5_19 := 19 * g5
g6_19 := 19 * g6
g7_19 := 19 * g7
g8_19 := 19 * g8
g9_19 := 19 * g9
f1_2 := 2 * f1
f3_2 := 2 * f3
f5_2 := 2 * f5
f7_2 := 2 * f7
f9_2 := 2 * f9
f0g0 := int64(f0) * int64(g0)
f0g1 := int64(f0) * int64(g1)
f0g2 := int64(f0) * int64(g2)
f0g3 := int64(f0) * int64(g3)
f0g4 := int64(f0) * int64(g4)
f0g5 := int64(f0) * int64(g5)
f0g6 := int64(f0) * int64(g6)
f0g7 := int64(f0) * int64(g7)
f0g8 := int64(f0) * int64(g8)
f0g9 := int64(f0) * int64(g9)
f1g0 := int64(f1) * int64(g0)
f1g1_2 := int64(f1_2) * int64(g1)
f1g2 := int64(f1) * int64(g2)
f1g3_2 := int64(f1_2) * int64(g3)
f1g4 := int64(f1) * int64(g4)
f1g5_2 := int64(f1_2) * int64(g5)
f1g6 := int64(f1) * int64(g6)
f1g7_2 := int64(f1_2) * int64(g7)
f1g8 := int64(f1) * int64(g8)
f1g9_38 := int64(f1_2) * int64(g9_19)
f2g0 := int64(f2) * int64(g0)
f2g1 := int64(f2) * int64(g1)
f2g2 := int64(f2) * int64(g2)
f2g3 := int64(f2) * int64(g3)
f2g4 := int64(f2) * int64(g4)
f2g5 := int64(f2) * int64(g5)
f2g6 := int64(f2) * int64(g6)
f2g7 := int64(f2) * int64(g7)
f2g8_19 := int64(f2) * int64(g8_19)
f2g9_19 := int64(f2) * int64(g9_19)
f3g0 := int64(f3) * int64(g0)
f3g1_2 := int64(f3_2) * int64(g1)
f3g2 := int64(f3) * int64(g2)
f3g3_2 := int64(f3_2) * int64(g3)
f3g4 := int64(f3) * int64(g4)
f3g5_2 := int64(f3_2) * int64(g5)
f3g6 := int64(f3) * int64(g6)
f3g7_38 := int64(f3_2) * int64(g7_19)
f3g8_19 := int64(f3) * int64(g8_19)
f3g9_38 := int64(f3_2) * int64(g9_19)
f4g0 := int64(f4) * int64(g0)
f4g1 := int64(f4) * int64(g1)
f4g2 := int64(f4) * int64(g2)
f4g3 := int64(f4) * int64(g3)
f4g4 := int64(f4) * int64(g4)
f4g5 := int64(f4) * int64(g5)
f4g6_19 := int64(f4) * int64(g6_19)
f4g7_19 := int64(f4) * int64(g7_19)
f4g8_19 := int64(f4) * int64(g8_19)
f4g9_19 := int64(f4) * int64(g9_19)
f5g0 := int64(f5) * int64(g0)
f5g1_2 := int64(f5_2) * int64(g1)
f5g2 := int64(f5) * int64(g2)
f5g3_2 := int64(f5_2) * int64(g3)
f5g4 := int64(f5) * int64(g4)
f5g5_38 := int64(f5_2) * int64(g5_19)
f5g6_19 := int64(f5) * int64(g6_19)
f5g7_38 := int64(f5_2) * int64(g7_19)
f5g8_19 := int64(f5) * int64(g8_19)
f5g9_38 := int64(f5_2) * int64(g9_19)
f6g0 := int64(f6) * int64(g0)
f6g1 := int64(f6) * int64(g1)
f6g2 := int64(f6) * int64(g2)
f6g3 := int64(f6) * int64(g3)
f6g4_19 := int64(f6) * int64(g4_19)
f6g5_19 := int64(f6) * int64(g5_19)
f6g6_19 := int64(f6) * int64(g6_19)
f6g7_19 := int64(f6) * int64(g7_19)
f6g8_19 := int64(f6) * int64(g8_19)
f6g9_19 := int64(f6) * int64(g9_19)
f7g0 := int64(f7) * int64(g0)
f7g1_2 := int64(f7_2) * int64(g1)
f7g2 := int64(f7) * int64(g2)
f7g3_38 := int64(f7_2) * int64(g3_19)
f7g4_19 := int64(f7) * int64(g4_19)
f7g5_38 := int64(f7_2) * int64(g5_19)
f7g6_19 := int64(f7) * int64(g6_19)
f7g7_38 := int64(f7_2) * int64(g7_19)
f7g8_19 := int64(f7) * int64(g8_19)
f7g9_38 := int64(f7_2) * int64(g9_19)
f8g0 := int64(f8) * int64(g0)
f8g1 := int64(f8) * int64(g1)
f8g2_19 := int64(f8) * int64(g2_19)
f8g3_19 := int64(f8) * int64(g3_19)
f8g4_19 := int64(f8) * int64(g4_19)
f8g5_19 := int64(f8) * int64(g5_19)
f8g6_19 := int64(f8) * int64(g6_19)
f8g7_19 := int64(f8) * int64(g7_19)
f8g8_19 := int64(f8) * int64(g8_19)
f8g9_19 := int64(f8) * int64(g9_19)
f9g0 := int64(f9) * int64(g0)
f9g1_38 := int64(f9_2) * int64(g1_19)
f9g2_19 := int64(f9) * int64(g2_19)
f9g3_38 := int64(f9_2) * int64(g3_19)
f9g4_19 := int64(f9) * int64(g4_19)
f9g5_38 := int64(f9_2) * int64(g5_19)
f9g6_19 := int64(f9) * int64(g6_19)
f9g7_38 := int64(f9_2) * int64(g7_19)
f9g8_19 := int64(f9) * int64(g8_19)
f9g9_38 := int64(f9_2) * int64(g9_19)
h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
var carry [10]int64
// |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
// i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
// |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
// i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
// |h0| <= 2^25
// |h4| <= 2^25
// |h1| <= 1.51*2^58
// |h5| <= 1.51*2^58
carry[1] = (h1 + (1 << 24)) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[5] = (h5 + (1 << 24)) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
// |h1| <= 2^24; from now on fits into int32
// |h5| <= 2^24; from now on fits into int32
// |h2| <= 1.21*2^59
// |h6| <= 1.21*2^59
carry[2] = (h2 + (1 << 25)) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[6] = (h6 + (1 << 25)) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
// |h2| <= 2^25; from now on fits into int32 unchanged
// |h6| <= 2^25; from now on fits into int32 unchanged
// |h3| <= 1.51*2^58
// |h7| <= 1.51*2^58
carry[3] = (h3 + (1 << 24)) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[7] = (h7 + (1 << 24)) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
// |h3| <= 2^24; from now on fits into int32 unchanged
// |h7| <= 2^24; from now on fits into int32 unchanged
// |h4| <= 1.52*2^33
// |h8| <= 1.52*2^33
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[8] = (h8 + (1 << 25)) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
// |h4| <= 2^25; from now on fits into int32 unchanged
// |h8| <= 2^25; from now on fits into int32 unchanged
// |h5| <= 1.01*2^24
// |h9| <= 1.51*2^58
carry[9] = (h9 + (1 << 24)) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
// |h9| <= 2^24; from now on fits into int32 unchanged
// |h0| <= 1.8*2^37
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
// |h0| <= 2^25; from now on fits into int32 unchanged
// |h1| <= 1.01*2^24
h[0] = int32(h0)
h[1] = int32(h1)
h[2] = int32(h2)
h[3] = int32(h3)
h[4] = int32(h4)
h[5] = int32(h5)
h[6] = int32(h6)
h[7] = int32(h7)
h[8] = int32(h8)
h[9] = int32(h9)
}
// feSquare calculates h = f*f. Can overlap h with f.
//
// Preconditions:
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
func feSquare(h, f *fieldElement) {
f0 := f[0]
f1 := f[1]
f2 := f[2]
f3 := f[3]
f4 := f[4]
f5 := f[5]
f6 := f[6]
f7 := f[7]
f8 := f[8]
f9 := f[9]
f0_2 := 2 * f0
f1_2 := 2 * f1
f2_2 := 2 * f2
f3_2 := 2 * f3
f4_2 := 2 * f4
f5_2 := 2 * f5
f6_2 := 2 * f6
f7_2 := 2 * f7
f5_38 := 38 * f5 // 1.31*2^30
f6_19 := 19 * f6 // 1.31*2^30
f7_38 := 38 * f7 // 1.31*2^30
f8_19 := 19 * f8 // 1.31*2^30
f9_38 := 38 * f9 // 1.31*2^30
f0f0 := int64(f0) * int64(f0)
f0f1_2 := int64(f0_2) * int64(f1)
f0f2_2 := int64(f0_2) * int64(f2)
f0f3_2 := int64(f0_2) * int64(f3)
f0f4_2 := int64(f0_2) * int64(f4)
f0f5_2 := int64(f0_2) * int64(f5)
f0f6_2 := int64(f0_2) * int64(f6)
f0f7_2 := int64(f0_2) * int64(f7)
f0f8_2 := int64(f0_2) * int64(f8)
f0f9_2 := int64(f0_2) * int64(f9)
f1f1_2 := int64(f1_2) * int64(f1)
f1f2_2 := int64(f1_2) * int64(f2)
f1f3_4 := int64(f1_2) * int64(f3_2)
f1f4_2 := int64(f1_2) * int64(f4)
f1f5_4 := int64(f1_2) * int64(f5_2)
f1f6_2 := int64(f1_2) * int64(f6)
f1f7_4 := int64(f1_2) * int64(f7_2)
f1f8_2 := int64(f1_2) * int64(f8)
f1f9_76 := int64(f1_2) * int64(f9_38)
f2f2 := int64(f2) * int64(f2)
f2f3_2 := int64(f2_2) * int64(f3)
f2f4_2 := int64(f2_2) * int64(f4)
f2f5_2 := int64(f2_2) * int64(f5)
f2f6_2 := int64(f2_2) * int64(f6)
f2f7_2 := int64(f2_2) * int64(f7)
f2f8_38 := int64(f2_2) * int64(f8_19)
f2f9_38 := int64(f2) * int64(f9_38)
f3f3_2 := int64(f3_2) * int64(f3)
f3f4_2 := int64(f3_2) * int64(f4)
f3f5_4 := int64(f3_2) * int64(f5_2)
f3f6_2 := int64(f3_2) * int64(f6)
f3f7_76 := int64(f3_2) * int64(f7_38)
f3f8_38 := int64(f3_2) * int64(f8_19)
f3f9_76 := int64(f3_2) * int64(f9_38)
f4f4 := int64(f4) * int64(f4)
f4f5_2 := int64(f4_2) * int64(f5)
f4f6_38 := int64(f4_2) * int64(f6_19)
f4f7_38 := int64(f4) * int64(f7_38)
f4f8_38 := int64(f4_2) * int64(f8_19)
f4f9_38 := int64(f4) * int64(f9_38)
f5f5_38 := int64(f5) * int64(f5_38)
f5f6_38 := int64(f5_2) * int64(f6_19)
f5f7_76 := int64(f5_2) * int64(f7_38)
f5f8_38 := int64(f5_2) * int64(f8_19)
f5f9_76 := int64(f5_2) * int64(f9_38)
f6f6_19 := int64(f6) * int64(f6_19)
f6f7_38 := int64(f6) * int64(f7_38)
f6f8_38 := int64(f6_2) * int64(f8_19)
f6f9_38 := int64(f6) * int64(f9_38)
f7f7_38 := int64(f7) * int64(f7_38)
f7f8_38 := int64(f7_2) * int64(f8_19)
f7f9_76 := int64(f7_2) * int64(f9_38)
f8f8_19 := int64(f8) * int64(f8_19)
f8f9_38 := int64(f8) * int64(f9_38)
f9f9_38 := int64(f9) * int64(f9_38)
h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
var carry [10]int64
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[1] = (h1 + (1 << 24)) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[5] = (h5 + (1 << 24)) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
carry[2] = (h2 + (1 << 25)) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[6] = (h6 + (1 << 25)) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
carry[3] = (h3 + (1 << 24)) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[7] = (h7 + (1 << 24)) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[8] = (h8 + (1 << 25)) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
carry[9] = (h9 + (1 << 24)) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
h[0] = int32(h0)
h[1] = int32(h1)
h[2] = int32(h2)
h[3] = int32(h3)
h[4] = int32(h4)
h[5] = int32(h5)
h[6] = int32(h6)
h[7] = int32(h7)
h[8] = int32(h8)
h[9] = int32(h9)
}
// feMul121666 calculates h = f * 121666. Can overlap h with f.
//
// Preconditions:
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
func feMul121666(h, f *fieldElement) {
h0 := int64(f[0]) * 121666
h1 := int64(f[1]) * 121666
h2 := int64(f[2]) * 121666
h3 := int64(f[3]) * 121666
h4 := int64(f[4]) * 121666
h5 := int64(f[5]) * 121666
h6 := int64(f[6]) * 121666
h7 := int64(f[7]) * 121666
h8 := int64(f[8]) * 121666
h9 := int64(f[9]) * 121666
var carry [10]int64
carry[9] = (h9 + (1 << 24)) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
carry[1] = (h1 + (1 << 24)) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[3] = (h3 + (1 << 24)) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[5] = (h5 + (1 << 24)) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
carry[7] = (h7 + (1 << 24)) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[2] = (h2 + (1 << 25)) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[6] = (h6 + (1 << 25)) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
carry[8] = (h8 + (1 << 25)) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
h[0] = int32(h0)
h[1] = int32(h1)
h[2] = int32(h2)
h[3] = int32(h3)
h[4] = int32(h4)
h[5] = int32(h5)
h[6] = int32(h6)
h[7] = int32(h7)
h[8] = int32(h8)
h[9] = int32(h9)
}
// feInvert sets out = z^-1.
func feInvert(out, z *fieldElement) {
var t0, t1, t2, t3 fieldElement
var i int
feSquare(&t0, z)
for i = 1; i < 1; i++ {
feSquare(&t0, &t0)
}
feSquare(&t1, &t0)
for i = 1; i < 2; i++ {
feSquare(&t1, &t1)
}
feMul(&t1, z, &t1)
feMul(&t0, &t0, &t1)
feSquare(&t2, &t0)
for i = 1; i < 1; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t1, &t2)
feSquare(&t2, &t1)
for i = 1; i < 5; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t2, &t1)
feSquare(&t2, &t1)
for i = 1; i < 10; i++ {
feSquare(&t2, &t2)
}
feMul(&t2, &t2, &t1)
feSquare(&t3, &t2)
for i = 1; i < 20; i++ {
feSquare(&t3, &t3)
}
feMul(&t2, &t3, &t2)
feSquare(&t2, &t2)
for i = 1; i < 10; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t2, &t1)
feSquare(&t2, &t1)
for i = 1; i < 50; i++ {
feSquare(&t2, &t2)
}
feMul(&t2, &t2, &t1)
feSquare(&t3, &t2)
for i = 1; i < 100; i++ {
feSquare(&t3, &t3)
}
feMul(&t2, &t3, &t2)
feSquare(&t2, &t2)
for i = 1; i < 50; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t2, &t1)
feSquare(&t1, &t1)
for i = 1; i < 5; i++ {
feSquare(&t1, &t1)
}
feMul(out, &t1, &t0)
}
func scalarMult(out, in, base *[32]byte) {
var e [32]byte
copy(e[:], in[:])
e[0] &= 248
e[31] &= 127
e[31] |= 64
var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
feFromBytes(&x1, base)
feOne(&x2)
feCopy(&x3, &x1)
feOne(&z3)
swap := int32(0)
for pos := 254; pos >= 0; pos-- {
b := e[pos/8] >> uint(pos&7)
b &= 1
swap ^= int32(b)
feCSwap(&x2, &x3, swap)
feCSwap(&z2, &z3, swap)
swap = int32(b)
feSub(&tmp0, &x3, &z3)
feSub(&tmp1, &x2, &z2)
feAdd(&x2, &x2, &z2)
feAdd(&z2, &x3, &z3)
feMul(&z3, &tmp0, &x2)
feMul(&z2, &z2, &tmp1)
feSquare(&tmp0, &tmp1)
feSquare(&tmp1, &x2)
feAdd(&x3, &z3, &z2)
feSub(&z2, &z3, &z2)
feMul(&x2, &tmp1, &tmp0)
feSub(&tmp1, &tmp1, &tmp0)
feSquare(&z2, &z2)
feMul121666(&z3, &tmp1)
feSquare(&x3, &x3)
feAdd(&tmp0, &tmp0, &z3)
feMul(&z3, &x1, &z2)
feMul(&z2, &tmp1, &tmp0)
}
feCSwap(&x2, &x3, swap)
feCSwap(&z2, &z3, swap)
feInvert(&z2, &z2)
feMul(&x2, &x2, &z2)
feToBytes(out, &x2)
}
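Editor's aside, not part of the vendored file: the three masking lines at the top of scalarMult are the standard curve25519 scalar clamping. A minimal sketch of that transformation on its own, with an illustrative helper name:

// clampScalar puts a raw 32-byte value into curve25519 scalar form:
// the low three bits are cleared (so the scalar is a multiple of the
// cofactor 8), bit 255 is cleared and bit 254 is set, which means
// 2^254 <= scalar < 2^255.
func clampScalar(s *[32]byte) {
	s[0] &= 248  // clear bits 0-2
	s[31] &= 127 // clear bit 255
	s[31] |= 64  // set bit 254
}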

vendor/golang.org/x/crypto/curve25519/doc.go generated vendored Normal file

@@ -0,0 +1,23 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package curve25519 provides an implementation of scalar multiplication on
// the elliptic curve known as curve25519. See http://cr.yp.to/ecdh.html
package curve25519 // import "golang.org/x/crypto/curve25519"
// basePoint is the x coordinate of the generator of the curve.
var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
// ScalarMult sets dst to the product in*base where dst and base are the x
// coordinates of group points and all values are in little-endian form.
func ScalarMult(dst, in, base *[32]byte) {
scalarMult(dst, in, base)
}
// ScalarBaseMult sets dst to the product in*base where dst and base are the x
// coordinates of group points, base is the standard generator and all values
// are in little-endian form.
func ScalarBaseMult(dst, in *[32]byte) {
ScalarMult(dst, in, &basePoint)
}
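A brief usage sketch, not part of the vendored file, showing the usual Diffie-Hellman exchange built on this API; the program below is illustrative and draws both private scalars from crypto/rand.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	// Each party draws a random 32-byte private scalar.
	var alicePriv, bobPriv [32]byte
	if _, err := rand.Read(alicePriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bobPriv[:]); err != nil {
		panic(err)
	}

	// Public keys are the private scalars times the base point.
	var alicePub, bobPub [32]byte
	curve25519.ScalarBaseMult(&alicePub, &alicePriv)
	curve25519.ScalarBaseMult(&bobPub, &bobPriv)

	// Each side multiplies its own scalar with the peer's public key
	// and arrives at the same shared secret.
	var aliceShared, bobShared [32]byte
	curve25519.ScalarMult(&aliceShared, &alicePriv, &bobPub)
	curve25519.ScalarMult(&bobShared, &bobPriv, &alicePub)

	fmt.Println(aliceShared == bobShared) // true
}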

vendor/golang.org/x/crypto/curve25519/freeze_amd64.s generated vendored Normal file

@@ -0,0 +1,71 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
// func freeze(inout *[5]uint64)
TEXT ·freeze(SB),7,$0-8
MOVQ inout+0(FP), DI
MOVQ 0(DI),SI
MOVQ 8(DI),DX
MOVQ 16(DI),CX
MOVQ 24(DI),R8
MOVQ 32(DI),R9
MOVQ ·REDMASK51(SB),AX
MOVQ AX,R10
SUBQ $18,R10
MOVQ $3,R11
REDUCELOOP:
MOVQ SI,R12
SHRQ $51,R12
ANDQ AX,SI
ADDQ R12,DX
MOVQ DX,R12
SHRQ $51,R12
ANDQ AX,DX
ADDQ R12,CX
MOVQ CX,R12
SHRQ $51,R12
ANDQ AX,CX
ADDQ R12,R8
MOVQ R8,R12
SHRQ $51,R12
ANDQ AX,R8
ADDQ R12,R9
MOVQ R9,R12
SHRQ $51,R12
ANDQ AX,R9
IMUL3Q $19,R12,R12
ADDQ R12,SI
SUBQ $1,R11
JA REDUCELOOP
MOVQ $1,R12
CMPQ R10,SI
CMOVQLT R11,R12
CMPQ AX,DX
CMOVQNE R11,R12
CMPQ AX,CX
CMOVQNE R11,R12
CMPQ AX,R8
CMOVQNE R11,R12
CMPQ AX,R9
CMOVQNE R11,R12
NEGQ R12
ANDQ R12,AX
ANDQ R12,R10
SUBQ R10,SI
SUBQ AX,DX
SUBQ AX,CX
SUBQ AX,R8
SUBQ AX,R9
MOVQ SI,0(DI)
MOVQ DX,8(DI)
MOVQ CX,16(DI)
MOVQ R8,24(DI)
MOVQ R9,32(DI)
RET

vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s generated vendored Normal file

File diff suppressed because it is too large

@@ -0,0 +1,240 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!gccgo,!appengine
package curve25519
// These functions are implemented in the .s files. The names of the functions
// in the rest of the file are also taken from the SUPERCOP sources to help
// people following along.
//go:noescape
func cswap(inout *[5]uint64, v uint64)
//go:noescape
func ladderstep(inout *[5][5]uint64)
//go:noescape
func freeze(inout *[5]uint64)
//go:noescape
func mul(dest, a, b *[5]uint64)
//go:noescape
func square(out, in *[5]uint64)
// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
func mladder(xr, zr *[5]uint64, s *[32]byte) {
var work [5][5]uint64
work[0] = *xr
setint(&work[1], 1)
setint(&work[2], 0)
work[3] = *xr
setint(&work[4], 1)
j := uint(6)
var prevbit byte
for i := 31; i >= 0; i-- {
for j < 8 {
bit := ((*s)[i] >> j) & 1
swap := bit ^ prevbit
prevbit = bit
cswap(&work[1], uint64(swap))
ladderstep(&work)
j--
}
j = 7
}
*xr = work[1]
*zr = work[2]
}
func scalarMult(out, in, base *[32]byte) {
var e [32]byte
copy(e[:], (*in)[:])
e[0] &= 248
e[31] &= 127
e[31] |= 64
var t, z [5]uint64
unpack(&t, base)
mladder(&t, &z, &e)
invert(&z, &z)
mul(&t, &t, &z)
pack(out, &t)
}
func setint(r *[5]uint64, v uint64) {
r[0] = v
r[1] = 0
r[2] = 0
r[3] = 0
r[4] = 0
}
// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
// order.
func unpack(r *[5]uint64, x *[32]byte) {
r[0] = uint64(x[0]) |
uint64(x[1])<<8 |
uint64(x[2])<<16 |
uint64(x[3])<<24 |
uint64(x[4])<<32 |
uint64(x[5])<<40 |
uint64(x[6]&7)<<48
r[1] = uint64(x[6])>>3 |
uint64(x[7])<<5 |
uint64(x[8])<<13 |
uint64(x[9])<<21 |
uint64(x[10])<<29 |
uint64(x[11])<<37 |
uint64(x[12]&63)<<45
r[2] = uint64(x[12])>>6 |
uint64(x[13])<<2 |
uint64(x[14])<<10 |
uint64(x[15])<<18 |
uint64(x[16])<<26 |
uint64(x[17])<<34 |
uint64(x[18])<<42 |
uint64(x[19]&1)<<50
r[3] = uint64(x[19])>>1 |
uint64(x[20])<<7 |
uint64(x[21])<<15 |
uint64(x[22])<<23 |
uint64(x[23])<<31 |
uint64(x[24])<<39 |
uint64(x[25]&15)<<47
r[4] = uint64(x[25])>>4 |
uint64(x[26])<<4 |
uint64(x[27])<<12 |
uint64(x[28])<<20 |
uint64(x[29])<<28 |
uint64(x[30])<<36 |
uint64(x[31]&127)<<44
}
// pack sets out = x where out is the usual, little-endian form of the 5,
// 51-bit limbs in x.
func pack(out *[32]byte, x *[5]uint64) {
t := *x
freeze(&t)
out[0] = byte(t[0])
out[1] = byte(t[0] >> 8)
out[2] = byte(t[0] >> 16)
out[3] = byte(t[0] >> 24)
out[4] = byte(t[0] >> 32)
out[5] = byte(t[0] >> 40)
out[6] = byte(t[0] >> 48)
out[6] ^= byte(t[1]<<3) & 0xf8
out[7] = byte(t[1] >> 5)
out[8] = byte(t[1] >> 13)
out[9] = byte(t[1] >> 21)
out[10] = byte(t[1] >> 29)
out[11] = byte(t[1] >> 37)
out[12] = byte(t[1] >> 45)
out[12] ^= byte(t[2]<<6) & 0xc0
out[13] = byte(t[2] >> 2)
out[14] = byte(t[2] >> 10)
out[15] = byte(t[2] >> 18)
out[16] = byte(t[2] >> 26)
out[17] = byte(t[2] >> 34)
out[18] = byte(t[2] >> 42)
out[19] = byte(t[2] >> 50)
out[19] ^= byte(t[3]<<1) & 0xfe
out[20] = byte(t[3] >> 7)
out[21] = byte(t[3] >> 15)
out[22] = byte(t[3] >> 23)
out[23] = byte(t[3] >> 31)
out[24] = byte(t[3] >> 39)
out[25] = byte(t[3] >> 47)
out[25] ^= byte(t[4]<<4) & 0xf0
out[26] = byte(t[4] >> 4)
out[27] = byte(t[4] >> 12)
out[28] = byte(t[4] >> 20)
out[29] = byte(t[4] >> 28)
out[30] = byte(t[4] >> 36)
out[31] = byte(t[4] >> 44)
}
// invert calculates r = x^-1 mod p using Fermat's little theorem.
func invert(r *[5]uint64, x *[5]uint64) {
var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
square(&z2, x) /* 2 */
square(&t, &z2) /* 4 */
square(&t, &t) /* 8 */
mul(&z9, &t, x) /* 9 */
mul(&z11, &z9, &z2) /* 11 */
square(&t, &z11) /* 22 */
mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
square(&t, &z2_5_0) /* 2^6 - 2^1 */
for i := 1; i < 5; i++ { /* 2^20 - 2^10 */
square(&t, &t)
}
mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
square(&t, &z2_10_0) /* 2^11 - 2^1 */
for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
square(&t, &t)
}
mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
square(&t, &z2_20_0) /* 2^21 - 2^1 */
for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
square(&t, &t)
}
mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
square(&t, &t) /* 2^41 - 2^1 */
for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
square(&t, &t)
}
mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
square(&t, &z2_50_0) /* 2^51 - 2^1 */
for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
square(&t, &t)
}
mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
square(&t, &z2_100_0) /* 2^101 - 2^1 */
for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
square(&t, &t)
}
mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
square(&t, &t) /* 2^201 - 2^1 */
for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
square(&t, &t)
}
mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
square(&t, &t) /* 2^251 - 2^1 */
square(&t, &t) /* 2^252 - 2^2 */
square(&t, &t) /* 2^253 - 2^3 */
square(&t, &t) /* 2^254 - 2^4 */
square(&t, &t) /* 2^255 - 2^5 */
mul(r, &t, &z11) /* 2^255 - 21 */
}
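For readers following the /* ... */ exponent annotations above: invert relies on Fermat's little theorem, computing

x^{-1} \equiv x^{p-2} \pmod{p}, \qquad p = 2^{255} - 19, \qquad p - 2 = 2^{255} - 21,

which is why the final multiplication is annotated 2^255 - 21; the square/mul chain above is simply an addition chain for that exponent.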

vendor/golang.org/x/crypto/curve25519/mul_amd64.s generated vendored Normal file

@@ -0,0 +1,167 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
// func mul(dest, a, b *[5]uint64)
TEXT ·mul(SB),0,$16-24
MOVQ dest+0(FP), DI
MOVQ a+8(FP), SI
MOVQ b+16(FP), DX
MOVQ DX,CX
MOVQ 24(SI),DX
IMUL3Q $19,DX,AX
MOVQ AX,0(SP)
MULQ 16(CX)
MOVQ AX,R8
MOVQ DX,R9
MOVQ 32(SI),DX
IMUL3Q $19,DX,AX
MOVQ AX,8(SP)
MULQ 8(CX)
ADDQ AX,R8
ADCQ DX,R9
MOVQ 0(SI),AX
MULQ 0(CX)
ADDQ AX,R8
ADCQ DX,R9
MOVQ 0(SI),AX
MULQ 8(CX)
MOVQ AX,R10
MOVQ DX,R11
MOVQ 0(SI),AX
MULQ 16(CX)
MOVQ AX,R12
MOVQ DX,R13
MOVQ 0(SI),AX
MULQ 24(CX)
MOVQ AX,R14
MOVQ DX,R15
MOVQ 0(SI),AX
MULQ 32(CX)
MOVQ AX,BX
MOVQ DX,BP
MOVQ 8(SI),AX
MULQ 0(CX)
ADDQ AX,R10
ADCQ DX,R11
MOVQ 8(SI),AX
MULQ 8(CX)
ADDQ AX,R12
ADCQ DX,R13
MOVQ 8(SI),AX
MULQ 16(CX)
ADDQ AX,R14
ADCQ DX,R15
MOVQ 8(SI),AX
MULQ 24(CX)
ADDQ AX,BX
ADCQ DX,BP
MOVQ 8(SI),DX
IMUL3Q $19,DX,AX
MULQ 32(CX)
ADDQ AX,R8
ADCQ DX,R9
MOVQ 16(SI),AX
MULQ 0(CX)
ADDQ AX,R12
ADCQ DX,R13
MOVQ 16(SI),AX
MULQ 8(CX)
ADDQ AX,R14
ADCQ DX,R15
MOVQ 16(SI),AX
MULQ 16(CX)
ADDQ AX,BX
ADCQ DX,BP
MOVQ 16(SI),DX
IMUL3Q $19,DX,AX
MULQ 24(CX)
ADDQ AX,R8
ADCQ DX,R9
MOVQ 16(SI),DX
IMUL3Q $19,DX,AX
MULQ 32(CX)
ADDQ AX,R10
ADCQ DX,R11
MOVQ 24(SI),AX
MULQ 0(CX)
ADDQ AX,R14
ADCQ DX,R15
MOVQ 24(SI),AX
MULQ 8(CX)
ADDQ AX,BX
ADCQ DX,BP
MOVQ 0(SP),AX
MULQ 24(CX)
ADDQ AX,R10
ADCQ DX,R11
MOVQ 0(SP),AX
MULQ 32(CX)
ADDQ AX,R12
ADCQ DX,R13
MOVQ 32(SI),AX
MULQ 0(CX)
ADDQ AX,BX
ADCQ DX,BP
MOVQ 8(SP),AX
MULQ 16(CX)
ADDQ AX,R10
ADCQ DX,R11
MOVQ 8(SP),AX
MULQ 24(CX)
ADDQ AX,R12
ADCQ DX,R13
MOVQ 8(SP),AX
MULQ 32(CX)
ADDQ AX,R14
ADCQ DX,R15
MOVQ ·REDMASK51(SB),SI
SHLQ $13,R9:R8
ANDQ SI,R8
SHLQ $13,R11:R10
ANDQ SI,R10
ADDQ R9,R10
SHLQ $13,R13:R12
ANDQ SI,R12
ADDQ R11,R12
SHLQ $13,R15:R14
ANDQ SI,R14
ADDQ R13,R14
SHLQ $13,BP:BX
ANDQ SI,BX
ADDQ R15,BX
IMUL3Q $19,BP,DX
ADDQ DX,R8
MOVQ R8,DX
SHRQ $51,DX
ADDQ R10,DX
MOVQ DX,CX
SHRQ $51,DX
ANDQ SI,R8
ADDQ R12,DX
MOVQ DX,R9
SHRQ $51,DX
ANDQ SI,CX
ADDQ R14,DX
MOVQ DX,AX
SHRQ $51,DX
ANDQ SI,R9
ADDQ BX,DX
MOVQ DX,R10
SHRQ $51,DX
ANDQ SI,AX
IMUL3Q $19,DX,DX
ADDQ DX,R8
ANDQ SI,R10
MOVQ R8,0(DI)
MOVQ CX,8(DI)
MOVQ R9,16(DI)
MOVQ AX,24(DI)
MOVQ R10,32(DI)
RET

vendor/golang.org/x/crypto/curve25519/square_amd64.s generated vendored Normal file

@@ -0,0 +1,130 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
// func square(out, in *[5]uint64)
TEXT ·square(SB),7,$0-16
MOVQ out+0(FP), DI
MOVQ in+8(FP), SI
MOVQ 0(SI),AX
MULQ 0(SI)
MOVQ AX,CX
MOVQ DX,R8
MOVQ 0(SI),AX
SHLQ $1,AX
MULQ 8(SI)
MOVQ AX,R9
MOVQ DX,R10
MOVQ 0(SI),AX
SHLQ $1,AX
MULQ 16(SI)
MOVQ AX,R11
MOVQ DX,R12
MOVQ 0(SI),AX
SHLQ $1,AX
MULQ 24(SI)
MOVQ AX,R13
MOVQ DX,R14
MOVQ 0(SI),AX
SHLQ $1,AX
MULQ 32(SI)
MOVQ AX,R15
MOVQ DX,BX
MOVQ 8(SI),AX
MULQ 8(SI)
ADDQ AX,R11
ADCQ DX,R12
MOVQ 8(SI),AX
SHLQ $1,AX
MULQ 16(SI)
ADDQ AX,R13
ADCQ DX,R14
MOVQ 8(SI),AX
SHLQ $1,AX
MULQ 24(SI)
ADDQ AX,R15
ADCQ DX,BX
MOVQ 8(SI),DX
IMUL3Q $38,DX,AX
MULQ 32(SI)
ADDQ AX,CX
ADCQ DX,R8
MOVQ 16(SI),AX
MULQ 16(SI)
ADDQ AX,R15
ADCQ DX,BX
MOVQ 16(SI),DX
IMUL3Q $38,DX,AX
MULQ 24(SI)
ADDQ AX,CX
ADCQ DX,R8
MOVQ 16(SI),DX
IMUL3Q $38,DX,AX
MULQ 32(SI)
ADDQ AX,R9
ADCQ DX,R10
MOVQ 24(SI),DX
IMUL3Q $19,DX,AX
MULQ 24(SI)
ADDQ AX,R9
ADCQ DX,R10
MOVQ 24(SI),DX
IMUL3Q $38,DX,AX
MULQ 32(SI)
ADDQ AX,R11
ADCQ DX,R12
MOVQ 32(SI),DX
IMUL3Q $19,DX,AX
MULQ 32(SI)
ADDQ AX,R13
ADCQ DX,R14
MOVQ ·REDMASK51(SB),SI
SHLQ $13,R8:CX
ANDQ SI,CX
SHLQ $13,R10:R9
ANDQ SI,R9
ADDQ R8,R9
SHLQ $13,R12:R11
ANDQ SI,R11
ADDQ R10,R11
SHLQ $13,R14:R13
ANDQ SI,R13
ADDQ R12,R13
SHLQ $13,BX:R15
ANDQ SI,R15
ADDQ R14,R15
IMUL3Q $19,BX,DX
ADDQ DX,CX
MOVQ CX,DX
SHRQ $51,DX
ADDQ R9,DX
ANDQ SI,CX
MOVQ DX,R8
SHRQ $51,DX
ADDQ R11,DX
ANDQ SI,R8
MOVQ DX,R9
SHRQ $51,DX
ADDQ R13,DX
ANDQ SI,R9
MOVQ DX,AX
SHRQ $51,DX
ADDQ R15,DX
ANDQ SI,AX
MOVQ DX,R10
SHRQ $51,DX
IMUL3Q $19,DX,DX
ADDQ DX,CX
ANDQ SI,R10
MOVQ CX,0(DI)
MOVQ R8,8(DI)
MOVQ R9,16(DI)
MOVQ AX,24(DI)
MOVQ R10,32(DI)
RET

vendor/golang.org/x/crypto/ed25519/ed25519.go generated vendored Normal file

@@ -0,0 +1,181 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ed25519 implements the Ed25519 signature algorithm. See
// http://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-05.
package ed25519
// This code is a port of the public domain, “ref10” implementation of ed25519
// from SUPERCOP.
import (
"crypto"
cryptorand "crypto/rand"
"crypto/sha512"
"crypto/subtle"
"errors"
"io"
"strconv"
"golang.org/x/crypto/ed25519/internal/edwards25519"
)
const (
// PublicKeySize is the size, in bytes, of public keys as used in this package.
PublicKeySize = 32
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
PrivateKeySize = 64
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
SignatureSize = 64
)
// PublicKey is the type of Ed25519 public keys.
type PublicKey []byte
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
type PrivateKey []byte
// Public returns the PublicKey corresponding to priv.
func (priv PrivateKey) Public() crypto.PublicKey {
publicKey := make([]byte, PublicKeySize)
copy(publicKey, priv[32:])
return PublicKey(publicKey)
}
// Sign signs the given message with priv.
// Ed25519 performs two passes over messages to be signed and therefore cannot
// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
// indicate the message hasn't been hashed. This can be achieved by passing
// crypto.Hash(0) as the value for opts.
func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
if opts.HashFunc() != crypto.Hash(0) {
return nil, errors.New("ed25519: cannot sign hashed message")
}
return Sign(priv, message), nil
}
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) {
if rand == nil {
rand = cryptorand.Reader
}
privateKey = make([]byte, PrivateKeySize)
publicKey = make([]byte, PublicKeySize)
_, err = io.ReadFull(rand, privateKey[:32])
if err != nil {
return nil, nil, err
}
digest := sha512.Sum512(privateKey[:32])
digest[0] &= 248
digest[31] &= 127
digest[31] |= 64
var A edwards25519.ExtendedGroupElement
var hBytes [32]byte
copy(hBytes[:], digest[:])
edwards25519.GeScalarMultBase(&A, &hBytes)
var publicKeyBytes [32]byte
A.ToBytes(&publicKeyBytes)
copy(privateKey[32:], publicKeyBytes[:])
copy(publicKey, publicKeyBytes[:])
return publicKey, privateKey, nil
}
// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
if l := len(privateKey); l != PrivateKeySize {
panic("ed25519: bad private key length: " + strconv.Itoa(l))
}
h := sha512.New()
h.Write(privateKey[:32])
var digest1, messageDigest, hramDigest [64]byte
var expandedSecretKey [32]byte
h.Sum(digest1[:0])
copy(expandedSecretKey[:], digest1[:])
expandedSecretKey[0] &= 248
expandedSecretKey[31] &= 63
expandedSecretKey[31] |= 64
h.Reset()
h.Write(digest1[32:])
h.Write(message)
h.Sum(messageDigest[:0])
var messageDigestReduced [32]byte
edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
var R edwards25519.ExtendedGroupElement
edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
var encodedR [32]byte
R.ToBytes(&encodedR)
h.Reset()
h.Write(encodedR[:])
h.Write(privateKey[32:])
h.Write(message)
h.Sum(hramDigest[:0])
var hramDigestReduced [32]byte
edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
var s [32]byte
edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
signature := make([]byte, SignatureSize)
copy(signature[:], encodedR[:])
copy(signature[32:], s[:])
return signature
}
// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
if l := len(publicKey); l != PublicKeySize {
panic("ed25519: bad public key length: " + strconv.Itoa(l))
}
if len(sig) != SignatureSize || sig[63]&224 != 0 {
return false
}
var A edwards25519.ExtendedGroupElement
var publicKeyBytes [32]byte
copy(publicKeyBytes[:], publicKey)
if !A.FromBytes(&publicKeyBytes) {
return false
}
edwards25519.FeNeg(&A.X, &A.X)
edwards25519.FeNeg(&A.T, &A.T)
h := sha512.New()
h.Write(sig[:32])
h.Write(publicKey[:])
h.Write(message)
var digest [64]byte
h.Sum(digest[:0])
var hReduced [32]byte
edwards25519.ScReduce(&hReduced, &digest)
var R edwards25519.ProjectiveGroupElement
var b [32]byte
copy(b[:], sig[32:])
edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b)
var checkR [32]byte
R.ToBytes(&checkR)
return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1
}
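A short usage sketch, not part of the vendored file, exercising the three exported entry points above; passing nil to GenerateKey selects crypto/rand.Reader as documented.

package main

import (
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(nil)
	if err != nil {
		panic(err)
	}

	msg := []byte("example message")
	sig := ed25519.Sign(priv, msg)

	// Verify reports whether sig is a valid signature of msg by pub.
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}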

File diff suppressed because it is too large

File diff suppressed because it is too large

vendor/golang.org/x/crypto/ssh/buffer.go generated vendored Normal file

@@ -0,0 +1,98 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"io"
"sync"
)
// buffer provides a linked list buffer for data exchange
// between producer and consumer. Theoretically the buffer is
// of unlimited capacity as it does no allocation of its own.
type buffer struct {
// protects concurrent access to head, tail and closed
*sync.Cond
head *element // the buffer that will be read first
tail *element // the buffer that will be read last
closed bool
}
// An element represents a single link in a linked list.
type element struct {
buf []byte
next *element
}
// newBuffer returns an empty buffer that is not closed.
func newBuffer() *buffer {
e := new(element)
b := &buffer{
Cond: newCond(),
head: e,
tail: e,
}
return b
}
// write makes buf available for Read to receive.
// buf must not be modified after the call to write.
func (b *buffer) write(buf []byte) {
b.Cond.L.Lock()
e := &element{buf: buf}
b.tail.next = e
b.tail = e
b.Cond.Signal()
b.Cond.L.Unlock()
}
// eof closes the buffer. Reads from the buffer once all
// the data has been consumed will receive io.EOF.
func (b *buffer) eof() error {
b.Cond.L.Lock()
b.closed = true
b.Cond.Signal()
b.Cond.L.Unlock()
return nil
}
// Read reads data from the internal buffer in buf. Reads will block
// if no data is available, or until the buffer is closed.
func (b *buffer) Read(buf []byte) (n int, err error) {
b.Cond.L.Lock()
defer b.Cond.L.Unlock()
for len(buf) > 0 {
// if there is data in b.head, copy it
if len(b.head.buf) > 0 {
r := copy(buf, b.head.buf)
buf, b.head.buf = buf[r:], b.head.buf[r:]
n += r
continue
}
// if there is a next buffer, make it the head
if len(b.head.buf) == 0 && b.head != b.tail {
b.head = b.head.next
continue
}
// if at least one byte has been copied, return
if n > 0 {
break
}
// if nothing was read, and there is nothing outstanding
// check to see if the buffer is closed.
if b.closed {
err = io.EOF
break
}
// out of buffers, wait for producer
b.Cond.Wait()
}
return
}
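An illustrative, package-internal sketch of the producer/consumer pattern buffer supports; since the type is unexported this could only live inside the ssh package (for example in a test), and the io/ioutil import plus the payload strings are assumptions.

// Sketch only: assumes `import "io/ioutil"` alongside the imports above.
func exampleBufferUsage() ([]byte, error) {
	b := newBuffer()

	// Producer: each written slice is owned by the buffer afterwards;
	// eof marks the end of the stream.
	go func() {
		b.write([]byte("hello "))
		b.write([]byte("world"))
		b.eof()
	}()

	// Consumer: buffer implements io.Reader, so ReadAll drains it and
	// stops at the io.EOF produced after eof.
	return ioutil.ReadAll(b)
}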

vendor/golang.org/x/crypto/ssh/certs.go generated vendored Normal file

@@ -0,0 +1,503 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"bytes"
"errors"
"fmt"
"io"
"net"
"sort"
"time"
)
// These constants from [PROTOCOL.certkeys] represent the algorithm names
// for certificate types supported by this package.
const (
CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com"
CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com"
)
// Certificate types distinguish between host and user
// certificates. The values can be set in the CertType field of
// Certificate.
const (
UserCert = 1
HostCert = 2
)
// Signature represents a cryptographic signature.
type Signature struct {
Format string
Blob []byte
}
// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that
// a certificate does not expire.
const CertTimeInfinity = 1<<64 - 1
// A Certificate represents an OpenSSH certificate as defined in
// [PROTOCOL.certkeys]?rev=1.8.
type Certificate struct {
Nonce []byte
Key PublicKey
Serial uint64
CertType uint32
KeyId string
ValidPrincipals []string
ValidAfter uint64
ValidBefore uint64
Permissions
Reserved []byte
SignatureKey PublicKey
Signature *Signature
}
// genericCertData holds the key-independent part of the certificate data.
// Overall, certificates contain a nonce, public key fields and
// key-independent fields.
type genericCertData struct {
Serial uint64
CertType uint32
KeyId string
ValidPrincipals []byte
ValidAfter uint64
ValidBefore uint64
CriticalOptions []byte
Extensions []byte
Reserved []byte
SignatureKey []byte
Signature []byte
}
func marshalStringList(namelist []string) []byte {
var to []byte
for _, name := range namelist {
s := struct{ N string }{name}
to = append(to, Marshal(&s)...)
}
return to
}
type optionsTuple struct {
Key string
Value []byte
}
type optionsTupleValue struct {
Value string
}
// serialize a map of critical options or extensions
// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
// we need two length prefixes for a non-empty string value
func marshalTuples(tups map[string]string) []byte {
keys := make([]string, 0, len(tups))
for key := range tups {
keys = append(keys, key)
}
sort.Strings(keys)
var ret []byte
for _, key := range keys {
s := optionsTuple{Key: key}
if value := tups[key]; len(value) > 0 {
s.Value = Marshal(&optionsTupleValue{value})
}
ret = append(ret, Marshal(&s)...)
}
return ret
}
// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
// we need two length prefixes for a non-empty option value
func parseTuples(in []byte) (map[string]string, error) {
tups := map[string]string{}
var lastKey string
var haveLastKey bool
for len(in) > 0 {
var key, val, extra []byte
var ok bool
if key, in, ok = parseString(in); !ok {
return nil, errShortRead
}
keyStr := string(key)
// according to [PROTOCOL.certkeys], the names must be in
// lexical order.
if haveLastKey && keyStr <= lastKey {
return nil, fmt.Errorf("ssh: certificate options are not in lexical order")
}
lastKey, haveLastKey = keyStr, true
// the next field is a data field, which if non-empty has a string embedded
if val, in, ok = parseString(in); !ok {
return nil, errShortRead
}
if len(val) > 0 {
val, extra, ok = parseString(val)
if !ok {
return nil, errShortRead
}
if len(extra) > 0 {
return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value")
}
tups[keyStr] = string(val)
} else {
tups[keyStr] = ""
}
}
return tups, nil
}
func parseCert(in []byte, privAlgo string) (*Certificate, error) {
nonce, rest, ok := parseString(in)
if !ok {
return nil, errShortRead
}
key, rest, err := parsePubKey(rest, privAlgo)
if err != nil {
return nil, err
}
var g genericCertData
if err := Unmarshal(rest, &g); err != nil {
return nil, err
}
c := &Certificate{
Nonce: nonce,
Key: key,
Serial: g.Serial,
CertType: g.CertType,
KeyId: g.KeyId,
ValidAfter: g.ValidAfter,
ValidBefore: g.ValidBefore,
}
for principals := g.ValidPrincipals; len(principals) > 0; {
principal, rest, ok := parseString(principals)
if !ok {
return nil, errShortRead
}
c.ValidPrincipals = append(c.ValidPrincipals, string(principal))
principals = rest
}
c.CriticalOptions, err = parseTuples(g.CriticalOptions)
if err != nil {
return nil, err
}
c.Extensions, err = parseTuples(g.Extensions)
if err != nil {
return nil, err
}
c.Reserved = g.Reserved
k, err := ParsePublicKey(g.SignatureKey)
if err != nil {
return nil, err
}
c.SignatureKey = k
c.Signature, rest, ok = parseSignatureBody(g.Signature)
if !ok || len(rest) > 0 {
return nil, errors.New("ssh: signature parse error")
}
return c, nil
}
type openSSHCertSigner struct {
pub *Certificate
signer Signer
}
// NewCertSigner returns a Signer that signs with the given Certificate, whose
// private key is held by signer. It returns an error if the public key in cert
// doesn't match the key used by signer.
func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) {
if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 {
return nil, errors.New("ssh: signer and cert have different public key")
}
return &openSSHCertSigner{cert, signer}, nil
}
func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
return s.signer.Sign(rand, data)
}
func (s *openSSHCertSigner) PublicKey() PublicKey {
return s.pub
}
const sourceAddressCriticalOption = "source-address"
// CertChecker does the work of verifying a certificate. Its methods
// can be plugged into ClientConfig.HostKeyCallback and
// ServerConfig.PublicKeyCallback. For the CertChecker to work,
// minimally, the IsAuthority callback should be set.
type CertChecker struct {
// SupportedCriticalOptions lists the CriticalOptions that the
// server application layer understands. These are only used
// for user certificates.
SupportedCriticalOptions []string
// IsAuthority should return true if the key is recognized as
// an authority. This allows for certificates to be signed by other
// certificates.
IsAuthority func(auth PublicKey) bool
// Clock is used for verifying time stamps. If nil, time.Now
// is used.
Clock func() time.Time
// UserKeyFallback is called when CertChecker.Authenticate encounters a
// public key that is not a certificate. It must implement validation
// of user keys or else, if nil, all such keys are rejected.
UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
// HostKeyFallback is called when CertChecker.CheckHostKey encounters a
// public key that is not a certificate. It must implement host key
// validation or else, if nil, all such keys are rejected.
HostKeyFallback func(addr string, remote net.Addr, key PublicKey) error
// IsRevoked is called for each certificate so that revocation checking
// can be implemented. It should return true if the given certificate
// is revoked and false otherwise. If nil, no certificates are
// considered to have been revoked.
IsRevoked func(cert *Certificate) bool
}
// CheckHostKey checks a host key certificate. This method can be
// plugged into ClientConfig.HostKeyCallback.
func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error {
cert, ok := key.(*Certificate)
if !ok {
if c.HostKeyFallback != nil {
return c.HostKeyFallback(addr, remote, key)
}
return errors.New("ssh: non-certificate host key")
}
if cert.CertType != HostCert {
return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType)
}
return c.CheckCert(addr, cert)
}
// Authenticate checks a user certificate. Authenticate can be used as
// a value for ServerConfig.PublicKeyCallback.
func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) {
cert, ok := pubKey.(*Certificate)
if !ok {
if c.UserKeyFallback != nil {
return c.UserKeyFallback(conn, pubKey)
}
return nil, errors.New("ssh: normal key pairs not accepted")
}
if cert.CertType != UserCert {
return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType)
}
if err := c.CheckCert(conn.User(), cert); err != nil {
return nil, err
}
return &cert.Permissions, nil
}
// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and
// the signature of the certificate.
func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
if c.IsRevoked != nil && c.IsRevoked(cert) {
return fmt.Errorf("ssh: certicate serial %d revoked", cert.Serial)
}
for opt, _ := range cert.CriticalOptions {
// sourceAddressCriticalOption will be enforced by
// serverAuthenticate
if opt == sourceAddressCriticalOption {
continue
}
found := false
for _, supp := range c.SupportedCriticalOptions {
if supp == opt {
found = true
break
}
}
if !found {
return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt)
}
}
if len(cert.ValidPrincipals) > 0 {
// By default, certs are valid for all users/hosts.
found := false
for _, p := range cert.ValidPrincipals {
if p == principal {
found = true
break
}
}
if !found {
return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals)
}
}
if !c.IsAuthority(cert.SignatureKey) {
return fmt.Errorf("ssh: certificate signed by unrecognized authority")
}
clock := c.Clock
if clock == nil {
clock = time.Now
}
unixNow := clock().Unix()
if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) {
return fmt.Errorf("ssh: cert is not yet valid")
}
if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) {
return fmt.Errorf("ssh: cert has expired")
}
if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil {
return fmt.Errorf("ssh: certificate signature does not verify")
}
return nil
}
// SignCert sets c.SignatureKey to the authority's public key and stores a
// Signature, by authority, in the certificate.
func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
c.Nonce = make([]byte, 32)
if _, err := io.ReadFull(rand, c.Nonce); err != nil {
return err
}
c.SignatureKey = authority.PublicKey()
sig, err := authority.Sign(rand, c.bytesForSigning())
if err != nil {
return err
}
c.Signature = sig
return nil
}
var certAlgoNames = map[string]string{
KeyAlgoRSA: CertAlgoRSAv01,
KeyAlgoDSA: CertAlgoDSAv01,
KeyAlgoECDSA256: CertAlgoECDSA256v01,
KeyAlgoECDSA384: CertAlgoECDSA384v01,
KeyAlgoECDSA521: CertAlgoECDSA521v01,
KeyAlgoED25519: CertAlgoED25519v01,
}
// certToPrivAlgo returns the underlying algorithm for a certificate algorithm.
// Panics if a non-certificate algorithm is passed.
func certToPrivAlgo(algo string) string {
for privAlgo, pubAlgo := range certAlgoNames {
if pubAlgo == algo {
return privAlgo
}
}
panic("unknown cert algorithm")
}
func (cert *Certificate) bytesForSigning() []byte {
c2 := *cert
c2.Signature = nil
out := c2.Marshal()
// Drop trailing signature length.
return out[:len(out)-4]
}
// Marshal serializes c into OpenSSH's wire format. It is part of the
// PublicKey interface.
func (c *Certificate) Marshal() []byte {
generic := genericCertData{
Serial: c.Serial,
CertType: c.CertType,
KeyId: c.KeyId,
ValidPrincipals: marshalStringList(c.ValidPrincipals),
ValidAfter: uint64(c.ValidAfter),
ValidBefore: uint64(c.ValidBefore),
CriticalOptions: marshalTuples(c.CriticalOptions),
Extensions: marshalTuples(c.Extensions),
Reserved: c.Reserved,
SignatureKey: c.SignatureKey.Marshal(),
}
if c.Signature != nil {
generic.Signature = Marshal(c.Signature)
}
genericBytes := Marshal(&generic)
keyBytes := c.Key.Marshal()
_, keyBytes, _ = parseString(keyBytes)
prefix := Marshal(&struct {
Name string
Nonce []byte
Key []byte `ssh:"rest"`
}{c.Type(), c.Nonce, keyBytes})
result := make([]byte, 0, len(prefix)+len(genericBytes))
result = append(result, prefix...)
result = append(result, genericBytes...)
return result
}
// Type returns the key name. It is part of the PublicKey interface.
func (c *Certificate) Type() string {
algo, ok := certAlgoNames[c.Key.Type()]
if !ok {
panic("unknown cert key type " + c.Key.Type())
}
return algo
}
// Verify verifies a signature against the certificate's public
// key. It is part of the PublicKey interface.
func (c *Certificate) Verify(data []byte, sig *Signature) error {
return c.Key.Verify(data, sig)
}
func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) {
format, in, ok := parseString(in)
if !ok {
return
}
out = &Signature{
Format: string(format),
}
if out.Blob, in, ok = parseString(in); !ok {
return
}
return out, in, ok
}
func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) {
sigBytes, rest, ok := parseString(in)
if !ok {
return
}
out, trailing, ok := parseSignatureBody(sigBytes)
if !ok || len(trailing) > 0 {
return nil, nil, false
}
return
}

vendor/golang.org/x/crypto/ssh/channel.go generated vendored Normal file
@@ -0,0 +1,633 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"encoding/binary"
"errors"
"fmt"
"io"
"log"
"sync"
)
const (
minPacketLength = 9
// channelMaxPacket contains the maximum number of bytes that will be
// sent in a single packet. As per RFC 4253, section 6.1, 32k is also
// the minimum.
channelMaxPacket = 1 << 15
// We follow OpenSSH here.
channelWindowSize = 64 * channelMaxPacket
)
// NewChannel represents an incoming request to a channel. It must either be
// accepted for use by calling Accept, or rejected by calling Reject.
type NewChannel interface {
// Accept accepts the channel creation request. It returns the Channel
// and a Go channel containing SSH requests. The Go channel must be
// serviced otherwise the Channel will hang.
Accept() (Channel, <-chan *Request, error)
// Reject rejects the channel creation request. After calling
// this, no other methods on the Channel may be called.
Reject(reason RejectionReason, message string) error
// ChannelType returns the type of the channel, as supplied by the
// client.
ChannelType() string
// ExtraData returns the arbitrary payload for this channel, as supplied
// by the client. This data is specific to the channel type.
ExtraData() []byte
}
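// A server-side sketch (illustrative only; chans is assumed to be the
// <-chan NewChannel obtained when setting up a connection, and
// DiscardRequests is provided elsewhere in this package) of accepting only
// "session" channels:
//
//	for newCh := range chans {
//		if newCh.ChannelType() != "session" {
//			newCh.Reject(ssh.UnknownChannelType, "unsupported channel type")
//			continue
//		}
//		ch, reqs, err := newCh.Accept()
//		if err != nil {
//			continue
//		}
//		go ssh.DiscardRequests(reqs)
//		// ... serve the session on ch, then ch.Close()
//	}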
// A Channel is an ordered, reliable, flow-controlled, duplex stream
// that is multiplexed over an SSH connection.
type Channel interface {
// Read reads up to len(data) bytes from the channel.
Read(data []byte) (int, error)
// Write writes len(data) bytes to the channel.
Write(data []byte) (int, error)
// Close signals end of channel use. No data may be sent after this
// call.
Close() error
// CloseWrite signals the end of sending in-band
// data. Requests may still be sent, and the other side may
// still send data
CloseWrite() error
// SendRequest sends a channel request. If wantReply is true,
// it will wait for a reply and return the result as a
// boolean, otherwise the return value will be false. Channel
// requests are out-of-band messages so they may be sent even
// if the data stream is closed or blocked by flow control.
// If the channel is closed before a reply is returned, io.EOF
// is returned.
SendRequest(name string, wantReply bool, payload []byte) (bool, error)
// Stderr returns an io.ReadWriter that writes to this channel
// with the extended data type set to stderr. Stderr may
// safely be read and written from a different goroutine than
// Read and Write respectively.
Stderr() io.ReadWriter
}
// Request is a request sent outside of the normal stream of
// data. Requests can either be specific to an SSH channel, or they
// can be global.
type Request struct {
Type string
WantReply bool
Payload []byte
ch *channel
mux *mux
}
// Reply sends a response to a request. It must be called for all requests
// where WantReply is true and is a no-op otherwise. The payload argument is
// ignored for replies to channel-specific requests.
func (r *Request) Reply(ok bool, payload []byte) error {
if !r.WantReply {
return nil
}
if r.ch == nil {
return r.mux.ackRequest(ok, payload)
}
return r.ch.ackRequest(ok)
}
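// A sketch (illustrative only; reqs is the request channel returned by
// Accept) of servicing a channel's requests and acknowledging only "shell":
//
//	go func(in <-chan *ssh.Request) {
//		for req := range in {
//			req.Reply(req.Type == "shell", nil)
//		}
//	}(reqs)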
// RejectionReason is an enumeration used when rejecting channel creation
// requests. See RFC 4254, section 5.1.
type RejectionReason uint32
const (
Prohibited RejectionReason = iota + 1
ConnectionFailed
UnknownChannelType
ResourceShortage
)
// String converts the rejection reason to human readable form.
func (r RejectionReason) String() string {
switch r {
case Prohibited:
return "administratively prohibited"
case ConnectionFailed:
return "connect failed"
case UnknownChannelType:
return "unknown channel type"
case ResourceShortage:
return "resource shortage"
}
return fmt.Sprintf("unknown reason %d", int(r))
}
func min(a uint32, b int) uint32 {
if a < uint32(b) {
return a
}
return uint32(b)
}
type channelDirection uint8
const (
channelInbound channelDirection = iota
channelOutbound
)
// channel is an implementation of the Channel interface that works
// with the mux class.
type channel struct {
// R/O after creation
chanType string
extraData []byte
localId, remoteId uint32
// maxIncomingPayload and maxRemotePayload are the maximum
// payload sizes of normal and extended data packets for
// receiving and sending, respectively. The wire packet will
// be 9 or 13 bytes larger (excluding encryption overhead).
maxIncomingPayload uint32
maxRemotePayload uint32
mux *mux
// decided is set to true if an accept or reject message has been sent
// (for outbound channels) or received (for inbound channels).
decided bool
// direction contains either channelOutbound, for channels created
// locally, or channelInbound, for channels created by the peer.
direction channelDirection
// Pending internal channel messages.
msg chan interface{}
// Since requests have no ID, there can be only one request
// with WantReply=true outstanding. This lock is held by a
// goroutine that has such an outgoing request pending.
sentRequestMu sync.Mutex
incomingRequests chan *Request
sentEOF bool
// thread-safe data
remoteWin window
pending *buffer
extPending *buffer
// windowMu protects myWindow, the flow-control window.
windowMu sync.Mutex
myWindow uint32
// writeMu serializes calls to mux.conn.writePacket() and
// protects sentClose and packetPool. This mutex must be
// different from windowMu, as writePacket can block if there
// is a key exchange pending.
writeMu sync.Mutex
sentClose bool
// packetPool has a buffer for each extended channel ID to
// save allocations during writes.
packetPool map[uint32][]byte
}
// writePacket sends a packet. If the packet is a channel close, it updates
// sentClose. This method takes the lock c.writeMu.
func (c *channel) writePacket(packet []byte) error {
c.writeMu.Lock()
if c.sentClose {
c.writeMu.Unlock()
return io.EOF
}
c.sentClose = (packet[0] == msgChannelClose)
err := c.mux.conn.writePacket(packet)
c.writeMu.Unlock()
return err
}
func (c *channel) sendMessage(msg interface{}) error {
if debugMux {
log.Printf("send(%d): %#v", c.mux.chanList.offset, msg)
}
p := Marshal(msg)
binary.BigEndian.PutUint32(p[1:], c.remoteId)
return c.writePacket(p)
}
// WriteExtended writes data to a specific extended stream. These streams are
// used, for example, for stderr.
func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) {
if c.sentEOF {
return 0, io.EOF
}
// 1 byte message type, 4 bytes remoteId, 4 bytes data length
opCode := byte(msgChannelData)
headerLength := uint32(9)
if extendedCode > 0 {
headerLength += 4
opCode = msgChannelExtendedData
}
c.writeMu.Lock()
packet := c.packetPool[extendedCode]
// We don't remove the buffer from packetPool, so
// WriteExtended calls from different goroutines will be
// flagged as errors by the race detector.
c.writeMu.Unlock()
for len(data) > 0 {
space := min(c.maxRemotePayload, len(data))
if space, err = c.remoteWin.reserve(space); err != nil {
return n, err
}
if want := headerLength + space; uint32(cap(packet)) < want {
packet = make([]byte, want)
} else {
packet = packet[:want]
}
todo := data[:space]
packet[0] = opCode
binary.BigEndian.PutUint32(packet[1:], c.remoteId)
if extendedCode > 0 {
binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode))
}
binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo)))
copy(packet[headerLength:], todo)
if err = c.writePacket(packet); err != nil {
return n, err
}
n += len(todo)
data = data[len(todo):]
}
c.writeMu.Lock()
c.packetPool[extendedCode] = packet
c.writeMu.Unlock()
return n, err
}
func (c *channel) handleData(packet []byte) error {
headerLen := 9
isExtendedData := packet[0] == msgChannelExtendedData
if isExtendedData {
headerLen = 13
}
if len(packet) < headerLen {
// malformed data packet
return parseError(packet[0])
}
var extended uint32
if isExtendedData {
extended = binary.BigEndian.Uint32(packet[5:])
}
length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen])
if length == 0 {
return nil
}
if length > c.maxIncomingPayload {
// TODO(hanwen): should send Disconnect?
return errors.New("ssh: incoming packet exceeds maximum payload size")
}
data := packet[headerLen:]
if length != uint32(len(data)) {
return errors.New("ssh: wrong packet length")
}
c.windowMu.Lock()
if c.myWindow < length {
c.windowMu.Unlock()
// TODO(hanwen): should send Disconnect with reason?
return errors.New("ssh: remote side wrote too much")
}
c.myWindow -= length
c.windowMu.Unlock()
if extended == 1 {
c.extPending.write(data)
} else if extended > 0 {
// discard other extended data.
} else {
c.pending.write(data)
}
return nil
}
func (c *channel) adjustWindow(n uint32) error {
c.windowMu.Lock()
// Since myWindow is managed on our side, and can never exceed
// the initial window setting, we don't worry about overflow.
c.myWindow += uint32(n)
c.windowMu.Unlock()
return c.sendMessage(windowAdjustMsg{
AdditionalBytes: uint32(n),
})
}
func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) {
switch extended {
case 1:
n, err = c.extPending.Read(data)
case 0:
n, err = c.pending.Read(data)
default:
return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended)
}
if n > 0 {
err = c.adjustWindow(uint32(n))
// sendWindowAdjust can return io.EOF if the remote
// peer has closed the connection, however we want to
// defer forwarding io.EOF to the caller of Read until
// the buffer has been drained.
if n > 0 && err == io.EOF {
err = nil
}
}
return n, err
}
func (c *channel) close() {
c.pending.eof()
c.extPending.eof()
close(c.msg)
close(c.incomingRequests)
c.writeMu.Lock()
// This is not necessary for a normal channel teardown, but if
// there was another error, it is.
c.sentClose = true
c.writeMu.Unlock()
// Unblock writers.
c.remoteWin.close()
}
// responseMessageReceived is called when a success or failure message is
// received on a channel to check that such a message is reasonable for the
// given channel.
func (c *channel) responseMessageReceived() error {
if c.direction == channelInbound {
return errors.New("ssh: channel response message received on inbound channel")
}
if c.decided {
return errors.New("ssh: duplicate response received for channel")
}
c.decided = true
return nil
}
func (c *channel) handlePacket(packet []byte) error {
switch packet[0] {
case msgChannelData, msgChannelExtendedData:
return c.handleData(packet)
case msgChannelClose:
c.sendMessage(channelCloseMsg{PeersId: c.remoteId})
c.mux.chanList.remove(c.localId)
c.close()
return nil
case msgChannelEOF:
// RFC 4254 is mute on how EOF affects dataExt messages but
// it is logical to signal EOF at the same time.
c.extPending.eof()
c.pending.eof()
return nil
}
decoded, err := decode(packet)
if err != nil {
return err
}
switch msg := decoded.(type) {
case *channelOpenFailureMsg:
if err := c.responseMessageReceived(); err != nil {
return err
}
c.mux.chanList.remove(msg.PeersId)
c.msg <- msg
case *channelOpenConfirmMsg:
if err := c.responseMessageReceived(); err != nil {
return err
}
if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize)
}
c.remoteId = msg.MyId
c.maxRemotePayload = msg.MaxPacketSize
c.remoteWin.add(msg.MyWindow)
c.msg <- msg
case *windowAdjustMsg:
if !c.remoteWin.add(msg.AdditionalBytes) {
return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes)
}
case *channelRequestMsg:
req := Request{
Type: msg.Request,
WantReply: msg.WantReply,
Payload: msg.RequestSpecificData,
ch: c,
}
c.incomingRequests <- &req
default:
c.msg <- msg
}
return nil
}
func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel {
ch := &channel{
remoteWin: window{Cond: newCond()},
myWindow: channelWindowSize,
pending: newBuffer(),
extPending: newBuffer(),
direction: direction,
incomingRequests: make(chan *Request, 16),
msg: make(chan interface{}, 16),
chanType: chanType,
extraData: extraData,
mux: m,
packetPool: make(map[uint32][]byte),
}
ch.localId = m.chanList.add(ch)
return ch
}
var errUndecided = errors.New("ssh: must Accept or Reject channel")
var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once")
type extChannel struct {
code uint32
ch *channel
}
func (e *extChannel) Write(data []byte) (n int, err error) {
return e.ch.WriteExtended(data, e.code)
}
func (e *extChannel) Read(data []byte) (n int, err error) {
return e.ch.ReadExtended(data, e.code)
}
func (c *channel) Accept() (Channel, <-chan *Request, error) {
if c.decided {
return nil, nil, errDecidedAlready
}
c.maxIncomingPayload = channelMaxPacket
confirm := channelOpenConfirmMsg{
PeersId: c.remoteId,
MyId: c.localId,
MyWindow: c.myWindow,
MaxPacketSize: c.maxIncomingPayload,
}
c.decided = true
if err := c.sendMessage(confirm); err != nil {
return nil, nil, err
}
return c, c.incomingRequests, nil
}
func (ch *channel) Reject(reason RejectionReason, message string) error {
if ch.decided {
return errDecidedAlready
}
reject := channelOpenFailureMsg{
PeersId: ch.remoteId,
Reason: reason,
Message: message,
Language: "en",
}
ch.decided = true
return ch.sendMessage(reject)
}
func (ch *channel) Read(data []byte) (int, error) {
if !ch.decided {
return 0, errUndecided
}
return ch.ReadExtended(data, 0)
}
func (ch *channel) Write(data []byte) (int, error) {
if !ch.decided {
return 0, errUndecided
}
return ch.WriteExtended(data, 0)
}
func (ch *channel) CloseWrite() error {
if !ch.decided {
return errUndecided
}
ch.sentEOF = true
return ch.sendMessage(channelEOFMsg{
PeersId: ch.remoteId})
}
func (ch *channel) Close() error {
if !ch.decided {
return errUndecided
}
return ch.sendMessage(channelCloseMsg{
PeersId: ch.remoteId})
}
// Extended returns an io.ReadWriter that sends and receives data on the given
// SSH extended stream. Such streams are used, for example, for stderr.
func (ch *channel) Extended(code uint32) io.ReadWriter {
if !ch.decided {
return nil
}
return &extChannel{code, ch}
}
func (ch *channel) Stderr() io.ReadWriter {
return ch.Extended(1)
}
func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
if !ch.decided {
return false, errUndecided
}
if wantReply {
ch.sentRequestMu.Lock()
defer ch.sentRequestMu.Unlock()
}
msg := channelRequestMsg{
PeersId: ch.remoteId,
Request: name,
WantReply: wantReply,
RequestSpecificData: payload,
}
if err := ch.sendMessage(msg); err != nil {
return false, err
}
if wantReply {
m, ok := (<-ch.msg)
if !ok {
return false, io.EOF
}
switch m.(type) {
case *channelRequestFailureMsg:
return false, nil
case *channelRequestSuccessMsg:
return true, nil
default:
return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m)
}
}
return false, nil
}
// ackRequest either sends an ack or nack to the channel request.
func (ch *channel) ackRequest(ok bool) error {
if !ch.decided {
return errUndecided
}
var msg interface{}
if !ok {
msg = channelRequestFailureMsg{
PeersId: ch.remoteId,
}
} else {
msg = channelRequestSuccessMsg{
PeersId: ch.remoteId,
}
}
return ch.sendMessage(msg)
}
func (ch *channel) ChannelType() string {
return ch.chanType
}
func (ch *channel) ExtraData() []byte {
return ch.extraData
}

vendor/golang.org/x/crypto/ssh/cipher.go generated vendored Normal file
@@ -0,0 +1,579 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
"crypto/rc4"
"crypto/subtle"
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
)
const (
packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher.
// RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations
// MUST be able to process (plus a few more kilobytes for padding and mac). The RFC
// indicates implementations SHOULD be able to handle larger packet sizes, but then
// waffles on about reasonable limits.
//
// OpenSSH caps their maxPacket at 256kB so we choose to do
// the same. maxPacket is also used to ensure that uint32
// length fields do not overflow, so it should remain well
// below 4G.
maxPacket = 256 * 1024
)
// noneCipher implements cipher.Stream and provides no encryption. It is used
// by the transport before the first key-exchange.
type noneCipher struct{}
func (c noneCipher) XORKeyStream(dst, src []byte) {
copy(dst, src)
}
func newAESCTR(key, iv []byte) (cipher.Stream, error) {
c, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
return cipher.NewCTR(c, iv), nil
}
func newRC4(key, iv []byte) (cipher.Stream, error) {
return rc4.NewCipher(key)
}
type streamCipherMode struct {
keySize int
ivSize int
skip int
createFunc func(key, iv []byte) (cipher.Stream, error)
}
func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) {
if len(key) < c.keySize {
panic("ssh: key length too small for cipher")
}
if len(iv) < c.ivSize {
panic("ssh: iv too small for cipher")
}
stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize])
if err != nil {
return nil, err
}
var streamDump []byte
if c.skip > 0 {
streamDump = make([]byte, 512)
}
for remainingToDump := c.skip; remainingToDump > 0; {
dumpThisTime := remainingToDump
if dumpThisTime > len(streamDump) {
dumpThisTime = len(streamDump)
}
stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime])
remainingToDump -= dumpThisTime
}
return stream, nil
}
// cipherModes documents properties of supported ciphers. Ciphers not included
// are not supported and will not be negotiated, even if explicitly requested in
// ClientConfig.Ciphers.
var cipherModes = map[string]*streamCipherMode{
// Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms
// are defined in the order specified in the RFC.
"aes128-ctr": {16, aes.BlockSize, 0, newAESCTR},
"aes192-ctr": {24, aes.BlockSize, 0, newAESCTR},
"aes256-ctr": {32, aes.BlockSize, 0, newAESCTR},
// Ciphers from RFC4345, which introduces security-improved arcfour ciphers.
// They are defined in the order specified in the RFC.
"arcfour128": {16, 0, 1536, newRC4},
"arcfour256": {32, 0, 1536, newRC4},
// Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
// Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
// RC4) has problems with weak keys, and should be used with caution."
// RFC4345 introduces improved versions of Arcfour.
"arcfour": {16, 0, 0, newRC4},
// AES-GCM is not a stream cipher, so it is constructed with a
// special case. If we add any more non-stream ciphers, we
// should invest in a cleaner way to do this.
gcmCipherID: {16, 12, 0, nil},
// CBC mode is insecure and so is not included in the default config.
// (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely
// needed, it's possible to specify a custom Config to enable it.
// You should expect that an active attacker can recover plaintext if
// you do.
aes128cbcID: {16, aes.BlockSize, 0, nil},
// 3des-cbc is insecure and is disabled by default.
tripledescbcID: {24, des.BlockSize, 0, nil},
}
// prefixLen is the length of the packet prefix that contains the packet length
// and number of padding bytes.
const prefixLen = 5
// streamPacketCipher is a packetCipher using a stream cipher.
type streamPacketCipher struct {
mac hash.Hash
cipher cipher.Stream
// The following members are to avoid per-packet allocations.
prefix [prefixLen]byte
seqNumBytes [4]byte
padding [2 * packetSizeMultiple]byte
packetData []byte
macResult []byte
}
// readPacket reads and decrypts a single packet from the reader argument.
func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
if _, err := io.ReadFull(r, s.prefix[:]); err != nil {
return nil, err
}
s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
length := binary.BigEndian.Uint32(s.prefix[0:4])
paddingLength := uint32(s.prefix[4])
var macSize uint32
if s.mac != nil {
s.mac.Reset()
binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
s.mac.Write(s.seqNumBytes[:])
s.mac.Write(s.prefix[:])
macSize = uint32(s.mac.Size())
}
if length <= paddingLength+1 {
return nil, errors.New("ssh: invalid packet length, packet too small")
}
if length > maxPacket {
return nil, errors.New("ssh: invalid packet length, packet too large")
}
// the maxPacket check above ensures that length-1+macSize
// does not overflow.
if uint32(cap(s.packetData)) < length-1+macSize {
s.packetData = make([]byte, length-1+macSize)
} else {
s.packetData = s.packetData[:length-1+macSize]
}
if _, err := io.ReadFull(r, s.packetData); err != nil {
return nil, err
}
mac := s.packetData[length-1:]
data := s.packetData[:length-1]
s.cipher.XORKeyStream(data, data)
if s.mac != nil {
s.mac.Write(data)
s.macResult = s.mac.Sum(s.macResult[:0])
if subtle.ConstantTimeCompare(s.macResult, mac) != 1 {
return nil, errors.New("ssh: MAC failure")
}
}
return s.packetData[:length-paddingLength-1], nil
}
// writePacket encrypts and sends a packet of data to the writer argument.
func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
if len(packet) > maxPacket {
return errors.New("ssh: packet too large")
}
paddingLength := packetSizeMultiple - (prefixLen+len(packet))%packetSizeMultiple
if paddingLength < 4 {
paddingLength += packetSizeMultiple
}
length := len(packet) + 1 + paddingLength
binary.BigEndian.PutUint32(s.prefix[:], uint32(length))
s.prefix[4] = byte(paddingLength)
padding := s.padding[:paddingLength]
if _, err := io.ReadFull(rand, padding); err != nil {
return err
}
if s.mac != nil {
s.mac.Reset()
binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
s.mac.Write(s.seqNumBytes[:])
s.mac.Write(s.prefix[:])
s.mac.Write(packet)
s.mac.Write(padding)
}
s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
s.cipher.XORKeyStream(packet, packet)
s.cipher.XORKeyStream(padding, padding)
if _, err := w.Write(s.prefix[:]); err != nil {
return err
}
if _, err := w.Write(packet); err != nil {
return err
}
if _, err := w.Write(padding); err != nil {
return err
}
if s.mac != nil {
s.macResult = s.mac.Sum(s.macResult[:0])
if _, err := w.Write(s.macResult); err != nil {
return err
}
}
return nil
}
type gcmCipher struct {
aead cipher.AEAD
prefix [4]byte
iv []byte
buf []byte
}
func newGCMCipher(iv, key, macKey []byte) (packetCipher, error) {
c, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
aead, err := cipher.NewGCM(c)
if err != nil {
return nil, err
}
return &gcmCipher{
aead: aead,
iv: iv,
}, nil
}
const gcmTagSize = 16
func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
// Pad out to multiple of 16 bytes. This is different from the
// stream cipher because that encrypts the length too.
padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple)
if padding < 4 {
padding += packetSizeMultiple
}
length := uint32(len(packet) + int(padding) + 1)
binary.BigEndian.PutUint32(c.prefix[:], length)
if _, err := w.Write(c.prefix[:]); err != nil {
return err
}
if cap(c.buf) < int(length) {
c.buf = make([]byte, length)
} else {
c.buf = c.buf[:length]
}
c.buf[0] = padding
copy(c.buf[1:], packet)
if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil {
return err
}
c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:])
if _, err := w.Write(c.buf); err != nil {
return err
}
c.incIV()
return nil
}
func (c *gcmCipher) incIV() {
for i := 4 + 7; i >= 4; i-- {
c.iv[i]++
if c.iv[i] != 0 {
break
}
}
}
func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
if _, err := io.ReadFull(r, c.prefix[:]); err != nil {
return nil, err
}
length := binary.BigEndian.Uint32(c.prefix[:])
if length > maxPacket {
return nil, errors.New("ssh: max packet length exceeded.")
}
if cap(c.buf) < int(length+gcmTagSize) {
c.buf = make([]byte, length+gcmTagSize)
} else {
c.buf = c.buf[:length+gcmTagSize]
}
if _, err := io.ReadFull(r, c.buf); err != nil {
return nil, err
}
plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:])
if err != nil {
return nil, err
}
c.incIV()
padding := plain[0]
if padding < 4 || padding >= 20 {
return nil, fmt.Errorf("ssh: illegal padding %d", padding)
}
if int(padding+1) >= len(plain) {
return nil, fmt.Errorf("ssh: padding %d too large", padding)
}
plain = plain[1 : length-uint32(padding)]
return plain, nil
}
// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1
type cbcCipher struct {
mac hash.Hash
macSize uint32
decrypter cipher.BlockMode
encrypter cipher.BlockMode
// The following members are to avoid per-packet allocations.
seqNumBytes [4]byte
packetData []byte
macResult []byte
// Amount of data we should still read to hide which
// verification error was triggered.
oracleCamouflage uint32
}
func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
cbc := &cbcCipher{
mac: macModes[algs.MAC].new(macKey),
decrypter: cipher.NewCBCDecrypter(c, iv),
encrypter: cipher.NewCBCEncrypter(c, iv),
packetData: make([]byte, 1024),
}
if cbc.mac != nil {
cbc.macSize = uint32(cbc.mac.Size())
}
return cbc, nil
}
func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
c, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
cbc, err := newCBCCipher(c, iv, key, macKey, algs)
if err != nil {
return nil, err
}
return cbc, nil
}
func newTripleDESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
c, err := des.NewTripleDESCipher(key)
if err != nil {
return nil, err
}
cbc, err := newCBCCipher(c, iv, key, macKey, algs)
if err != nil {
return nil, err
}
return cbc, nil
}
func maxUInt32(a, b int) uint32 {
if a > b {
return uint32(a)
}
return uint32(b)
}
const (
cbcMinPacketSizeMultiple = 8
cbcMinPacketSize = 16
cbcMinPaddingSize = 4
)
// cbcError represents a verification error that may leak information.
type cbcError string
func (e cbcError) Error() string { return string(e) }
func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
p, err := c.readPacketLeaky(seqNum, r)
if err != nil {
if _, ok := err.(cbcError); ok {
// Verification error: read a fixed amount of
// data, to make distinguishing between
// failing MAC and failing length check more
// difficult.
io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage))
}
}
return p, err
}
func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) {
blockSize := c.decrypter.BlockSize()
// Read the header, which will include some of the subsequent data in the
// case of block ciphers - this is copied back to the payload later.
// How many bytes of payload/padding will be read with this first read.
firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize)
firstBlock := c.packetData[:firstBlockLength]
if _, err := io.ReadFull(r, firstBlock); err != nil {
return nil, err
}
c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength
c.decrypter.CryptBlocks(firstBlock, firstBlock)
length := binary.BigEndian.Uint32(firstBlock[:4])
if length > maxPacket {
return nil, cbcError("ssh: packet too large")
}
if length+4 < maxUInt32(cbcMinPacketSize, blockSize) {
// The minimum size of a packet is 16 (or the cipher block size, whichever
// is larger) bytes.
return nil, cbcError("ssh: packet too small")
}
// The length of the packet (including the length field but not the MAC) must
// be a multiple of the block size or 8, whichever is larger.
if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 {
return nil, cbcError("ssh: invalid packet length multiple")
}
paddingLength := uint32(firstBlock[4])
if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 {
return nil, cbcError("ssh: invalid packet length")
}
// Positions within the c.packetData buffer:
macStart := 4 + length
paddingStart := macStart - paddingLength
// Entire packet size, starting before length, ending at end of mac.
entirePacketSize := macStart + c.macSize
// Ensure c.packetData is large enough for the entire packet data.
if uint32(cap(c.packetData)) < entirePacketSize {
// Still need to upsize and copy, but this should be rare at runtime, only
// on upsizing the packetData buffer.
c.packetData = make([]byte, entirePacketSize)
copy(c.packetData, firstBlock)
} else {
c.packetData = c.packetData[:entirePacketSize]
}
if n, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil {
return nil, err
} else {
c.oracleCamouflage -= uint32(n)
}
remainingCrypted := c.packetData[firstBlockLength:macStart]
c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted)
mac := c.packetData[macStart:]
if c.mac != nil {
c.mac.Reset()
binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
c.mac.Write(c.seqNumBytes[:])
c.mac.Write(c.packetData[:macStart])
c.macResult = c.mac.Sum(c.macResult[:0])
if subtle.ConstantTimeCompare(c.macResult, mac) != 1 {
return nil, cbcError("ssh: MAC failure")
}
}
return c.packetData[prefixLen:paddingStart], nil
}
func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize())
// Length of encrypted portion of the packet (header, payload, padding).
// Enforce minimum padding and packet size.
encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPacketSize)
// Enforce block size.
encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize
length := encLength - 4
paddingLength := int(length) - (1 + len(packet))
// Overall buffer contains: header, payload, padding, mac.
// Space for the MAC is reserved in the capacity but not the slice length.
bufferSize := encLength + c.macSize
if uint32(cap(c.packetData)) < bufferSize {
c.packetData = make([]byte, encLength, bufferSize)
} else {
c.packetData = c.packetData[:encLength]
}
p := c.packetData
// Packet header.
binary.BigEndian.PutUint32(p, length)
p = p[4:]
p[0] = byte(paddingLength)
// Payload.
p = p[1:]
copy(p, packet)
// Padding.
p = p[len(packet):]
if _, err := io.ReadFull(rand, p); err != nil {
return err
}
if c.mac != nil {
c.mac.Reset()
binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
c.mac.Write(c.seqNumBytes[:])
c.mac.Write(c.packetData)
// The MAC is now appended into the capacity reserved for it earlier.
c.packetData = c.mac.Sum(c.packetData)
}
c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength])
if _, err := w.Write(c.packetData); err != nil {
return err
}
return nil
}

vendor/golang.org/x/crypto/ssh/client.go generated vendored Normal file
@@ -0,0 +1,213 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"errors"
"fmt"
"net"
"sync"
"time"
)
// Client implements a traditional SSH client that supports shells,
// subprocesses, port forwarding and tunneled dialing.
type Client struct {
Conn
forwards forwardList // forwarded tcpip connections from the remote side
mu sync.Mutex
channelHandlers map[string]chan NewChannel
}
// HandleChannelOpen returns a channel on which NewChannel requests
// for the given type are sent. If the type already is being handled,
// nil is returned. The channel is closed when the connection is closed.
func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel {
c.mu.Lock()
defer c.mu.Unlock()
if c.channelHandlers == nil {
// The SSH channel has been closed.
c := make(chan NewChannel)
close(c)
return c
}
ch := c.channelHandlers[channelType]
if ch != nil {
return nil
}
ch = make(chan NewChannel, 16)
c.channelHandlers[channelType] = ch
return ch
}
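// A sketch (illustrative only; "example-channel" is a hypothetical channel
// type and client an existing *ssh.Client) of listening for channels opened
// by the remote side:
//
//	opens := client.HandleChannelOpen("example-channel")
//	if opens != nil {
//		go func() {
//			for newCh := range opens {
//				ch, reqs, err := newCh.Accept()
//				if err != nil {
//					continue
//				}
//				go ssh.DiscardRequests(reqs)
//				_ = ch // use the channel
//			}
//		}()
//	}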
// NewClient creates a Client on top of the given connection.
func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client {
conn := &Client{
Conn: c,
channelHandlers: make(map[string]chan NewChannel, 1),
}
go conn.handleGlobalRequests(reqs)
go conn.handleChannelOpens(chans)
go func() {
conn.Wait()
conn.forwards.closeAll()
}()
go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip"))
return conn
}
// NewClientConn establishes an authenticated SSH connection using c
// as the underlying transport. The Request and NewChannel channels
// must be serviced or the connection will hang.
func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) {
fullConf := *config
fullConf.SetDefaults()
conn := &connection{
sshConn: sshConn{conn: c},
}
if err := conn.clientHandshake(addr, &fullConf); err != nil {
c.Close()
return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err)
}
conn.mux = newMux(conn.transport)
return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil
}
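// A sketch (illustrative only; the address and config are placeholders) of
// the lower-level path that Dial wraps; the returned channel and request
// streams can be serviced directly or handed to NewClient:
//
//	tcpConn, err := net.Dial("tcp", "host.example.com:22")
//	if err != nil {
//		// handle the error
//	}
//	sshConn, chans, reqs, err := ssh.NewClientConn(tcpConn, "host.example.com:22", config)
//	if err != nil {
//		// handle the error
//	}
//	client := ssh.NewClient(sshConn, chans, reqs)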
// clientHandshake performs the client side key exchange. See RFC 4253 Section
// 7.
func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error {
if config.ClientVersion != "" {
c.clientVersion = []byte(config.ClientVersion)
} else {
c.clientVersion = []byte(packageVersion)
}
var err error
c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion)
if err != nil {
return err
}
c.transport = newClientTransport(
newTransport(c.sshConn.conn, config.Rand, true /* is client */),
c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr())
if err := c.transport.requestInitialKeyChange(); err != nil {
return err
}
// We just did the key change, so the session ID is established.
c.sessionID = c.transport.getSessionID()
return c.clientAuthenticate(config)
}
// verifyHostKeySignature verifies the host key obtained in the key
// exchange.
func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error {
sig, rest, ok := parseSignatureBody(result.Signature)
if len(rest) > 0 || !ok {
return errors.New("ssh: signature parse error")
}
return hostKey.Verify(result.H, sig)
}
// NewSession opens a new Session for this client. (A session is a remote
// execution of a program.)
func (c *Client) NewSession() (*Session, error) {
ch, in, err := c.OpenChannel("session", nil)
if err != nil {
return nil, err
}
return newSession(ch, in)
}
func (c *Client) handleGlobalRequests(incoming <-chan *Request) {
for r := range incoming {
// This handles keepalive messages and matches
// the behaviour of OpenSSH.
r.Reply(false, nil)
}
}
// handleChannelOpens handles channel open messages from the remote side.
func (c *Client) handleChannelOpens(in <-chan NewChannel) {
for ch := range in {
c.mu.Lock()
handler := c.channelHandlers[ch.ChannelType()]
c.mu.Unlock()
if handler != nil {
handler <- ch
} else {
ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType()))
}
}
c.mu.Lock()
for _, ch := range c.channelHandlers {
close(ch)
}
c.channelHandlers = nil
c.mu.Unlock()
}
// Dial starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client. For access
// to incoming channels and requests, use net.Dial with NewClientConn
// instead.
func Dial(network, addr string, config *ClientConfig) (*Client, error) {
conn, err := net.DialTimeout(network, addr, config.Timeout)
if err != nil {
return nil, err
}
c, chans, reqs, err := NewClientConn(conn, addr, config)
if err != nil {
return nil, err
}
return NewClient(c, chans, reqs), nil
}
// A ClientConfig structure is used to configure a Client. It must not be
// modified after having been passed to an SSH function.
type ClientConfig struct {
// Config contains configuration that is shared between clients and
// servers.
Config
// User contains the username to authenticate as.
User string
// Auth contains possible authentication methods to use with the
// server. Only the first instance of a particular RFC 4252 method will
// be used during authentication.
Auth []AuthMethod
// HostKeyCallback, if not nil, is called during the cryptographic
// handshake to validate the server's host key. A nil HostKeyCallback
// implies that all host keys are accepted.
HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
// ClientVersion contains the version identification string that will
// be used for the connection. If empty, a reasonable default is used.
ClientVersion string
// HostKeyAlgorithms lists the key types that the client will
// accept from the server as host key, in order of
// preference. If empty, a reasonable default is used. Any
// string returned from PublicKey.Type method may be used, or
// any of the CertAlgoXxxx and KeyAlgoXxxx constants.
HostKeyAlgorithms []string
// Timeout is the maximum amount of time for the TCP connection to establish.
//
// A Timeout of zero means no timeout.
Timeout time.Duration
}
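// A minimal end-to-end sketch (illustrative only; the address, user and
// password are placeholders, and Session comes from session.go of this
// package). With a nil HostKeyCallback every host key is accepted, as
// documented above:
//
//	config := &ssh.ClientConfig{
//		User:    "example-user",
//		Auth:    []ssh.AuthMethod{ssh.Password("example-password")},
//		Timeout: 15 * time.Second,
//	}
//	client, err := ssh.Dial("tcp", "host.example.com:22", config)
//	if err != nil {
//		// handle the error
//	}
//	defer client.Close()
//	session, err := client.NewSession()
//	if err != nil {
//		// handle the error
//	}
//	defer session.Close()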

vendor/golang.org/x/crypto/ssh/client_auth.go generated vendored Normal file
@@ -0,0 +1,473 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"bytes"
"errors"
"fmt"
"io"
)
// clientAuthenticate authenticates with the remote server. See RFC 4252.
func (c *connection) clientAuthenticate(config *ClientConfig) error {
// initiate user auth session
if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil {
return err
}
packet, err := c.transport.readPacket()
if err != nil {
return err
}
var serviceAccept serviceAcceptMsg
if err := Unmarshal(packet, &serviceAccept); err != nil {
return err
}
// during the authentication phase the client first attempts the "none" method
// then any untried methods suggested by the server.
tried := make(map[string]bool)
var lastMethods []string
for auth := AuthMethod(new(noneAuth)); auth != nil; {
ok, methods, err := auth.auth(c.transport.getSessionID(), config.User, c.transport, config.Rand)
if err != nil {
return err
}
if ok {
// success
return nil
}
tried[auth.method()] = true
if methods == nil {
methods = lastMethods
}
lastMethods = methods
auth = nil
findNext:
for _, a := range config.Auth {
candidateMethod := a.method()
if tried[candidateMethod] {
continue
}
for _, meth := range methods {
if meth == candidateMethod {
auth = a
break findNext
}
}
}
}
return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried))
}
func keys(m map[string]bool) []string {
s := make([]string, 0, len(m))
for key := range m {
s = append(s, key)
}
return s
}
// An AuthMethod represents an instance of an RFC 4252 authentication method.
type AuthMethod interface {
// auth authenticates user over transport t.
// Returns true if authentication is successful.
// If authentication is not successful, a []string of alternative
// method names is returned. If the slice is nil, it will be ignored
// and the previous set of possible methods will be reused.
auth(session []byte, user string, p packetConn, rand io.Reader) (bool, []string, error)
// method returns the RFC 4252 method name.
method() string
}
// "none" authentication, RFC 4252 section 5.2.
type noneAuth int
func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
if err := c.writePacket(Marshal(&userAuthRequestMsg{
User: user,
Service: serviceSSH,
Method: "none",
})); err != nil {
return false, nil, err
}
return handleAuthResponse(c)
}
func (n *noneAuth) method() string {
return "none"
}
// passwordCallback is an AuthMethod that fetches the password through
// a function call, e.g. by prompting the user.
type passwordCallback func() (password string, err error)
func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
type passwordAuthMsg struct {
User string `sshtype:"50"`
Service string
Method string
Reply bool
Password string
}
pw, err := cb()
// REVIEW NOTE: is there a need to support skipping a password attempt?
// The program may only find out that the user doesn't have a password
// when prompting.
if err != nil {
return false, nil, err
}
if err := c.writePacket(Marshal(&passwordAuthMsg{
User: user,
Service: serviceSSH,
Method: cb.method(),
Reply: false,
Password: pw,
})); err != nil {
return false, nil, err
}
return handleAuthResponse(c)
}
func (cb passwordCallback) method() string {
return "password"
}
// Password returns an AuthMethod using the given password.
func Password(secret string) AuthMethod {
return passwordCallback(func() (string, error) { return secret, nil })
}
// PasswordCallback returns an AuthMethod that uses a callback for
// fetching a password.
func PasswordCallback(prompt func() (secret string, err error)) AuthMethod {
return passwordCallback(prompt)
}
type publickeyAuthMsg struct {
User string `sshtype:"50"`
Service string
Method string
// HasSig indicates to the receiver that the auth request is signed and
// should be used for authentication of the request.
HasSig bool
Algoname string
PubKey []byte
// Sig is tagged with "rest" so Marshal will exclude it during
// validateKey
Sig []byte `ssh:"rest"`
}
// publicKeyCallback is an AuthMethod that uses a set of key
// pairs for authentication.
type publicKeyCallback func() ([]Signer, error)
func (cb publicKeyCallback) method() string {
return "publickey"
}
func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
// Authentication is performed in two stages. The first stage sends an
// enquiry to test if each key is acceptable to the remote. The second
// stage attempts to authenticate with the valid keys obtained in the
// first stage.
signers, err := cb()
if err != nil {
return false, nil, err
}
var validKeys []Signer
for _, signer := range signers {
if ok, err := validateKey(signer.PublicKey(), user, c); ok {
validKeys = append(validKeys, signer)
} else {
if err != nil {
return false, nil, err
}
}
}
// methods that may continue if this auth is not successful.
var methods []string
for _, signer := range validKeys {
pub := signer.PublicKey()
pubKey := pub.Marshal()
sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{
User: user,
Service: serviceSSH,
Method: cb.method(),
}, []byte(pub.Type()), pubKey))
if err != nil {
return false, nil, err
}
// manually wrap the serialized signature in a string
s := Marshal(sign)
sig := make([]byte, stringLength(len(s)))
marshalString(sig, s)
msg := publickeyAuthMsg{
User: user,
Service: serviceSSH,
Method: cb.method(),
HasSig: true,
Algoname: pub.Type(),
PubKey: pubKey,
Sig: sig,
}
p := Marshal(&msg)
if err := c.writePacket(p); err != nil {
return false, nil, err
}
var success bool
success, methods, err = handleAuthResponse(c)
if err != nil {
return false, nil, err
}
if success {
return success, methods, err
}
}
return false, methods, nil
}
// validateKey validates the key provided is acceptable to the server.
func validateKey(key PublicKey, user string, c packetConn) (bool, error) {
pubKey := key.Marshal()
msg := publickeyAuthMsg{
User: user,
Service: serviceSSH,
Method: "publickey",
HasSig: false,
Algoname: key.Type(),
PubKey: pubKey,
}
if err := c.writePacket(Marshal(&msg)); err != nil {
return false, err
}
return confirmKeyAck(key, c)
}
func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
pubKey := key.Marshal()
algoname := key.Type()
for {
packet, err := c.readPacket()
if err != nil {
return false, err
}
switch packet[0] {
case msgUserAuthBanner:
// TODO(gpaul): add callback to present the banner to the user
case msgUserAuthPubKeyOk:
var msg userAuthPubKeyOkMsg
if err := Unmarshal(packet, &msg); err != nil {
return false, err
}
if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) {
return false, nil
}
return true, nil
case msgUserAuthFailure:
return false, nil
default:
return false, unexpectedMessageError(msgUserAuthSuccess, packet[0])
}
}
}
// PublicKeys returns an AuthMethod that uses the given key
// pairs.
func PublicKeys(signers ...Signer) AuthMethod {
return publicKeyCallback(func() ([]Signer, error) { return signers, nil })
}
// PublicKeysCallback returns an AuthMethod that runs the given
// function to obtain a list of key pairs.
func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod {
return publicKeyCallback(getSigners)
}
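// A sketch (illustrative only; pemBytes holds a PEM-encoded private key read
// elsewhere, and ParsePrivateKey is defined in keys.go of this package) of
// building a public-key AuthMethod:
//
//	signer, err := ssh.ParsePrivateKey(pemBytes)
//	if err != nil {
//		// handle the error
//	}
//	config := &ssh.ClientConfig{
//		User: "example-user",
//		Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)},
//	}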
// handleAuthResponse returns whether the preceding authentication request succeeded
// along with a list of remaining authentication methods to try next and
// an error if an unexpected response was received.
func handleAuthResponse(c packetConn) (bool, []string, error) {
for {
packet, err := c.readPacket()
if err != nil {
return false, nil, err
}
switch packet[0] {
case msgUserAuthBanner:
// TODO: add callback to present the banner to the user
case msgUserAuthFailure:
var msg userAuthFailureMsg
if err := Unmarshal(packet, &msg); err != nil {
return false, nil, err
}
return false, msg.Methods, nil
case msgUserAuthSuccess:
return true, nil, nil
default:
return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
}
}
}
// KeyboardInteractiveChallenge should print questions, optionally
// disabling echoing (e.g. for passwords), and return all the answers.
// Challenge may be called multiple times in a single session. After
// successful authentication, the server may send a challenge with no
// questions, for which the user and instruction messages should be
// printed. RFC 4256 section 3.3 details how the UI should behave for
// both CLI and GUI environments.
type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error)
// KeyboardInteractive returns an AuthMethod using a prompt/response
// sequence controlled by the server.
func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod {
return challenge
}
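// A sketch (illustrative only) of a challenge callback that answers every
// prompt with a fixed placeholder string:
//
//	answerAll := func(user, instruction string, questions []string, echos []bool) ([]string, error) {
//		answers := make([]string, len(questions))
//		for i := range questions {
//			answers[i] = "example-answer"
//		}
//		return answers, nil
//	}
//	auth := ssh.KeyboardInteractive(answerAll)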
func (cb KeyboardInteractiveChallenge) method() string {
return "keyboard-interactive"
}
func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
type initiateMsg struct {
User string `sshtype:"50"`
Service string
Method string
Language string
Submethods string
}
if err := c.writePacket(Marshal(&initiateMsg{
User: user,
Service: serviceSSH,
Method: "keyboard-interactive",
})); err != nil {
return false, nil, err
}
for {
packet, err := c.readPacket()
if err != nil {
return false, nil, err
}
// like handleAuthResponse, but with fewer options.
switch packet[0] {
case msgUserAuthBanner:
// TODO: Print banners during userauth.
continue
case msgUserAuthInfoRequest:
// OK
case msgUserAuthFailure:
var msg userAuthFailureMsg
if err := Unmarshal(packet, &msg); err != nil {
return false, nil, err
}
return false, msg.Methods, nil
case msgUserAuthSuccess:
return true, nil, nil
default:
return false, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
}
var msg userAuthInfoRequestMsg
if err := Unmarshal(packet, &msg); err != nil {
return false, nil, err
}
// Manually unpack the prompt/echo pairs.
rest := msg.Prompts
var prompts []string
var echos []bool
for i := 0; i < int(msg.NumPrompts); i++ {
prompt, r, ok := parseString(rest)
if !ok || len(r) == 0 {
return false, nil, errors.New("ssh: prompt format error")
}
prompts = append(prompts, string(prompt))
echos = append(echos, r[0] != 0)
rest = r[1:]
}
if len(rest) != 0 {
return false, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
}
answers, err := cb(msg.User, msg.Instruction, prompts, echos)
if err != nil {
return false, nil, err
}
if len(answers) != len(prompts) {
return false, nil, errors.New("ssh: not enough answers from keyboard-interactive callback")
}
responseLength := 1 + 4
for _, a := range answers {
responseLength += stringLength(len(a))
}
serialized := make([]byte, responseLength)
p := serialized
p[0] = msgUserAuthInfoResponse
p = p[1:]
p = marshalUint32(p, uint32(len(answers)))
for _, a := range answers {
p = marshalString(p, []byte(a))
}
if err := c.writePacket(serialized); err != nil {
return false, nil, err
}
}
}
type retryableAuthMethod struct {
authMethod AuthMethod
maxTries int
}
func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok bool, methods []string, err error) {
for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ {
ok, methods, err = r.authMethod.auth(session, user, c, rand)
if ok || err != nil { // either success or error terminate
return ok, methods, err
}
}
return ok, methods, err
}
func (r *retryableAuthMethod) method() string {
return r.authMethod.method()
}
// RetryableAuthMethod is a decorator for other auth methods enabling them to
// be retried up to maxTries before considering that AuthMethod itself failed.
// If maxTries is <= 0, will retry indefinitely
//
// This is useful for interactive clients using challenge/response type
// authentication (e.g. Keyboard-Interactive, Password, etc) where the user
// could mistype their response resulting in the server issuing a
// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4
// [keyboard-interactive]); Without this decorator, the non-retryable
// AuthMethod would be removed from future consideration, and never tried again
// (and so the user would never be able to retry their entry).
func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod {
return &retryableAuthMethod{authMethod: auth, maxTries: maxTries}
}
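// A sketch (illustrative only; answerAll is the hypothetical callback from
// the keyboard-interactive sketch above) of allowing three attempts:
//
//	auth := ssh.RetryableAuthMethod(ssh.KeyboardInteractive(answerAll), 3)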

vendor/golang.org/x/crypto/ssh/common.go generated vendored Normal file
@@ -0,0 +1,356 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"crypto"
"crypto/rand"
"fmt"
"io"
"sync"
_ "crypto/sha1"
_ "crypto/sha256"
_ "crypto/sha512"
)
// These are string constants in the SSH protocol.
const (
compressionNone = "none"
serviceUserAuth = "ssh-userauth"
serviceSSH = "ssh-connection"
)
// supportedCiphers specifies the supported ciphers in preference order.
var supportedCiphers = []string{
"aes128-ctr", "aes192-ctr", "aes256-ctr",
"aes128-gcm@openssh.com",
"arcfour256", "arcfour128",
}
// supportedKexAlgos specifies the supported key-exchange algorithms in
// preference order.
var supportedKexAlgos = []string{
kexAlgoCurve25519SHA256,
// P384 and P521 are not constant-time yet, but since we don't
// reuse ephemeral keys, using them for ECDH should be OK.
kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
kexAlgoDH14SHA1, kexAlgoDH1SHA1,
}
// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods
// of authenticating servers) in preference order.
var supportedHostKeyAlgos = []string{
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01,
KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
KeyAlgoRSA, KeyAlgoDSA,
KeyAlgoED25519,
}
// supportedMACs specifies a default set of MAC algorithms in preference order.
// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed
// because they have reached the end of their useful life.
var supportedMACs = []string{
"hmac-sha2-256", "hmac-sha1", "hmac-sha1-96",
}
var supportedCompressions = []string{compressionNone}
// hashFuncs keeps the mapping of supported algorithms to their respective
// hashes needed for signature verification.
var hashFuncs = map[string]crypto.Hash{
KeyAlgoRSA: crypto.SHA1,
KeyAlgoDSA: crypto.SHA1,
KeyAlgoECDSA256: crypto.SHA256,
KeyAlgoECDSA384: crypto.SHA384,
KeyAlgoECDSA521: crypto.SHA512,
CertAlgoRSAv01: crypto.SHA1,
CertAlgoDSAv01: crypto.SHA1,
CertAlgoECDSA256v01: crypto.SHA256,
CertAlgoECDSA384v01: crypto.SHA384,
CertAlgoECDSA521v01: crypto.SHA512,
}
// unexpectedMessageError results when the SSH message that we received didn't
// match what we wanted.
func unexpectedMessageError(expected, got uint8) error {
return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected)
}
// parseError results from a malformed SSH message.
func parseError(tag uint8) error {
return fmt.Errorf("ssh: parse error in message type %d", tag)
}
func findCommon(what string, client []string, server []string) (common string, err error) {
for _, c := range client {
for _, s := range server {
if c == s {
return c, nil
}
}
}
return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
}
type directionAlgorithms struct {
Cipher string
MAC string
Compression string
}
type algorithms struct {
kex string
hostKey string
w directionAlgorithms
r directionAlgorithms
}
func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) {
result := &algorithms{}
result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
if err != nil {
return
}
result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
if err != nil {
return
}
result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
if err != nil {
return
}
result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
if err != nil {
return
}
result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
if err != nil {
return
}
result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
if err != nil {
return
}
result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
if err != nil {
return
}
result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
if err != nil {
return
}
return result, nil
}
// If RekeyThreshold is too small, we can't make any progress sending
// data.
const minRekeyThreshold uint64 = 256
// Config contains configuration data common to both ServerConfig and
// ClientConfig.
type Config struct {
// Rand provides the source of entropy for cryptographic
// primitives. If Rand is nil, the cryptographic random reader
// in package crypto/rand will be used.
Rand io.Reader
// The maximum number of bytes sent or received after which a
// new key is negotiated. It must be at least 256. If
// unspecified, 1 gigabyte is used.
RekeyThreshold uint64
// The allowed key exchange algorithms. If unspecified then a
// default set of algorithms is used.
KeyExchanges []string
// The allowed cipher algorithms. If unspecified then a sensible
// default is used.
Ciphers []string
// The allowed MAC algorithms. If unspecified then a sensible default
// is used.
MACs []string
}
// SetDefaults sets sensible values for unset fields in config. This is
// exported for testing: Configs passed to SSH functions are copied and have
// default values set automatically.
func (c *Config) SetDefaults() {
if c.Rand == nil {
c.Rand = rand.Reader
}
if c.Ciphers == nil {
c.Ciphers = supportedCiphers
}
var ciphers []string
for _, c := range c.Ciphers {
if cipherModes[c] != nil {
// reject the cipher if we have no cipherModes definition
ciphers = append(ciphers, c)
}
}
c.Ciphers = ciphers
if c.KeyExchanges == nil {
c.KeyExchanges = supportedKexAlgos
}
if c.MACs == nil {
c.MACs = supportedMACs
}
if c.RekeyThreshold == 0 {
// RFC 4253, section 9 suggests rekeying after 1G.
c.RekeyThreshold = 1 << 30
}
if c.RekeyThreshold < minRekeyThreshold {
c.RekeyThreshold = minRekeyThreshold
}
}
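// Configuration sketch (illustrative, not part of the original file): callers
// normally set these fields through ClientConfig or ServerConfig, which embed
// Config; anything left unset is filled in by SetDefaults before use. The
// algorithm names below are examples taken from the supported sets.
func exampleRestrictedConfig() *ClientConfig {
return &ClientConfig{
Config: Config{
KeyExchanges: []string{"curve25519-sha256@libssh.org"},
Ciphers: []string{"aes128-ctr"},
MACs: []string{"hmac-sha2-256"},
},
}
}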
// buildDataSignedForAuth returns the data that is signed in order to prove
// possession of a private key. See RFC 4252, section 7.
func buildDataSignedForAuth(sessionId []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte {
data := struct {
Session []byte
Type byte
User string
Service string
Method string
Sign bool
Algo []byte
PubKey []byte
}{
sessionId,
msgUserAuthRequest,
req.User,
req.Service,
req.Method,
true,
algo,
pubKey,
}
return Marshal(data)
}
func appendU16(buf []byte, n uint16) []byte {
return append(buf, byte(n>>8), byte(n))
}
func appendU32(buf []byte, n uint32) []byte {
return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
}
func appendU64(buf []byte, n uint64) []byte {
return append(buf,
byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32),
byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
}
func appendInt(buf []byte, n int) []byte {
return appendU32(buf, uint32(n))
}
func appendString(buf []byte, s string) []byte {
buf = appendU32(buf, uint32(len(s)))
buf = append(buf, s...)
return buf
}
func appendBool(buf []byte, b bool) []byte {
if b {
return append(buf, 1)
}
return append(buf, 0)
}
// newCond is a helper to hide the fact that there is no usable zero
// value for sync.Cond.
func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) }
// window represents the buffer available to clients
// wishing to write to a channel.
type window struct {
*sync.Cond
win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1
writeWaiters int
closed bool
}
// add adds win to the amount of window available
// for consumers.
func (w *window) add(win uint32) bool {
// A zero-sized window adjustment is a no-op.
if win == 0 {
return true
}
w.L.Lock()
if w.win+win < win {
w.L.Unlock()
return false
}
w.win += win
// It is unusual that multiple goroutines would be attempting to reserve
// window space, but not guaranteed. Use broadcast to notify all waiters
// that additional window is available.
w.Broadcast()
w.L.Unlock()
return true
}
// close sets the window to closed, so all reservations fail
// immediately.
func (w *window) close() {
w.L.Lock()
w.closed = true
w.Broadcast()
w.L.Unlock()
}
// reserve reserves win from the available window capacity.
// If no capacity remains, reserve will block. reserve may
// return less than requested.
func (w *window) reserve(win uint32) (uint32, error) {
var err error
w.L.Lock()
w.writeWaiters++
w.Broadcast()
for w.win == 0 && !w.closed {
w.Wait()
}
w.writeWaiters--
if w.win < win {
win = w.win
}
w.win -= win
if w.closed {
err = io.EOF
}
w.L.Unlock()
return win, err
}
// waitWriterBlocked waits until some goroutine is blocked for further
// writes. It is used in tests only.
func (w *window) waitWriterBlocked() {
w.Cond.L.Lock()
for w.writeWaiters == 0 {
w.Cond.Wait()
}
w.Cond.L.Unlock()
}
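// Flow-control sketch (illustrative, not part of the original file): a writer
// reserves window space before sending, the reader returns capacity with add
// as the peer consumes data, and close makes pending and future reserves
// return io.EOF.
func exampleWindow() error {
w := &window{Cond: newCond()}
w.add(32 * 1024) // peer advertised 32 KiB of window
if _, err := w.reserve(4096); err != nil {
return err
}
w.close()
_, err := w.reserve(1) // returns io.EOF once the window is closed
return err
}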

143
vendor/golang.org/x/crypto/ssh/connection.go generated vendored Normal file

@ -0,0 +1,143 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"fmt"
"net"
)
// OpenChannelError is returned if the other side rejects an
// OpenChannel request.
type OpenChannelError struct {
Reason RejectionReason
Message string
}
func (e *OpenChannelError) Error() string {
return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message)
}
// ConnMetadata holds metadata for the connection.
type ConnMetadata interface {
// User returns the user ID for this connection.
User() string
// SessionID returns the session hash, also denoted by H.
SessionID() []byte
// ClientVersion returns the client's version string as hashed
// into the session ID.
ClientVersion() []byte
// ServerVersion returns the server's version string as hashed
// into the session ID.
ServerVersion() []byte
// RemoteAddr returns the remote address for this connection.
RemoteAddr() net.Addr
// LocalAddr returns the local address for this connection.
LocalAddr() net.Addr
}
// Conn represents an SSH connection for both server and client roles.
// Conn is the basis for implementing an application layer, such
// as ClientConn, which implements the traditional shell access for
// clients.
type Conn interface {
ConnMetadata
// SendRequest sends a global request, and returns the
// reply. If wantReply is true, it returns the response status
// and payload. See also RFC4254, section 4.
SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error)
// OpenChannel tries to open a channel. If the request is
// rejected, it returns *OpenChannelError. On success it returns
// the SSH Channel and a Go channel for incoming, out-of-band
// requests. The Go channel must be serviced, or the
// connection will hang.
OpenChannel(name string, data []byte) (Channel, <-chan *Request, error)
// Close closes the underlying network connection
Close() error
// Wait blocks until the connection has shut down, and returns the
// error causing the shutdown.
Wait() error
// TODO(hanwen): consider exposing:
// RequestKeyChange
// Disconnect
}
// DiscardRequests consumes and rejects all requests from the
// passed-in channel.
func DiscardRequests(in <-chan *Request) {
for req := range in {
if req.WantReply {
req.Reply(false, nil)
}
}
}
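// Usage sketch (illustrative, not part of the original file; conn is assumed
// to be an established Conn): open a session channel and discard its
// out-of-band requests so the connection does not stall.
func exampleOpenChannel(conn Conn) error {
ch, reqs, err := conn.OpenChannel("session", nil)
if err != nil {
return err
}
go DiscardRequests(reqs)
return ch.Close()
}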
// A connection represents an incoming connection.
type connection struct {
transport *handshakeTransport
sshConn
// The connection protocol.
*mux
}
func (c *connection) Close() error {
return c.sshConn.conn.Close()
}
// sshConn provides net.Conn metadata, but disallows direct reads and
// writes.
type sshConn struct {
conn net.Conn
user string
sessionID []byte
clientVersion []byte
serverVersion []byte
}
func dup(src []byte) []byte {
dst := make([]byte, len(src))
copy(dst, src)
return dst
}
func (c *sshConn) User() string {
return c.user
}
func (c *sshConn) RemoteAddr() net.Addr {
return c.conn.RemoteAddr()
}
func (c *sshConn) Close() error {
return c.conn.Close()
}
func (c *sshConn) LocalAddr() net.Addr {
return c.conn.LocalAddr()
}
func (c *sshConn) SessionID() []byte {
return dup(c.sessionID)
}
func (c *sshConn) ClientVersion() []byte {
return dup(c.clientVersion)
}
func (c *sshConn) ServerVersion() []byte {
return dup(c.serverVersion)
}

18
vendor/golang.org/x/crypto/ssh/doc.go generated vendored Normal file

@ -0,0 +1,18 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package ssh implements an SSH client and server.
SSH is a transport security protocol, an authentication protocol and a
family of application protocols. The most typical application-level
protocol is a remote shell, and this is specifically implemented. However,
the multiplexed nature of SSH is exposed to users who wish to support
others.
References:
[PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
[SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
*/
package ssh // import "golang.org/x/crypto/ssh"
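// Usage sketch (illustrative, not part of the original file; the address,
// user and password are placeholders). Leaving HostKeyCallback unset skips
// host key verification in this version of the package, which is unsafe
// outside of examples.
func exampleDialAndRun() error {
config := &ClientConfig{
User: "username",
Auth: []AuthMethod{Password("password")},
}
client, err := Dial("tcp", "host.example.com:22", config)
if err != nil {
return err
}
defer client.Close()
session, err := client.NewSession()
if err != nil {
return err
}
defer session.Close()
return session.Run("/usr/bin/true")
}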

460
vendor/golang.org/x/crypto/ssh/handshake.go generated vendored Normal file

@ -0,0 +1,460 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"crypto/rand"
"errors"
"fmt"
"io"
"log"
"net"
"sync"
)
// debugHandshake, if set, prints messages sent and received. Key
// exchange messages are printed as if DH were used, so the debug
// messages are wrong when using ECDH.
const debugHandshake = false
// keyingTransport is a packet based transport that supports key
// changes. It need not be thread-safe. It should pass through
// msgNewKeys in both directions.
type keyingTransport interface {
packetConn
// prepareKeyChange sets up a key change. The key change for a
// direction will be effected if a msgNewKeys message is sent
// or received.
prepareKeyChange(*algorithms, *kexResult) error
}
// handshakeTransport implements rekeying on top of a keyingTransport
// and offers a thread-safe writePacket() interface.
type handshakeTransport struct {
conn keyingTransport
config *Config
serverVersion []byte
clientVersion []byte
// hostKeys is non-empty if we are the server. In that case,
// it contains all host keys that can be used to sign the
// connection.
hostKeys []Signer
// hostKeyAlgorithms is non-empty if we are the client. In that case,
// we accept these key types from the server as host key.
hostKeyAlgorithms []string
// On read error, incoming is closed, and readError is set.
incoming chan []byte
readError error
// data for host key checking
hostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
dialAddress string
remoteAddr net.Addr
readSinceKex uint64
// Protects the writing side of the connection
mu sync.Mutex
cond *sync.Cond
sentInitPacket []byte
sentInitMsg *kexInitMsg
writtenSinceKex uint64
writeError error
// The session ID, or nil if the first key exchange has not completed yet.
sessionID []byte
}
func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport {
t := &handshakeTransport{
conn: conn,
serverVersion: serverVersion,
clientVersion: clientVersion,
incoming: make(chan []byte, 16),
config: config,
}
t.cond = sync.NewCond(&t.mu)
return t
}
func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport {
t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
t.dialAddress = dialAddr
t.remoteAddr = addr
t.hostKeyCallback = config.HostKeyCallback
if config.HostKeyAlgorithms != nil {
t.hostKeyAlgorithms = config.HostKeyAlgorithms
} else {
t.hostKeyAlgorithms = supportedHostKeyAlgos
}
go t.readLoop()
return t
}
func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport {
t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
t.hostKeys = config.hostKeys
go t.readLoop()
return t
}
func (t *handshakeTransport) getSessionID() []byte {
return t.sessionID
}
func (t *handshakeTransport) id() string {
if len(t.hostKeys) > 0 {
return "server"
}
return "client"
}
func (t *handshakeTransport) readPacket() ([]byte, error) {
p, ok := <-t.incoming
if !ok {
return nil, t.readError
}
return p, nil
}
func (t *handshakeTransport) readLoop() {
for {
p, err := t.readOnePacket()
if err != nil {
t.readError = err
close(t.incoming)
break
}
if p[0] == msgIgnore || p[0] == msgDebug {
continue
}
t.incoming <- p
}
// If we can't read, declare the writing part dead too.
t.mu.Lock()
defer t.mu.Unlock()
if t.writeError == nil {
t.writeError = t.readError
}
t.cond.Broadcast()
}
func (t *handshakeTransport) readOnePacket() ([]byte, error) {
if t.readSinceKex > t.config.RekeyThreshold {
if err := t.requestKeyChange(); err != nil {
return nil, err
}
}
p, err := t.conn.readPacket()
if err != nil {
return nil, err
}
t.readSinceKex += uint64(len(p))
if debugHandshake {
if p[0] == msgChannelData || p[0] == msgChannelExtendedData {
log.Printf("%s got data (packet %d bytes)", t.id(), len(p))
} else {
msg, err := decode(p)
log.Printf("%s got %T %v (%v)", t.id(), msg, msg, err)
}
}
if p[0] != msgKexInit {
return p, nil
}
t.mu.Lock()
firstKex := t.sessionID == nil
err = t.enterKeyExchangeLocked(p)
if err != nil {
// drop connection
t.conn.Close()
t.writeError = err
}
if debugHandshake {
log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err)
}
// Unblock writers.
t.sentInitMsg = nil
t.sentInitPacket = nil
t.cond.Broadcast()
t.writtenSinceKex = 0
t.mu.Unlock()
if err != nil {
return nil, err
}
t.readSinceKex = 0
// By default, a key exchange is hidden from higher layers by
// translating it into msgIgnore.
successPacket := []byte{msgIgnore}
if firstKex {
// sendKexInit() for the first kex waits for
// msgNewKeys so the authentication process is
// guaranteed to happen over an encrypted transport.
successPacket = []byte{msgNewKeys}
}
return successPacket, nil
}
// keyChangeCategory describes whether a key exchange is the first on a
// connection, or a subsequent one.
type keyChangeCategory bool
const (
firstKeyExchange keyChangeCategory = true
subsequentKeyExchange keyChangeCategory = false
)
// sendKexInit sends a key change message, and returns the message
// that was sent. After initiating the key change, all writes will be
// blocked until the change is done, and a failed key change will
// close the underlying transport. This function is safe for
// concurrent use by multiple goroutines.
func (t *handshakeTransport) sendKexInit(isFirst keyChangeCategory) error {
var err error
t.mu.Lock()
// If this is the initial key change, but we already have a sessionID,
// then do nothing because the key exchange has already completed
// asynchronously.
if !isFirst || t.sessionID == nil {
_, _, err = t.sendKexInitLocked(isFirst)
}
t.mu.Unlock()
if err != nil {
return err
}
if isFirst {
if packet, err := t.readPacket(); err != nil {
return err
} else if packet[0] != msgNewKeys {
return unexpectedMessageError(msgNewKeys, packet[0])
}
}
return nil
}
func (t *handshakeTransport) requestInitialKeyChange() error {
return t.sendKexInit(firstKeyExchange)
}
func (t *handshakeTransport) requestKeyChange() error {
return t.sendKexInit(subsequentKeyExchange)
}
// sendKexInitLocked sends a key change message. t.mu must be locked
// while this happens.
func (t *handshakeTransport) sendKexInitLocked(isFirst keyChangeCategory) (*kexInitMsg, []byte, error) {
// kexInits may be sent either in response to the other side,
// or because our side wants to initiate a key change, so we
// may have already sent a kexInit. In that case, don't send a
// second kexInit.
if t.sentInitMsg != nil {
return t.sentInitMsg, t.sentInitPacket, nil
}
msg := &kexInitMsg{
KexAlgos: t.config.KeyExchanges,
CiphersClientServer: t.config.Ciphers,
CiphersServerClient: t.config.Ciphers,
MACsClientServer: t.config.MACs,
MACsServerClient: t.config.MACs,
CompressionClientServer: supportedCompressions,
CompressionServerClient: supportedCompressions,
}
if _, err := io.ReadFull(rand.Reader, msg.Cookie[:]); err != nil {
return nil, nil, err
}
if len(t.hostKeys) > 0 {
for _, k := range t.hostKeys {
msg.ServerHostKeyAlgos = append(
msg.ServerHostKeyAlgos, k.PublicKey().Type())
}
} else {
msg.ServerHostKeyAlgos = t.hostKeyAlgorithms
}
packet := Marshal(msg)
// writePacket destroys the contents, so save a copy.
packetCopy := make([]byte, len(packet))
copy(packetCopy, packet)
if err := t.conn.writePacket(packetCopy); err != nil {
return nil, nil, err
}
t.sentInitMsg = msg
t.sentInitPacket = packet
return msg, packet, nil
}
func (t *handshakeTransport) writePacket(p []byte) error {
t.mu.Lock()
defer t.mu.Unlock()
if t.writtenSinceKex > t.config.RekeyThreshold {
t.sendKexInitLocked(subsequentKeyExchange)
}
for t.sentInitMsg != nil && t.writeError == nil {
t.cond.Wait()
}
if t.writeError != nil {
return t.writeError
}
t.writtenSinceKex += uint64(len(p))
switch p[0] {
case msgKexInit:
return errors.New("ssh: only handshakeTransport can send kexInit")
case msgNewKeys:
return errors.New("ssh: only handshakeTransport can send newKeys")
default:
return t.conn.writePacket(p)
}
}
func (t *handshakeTransport) Close() error {
return t.conn.Close()
}
// enterKeyExchange runs the key exchange. t.mu must be held while running this.
func (t *handshakeTransport) enterKeyExchangeLocked(otherInitPacket []byte) error {
if debugHandshake {
log.Printf("%s entered key exchange", t.id())
}
myInit, myInitPacket, err := t.sendKexInitLocked(subsequentKeyExchange)
if err != nil {
return err
}
otherInit := &kexInitMsg{}
if err := Unmarshal(otherInitPacket, otherInit); err != nil {
return err
}
magics := handshakeMagics{
clientVersion: t.clientVersion,
serverVersion: t.serverVersion,
clientKexInit: otherInitPacket,
serverKexInit: myInitPacket,
}
clientInit := otherInit
serverInit := myInit
if len(t.hostKeys) == 0 {
clientInit = myInit
serverInit = otherInit
magics.clientKexInit = myInitPacket
magics.serverKexInit = otherInitPacket
}
algs, err := findAgreedAlgorithms(clientInit, serverInit)
if err != nil {
return err
}
// We don't send FirstKexFollows, but we handle receiving it.
//
// RFC 4253 section 7 defines the kex and the agreement method for
// first_kex_packet_follows. It states that the guessed packet
// should be ignored if the "kex algorithm and/or the host
// key algorithm is guessed wrong (server and client have
// different preferred algorithm), or if any of the other
// algorithms cannot be agreed upon". The other algorithms have
// already been checked above so the kex algorithm and host key
// algorithm are checked here.
if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) {
// other side sent a kex message for the wrong algorithm,
// which we have to ignore.
if _, err := t.conn.readPacket(); err != nil {
return err
}
}
kex, ok := kexAlgoMap[algs.kex]
if !ok {
return fmt.Errorf("ssh: unexpected key exchange algorithm %v", algs.kex)
}
var result *kexResult
if len(t.hostKeys) > 0 {
result, err = t.server(kex, algs, &magics)
} else {
result, err = t.client(kex, algs, &magics)
}
if err != nil {
return err
}
if t.sessionID == nil {
t.sessionID = result.H
}
result.SessionID = t.sessionID
if err = t.conn.prepareKeyChange(algs, result); err != nil {
return err
}
if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
return err
}
if packet, err := t.conn.readPacket(); err != nil {
return err
} else if packet[0] != msgNewKeys {
return unexpectedMessageError(msgNewKeys, packet[0])
}
return nil
}
func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
var hostKey Signer
for _, k := range t.hostKeys {
if algs.hostKey == k.PublicKey().Type() {
hostKey = k
}
}
r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey)
return r, err
}
func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
result, err := kex.Client(t.conn, t.config.Rand, magics)
if err != nil {
return nil, err
}
hostKey, err := ParsePublicKey(result.HostKey)
if err != nil {
return nil, err
}
if err := verifyHostKeySignature(hostKey, result); err != nil {
return nil, err
}
if t.hostKeyCallback != nil {
err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey)
if err != nil {
return nil, err
}
}
return result, nil
}

540
vendor/golang.org/x/crypto/ssh/kex.go generated vendored Normal file

@ -0,0 +1,540 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/subtle"
"errors"
"io"
"math/big"
"golang.org/x/crypto/curve25519"
)
const (
kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1"
kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1"
kexAlgoECDH256 = "ecdh-sha2-nistp256"
kexAlgoECDH384 = "ecdh-sha2-nistp384"
kexAlgoECDH521 = "ecdh-sha2-nistp521"
kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org"
)
// kexResult captures the outcome of a key exchange.
type kexResult struct {
// Session hash. See also RFC 4253, section 8.
H []byte
// Shared secret. See also RFC 4253, section 8.
K []byte
// Host key as hashed into H.
HostKey []byte
// Signature of H.
Signature []byte
// A cryptographic hash function that matches the security
// level of the key exchange algorithm. It is used for
// calculating H, and for deriving keys from H and K.
Hash crypto.Hash
// The session ID, which is the first H computed. This is used
// to derive key material inside the transport.
SessionID []byte
}
// handshakeMagics contains data that is always included in the
// session hash.
type handshakeMagics struct {
clientVersion, serverVersion []byte
clientKexInit, serverKexInit []byte
}
func (m *handshakeMagics) write(w io.Writer) {
writeString(w, m.clientVersion)
writeString(w, m.serverVersion)
writeString(w, m.clientKexInit)
writeString(w, m.serverKexInit)
}
// kexAlgorithm abstracts different key exchange algorithms.
type kexAlgorithm interface {
// Server runs server-side key agreement, signing the result
// with a hostkey.
Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error)
// Client runs the client-side key agreement. Caller is
// responsible for verifying the host key signature.
Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error)
}
// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement.
type dhGroup struct {
g, p, pMinus1 *big.Int
}
func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 {
return nil, errors.New("ssh: DH parameter out of bounds")
}
return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil
}
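// Agreement sketch (illustrative, not part of the original file): both sides
// raise the peer's public value to their own secret exponent and obtain the
// same shared secret. The production code above additionally retries until
// the secret exponent is non-zero.
func exampleDHAgree(group *dhGroup, randSource io.Reader) error {
x, err := rand.Int(randSource, group.pMinus1)
if err != nil {
return err
}
y, err := rand.Int(randSource, group.pMinus1)
if err != nil {
return err
}
X := new(big.Int).Exp(group.g, x, group.p)
Y := new(big.Int).Exp(group.g, y, group.p)
k1, err := group.diffieHellman(Y, x)
if err != nil {
return err
}
k2, err := group.diffieHellman(X, y)
if err != nil {
return err
}
if k1.Cmp(k2) != 0 {
return errors.New("ssh: shared secrets differ")
}
return nil
}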
func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
hashFunc := crypto.SHA1
var x *big.Int
for {
var err error
if x, err = rand.Int(randSource, group.pMinus1); err != nil {
return nil, err
}
if x.Sign() > 0 {
break
}
}
X := new(big.Int).Exp(group.g, x, group.p)
kexDHInit := kexDHInitMsg{
X: X,
}
if err := c.writePacket(Marshal(&kexDHInit)); err != nil {
return nil, err
}
packet, err := c.readPacket()
if err != nil {
return nil, err
}
var kexDHReply kexDHReplyMsg
if err = Unmarshal(packet, &kexDHReply); err != nil {
return nil, err
}
kInt, err := group.diffieHellman(kexDHReply.Y, x)
if err != nil {
return nil, err
}
h := hashFunc.New()
magics.write(h)
writeString(h, kexDHReply.HostKey)
writeInt(h, X)
writeInt(h, kexDHReply.Y)
K := make([]byte, intLength(kInt))
marshalInt(K, kInt)
h.Write(K)
return &kexResult{
H: h.Sum(nil),
K: K,
HostKey: kexDHReply.HostKey,
Signature: kexDHReply.Signature,
Hash: crypto.SHA1,
}, nil
}
func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
hashFunc := crypto.SHA1
packet, err := c.readPacket()
if err != nil {
return
}
var kexDHInit kexDHInitMsg
if err = Unmarshal(packet, &kexDHInit); err != nil {
return
}
var y *big.Int
for {
if y, err = rand.Int(randSource, group.pMinus1); err != nil {
return
}
if y.Sign() > 0 {
break
}
}
Y := new(big.Int).Exp(group.g, y, group.p)
kInt, err := group.diffieHellman(kexDHInit.X, y)
if err != nil {
return nil, err
}
hostKeyBytes := priv.PublicKey().Marshal()
h := hashFunc.New()
magics.write(h)
writeString(h, hostKeyBytes)
writeInt(h, kexDHInit.X)
writeInt(h, Y)
K := make([]byte, intLength(kInt))
marshalInt(K, kInt)
h.Write(K)
H := h.Sum(nil)
// H is already a hash, but the hostkey signing will apply its
// own key-specific hash algorithm.
sig, err := signAndMarshal(priv, randSource, H)
if err != nil {
return nil, err
}
kexDHReply := kexDHReplyMsg{
HostKey: hostKeyBytes,
Y: Y,
Signature: sig,
}
packet = Marshal(&kexDHReply)
err = c.writePacket(packet)
return &kexResult{
H: H,
K: K,
HostKey: hostKeyBytes,
Signature: sig,
Hash: crypto.SHA1,
}, nil
}
// ecdh performs Elliptic Curve Diffie-Hellman key exchange as
// described in RFC 5656, section 4.
type ecdh struct {
curve elliptic.Curve
}
func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
if err != nil {
return nil, err
}
kexInit := kexECDHInitMsg{
ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y),
}
serialized := Marshal(&kexInit)
if err := c.writePacket(serialized); err != nil {
return nil, err
}
packet, err := c.readPacket()
if err != nil {
return nil, err
}
var reply kexECDHReplyMsg
if err = Unmarshal(packet, &reply); err != nil {
return nil, err
}
x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey)
if err != nil {
return nil, err
}
// generate shared secret
secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes())
h := ecHash(kex.curve).New()
magics.write(h)
writeString(h, reply.HostKey)
writeString(h, kexInit.ClientPubKey)
writeString(h, reply.EphemeralPubKey)
K := make([]byte, intLength(secret))
marshalInt(K, secret)
h.Write(K)
return &kexResult{
H: h.Sum(nil),
K: K,
HostKey: reply.HostKey,
Signature: reply.Signature,
Hash: ecHash(kex.curve),
}, nil
}
// unmarshalECKey parses and checks an EC key.
func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) {
x, y = elliptic.Unmarshal(curve, pubkey)
if x == nil {
return nil, nil, errors.New("ssh: elliptic.Unmarshal failure")
}
if !validateECPublicKey(curve, x, y) {
return nil, nil, errors.New("ssh: public key not on curve")
}
return x, y, nil
}
// validateECPublicKey checks that the point is a valid public key for
// the given curve. See [SEC1], 3.2.2
func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool {
if x.Sign() == 0 && y.Sign() == 0 {
return false
}
if x.Cmp(curve.Params().P) >= 0 {
return false
}
if y.Cmp(curve.Params().P) >= 0 {
return false
}
if !curve.IsOnCurve(x, y) {
return false
}
// We don't check if N * PubKey == 0, since
//
// - the NIST curves have cofactor = 1, so this is implicit.
// (We don't foresee an implementation that supports non NIST
// curves)
//
// - for ephemeral keys, we don't need to worry about small
// subgroup attacks.
return true
}
func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
packet, err := c.readPacket()
if err != nil {
return nil, err
}
var kexECDHInit kexECDHInitMsg
if err = Unmarshal(packet, &kexECDHInit); err != nil {
return nil, err
}
clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey)
if err != nil {
return nil, err
}
// We could cache this key across multiple users/multiple
// connection attempts, but the benefit is small. OpenSSH
// generates a new key for each incoming connection.
ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
if err != nil {
return nil, err
}
hostKeyBytes := priv.PublicKey().Marshal()
serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y)
// generate shared secret
secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes())
h := ecHash(kex.curve).New()
magics.write(h)
writeString(h, hostKeyBytes)
writeString(h, kexECDHInit.ClientPubKey)
writeString(h, serializedEphKey)
K := make([]byte, intLength(secret))
marshalInt(K, secret)
h.Write(K)
H := h.Sum(nil)
// H is already a hash, but the hostkey signing will apply its
// own key-specific hash algorithm.
sig, err := signAndMarshal(priv, rand, H)
if err != nil {
return nil, err
}
reply := kexECDHReplyMsg{
EphemeralPubKey: serializedEphKey,
HostKey: hostKeyBytes,
Signature: sig,
}
serialized := Marshal(&reply)
if err := c.writePacket(serialized); err != nil {
return nil, err
}
return &kexResult{
H: H,
K: K,
HostKey: reply.HostKey,
Signature: sig,
Hash: ecHash(kex.curve),
}, nil
}
var kexAlgoMap = map[string]kexAlgorithm{}
func init() {
// This is the group called diffie-hellman-group1-sha1 in RFC
// 4253 and Oakley Group 2 in RFC 2409.
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16)
kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{
g: new(big.Int).SetInt64(2),
p: p,
pMinus1: new(big.Int).Sub(p, bigOne),
}
// This is the group called diffie-hellman-group14-sha1 in RFC
// 4253 and Oakley Group 14 in RFC 3526.
p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{
g: new(big.Int).SetInt64(2),
p: p,
pMinus1: new(big.Int).Sub(p, bigOne),
}
kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()}
kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()}
kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()}
kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{}
}
// curve25519sha256 implements the curve25519-sha256@libssh.org key
// agreement protocol, as described in
// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt
type curve25519sha256 struct{}
type curve25519KeyPair struct {
priv [32]byte
pub [32]byte
}
func (kp *curve25519KeyPair) generate(rand io.Reader) error {
if _, err := io.ReadFull(rand, kp.priv[:]); err != nil {
return err
}
curve25519.ScalarBaseMult(&kp.pub, &kp.priv)
return nil
}
// curve25519Zeros is just an array of 32 zero bytes so that we have something
// convenient to compare against in order to reject curve25519 points with the
// wrong order.
var curve25519Zeros [32]byte
func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
var kp curve25519KeyPair
if err := kp.generate(rand); err != nil {
return nil, err
}
if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil {
return nil, err
}
packet, err := c.readPacket()
if err != nil {
return nil, err
}
var reply kexECDHReplyMsg
if err = Unmarshal(packet, &reply); err != nil {
return nil, err
}
if len(reply.EphemeralPubKey) != 32 {
return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
}
var servPub, secret [32]byte
copy(servPub[:], reply.EphemeralPubKey)
curve25519.ScalarMult(&secret, &kp.priv, &servPub)
if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
}
h := crypto.SHA256.New()
magics.write(h)
writeString(h, reply.HostKey)
writeString(h, kp.pub[:])
writeString(h, reply.EphemeralPubKey)
kInt := new(big.Int).SetBytes(secret[:])
K := make([]byte, intLength(kInt))
marshalInt(K, kInt)
h.Write(K)
return &kexResult{
H: h.Sum(nil),
K: K,
HostKey: reply.HostKey,
Signature: reply.Signature,
Hash: crypto.SHA256,
}, nil
}
func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
packet, err := c.readPacket()
if err != nil {
return
}
var kexInit kexECDHInitMsg
if err = Unmarshal(packet, &kexInit); err != nil {
return
}
if len(kexInit.ClientPubKey) != 32 {
return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
}
var kp curve25519KeyPair
if err := kp.generate(rand); err != nil {
return nil, err
}
var clientPub, secret [32]byte
copy(clientPub[:], kexInit.ClientPubKey)
curve25519.ScalarMult(&secret, &kp.priv, &clientPub)
if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
}
hostKeyBytes := priv.PublicKey().Marshal()
h := crypto.SHA256.New()
magics.write(h)
writeString(h, hostKeyBytes)
writeString(h, kexInit.ClientPubKey)
writeString(h, kp.pub[:])
kInt := new(big.Int).SetBytes(secret[:])
K := make([]byte, intLength(kInt))
marshalInt(K, kInt)
h.Write(K)
H := h.Sum(nil)
sig, err := signAndMarshal(priv, rand, H)
if err != nil {
return nil, err
}
reply := kexECDHReplyMsg{
EphemeralPubKey: kp.pub[:],
HostKey: hostKeyBytes,
Signature: sig,
}
if err := c.writePacket(Marshal(&reply)); err != nil {
return nil, err
}
return &kexResult{
H: H,
K: K,
HostKey: hostKeyBytes,
Signature: sig,
Hash: crypto.SHA256,
}, nil
}
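// Agreement sketch (illustrative, not part of the original file): two
// curve25519 key pairs derive the same shared secret from their own private
// scalar and the peer's public value, mirroring the Client and Server
// methods above.
func exampleCurve25519Agree(randSource io.Reader) error {
var a, b curve25519KeyPair
if err := a.generate(randSource); err != nil {
return err
}
if err := b.generate(randSource); err != nil {
return err
}
var s1, s2 [32]byte
curve25519.ScalarMult(&s1, &a.priv, &b.pub)
curve25519.ScalarMult(&s2, &b.priv, &a.pub)
if subtle.ConstantTimeCompare(s1[:], s2[:]) != 1 {
return errors.New("ssh: shared secrets differ")
}
return nil
}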

880
vendor/golang.org/x/crypto/ssh/keys.go generated vendored Normal file

@ -0,0 +1,880 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"bytes"
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/x509"
"encoding/asn1"
"encoding/base64"
"encoding/pem"
"errors"
"fmt"
"io"
"math/big"
"strings"
"golang.org/x/crypto/ed25519"
)
// These constants represent the algorithm names for key types supported by this
// package.
const (
KeyAlgoRSA = "ssh-rsa"
KeyAlgoDSA = "ssh-dss"
KeyAlgoECDSA256 = "ecdsa-sha2-nistp256"
KeyAlgoECDSA384 = "ecdsa-sha2-nistp384"
KeyAlgoECDSA521 = "ecdsa-sha2-nistp521"
KeyAlgoED25519 = "ssh-ed25519"
)
// parsePubKey parses a public key of the given algorithm.
// Use ParsePublicKey for keys with prepended algorithm.
func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) {
switch algo {
case KeyAlgoRSA:
return parseRSA(in)
case KeyAlgoDSA:
return parseDSA(in)
case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521:
return parseECDSA(in)
case KeyAlgoED25519:
return parseED25519(in)
case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01:
cert, err := parseCert(in, certToPrivAlgo(algo))
if err != nil {
return nil, nil, err
}
return cert, nil, nil
}
return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo)
}
// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format
// (see sshd(8) manual page) once the options and key type fields have been
// removed.
func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) {
in = bytes.TrimSpace(in)
i := bytes.IndexAny(in, " \t")
if i == -1 {
i = len(in)
}
base64Key := in[:i]
key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key)))
n, err := base64.StdEncoding.Decode(key, base64Key)
if err != nil {
return nil, "", err
}
key = key[:n]
out, err = ParsePublicKey(key)
if err != nil {
return nil, "", err
}
comment = string(bytes.TrimSpace(in[i:]))
return out, comment, nil
}
// ParseKnownHosts parses an entry in the format of the known_hosts file.
//
// The known_hosts format is documented in the sshd(8) manual page. This
// function will parse a single entry from in. On successful return, marker
// will contain the optional marker value (i.e. "cert-authority" or "revoked")
// or else be empty, hosts will contain the hosts that this entry matches,
// pubKey will contain the public key and comment will contain any trailing
// comment at the end of the line. See the sshd(8) manual page for the various
// forms that a host string can take.
//
// The unparsed remainder of the input will be returned in rest. This function
// can be called repeatedly to parse multiple entries.
//
// If no entries were found in the input then err will be io.EOF. Otherwise a
// non-nil err value indicates a parse error.
func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) {
for len(in) > 0 {
end := bytes.IndexByte(in, '\n')
if end != -1 {
rest = in[end+1:]
in = in[:end]
} else {
rest = nil
}
end = bytes.IndexByte(in, '\r')
if end != -1 {
in = in[:end]
}
in = bytes.TrimSpace(in)
if len(in) == 0 || in[0] == '#' {
in = rest
continue
}
i := bytes.IndexAny(in, " \t")
if i == -1 {
in = rest
continue
}
// Strip out the beginning of the known_hosts line.
// This is either an optional marker or a (set of) hostname(s).
keyFields := bytes.Fields(in)
if len(keyFields) < 3 || len(keyFields) > 5 {
return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data")
}
// keyFields[0] is either "@cert-authority", "@revoked" or a comma separated
// list of hosts
marker := ""
if keyFields[0][0] == '@' {
marker = string(keyFields[0][1:])
keyFields = keyFields[1:]
}
hosts := string(keyFields[0])
// keyFields[1] contains the key type (e.g. "ssh-rsa").
// However, that information is duplicated inside the
// base64-encoded key and so is ignored here.
key := bytes.Join(keyFields[2:], []byte(" "))
if pubKey, comment, err = parseAuthorizedKey(key); err != nil {
return "", nil, nil, "", nil, err
}
return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil
}
return "", nil, nil, "", nil, io.EOF
}
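// Iteration sketch (illustrative, not part of the original file):
// ParseKnownHosts consumes one entry per call, so loop until io.EOF to read
// every key in a known_hosts file.
func exampleKnownHosts(data []byte) ([]PublicKey, error) {
var keys []PublicKey
for len(data) > 0 {
_, _, pub, _, rest, err := ParseKnownHosts(data)
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
keys = append(keys, pub)
data = rest
}
return keys, nil
}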
// ParseAuthorizedKey parses a public key from an authorized_keys
// file used in OpenSSH according to the sshd(8) manual page.
func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) {
for len(in) > 0 {
end := bytes.IndexByte(in, '\n')
if end != -1 {
rest = in[end+1:]
in = in[:end]
} else {
rest = nil
}
end = bytes.IndexByte(in, '\r')
if end != -1 {
in = in[:end]
}
in = bytes.TrimSpace(in)
if len(in) == 0 || in[0] == '#' {
in = rest
continue
}
i := bytes.IndexAny(in, " \t")
if i == -1 {
in = rest
continue
}
if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
return out, comment, options, rest, nil
}
// No key type recognised. Maybe there's an options field at
// the beginning.
var b byte
inQuote := false
var candidateOptions []string
optionStart := 0
for i, b = range in {
isEnd := !inQuote && (b == ' ' || b == '\t')
if (b == ',' && !inQuote) || isEnd {
if i-optionStart > 0 {
candidateOptions = append(candidateOptions, string(in[optionStart:i]))
}
optionStart = i + 1
}
if isEnd {
break
}
if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) {
inQuote = !inQuote
}
}
for i < len(in) && (in[i] == ' ' || in[i] == '\t') {
i++
}
if i == len(in) {
// Invalid line: unmatched quote
in = rest
continue
}
in = in[i:]
i = bytes.IndexAny(in, " \t")
if i == -1 {
in = rest
continue
}
if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
options = candidateOptions
return out, comment, options, rest, nil
}
in = rest
continue
}
return nil, "", nil, nil, errors.New("ssh: no key found")
}
// ParsePublicKey parses an SSH public key formatted for use in
// the SSH wire protocol according to RFC 4253, section 6.6.
func ParsePublicKey(in []byte) (out PublicKey, err error) {
algo, in, ok := parseString(in)
if !ok {
return nil, errShortRead
}
var rest []byte
out, rest, err = parsePubKey(in, string(algo))
if len(rest) > 0 {
return nil, errors.New("ssh: trailing junk in public key")
}
return out, err
}
// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH
// authorized_keys file. The return value ends with newline.
func MarshalAuthorizedKey(key PublicKey) []byte {
b := &bytes.Buffer{}
b.WriteString(key.Type())
b.WriteByte(' ')
e := base64.NewEncoder(base64.StdEncoding, b)
e.Write(key.Marshal())
e.Close()
b.WriteByte('\n')
return b.Bytes()
}
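// Round-trip sketch (illustrative, not part of the original file): parse an
// authorized_keys line and serialize the key again; the key type and base64
// blob of the output match the input.
func exampleAuthorizedKeyRoundTrip(line []byte) ([]byte, string, error) {
pub, comment, _, _, err := ParseAuthorizedKey(line)
if err != nil {
return nil, "", err
}
return MarshalAuthorizedKey(pub), comment, nil
}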
// PublicKey is an abstraction of different types of public keys.
type PublicKey interface {
// Type returns the key's type, e.g. "ssh-rsa".
Type() string
// Marshal returns the serialized key data in SSH wire format,
// with the name prefix.
Marshal() []byte
// Verify that sig is a signature on the given data using this
// key. This function will hash the data appropriately first.
Verify(data []byte, sig *Signature) error
}
// CryptoPublicKey, if implemented by a PublicKey,
// returns the underlying crypto.PublicKey form of the key.
type CryptoPublicKey interface {
CryptoPublicKey() crypto.PublicKey
}
// A Signer can create signatures that verify against a public key.
type Signer interface {
// PublicKey returns an associated PublicKey instance.
PublicKey() PublicKey
// Sign returns raw signature for the given data. This method
// will apply the hash specified for the keytype to the data.
Sign(rand io.Reader, data []byte) (*Signature, error)
}
type rsaPublicKey rsa.PublicKey
func (r *rsaPublicKey) Type() string {
return "ssh-rsa"
}
// parseRSA parses an RSA key according to RFC 4253, section 6.6.
func parseRSA(in []byte) (out PublicKey, rest []byte, err error) {
var w struct {
E *big.Int
N *big.Int
Rest []byte `ssh:"rest"`
}
if err := Unmarshal(in, &w); err != nil {
return nil, nil, err
}
if w.E.BitLen() > 24 {
return nil, nil, errors.New("ssh: exponent too large")
}
e := w.E.Int64()
if e < 3 || e&1 == 0 {
return nil, nil, errors.New("ssh: incorrect exponent")
}
var key rsa.PublicKey
key.E = int(e)
key.N = w.N
return (*rsaPublicKey)(&key), w.Rest, nil
}
func (r *rsaPublicKey) Marshal() []byte {
e := new(big.Int).SetInt64(int64(r.E))
// RSA publickey struct layout should match the struct used by
// parseRSACert in the x/crypto/ssh/agent package.
wirekey := struct {
Name string
E *big.Int
N *big.Int
}{
KeyAlgoRSA,
e,
r.N,
}
return Marshal(&wirekey)
}
func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != r.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
}
h := crypto.SHA1.New()
h.Write(data)
digest := h.Sum(nil)
return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob)
}
func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
return (*rsa.PublicKey)(r)
}
type dsaPublicKey dsa.PublicKey
func (r *dsaPublicKey) Type() string {
return "ssh-dss"
}
// parseDSA parses a DSA key according to RFC 4253, section 6.6.
func parseDSA(in []byte) (out PublicKey, rest []byte, err error) {
var w struct {
P, Q, G, Y *big.Int
Rest []byte `ssh:"rest"`
}
if err := Unmarshal(in, &w); err != nil {
return nil, nil, err
}
key := &dsaPublicKey{
Parameters: dsa.Parameters{
P: w.P,
Q: w.Q,
G: w.G,
},
Y: w.Y,
}
return key, w.Rest, nil
}
func (k *dsaPublicKey) Marshal() []byte {
// DSA publickey struct layout should match the struct used by
// parseDSACert in the x/crypto/ssh/agent package.
w := struct {
Name string
P, Q, G, Y *big.Int
}{
k.Type(),
k.P,
k.Q,
k.G,
k.Y,
}
return Marshal(&w)
}
func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
h := crypto.SHA1.New()
h.Write(data)
digest := h.Sum(nil)
// Per RFC 4253, section 6.6,
// The value for 'dss_signature_blob' is encoded as a string containing
// r, followed by s (which are 160-bit integers, without lengths or
// padding, unsigned, and in network byte order).
// For DSS purposes, sig.Blob should be exactly 40 bytes in length.
if len(sig.Blob) != 40 {
return errors.New("ssh: DSA signature parse error")
}
r := new(big.Int).SetBytes(sig.Blob[:20])
s := new(big.Int).SetBytes(sig.Blob[20:])
if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) {
return nil
}
return errors.New("ssh: signature did not verify")
}
func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey {
return (*dsa.PublicKey)(k)
}
type dsaPrivateKey struct {
*dsa.PrivateKey
}
func (k *dsaPrivateKey) PublicKey() PublicKey {
return (*dsaPublicKey)(&k.PrivateKey.PublicKey)
}
func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) {
h := crypto.SHA1.New()
h.Write(data)
digest := h.Sum(nil)
r, s, err := dsa.Sign(rand, k.PrivateKey, digest)
if err != nil {
return nil, err
}
sig := make([]byte, 40)
rb := r.Bytes()
sb := s.Bytes()
copy(sig[20-len(rb):20], rb)
copy(sig[40-len(sb):], sb)
return &Signature{
Format: k.PublicKey().Type(),
Blob: sig,
}, nil
}
type ecdsaPublicKey ecdsa.PublicKey
func (key *ecdsaPublicKey) Type() string {
return "ecdsa-sha2-" + key.nistID()
}
func (key *ecdsaPublicKey) nistID() string {
switch key.Params().BitSize {
case 256:
return "nistp256"
case 384:
return "nistp384"
case 521:
return "nistp521"
}
panic("ssh: unsupported ecdsa key size")
}
type ed25519PublicKey ed25519.PublicKey
func (key ed25519PublicKey) Type() string {
return KeyAlgoED25519
}
func parseED25519(in []byte) (out PublicKey, rest []byte, err error) {
var w struct {
KeyBytes []byte
Rest []byte `ssh:"rest"`
}
if err := Unmarshal(in, &w); err != nil {
return nil, nil, err
}
key := ed25519.PublicKey(w.KeyBytes)
return (ed25519PublicKey)(key), w.Rest, nil
}
func (key ed25519PublicKey) Marshal() []byte {
w := struct {
Name string
KeyBytes []byte
}{
KeyAlgoED25519,
[]byte(key),
}
return Marshal(&w)
}
func (key ed25519PublicKey) Verify(b []byte, sig *Signature) error {
if sig.Format != key.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type())
}
edKey := (ed25519.PublicKey)(key)
if ok := ed25519.Verify(edKey, b, sig.Blob); !ok {
return errors.New("ssh: signature did not verify")
}
return nil
}
func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey {
return ed25519.PublicKey(k)
}
func supportedEllipticCurve(curve elliptic.Curve) bool {
return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521()
}
// ecHash returns the hash to match the given elliptic curve, see RFC
// 5656, section 6.2.1
func ecHash(curve elliptic.Curve) crypto.Hash {
bitSize := curve.Params().BitSize
switch {
case bitSize <= 256:
return crypto.SHA256
case bitSize <= 384:
return crypto.SHA384
}
return crypto.SHA512
}
// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1.
func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) {
var w struct {
Curve string
KeyBytes []byte
Rest []byte `ssh:"rest"`
}
if err := Unmarshal(in, &w); err != nil {
return nil, nil, err
}
key := new(ecdsa.PublicKey)
switch w.Curve {
case "nistp256":
key.Curve = elliptic.P256()
case "nistp384":
key.Curve = elliptic.P384()
case "nistp521":
key.Curve = elliptic.P521()
default:
return nil, nil, errors.New("ssh: unsupported curve")
}
key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes)
if key.X == nil || key.Y == nil {
return nil, nil, errors.New("ssh: invalid curve point")
}
return (*ecdsaPublicKey)(key), w.Rest, nil
}
func (key *ecdsaPublicKey) Marshal() []byte {
// See RFC 5656, section 3.1.
keyBytes := elliptic.Marshal(key.Curve, key.X, key.Y)
// ECDSA publickey struct layout should match the struct used by
// parseECDSACert in the x/crypto/ssh/agent package.
w := struct {
Name string
ID string
Key []byte
}{
key.Type(),
key.nistID(),
keyBytes,
}
return Marshal(&w)
}
func (key *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != key.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type())
}
h := ecHash(key.Curve).New()
h.Write(data)
digest := h.Sum(nil)
// Per RFC 5656, section 3.1.2,
// The ecdsa_signature_blob value has the following specific encoding:
// mpint r
// mpint s
var ecSig struct {
R *big.Int
S *big.Int
}
if err := Unmarshal(sig.Blob, &ecSig); err != nil {
return err
}
if ecdsa.Verify((*ecdsa.PublicKey)(key), digest, ecSig.R, ecSig.S) {
return nil
}
return errors.New("ssh: signature did not verify")
}
func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey {
return (*ecdsa.PublicKey)(k)
}
// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey,
// *ecdsa.PrivateKey or any other crypto.Signer and returns a corresponding
// Signer instance. ECDSA keys must use P-256, P-384 or P-521.
func NewSignerFromKey(key interface{}) (Signer, error) {
switch key := key.(type) {
case crypto.Signer:
return NewSignerFromSigner(key)
case *dsa.PrivateKey:
return &dsaPrivateKey{key}, nil
default:
return nil, fmt.Errorf("ssh: unsupported key type %T", key)
}
}
type wrappedSigner struct {
signer crypto.Signer
pubKey PublicKey
}
// NewSignerFromSigner takes any crypto.Signer implementation and
// returns a corresponding Signer interface. This can be used, for
// example, with keys kept in hardware modules.
func NewSignerFromSigner(signer crypto.Signer) (Signer, error) {
pubKey, err := NewPublicKey(signer.Public())
if err != nil {
return nil, err
}
return &wrappedSigner{signer, pubKey}, nil
}
func (s *wrappedSigner) PublicKey() PublicKey {
return s.pubKey
}
func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
var hashFunc crypto.Hash
switch key := s.pubKey.(type) {
case *rsaPublicKey, *dsaPublicKey:
hashFunc = crypto.SHA1
case *ecdsaPublicKey:
hashFunc = ecHash(key.Curve)
case ed25519PublicKey:
default:
return nil, fmt.Errorf("ssh: unsupported key type %T", key)
}
var digest []byte
if hashFunc != 0 {
h := hashFunc.New()
h.Write(data)
digest = h.Sum(nil)
} else {
digest = data
}
signature, err := s.signer.Sign(rand, digest, hashFunc)
if err != nil {
return nil, err
}
// crypto.Signer.Sign is expected to return an ASN.1-encoded signature
// for ECDSA and DSA, but that's not the encoding expected by SSH, so
// re-encode.
switch s.pubKey.(type) {
case *ecdsaPublicKey, *dsaPublicKey:
type asn1Signature struct {
R, S *big.Int
}
asn1Sig := new(asn1Signature)
_, err := asn1.Unmarshal(signature, asn1Sig)
if err != nil {
return nil, err
}
switch s.pubKey.(type) {
case *ecdsaPublicKey:
signature = Marshal(asn1Sig)
case *dsaPublicKey:
signature = make([]byte, 40)
r := asn1Sig.R.Bytes()
s := asn1Sig.S.Bytes()
copy(signature[20-len(r):20], r)
copy(signature[40-len(s):40], s)
}
}
return &Signature{
Format: s.pubKey.Type(),
Blob: signature,
}, nil
}
// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey,
// or ed25519.PublicKey and returns a corresponding PublicKey instance.
// ECDSA keys must use P-256, P-384 or P-521.
func NewPublicKey(key interface{}) (PublicKey, error) {
switch key := key.(type) {
case *rsa.PublicKey:
return (*rsaPublicKey)(key), nil
case *ecdsa.PublicKey:
if !supportedEllipticCurve(key.Curve) {
return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported.")
}
return (*ecdsaPublicKey)(key), nil
case *dsa.PublicKey:
return (*dsaPublicKey)(key), nil
case ed25519.PublicKey:
return (ed25519PublicKey)(key), nil
default:
return nil, fmt.Errorf("ssh: unsupported key type %T", key)
}
}
// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports
// the same keys as ParseRawPrivateKey.
func ParsePrivateKey(pemBytes []byte) (Signer, error) {
key, err := ParseRawPrivateKey(pemBytes)
if err != nil {
return nil, err
}
return NewSignerFromKey(key)
}
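// Signing sketch (illustrative, not part of the original file): a Signer
// obtained from a PEM private key signs arbitrary data, and the matching
// PublicKey verifies the signature.
func exampleSignVerify(pemBytes, data []byte, randSource io.Reader) error {
signer, err := ParsePrivateKey(pemBytes)
if err != nil {
return err
}
sig, err := signer.Sign(randSource, data)
if err != nil {
return err
}
return signer.PublicKey().Verify(data, sig)
}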
// encryptedBlock tells whether a private key is
// encrypted by examining its Proc-Type header
// for a mention of ENCRYPTED
// according to RFC 1421 Section 4.6.1.1.
func encryptedBlock(block *pem.Block) bool {
return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED")
}
// ParseRawPrivateKey returns a private key from a PEM encoded private key. It
// supports RSA (PKCS#1), DSA (OpenSSL), ECDSA and ed25519 (OpenSSH format) private keys.
func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
block, _ := pem.Decode(pemBytes)
if block == nil {
return nil, errors.New("ssh: no key found")
}
if encryptedBlock(block) {
return nil, errors.New("ssh: cannot decode encrypted private keys")
}
switch block.Type {
case "RSA PRIVATE KEY":
return x509.ParsePKCS1PrivateKey(block.Bytes)
case "EC PRIVATE KEY":
return x509.ParseECPrivateKey(block.Bytes)
case "DSA PRIVATE KEY":
return ParseDSAPrivateKey(block.Bytes)
case "OPENSSH PRIVATE KEY":
return parseOpenSSHPrivateKey(block.Bytes)
default:
return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
}
}
// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as
// specified by the OpenSSL DSA man page.
func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
// The DER layout is SEQUENCE { version, p, q, g, pub, priv }, so the
// struct fields below must be declared in that order.
var k struct {
Version int
P *big.Int
Q *big.Int
G *big.Int
Pub *big.Int
Priv *big.Int
}
rest, err := asn1.Unmarshal(der, &k)
if err != nil {
return nil, errors.New("ssh: failed to parse DSA key: " + err.Error())
}
if len(rest) > 0 {
return nil, errors.New("ssh: garbage after DSA key")
}
return &dsa.PrivateKey{
PublicKey: dsa.PublicKey{
Parameters: dsa.Parameters{
P: k.P,
Q: k.Q,
G: k.G,
},
Y: k.Pub,
},
X: k.Priv,
}, nil
}
// Implemented based on the documentation at
// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
func parseOpenSSHPrivateKey(key []byte) (*ed25519.PrivateKey, error) {
magic := append([]byte("openssh-key-v1"), 0)
// Guard against short input before comparing the magic prefix.
if len(key) < len(magic) || !bytes.Equal(magic, key[:len(magic)]) {
return nil, errors.New("ssh: invalid openssh private key format")
}
remaining := key[len(magic):]
var w struct {
CipherName string
KdfName string
KdfOpts string
NumKeys uint32
PubKey []byte
PrivKeyBlock []byte
}
if err := Unmarshal(remaining, &w); err != nil {
return nil, err
}
pk1 := struct {
Check1 uint32
Check2 uint32
Keytype string
Pub []byte
Priv []byte
Comment string
Pad []byte `ssh:"rest"`
}{}
if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil {
return nil, err
}
if pk1.Check1 != pk1.Check2 {
return nil, errors.New("ssh: checkint mismatch")
}
// we only handle ed25519 keys currently
if pk1.Keytype != KeyAlgoED25519 {
return nil, errors.New("ssh: unhandled key type")
}
for i, b := range pk1.Pad {
if int(b) != i+1 {
return nil, errors.New("ssh: padding not as expected")
}
}
if len(pk1.Priv) != ed25519.PrivateKeySize {
return nil, errors.New("ssh: private key unexpected length")
}
pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize))
copy(pk, pk1.Priv)
return &pk, nil
}

57
vendor/golang.org/x/crypto/ssh/mac.go generated vendored Normal file

@ -0,0 +1,57 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
// Message authentication support
import (
"crypto/hmac"
"crypto/sha1"
"crypto/sha256"
"hash"
)
type macMode struct {
keySize int
new func(key []byte) hash.Hash
}
// truncatingMAC wraps around a hash.Hash and truncates the output digest to
// a given size.
type truncatingMAC struct {
length int
hmac hash.Hash
}
func (t truncatingMAC) Write(data []byte) (int, error) {
return t.hmac.Write(data)
}
func (t truncatingMAC) Sum(in []byte) []byte {
out := t.hmac.Sum(in)
return out[:len(in)+t.length]
}
func (t truncatingMAC) Reset() {
t.hmac.Reset()
}
func (t truncatingMAC) Size() int {
return t.length
}
func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
var macModes = map[string]*macMode{
"hmac-sha2-256": {32, func(key []byte) hash.Hash {
return hmac.New(sha256.New, key)
}},
"hmac-sha1": {20, func(key []byte) hash.Hash {
return hmac.New(sha1.New, key)
}},
"hmac-sha1-96": {20, func(key []byte) hash.Hash {
return truncatingMAC{12, hmac.New(sha1.New, key)}
}},
}
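// Usage sketch (illustrative, not part of the original file): the negotiated
// MAC name selects a constructor from macModes; hmac-sha1-96 is plain
// HMAC-SHA1 with the digest truncated to 12 bytes.
func exampleMAC(key, packet []byte) []byte {
mac := macModes["hmac-sha1-96"].new(key)
mac.Write(packet)
return mac.Sum(nil) // 12-byte tag
}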

758
vendor/golang.org/x/crypto/ssh/messages.go generated vendored Normal file

@ -0,0 +1,758 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math/big"
"reflect"
"strconv"
"strings"
)
// These are SSH message type numbers. They are scattered around several
// documents but many were taken from [SSH-PARAMETERS].
const (
msgIgnore = 2
msgUnimplemented = 3
msgDebug = 4
msgNewKeys = 21
// Standard authentication messages
msgUserAuthSuccess = 52
msgUserAuthBanner = 53
)
// SSH messages:
//
// These structures mirror the wire format of the corresponding SSH messages.
// They are marshaled using reflection with the marshal and unmarshal functions
// in this file. The only wrinkle is that a final member of type []byte with a
// ssh tag of "rest" receives the remainder of a packet when unmarshaling.
// See RFC 4253, section 11.1.
const msgDisconnect = 1
// disconnectMsg is the message that signals a disconnect. It is also
// the error type returned from mux.Wait()
type disconnectMsg struct {
Reason uint32 `sshtype:"1"`
Message string
Language string
}
func (d *disconnectMsg) Error() string {
return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message)
}
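// Marshaling sketch (illustrative, not part of the original file): Marshal
// emits the sshtype byte declared in the struct tag, and Unmarshal verifies
// it while decoding, so a message survives a round trip unchanged.
func exampleDisconnectRoundTrip() (*disconnectMsg, error) {
in := disconnectMsg{Reason: 11, Message: "closing"}
var out disconnectMsg
if err := Unmarshal(Marshal(&in), &out); err != nil {
return nil, err
}
return &out, nil
}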
// See RFC 4253, section 7.1.
const msgKexInit = 20
type kexInitMsg struct {
Cookie [16]byte `sshtype:"20"`
KexAlgos []string
ServerHostKeyAlgos []string
CiphersClientServer []string
CiphersServerClient []string
MACsClientServer []string
MACsServerClient []string
CompressionClientServer []string
CompressionServerClient []string
LanguagesClientServer []string
LanguagesServerClient []string
FirstKexFollows bool
Reserved uint32
}
// See RFC 4253, section 8.
// Diffie-Hellman
const msgKexDHInit = 30
type kexDHInitMsg struct {
X *big.Int `sshtype:"30"`
}
const msgKexECDHInit = 30
type kexECDHInitMsg struct {
ClientPubKey []byte `sshtype:"30"`
}
const msgKexECDHReply = 31
type kexECDHReplyMsg struct {
HostKey []byte `sshtype:"31"`
EphemeralPubKey []byte
Signature []byte
}
const msgKexDHReply = 31
type kexDHReplyMsg struct {
HostKey []byte `sshtype:"31"`
Y *big.Int
Signature []byte
}
// See RFC 4253, section 10.
const msgServiceRequest = 5
type serviceRequestMsg struct {
Service string `sshtype:"5"`
}
// See RFC 4253, section 10.
const msgServiceAccept = 6
type serviceAcceptMsg struct {
Service string `sshtype:"6"`
}
// See RFC 4252, section 5.
const msgUserAuthRequest = 50
type userAuthRequestMsg struct {
User string `sshtype:"50"`
Service string
Method string
Payload []byte `ssh:"rest"`
}
// Used for debug printouts of packets.
type userAuthSuccessMsg struct {
}
// See RFC 4252, section 5.1
const msgUserAuthFailure = 51
type userAuthFailureMsg struct {
Methods []string `sshtype:"51"`
PartialSuccess bool
}
// See RFC 4256, section 3.2
const msgUserAuthInfoRequest = 60
const msgUserAuthInfoResponse = 61
type userAuthInfoRequestMsg struct {
User string `sshtype:"60"`
Instruction string
DeprecatedLanguage string
NumPrompts uint32
Prompts []byte `ssh:"rest"`
}
// See RFC 4254, section 5.1.
const msgChannelOpen = 90
type channelOpenMsg struct {
ChanType string `sshtype:"90"`
PeersId uint32
PeersWindow uint32
MaxPacketSize uint32
TypeSpecificData []byte `ssh:"rest"`
}
const msgChannelExtendedData = 95
const msgChannelData = 94
// Used for debug printouts of packets.
type channelDataMsg struct {
PeersId uint32 `sshtype:"94"`
Length uint32
Rest []byte `ssh:"rest"`
}
// See RFC 4254, section 5.1.
const msgChannelOpenConfirm = 91
type channelOpenConfirmMsg struct {
PeersId uint32 `sshtype:"91"`
MyId uint32
MyWindow uint32
MaxPacketSize uint32
TypeSpecificData []byte `ssh:"rest"`
}
// See RFC 4254, section 5.1.
const msgChannelOpenFailure = 92
type channelOpenFailureMsg struct {
PeersId uint32 `sshtype:"92"`
Reason RejectionReason
Message string
Language string
}
const msgChannelRequest = 98
type channelRequestMsg struct {
PeersId uint32 `sshtype:"98"`
Request string
WantReply bool
RequestSpecificData []byte `ssh:"rest"`
}
// See RFC 4254, section 5.4.
const msgChannelSuccess = 99
type channelRequestSuccessMsg struct {
PeersId uint32 `sshtype:"99"`
}
// See RFC 4254, section 5.4.
const msgChannelFailure = 100
type channelRequestFailureMsg struct {
PeersId uint32 `sshtype:"100"`
}
// See RFC 4254, section 5.3
const msgChannelClose = 97
type channelCloseMsg struct {
PeersId uint32 `sshtype:"97"`
}
// See RFC 4254, section 5.3
const msgChannelEOF = 96
type channelEOFMsg struct {
PeersId uint32 `sshtype:"96"`
}
// See RFC 4254, section 4
const msgGlobalRequest = 80
type globalRequestMsg struct {
Type string `sshtype:"80"`
WantReply bool
Data []byte `ssh:"rest"`
}
// See RFC 4254, section 4
const msgRequestSuccess = 81
type globalRequestSuccessMsg struct {
Data []byte `ssh:"rest" sshtype:"81"`
}
// See RFC 4254, section 4
const msgRequestFailure = 82
type globalRequestFailureMsg struct {
Data []byte `ssh:"rest" sshtype:"82"`
}
// See RFC 4254, section 5.2
const msgChannelWindowAdjust = 93
type windowAdjustMsg struct {
PeersId uint32 `sshtype:"93"`
AdditionalBytes uint32
}
// See RFC 4252, section 7
const msgUserAuthPubKeyOk = 60
type userAuthPubKeyOkMsg struct {
Algo string `sshtype:"60"`
PubKey []byte
}
// typeTags returns the possible type bytes for the given reflect.Type, which
// should be a struct. The possible values are separated by a '|' character.
func typeTags(structType reflect.Type) (tags []byte) {
tagStr := structType.Field(0).Tag.Get("sshtype")
for _, tag := range strings.Split(tagStr, "|") {
i, err := strconv.Atoi(tag)
if err == nil {
tags = append(tags, byte(i))
}
}
return tags
}
func fieldError(t reflect.Type, field int, problem string) error {
if problem != "" {
problem = ": " + problem
}
return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem)
}
var errShortRead = errors.New("ssh: short read")
// Unmarshal parses data in SSH wire format into a structure. The out
// argument should be a pointer to struct. If the first member of the
// struct has the "sshtype" tag set to a '|'-separated set of numbers
// in decimal, the packet must start with one of those numbers. In
// case of error, Unmarshal returns a ParseError or
// UnexpectedMessageError.
func Unmarshal(data []byte, out interface{}) error {
v := reflect.ValueOf(out).Elem()
structType := v.Type()
expectedTypes := typeTags(structType)
var expectedType byte
if len(expectedTypes) > 0 {
expectedType = expectedTypes[0]
}
if len(data) == 0 {
return parseError(expectedType)
}
if len(expectedTypes) > 0 {
goodType := false
for _, e := range expectedTypes {
if e > 0 && data[0] == e {
goodType = true
break
}
}
if !goodType {
return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes)
}
data = data[1:]
}
var ok bool
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
t := field.Type()
switch t.Kind() {
case reflect.Bool:
if len(data) < 1 {
return errShortRead
}
field.SetBool(data[0] != 0)
data = data[1:]
case reflect.Array:
if t.Elem().Kind() != reflect.Uint8 {
return fieldError(structType, i, "array of unsupported type")
}
if len(data) < t.Len() {
return errShortRead
}
for j, n := 0, t.Len(); j < n; j++ {
field.Index(j).Set(reflect.ValueOf(data[j]))
}
data = data[t.Len():]
case reflect.Uint64:
var u64 uint64
if u64, data, ok = parseUint64(data); !ok {
return errShortRead
}
field.SetUint(u64)
case reflect.Uint32:
var u32 uint32
if u32, data, ok = parseUint32(data); !ok {
return errShortRead
}
field.SetUint(uint64(u32))
case reflect.Uint8:
if len(data) < 1 {
return errShortRead
}
field.SetUint(uint64(data[0]))
data = data[1:]
case reflect.String:
var s []byte
if s, data, ok = parseString(data); !ok {
return fieldError(structType, i, "")
}
field.SetString(string(s))
case reflect.Slice:
switch t.Elem().Kind() {
case reflect.Uint8:
if structType.Field(i).Tag.Get("ssh") == "rest" {
field.Set(reflect.ValueOf(data))
data = nil
} else {
var s []byte
if s, data, ok = parseString(data); !ok {
return errShortRead
}
field.Set(reflect.ValueOf(s))
}
case reflect.String:
var nl []string
if nl, data, ok = parseNameList(data); !ok {
return errShortRead
}
field.Set(reflect.ValueOf(nl))
default:
return fieldError(structType, i, "slice of unsupported type")
}
case reflect.Ptr:
if t == bigIntType {
var n *big.Int
if n, data, ok = parseInt(data); !ok {
return errShortRead
}
field.Set(reflect.ValueOf(n))
} else {
return fieldError(structType, i, "pointer to unsupported type")
}
default:
return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t))
}
}
if len(data) != 0 {
return parseError(expectedType)
}
return nil
}
// Marshal serializes the message in msg to SSH wire format. The msg
// argument should be a struct or pointer to struct. If the first
// member has the "sshtype" tag set to a number in decimal, that
// number is prepended to the result. If the last member has the
// "ssh" tag set to "rest", its contents are appended to the output.
func Marshal(msg interface{}) []byte {
out := make([]byte, 0, 64)
return marshalStruct(out, msg)
}
func marshalStruct(out []byte, msg interface{}) []byte {
v := reflect.Indirect(reflect.ValueOf(msg))
msgTypes := typeTags(v.Type())
if len(msgTypes) > 0 {
out = append(out, msgTypes[0])
}
for i, n := 0, v.NumField(); i < n; i++ {
field := v.Field(i)
switch t := field.Type(); t.Kind() {
case reflect.Bool:
var v uint8
if field.Bool() {
v = 1
}
out = append(out, v)
case reflect.Array:
if t.Elem().Kind() != reflect.Uint8 {
panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface()))
}
for j, l := 0, t.Len(); j < l; j++ {
out = append(out, uint8(field.Index(j).Uint()))
}
case reflect.Uint32:
out = appendU32(out, uint32(field.Uint()))
case reflect.Uint64:
out = appendU64(out, uint64(field.Uint()))
case reflect.Uint8:
out = append(out, uint8(field.Uint()))
case reflect.String:
s := field.String()
out = appendInt(out, len(s))
out = append(out, s...)
case reflect.Slice:
switch t.Elem().Kind() {
case reflect.Uint8:
if v.Type().Field(i).Tag.Get("ssh") != "rest" {
out = appendInt(out, field.Len())
}
out = append(out, field.Bytes()...)
case reflect.String:
offset := len(out)
out = appendU32(out, 0)
if n := field.Len(); n > 0 {
for j := 0; j < n; j++ {
f := field.Index(j)
if j != 0 {
out = append(out, ',')
}
out = append(out, f.String()...)
}
// overwrite length value
binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4))
}
default:
panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface()))
}
case reflect.Ptr:
if t == bigIntType {
var n *big.Int
nValue := reflect.ValueOf(&n)
nValue.Elem().Set(field)
needed := intLength(n)
oldLength := len(out)
if cap(out)-len(out) < needed {
newOut := make([]byte, len(out), 2*(len(out)+needed))
copy(newOut, out)
out = newOut
}
out = out[:oldLength+needed]
marshalInt(out[oldLength:], n)
} else {
panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface()))
}
}
}
return out
}
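To make the tag conventions above concrete, here is a small hypothetical round trip written as if it lived in this package; pingMsg and the message number 192 are invented purely for illustration and are not part of the SSH protocol:
// pingMsg is a made-up message used only to demonstrate the tag rules.
type pingMsg struct {
	Seq  uint32 `sshtype:"192"` // Marshal prepends 192; Unmarshal requires it as the first byte
	Data []byte `ssh:"rest"`    // final []byte field receives whatever remains of the packet
}
func pingRoundTrip() (pingMsg, error) {
	wire := Marshal(&pingMsg{Seq: 7, Data: []byte("payload")})
	var out pingMsg
	err := Unmarshal(wire, &out) // on success, out.Seq == 7 and string(out.Data) == "payload"
	return out, err
}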
var bigOne = big.NewInt(1)
func parseString(in []byte) (out, rest []byte, ok bool) {
if len(in) < 4 {
return
}
length := binary.BigEndian.Uint32(in)
in = in[4:]
if uint32(len(in)) < length {
return
}
out = in[:length]
rest = in[length:]
ok = true
return
}
var (
comma = []byte{','}
emptyNameList = []string{}
)
func parseNameList(in []byte) (out []string, rest []byte, ok bool) {
contents, rest, ok := parseString(in)
if !ok {
return
}
if len(contents) == 0 {
out = emptyNameList
return
}
parts := bytes.Split(contents, comma)
out = make([]string, len(parts))
for i, part := range parts {
out[i] = string(part)
}
return
}
func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) {
contents, rest, ok := parseString(in)
if !ok {
return
}
out = new(big.Int)
if len(contents) > 0 && contents[0]&0x80 == 0x80 {
// This is a negative number
notBytes := make([]byte, len(contents))
for i := range notBytes {
notBytes[i] = ^contents[i]
}
out.SetBytes(notBytes)
out.Add(out, bigOne)
out.Neg(out)
} else {
// Positive number
out.SetBytes(contents)
}
ok = true
return
}
func parseUint32(in []byte) (uint32, []byte, bool) {
if len(in) < 4 {
return 0, nil, false
}
return binary.BigEndian.Uint32(in), in[4:], true
}
func parseUint64(in []byte) (uint64, []byte, bool) {
if len(in) < 8 {
return 0, nil, false
}
return binary.BigEndian.Uint64(in), in[8:], true
}
func intLength(n *big.Int) int {
length := 4 /* length bytes */
if n.Sign() < 0 {
nMinus1 := new(big.Int).Neg(n)
nMinus1.Sub(nMinus1, bigOne)
bitLen := nMinus1.BitLen()
if bitLen%8 == 0 {
// The number will need 0xff padding
length++
}
length += (bitLen + 7) / 8
} else if n.Sign() == 0 {
// A zero is the zero length string
} else {
bitLen := n.BitLen()
if bitLen%8 == 0 {
// The number will need 0x00 padding
length++
}
length += (bitLen + 7) / 8
}
return length
}
func marshalUint32(to []byte, n uint32) []byte {
binary.BigEndian.PutUint32(to, n)
return to[4:]
}
func marshalUint64(to []byte, n uint64) []byte {
binary.BigEndian.PutUint64(to, n)
return to[8:]
}
func marshalInt(to []byte, n *big.Int) []byte {
lengthBytes := to
to = to[4:]
length := 0
if n.Sign() < 0 {
// A negative number has to be converted to two's-complement
// form. So we'll subtract 1 and invert. If the
// most-significant-bit isn't set then we'll need to pad the
// beginning with 0xff in order to keep the number negative.
nMinus1 := new(big.Int).Neg(n)
nMinus1.Sub(nMinus1, bigOne)
bytes := nMinus1.Bytes()
for i := range bytes {
bytes[i] ^= 0xff
}
if len(bytes) == 0 || bytes[0]&0x80 == 0 {
to[0] = 0xff
to = to[1:]
length++
}
nBytes := copy(to, bytes)
to = to[nBytes:]
length += nBytes
} else if n.Sign() == 0 {
// A zero is the zero length string
} else {
bytes := n.Bytes()
if len(bytes) > 0 && bytes[0]&0x80 != 0 {
// We'll have to pad this with a 0x00 in order to
// stop it looking like a negative number.
to[0] = 0
to = to[1:]
length++
}
nBytes := copy(to, bytes)
to = to[nBytes:]
length += nBytes
}
lengthBytes[0] = byte(length >> 24)
lengthBytes[1] = byte(length >> 16)
lengthBytes[2] = byte(length >> 8)
lengthBytes[3] = byte(length)
return to
}
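As a worked example of the two's-complement handling above (values chosen purely for illustration): for n = -255, marshalInt computes nMinus1 = 254 = 0xfe, inverts it to 0x01, sees that the top bit is clear, and prepends an 0xff pad byte, so the wire form is the length 00 00 00 02 followed by ff 01; for n = 128 the top bit of 0x80 is set, so a 0x00 pad byte is added instead, giving 00 00 00 02 00 80.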
func writeInt(w io.Writer, n *big.Int) {
length := intLength(n)
buf := make([]byte, length)
marshalInt(buf, n)
w.Write(buf)
}
func writeString(w io.Writer, s []byte) {
var lengthBytes [4]byte
lengthBytes[0] = byte(len(s) >> 24)
lengthBytes[1] = byte(len(s) >> 16)
lengthBytes[2] = byte(len(s) >> 8)
lengthBytes[3] = byte(len(s))
w.Write(lengthBytes[:])
w.Write(s)
}
func stringLength(n int) int {
return 4 + n
}
func marshalString(to []byte, s []byte) []byte {
to[0] = byte(len(s) >> 24)
to[1] = byte(len(s) >> 16)
to[2] = byte(len(s) >> 8)
to[3] = byte(len(s))
to = to[4:]
copy(to, s)
return to[len(s):]
}
var bigIntType = reflect.TypeOf((*big.Int)(nil))
// Decode a packet into its corresponding message.
func decode(packet []byte) (interface{}, error) {
var msg interface{}
switch packet[0] {
case msgDisconnect:
msg = new(disconnectMsg)
case msgServiceRequest:
msg = new(serviceRequestMsg)
case msgServiceAccept:
msg = new(serviceAcceptMsg)
case msgKexInit:
msg = new(kexInitMsg)
case msgKexDHInit:
msg = new(kexDHInitMsg)
case msgKexDHReply:
msg = new(kexDHReplyMsg)
case msgUserAuthRequest:
msg = new(userAuthRequestMsg)
case msgUserAuthSuccess:
return new(userAuthSuccessMsg), nil
case msgUserAuthFailure:
msg = new(userAuthFailureMsg)
case msgUserAuthPubKeyOk:
msg = new(userAuthPubKeyOkMsg)
case msgGlobalRequest:
msg = new(globalRequestMsg)
case msgRequestSuccess:
msg = new(globalRequestSuccessMsg)
case msgRequestFailure:
msg = new(globalRequestFailureMsg)
case msgChannelOpen:
msg = new(channelOpenMsg)
case msgChannelData:
msg = new(channelDataMsg)
case msgChannelOpenConfirm:
msg = new(channelOpenConfirmMsg)
case msgChannelOpenFailure:
msg = new(channelOpenFailureMsg)
case msgChannelWindowAdjust:
msg = new(windowAdjustMsg)
case msgChannelEOF:
msg = new(channelEOFMsg)
case msgChannelClose:
msg = new(channelCloseMsg)
case msgChannelRequest:
msg = new(channelRequestMsg)
case msgChannelSuccess:
msg = new(channelRequestSuccessMsg)
case msgChannelFailure:
msg = new(channelRequestFailureMsg)
default:
return nil, unexpectedMessageError(0, packet[0])
}
if err := Unmarshal(packet, msg); err != nil {
return nil, err
}
return msg, nil
}

330
vendor/golang.org/x/crypto/ssh/mux.go generated vendored Normal file
View file

@ -0,0 +1,330 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"encoding/binary"
"fmt"
"io"
"log"
"sync"
"sync/atomic"
)
// debugMux, if set, causes messages in the connection protocol to be
// logged.
const debugMux = false
// chanList is a thread safe channel list.
type chanList struct {
// protects concurrent access to chans
sync.Mutex
// chans are indexed by the local id of the channel, which the
// other side should send in the PeersId field.
chans []*channel
// This is a debugging aid: it offsets all IDs by this
// amount. This helps distinguish otherwise identical
// server/client muxes
offset uint32
}
// Assigns a channel ID to the given channel.
func (c *chanList) add(ch *channel) uint32 {
c.Lock()
defer c.Unlock()
for i := range c.chans {
if c.chans[i] == nil {
c.chans[i] = ch
return uint32(i) + c.offset
}
}
c.chans = append(c.chans, ch)
return uint32(len(c.chans)-1) + c.offset
}
// getChan returns the channel for the given ID.
func (c *chanList) getChan(id uint32) *channel {
id -= c.offset
c.Lock()
defer c.Unlock()
if id < uint32(len(c.chans)) {
return c.chans[id]
}
return nil
}
func (c *chanList) remove(id uint32) {
id -= c.offset
c.Lock()
if id < uint32(len(c.chans)) {
c.chans[id] = nil
}
c.Unlock()
}
// dropAll forgets all channels it knows, returning them in a slice.
func (c *chanList) dropAll() []*channel {
c.Lock()
defer c.Unlock()
var r []*channel
for _, ch := range c.chans {
if ch == nil {
continue
}
r = append(r, ch)
}
c.chans = nil
return r
}
// mux represents the state for the SSH connection protocol, which
// multiplexes many channels onto a single packet transport.
type mux struct {
conn packetConn
chanList chanList
incomingChannels chan NewChannel
globalSentMu sync.Mutex
globalResponses chan interface{}
incomingRequests chan *Request
errCond *sync.Cond
err error
}
// When debugging, each new chanList instantiation has a different
// offset.
var globalOff uint32
func (m *mux) Wait() error {
m.errCond.L.Lock()
defer m.errCond.L.Unlock()
for m.err == nil {
m.errCond.Wait()
}
return m.err
}
// newMux returns a mux that runs over the given connection.
func newMux(p packetConn) *mux {
m := &mux{
conn: p,
incomingChannels: make(chan NewChannel, 16),
globalResponses: make(chan interface{}, 1),
incomingRequests: make(chan *Request, 16),
errCond: newCond(),
}
if debugMux {
m.chanList.offset = atomic.AddUint32(&globalOff, 1)
}
go m.loop()
return m
}
func (m *mux) sendMessage(msg interface{}) error {
p := Marshal(msg)
if debugMux {
log.Printf("send global(%d): %#v", m.chanList.offset, msg)
}
return m.conn.writePacket(p)
}
func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) {
if wantReply {
m.globalSentMu.Lock()
defer m.globalSentMu.Unlock()
}
if err := m.sendMessage(globalRequestMsg{
Type: name,
WantReply: wantReply,
Data: payload,
}); err != nil {
return false, nil, err
}
if !wantReply {
return false, nil, nil
}
msg, ok := <-m.globalResponses
if !ok {
return false, nil, io.EOF
}
switch msg := msg.(type) {
case *globalRequestFailureMsg:
return false, msg.Data, nil
case *globalRequestSuccessMsg:
return true, msg.Data, nil
default:
return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg)
}
}
// ackRequest must be called after processing a global request that
// has WantReply set.
func (m *mux) ackRequest(ok bool, data []byte) error {
if ok {
return m.sendMessage(globalRequestSuccessMsg{Data: data})
}
return m.sendMessage(globalRequestFailureMsg{Data: data})
}
func (m *mux) Close() error {
return m.conn.Close()
}
// loop runs the connection machine. It will process packets until an
// error is encountered. To synchronize on loop exit, use mux.Wait.
func (m *mux) loop() {
var err error
for err == nil {
err = m.onePacket()
}
for _, ch := range m.chanList.dropAll() {
ch.close()
}
close(m.incomingChannels)
close(m.incomingRequests)
close(m.globalResponses)
m.conn.Close()
m.errCond.L.Lock()
m.err = err
m.errCond.Broadcast()
m.errCond.L.Unlock()
if debugMux {
log.Println("loop exit", err)
}
}
// onePacket reads and processes one packet.
func (m *mux) onePacket() error {
packet, err := m.conn.readPacket()
if err != nil {
return err
}
if debugMux {
if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData {
log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet))
} else {
p, _ := decode(packet)
log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet))
}
}
switch packet[0] {
case msgChannelOpen:
return m.handleChannelOpen(packet)
case msgGlobalRequest, msgRequestSuccess, msgRequestFailure:
return m.handleGlobalPacket(packet)
}
// assume a channel packet.
if len(packet) < 5 {
return parseError(packet[0])
}
id := binary.BigEndian.Uint32(packet[1:])
ch := m.chanList.getChan(id)
if ch == nil {
return fmt.Errorf("ssh: invalid channel %d", id)
}
return ch.handlePacket(packet)
}
func (m *mux) handleGlobalPacket(packet []byte) error {
msg, err := decode(packet)
if err != nil {
return err
}
switch msg := msg.(type) {
case *globalRequestMsg:
m.incomingRequests <- &Request{
Type: msg.Type,
WantReply: msg.WantReply,
Payload: msg.Data,
mux: m,
}
case *globalRequestSuccessMsg, *globalRequestFailureMsg:
m.globalResponses <- msg
default:
panic(fmt.Sprintf("not a global message %#v", msg))
}
return nil
}
// handleChannelOpen schedules a channel to be Accept()ed.
func (m *mux) handleChannelOpen(packet []byte) error {
var msg channelOpenMsg
if err := Unmarshal(packet, &msg); err != nil {
return err
}
if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
failMsg := channelOpenFailureMsg{
PeersId: msg.PeersId,
Reason: ConnectionFailed,
Message: "invalid request",
Language: "en_US.UTF-8",
}
return m.sendMessage(failMsg)
}
c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData)
c.remoteId = msg.PeersId
c.maxRemotePayload = msg.MaxPacketSize
c.remoteWin.add(msg.PeersWindow)
m.incomingChannels <- c
return nil
}
func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) {
ch, err := m.openChannel(chanType, extra)
if err != nil {
return nil, nil, err
}
return ch, ch.incomingRequests, nil
}
func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) {
ch := m.newChannel(chanType, channelOutbound, extra)
ch.maxIncomingPayload = channelMaxPacket
open := channelOpenMsg{
ChanType: chanType,
PeersWindow: ch.myWindow,
MaxPacketSize: ch.maxIncomingPayload,
TypeSpecificData: extra,
PeersId: ch.localId,
}
if err := m.sendMessage(open); err != nil {
return nil, err
}
switch msg := (<-ch.msg).(type) {
case *channelOpenConfirmMsg:
return ch, nil
case *channelOpenFailureMsg:
return nil, &OpenChannelError{msg.Reason, msg.Message}
default:
return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg)
}
}
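As a caller-side illustration of the global-request path above, a keepalive loop can be built on the exported SendRequest of an established *ssh.Client; the request name is the one OpenSSH conventionally uses, and the helper name and interval are arbitrary:
package main
import (
	"time"

	"golang.org/x/crypto/ssh"
)
// keepalive sends a global request at a fixed interval; each call travels
// through the mux's sendMessage/globalResponses machinery shown above.
func keepalive(client *ssh.Client, every time.Duration) {
	t := time.NewTicker(every)
	defer t.Stop()
	for range t.C {
		if _, _, err := client.SendRequest("keepalive@openssh.com", true, nil); err != nil {
			return // the transport has failed; mux.Wait reports the underlying error
		}
	}
}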

488
vendor/golang.org/x/crypto/ssh/server.go generated vendored Normal file
View file

@ -0,0 +1,488 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"bytes"
"errors"
"fmt"
"io"
"net"
)
// The Permissions type holds fine-grained permissions that are
// specific to a user or a specific authentication method for a
// user. Permissions, except for "source-address", must be enforced in
// the server application layer, after successful authentication. The
// Permissions are passed on in ServerConn so a server implementation
// can honor them.
type Permissions struct {
// Critical options restrict default permissions. Common
// restrictions are "source-address" and "force-command". If
// the server cannot enforce the restriction, or does not
// recognize it, the user should not authenticate.
CriticalOptions map[string]string
// Extensions are extra functionality that the server may
// offer on authenticated connections. Common extensions are
// "permit-agent-forwarding", "permit-X11-forwarding". Lack of
// support for an extension does not preclude authenticating a
// user.
Extensions map[string]string
}
// ServerConfig holds server specific configuration data.
type ServerConfig struct {
// Config contains configuration shared between client and server.
Config
hostKeys []Signer
// NoClientAuth is true if clients are allowed to connect without
// authenticating.
NoClientAuth bool
// PasswordCallback, if non-nil, is called when a user
// attempts to authenticate using a password.
PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error)
// PublicKeyCallback, if non-nil, is called when a client attempts public
// key authentication. It must return a nil error if the given public key
// is valid for the given user. For example, see CertChecker.Authenticate.
PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
// KeyboardInteractiveCallback, if non-nil, is called when
// keyboard-interactive authentication is selected (RFC
// 4256). The client object's Challenge function should be
// used to query the user. The callback may offer multiple
// Challenge rounds. To avoid information leaks, the client
// should be presented a challenge even if the user is
// unknown.
KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error)
// AuthLogCallback, if non-nil, is called to log all authentication
// attempts.
AuthLogCallback func(conn ConnMetadata, method string, err error)
// ServerVersion is the version identification string to announce in
// the public handshake.
// If empty, a reasonable default is used.
// Note that RFC 4253 section 4.2 requires that this string start with
// "SSH-2.0-".
ServerVersion string
}
// AddHostKey adds a private key as a host key. If an existing host
// key exists with the same algorithm, it is overwritten. Each server
// config must have at least one host key.
func (s *ServerConfig) AddHostKey(key Signer) {
for i, k := range s.hostKeys {
if k.PublicKey().Type() == key.PublicKey().Type() {
s.hostKeys[i] = key
return
}
}
s.hostKeys = append(s.hostKeys, key)
}
// cachedPubKey contains the results of querying whether a public key is
// acceptable for a user.
type cachedPubKey struct {
user string
pubKeyData []byte
result error
perms *Permissions
}
const maxCachedPubKeys = 16
// pubKeyCache caches tests for public keys. Since SSH clients
// will query whether a public key is acceptable before attempting to
// authenticate with it, we end up with duplicate queries for public
// key validity. The cache only applies to a single ServerConn.
type pubKeyCache struct {
keys []cachedPubKey
}
// get returns the result for a given user/algo/key tuple.
func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) {
for _, k := range c.keys {
if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) {
return k, true
}
}
return cachedPubKey{}, false
}
// add adds the given tuple to the cache.
func (c *pubKeyCache) add(candidate cachedPubKey) {
if len(c.keys) < maxCachedPubKeys {
c.keys = append(c.keys, candidate)
}
}
// ServerConn is an authenticated SSH connection, as seen from the
// server
type ServerConn struct {
Conn
// If the succeeding authentication callback returned a
// non-nil Permissions pointer, it is stored here.
Permissions *Permissions
}
// NewServerConn starts a new SSH server with c as the underlying
// transport. It starts with a handshake and, if the handshake is
// unsuccessful, it closes the connection and returns an error. The
// Request and NewChannel channels must be serviced, or the connection
// will hang.
func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) {
fullConf := *config
fullConf.SetDefaults()
s := &connection{
sshConn: sshConn{conn: c},
}
perms, err := s.serverHandshake(&fullConf)
if err != nil {
c.Close()
return nil, nil, nil, err
}
return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil
}
// signAndMarshal signs the data with the appropriate algorithm,
// and serializes the result in SSH wire format.
func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) {
sig, err := k.Sign(rand, data)
if err != nil {
return nil, err
}
return Marshal(sig), nil
}
// handshake performs key exchange and user authentication.
func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) {
if len(config.hostKeys) == 0 {
return nil, errors.New("ssh: server has no host keys")
}
if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil {
return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
}
if config.ServerVersion != "" {
s.serverVersion = []byte(config.ServerVersion)
} else {
s.serverVersion = []byte(packageVersion)
}
var err error
s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion)
if err != nil {
return nil, err
}
tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */)
s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config)
if err := s.transport.requestInitialKeyChange(); err != nil {
return nil, err
}
// We just did the key change, so the session ID is established.
s.sessionID = s.transport.getSessionID()
var packet []byte
if packet, err = s.transport.readPacket(); err != nil {
return nil, err
}
var serviceRequest serviceRequestMsg
if err = Unmarshal(packet, &serviceRequest); err != nil {
return nil, err
}
if serviceRequest.Service != serviceUserAuth {
return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating")
}
serviceAccept := serviceAcceptMsg{
Service: serviceUserAuth,
}
if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil {
return nil, err
}
perms, err := s.serverAuthenticate(config)
if err != nil {
return nil, err
}
s.mux = newMux(s.transport)
return perms, err
}
func isAcceptableAlgo(algo string) bool {
switch algo {
case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519,
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01:
return true
}
return false
}
func checkSourceAddress(addr net.Addr, sourceAddr string) error {
if addr == nil {
return errors.New("ssh: no address known for client, but source-address match required")
}
tcpAddr, ok := addr.(*net.TCPAddr)
if !ok {
return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr)
}
if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
if bytes.Equal(allowedIP, tcpAddr.IP) {
return nil
}
} else {
_, ipNet, err := net.ParseCIDR(sourceAddr)
if err != nil {
return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err)
}
if ipNet.Contains(tcpAddr.IP) {
return nil
}
}
return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr)
}
func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) {
var err error
var cache pubKeyCache
var perms *Permissions
userAuthLoop:
for {
var userAuthReq userAuthRequestMsg
if packet, err := s.transport.readPacket(); err != nil {
return nil, err
} else if err = Unmarshal(packet, &userAuthReq); err != nil {
return nil, err
}
if userAuthReq.Service != serviceSSH {
return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service)
}
s.user = userAuthReq.User
perms = nil
authErr := errors.New("no auth passed yet")
switch userAuthReq.Method {
case "none":
if config.NoClientAuth {
authErr = nil
}
case "password":
if config.PasswordCallback == nil {
authErr = errors.New("ssh: password auth not configured")
break
}
payload := userAuthReq.Payload
if len(payload) < 1 || payload[0] != 0 {
return nil, parseError(msgUserAuthRequest)
}
payload = payload[1:]
password, payload, ok := parseString(payload)
if !ok || len(payload) > 0 {
return nil, parseError(msgUserAuthRequest)
}
perms, authErr = config.PasswordCallback(s, password)
case "keyboard-interactive":
if config.KeyboardInteractiveCallback == nil {
authErr = errors.New("ssh: keyboard-interactive auth not configubred")
break
}
prompter := &sshClientKeyboardInteractive{s}
perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge)
case "publickey":
if config.PublicKeyCallback == nil {
authErr = errors.New("ssh: publickey auth not configured")
break
}
payload := userAuthReq.Payload
if len(payload) < 1 {
return nil, parseError(msgUserAuthRequest)
}
isQuery := payload[0] == 0
payload = payload[1:]
algoBytes, payload, ok := parseString(payload)
if !ok {
return nil, parseError(msgUserAuthRequest)
}
algo := string(algoBytes)
if !isAcceptableAlgo(algo) {
authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
break
}
pubKeyData, payload, ok := parseString(payload)
if !ok {
return nil, parseError(msgUserAuthRequest)
}
pubKey, err := ParsePublicKey(pubKeyData)
if err != nil {
return nil, err
}
candidate, ok := cache.get(s.user, pubKeyData)
if !ok {
candidate.user = s.user
candidate.pubKeyData = pubKeyData
candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey)
if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" {
candidate.result = checkSourceAddress(
s.RemoteAddr(),
candidate.perms.CriticalOptions[sourceAddressCriticalOption])
}
cache.add(candidate)
}
if isQuery {
// The client can query if the given public key
// would be okay.
if len(payload) > 0 {
return nil, parseError(msgUserAuthRequest)
}
if candidate.result == nil {
okMsg := userAuthPubKeyOkMsg{
Algo: algo,
PubKey: pubKeyData,
}
if err = s.transport.writePacket(Marshal(&okMsg)); err != nil {
return nil, err
}
continue userAuthLoop
}
authErr = candidate.result
} else {
sig, payload, ok := parseSignature(payload)
if !ok || len(payload) > 0 {
return nil, parseError(msgUserAuthRequest)
}
// Ensure the public key algo and signature algo
// are supported. Compare the private key
// algorithm name that corresponds to algo with
// sig.Format. This is usually the same, but
// for certs, the names differ.
if !isAcceptableAlgo(sig.Format) {
break
}
signedData := buildDataSignedForAuth(s.transport.getSessionID(), userAuthReq, algoBytes, pubKeyData)
if err := pubKey.Verify(signedData, sig); err != nil {
return nil, err
}
authErr = candidate.result
perms = candidate.perms
}
default:
authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method)
}
if config.AuthLogCallback != nil {
config.AuthLogCallback(s, userAuthReq.Method, authErr)
}
if authErr == nil {
break userAuthLoop
}
var failureMsg userAuthFailureMsg
if config.PasswordCallback != nil {
failureMsg.Methods = append(failureMsg.Methods, "password")
}
if config.PublicKeyCallback != nil {
failureMsg.Methods = append(failureMsg.Methods, "publickey")
}
if config.KeyboardInteractiveCallback != nil {
failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive")
}
if len(failureMsg.Methods) == 0 {
return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
}
if err = s.transport.writePacket(Marshal(&failureMsg)); err != nil {
return nil, err
}
}
if err = s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil {
return nil, err
}
return perms, nil
}
// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by
// asking the client on the other side of a ServerConn.
type sshClientKeyboardInteractive struct {
*connection
}
func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) {
if len(questions) != len(echos) {
return nil, errors.New("ssh: echos and questions must have equal length")
}
var prompts []byte
for i := range questions {
prompts = appendString(prompts, questions[i])
prompts = appendBool(prompts, echos[i])
}
if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{
Instruction: instruction,
NumPrompts: uint32(len(questions)),
Prompts: prompts,
})); err != nil {
return nil, err
}
packet, err := c.transport.readPacket()
if err != nil {
return nil, err
}
if packet[0] != msgUserAuthInfoResponse {
return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0])
}
packet = packet[1:]
n, packet, ok := parseUint32(packet)
if !ok || int(n) != len(questions) {
return nil, parseError(msgUserAuthInfoResponse)
}
for i := uint32(0); i < n; i++ {
ans, rest, ok := parseString(packet)
if !ok {
return nil, parseError(msgUserAuthInfoResponse)
}
answers = append(answers, string(ans))
packet = rest
}
if len(packet) != 0 {
return nil, errors.New("ssh: junk at end of message")
}
return answers, nil
}
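A minimal sketch of wiring the pieces above together: it loads a host key, accepts every public key (for demonstration only) while pinning it to a source network via the "source-address" critical option enforced by checkSourceAddress, and rejects all channels. The key path, port, and address range are placeholders:
package main
import (
	"io/ioutil"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)
func main() {
	keyBytes, err := ioutil.ReadFile("/etc/ssh/ssh_host_ed25519_key") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	hostKey, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}
	config := &ssh.ServerConfig{
		// Demo policy: accept any key, but restrict it to one source network.
		PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
			return &ssh.Permissions{
				CriticalOptions: map[string]string{"source-address": "192.0.2.0/24"},
			}, nil
		},
	}
	config.AddHostKey(hostKey)
	ln, err := net.Listen("tcp", ":2022")
	if err != nil {
		log.Fatal(err)
	}
	for {
		c, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func(c net.Conn) {
			_, chans, reqs, err := ssh.NewServerConn(c, config)
			if err != nil {
				log.Println("handshake failed:", err)
				return
			}
			go ssh.DiscardRequests(reqs)
			for newCh := range chans {
				newCh.Reject(ssh.UnknownChannelType, "no channel types served by this sketch")
			}
		}(c)
	}
}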

627
vendor/golang.org/x/crypto/ssh/session.go generated vendored Normal file
View file

@ -0,0 +1,627 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
// Session implements an interactive session described in
// "RFC 4254, section 6".
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"sync"
)
type Signal string
// POSIX signals as listed in RFC 4254 Section 6.10.
const (
SIGABRT Signal = "ABRT"
SIGALRM Signal = "ALRM"
SIGFPE Signal = "FPE"
SIGHUP Signal = "HUP"
SIGILL Signal = "ILL"
SIGINT Signal = "INT"
SIGKILL Signal = "KILL"
SIGPIPE Signal = "PIPE"
SIGQUIT Signal = "QUIT"
SIGSEGV Signal = "SEGV"
SIGTERM Signal = "TERM"
SIGUSR1 Signal = "USR1"
SIGUSR2 Signal = "USR2"
)
var signals = map[Signal]int{
SIGABRT: 6,
SIGALRM: 14,
SIGFPE: 8,
SIGHUP: 1,
SIGILL: 4,
SIGINT: 2,
SIGKILL: 9,
SIGPIPE: 13,
SIGQUIT: 3,
SIGSEGV: 11,
SIGTERM: 15,
}
type TerminalModes map[uint8]uint32
// POSIX terminal mode flags as listed in RFC 4254 Section 8.
const (
tty_OP_END = 0
VINTR = 1
VQUIT = 2
VERASE = 3
VKILL = 4
VEOF = 5
VEOL = 6
VEOL2 = 7
VSTART = 8
VSTOP = 9
VSUSP = 10
VDSUSP = 11
VREPRINT = 12
VWERASE = 13
VLNEXT = 14
VFLUSH = 15
VSWTCH = 16
VSTATUS = 17
VDISCARD = 18
IGNPAR = 30
PARMRK = 31
INPCK = 32
ISTRIP = 33
INLCR = 34
IGNCR = 35
ICRNL = 36
IUCLC = 37
IXON = 38
IXANY = 39
IXOFF = 40
IMAXBEL = 41
ISIG = 50
ICANON = 51
XCASE = 52
ECHO = 53
ECHOE = 54
ECHOK = 55
ECHONL = 56
NOFLSH = 57
TOSTOP = 58
IEXTEN = 59
ECHOCTL = 60
ECHOKE = 61
PENDIN = 62
OPOST = 70
OLCUC = 71
ONLCR = 72
OCRNL = 73
ONOCR = 74
ONLRET = 75
CS7 = 90
CS8 = 91
PARENB = 92
PARODD = 93
TTY_OP_ISPEED = 128
TTY_OP_OSPEED = 129
)
// A Session represents a connection to a remote command or shell.
type Session struct {
// Stdin specifies the remote process's standard input.
// If Stdin is nil, the remote process reads from an empty
// bytes.Buffer.
Stdin io.Reader
// Stdout and Stderr specify the remote process's standard
// output and error.
//
// If either is nil, Run connects the corresponding file
// descriptor to an instance of ioutil.Discard. There is a
// fixed amount of buffering that is shared for the two streams.
// If either blocks it may eventually cause the remote
// command to block.
Stdout io.Writer
Stderr io.Writer
ch Channel // the channel backing this session
started bool // true once Start, Run or Shell is invoked.
copyFuncs []func() error
errors chan error // one send per copyFunc
// true if pipe method is active
stdinpipe, stdoutpipe, stderrpipe bool
// stdinPipeWriter is non-nil if StdinPipe has not been called
// and Stdin was specified by the user; it is the write end of
// a pipe connecting Session.Stdin to the stdin channel.
stdinPipeWriter io.WriteCloser
exitStatus chan error
}
// SendRequest sends an out-of-band channel request on the SSH channel
// underlying the session.
func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
return s.ch.SendRequest(name, wantReply, payload)
}
func (s *Session) Close() error {
return s.ch.Close()
}
// RFC 4254 Section 6.4.
type setenvRequest struct {
Name string
Value string
}
// Setenv sets an environment variable that will be applied to any
// command executed by Shell or Run.
func (s *Session) Setenv(name, value string) error {
msg := setenvRequest{
Name: name,
Value: value,
}
ok, err := s.ch.SendRequest("env", true, Marshal(&msg))
if err == nil && !ok {
err = errors.New("ssh: setenv failed")
}
return err
}
// RFC 4254 Section 6.2.
type ptyRequestMsg struct {
Term string
Columns uint32
Rows uint32
Width uint32
Height uint32
Modelist string
}
// RequestPty requests the association of a pty with the session on the remote host.
func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error {
var tm []byte
for k, v := range termmodes {
kv := struct {
Key byte
Val uint32
}{k, v}
tm = append(tm, Marshal(&kv)...)
}
tm = append(tm, tty_OP_END)
req := ptyRequestMsg{
Term: term,
Columns: uint32(w),
Rows: uint32(h),
Width: uint32(w * 8),
Height: uint32(h * 8),
Modelist: string(tm),
}
ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req))
if err == nil && !ok {
err = errors.New("ssh: pty-req failed")
}
return err
}
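For instance, a caller holding an open *ssh.Session (obtained elsewhere) might request a pty as below; the terminal type and mode values are common defaults rather than requirements, and the snippet assumes the usual "log" and golang.org/x/crypto/ssh imports:
func requestPTY(sess *ssh.Session) {
	modes := ssh.TerminalModes{
		ssh.ECHO:          0,     // disable echoing
		ssh.TTY_OP_ISPEED: 14400, // input speed in baud
		ssh.TTY_OP_OSPEED: 14400, // output speed in baud
	}
	if err := sess.RequestPty("xterm", 40, 80, modes); err != nil { // 40 rows, 80 columns
		log.Fatal(err)
	}
}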
// RFC 4254 Section 6.5.
type subsystemRequestMsg struct {
Subsystem string
}
// RequestSubsystem requests the association of a subsystem with the session on the remote host.
// A subsystem is a predefined command that runs in the background when the SSH session is initiated.
func (s *Session) RequestSubsystem(subsystem string) error {
msg := subsystemRequestMsg{
Subsystem: subsystem,
}
ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg))
if err == nil && !ok {
err = errors.New("ssh: subsystem request failed")
}
return err
}
// RFC 4254 Section 6.9.
type signalMsg struct {
Signal string
}
// Signal sends the given signal to the remote process.
// sig is one of the SIG* constants.
func (s *Session) Signal(sig Signal) error {
msg := signalMsg{
Signal: string(sig),
}
_, err := s.ch.SendRequest("signal", false, Marshal(&msg))
return err
}
// RFC 4254 Section 6.5.
type execMsg struct {
Command string
}
// Start runs cmd on the remote host. Typically, the remote
// server passes cmd to the shell for interpretation.
// A Session only accepts one call to Run, Start or Shell.
func (s *Session) Start(cmd string) error {
if s.started {
return errors.New("ssh: session already started")
}
req := execMsg{
Command: cmd,
}
ok, err := s.ch.SendRequest("exec", true, Marshal(&req))
if err == nil && !ok {
err = fmt.Errorf("ssh: command %v failed", cmd)
}
if err != nil {
return err
}
return s.start()
}
// Run runs cmd on the remote host. Typically, the remote
// server passes cmd to the shell for interpretation.
// A Session only accepts one call to Run, Start, Shell, Output,
// or CombinedOutput.
//
// The returned error is nil if the command runs, has no problems
// copying stdin, stdout, and stderr, and exits with a zero exit
// status.
//
// If the remote server does not send an exit status, an error of type
// *ExitMissingError is returned. If the command completes
// unsuccessfully or is interrupted by a signal, the error is of type
// *ExitError. Other error types may be returned for I/O problems.
func (s *Session) Run(cmd string) error {
err := s.Start(cmd)
if err != nil {
return err
}
return s.Wait()
}
// Output runs cmd on the remote host and returns its standard output.
func (s *Session) Output(cmd string) ([]byte, error) {
if s.Stdout != nil {
return nil, errors.New("ssh: Stdout already set")
}
var b bytes.Buffer
s.Stdout = &b
err := s.Run(cmd)
return b.Bytes(), err
}
type singleWriter struct {
b bytes.Buffer
mu sync.Mutex
}
func (w *singleWriter) Write(p []byte) (int, error) {
w.mu.Lock()
defer w.mu.Unlock()
return w.b.Write(p)
}
// CombinedOutput runs cmd on the remote host and returns its combined
// standard output and standard error.
func (s *Session) CombinedOutput(cmd string) ([]byte, error) {
if s.Stdout != nil {
return nil, errors.New("ssh: Stdout already set")
}
if s.Stderr != nil {
return nil, errors.New("ssh: Stderr already set")
}
var b singleWriter
s.Stdout = &b
s.Stderr = &b
err := s.Run(cmd)
return b.b.Bytes(), err
}
// Shell starts a login shell on the remote host. A Session only
// accepts one call to Run, Start, Shell, Output, or CombinedOutput.
func (s *Session) Shell() error {
if s.started {
return errors.New("ssh: session already started")
}
ok, err := s.ch.SendRequest("shell", true, nil)
if err == nil && !ok {
return errors.New("ssh: could not start shell")
}
if err != nil {
return err
}
return s.start()
}
func (s *Session) start() error {
s.started = true
type F func(*Session)
for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} {
setupFd(s)
}
s.errors = make(chan error, len(s.copyFuncs))
for _, fn := range s.copyFuncs {
go func(fn func() error) {
s.errors <- fn()
}(fn)
}
return nil
}
// Wait waits for the remote command to exit.
//
// The returned error is nil if the command runs, has no problems
// copying stdin, stdout, and stderr, and exits with a zero exit
// status.
//
// If the remote server does not send an exit status, an error of type
// *ExitMissingError is returned. If the command completes
// unsuccessfully or is interrupted by a signal, the error is of type
// *ExitError. Other error types may be returned for I/O problems.
func (s *Session) Wait() error {
if !s.started {
return errors.New("ssh: session not started")
}
waitErr := <-s.exitStatus
if s.stdinPipeWriter != nil {
s.stdinPipeWriter.Close()
}
var copyError error
for range s.copyFuncs {
if err := <-s.errors; err != nil && copyError == nil {
copyError = err
}
}
if waitErr != nil {
return waitErr
}
return copyError
}
func (s *Session) wait(reqs <-chan *Request) error {
wm := Waitmsg{status: -1}
// Wait for msg channel to be closed before returning.
for msg := range reqs {
switch msg.Type {
case "exit-status":
wm.status = int(binary.BigEndian.Uint32(msg.Payload))
case "exit-signal":
var sigval struct {
Signal string
CoreDumped bool
Error string
Lang string
}
if err := Unmarshal(msg.Payload, &sigval); err != nil {
return err
}
// Must sanitize strings?
wm.signal = sigval.Signal
wm.msg = sigval.Error
wm.lang = sigval.Lang
default:
// This handles keepalives and matches
// OpenSSH's behaviour.
if msg.WantReply {
msg.Reply(false, nil)
}
}
}
if wm.status == 0 {
return nil
}
if wm.status == -1 {
// exit-status was never sent from server
if wm.signal == "" {
// signal was not sent either. RFC 4254
// section 6.10 recommends against this
// behavior, but it is allowed, so we let
// clients handle it.
return &ExitMissingError{}
}
wm.status = 128
if _, ok := signals[Signal(wm.signal)]; ok {
wm.status += signals[Signal(wm.signal)]
}
}
return &ExitError{wm}
}
// ExitMissingError is returned if a session is torn down cleanly, but
// the server sends no confirmation of the exit status.
type ExitMissingError struct{}
func (e *ExitMissingError) Error() string {
return "wait: remote command exited without exit status or exit signal"
}
func (s *Session) stdin() {
if s.stdinpipe {
return
}
var stdin io.Reader
if s.Stdin == nil {
stdin = new(bytes.Buffer)
} else {
r, w := io.Pipe()
go func() {
_, err := io.Copy(w, s.Stdin)
w.CloseWithError(err)
}()
stdin, s.stdinPipeWriter = r, w
}
s.copyFuncs = append(s.copyFuncs, func() error {
_, err := io.Copy(s.ch, stdin)
if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF {
err = err1
}
return err
})
}
func (s *Session) stdout() {
if s.stdoutpipe {
return
}
if s.Stdout == nil {
s.Stdout = ioutil.Discard
}
s.copyFuncs = append(s.copyFuncs, func() error {
_, err := io.Copy(s.Stdout, s.ch)
return err
})
}
func (s *Session) stderr() {
if s.stderrpipe {
return
}
if s.Stderr == nil {
s.Stderr = ioutil.Discard
}
s.copyFuncs = append(s.copyFuncs, func() error {
_, err := io.Copy(s.Stderr, s.ch.Stderr())
return err
})
}
// sessionStdin reroutes Close to CloseWrite.
type sessionStdin struct {
io.Writer
ch Channel
}
func (s *sessionStdin) Close() error {
return s.ch.CloseWrite()
}
// StdinPipe returns a pipe that will be connected to the
// remote command's standard input when the command starts.
func (s *Session) StdinPipe() (io.WriteCloser, error) {
if s.Stdin != nil {
return nil, errors.New("ssh: Stdin already set")
}
if s.started {
return nil, errors.New("ssh: StdinPipe after process started")
}
s.stdinpipe = true
return &sessionStdin{s.ch, s.ch}, nil
}
// StdoutPipe returns a pipe that will be connected to the
// remote command's standard output when the command starts.
// There is a fixed amount of buffering that is shared between
// stdout and stderr streams. If the StdoutPipe reader is
// not serviced fast enough it may eventually cause the
// remote command to block.
func (s *Session) StdoutPipe() (io.Reader, error) {
if s.Stdout != nil {
return nil, errors.New("ssh: Stdout already set")
}
if s.started {
return nil, errors.New("ssh: StdoutPipe after process started")
}
s.stdoutpipe = true
return s.ch, nil
}
// StderrPipe returns a pipe that will be connected to the
// remote command's standard error when the command starts.
// There is a fixed amount of buffering that is shared between
// stdout and stderr streams. If the StderrPipe reader is
// not serviced fast enough it may eventually cause the
// remote command to block.
func (s *Session) StderrPipe() (io.Reader, error) {
if s.Stderr != nil {
return nil, errors.New("ssh: Stderr already set")
}
if s.started {
return nil, errors.New("ssh: StderrPipe after process started")
}
s.stderrpipe = true
return s.ch.Stderr(), nil
}
// newSession returns a new interactive session on the remote host.
func newSession(ch Channel, reqs <-chan *Request) (*Session, error) {
s := &Session{
ch: ch,
}
s.exitStatus = make(chan error, 1)
go func() {
s.exitStatus <- s.wait(reqs)
}()
return s, nil
}
// An ExitError reports unsuccessful completion of a remote command.
type ExitError struct {
Waitmsg
}
func (e *ExitError) Error() string {
return e.Waitmsg.String()
}
// Waitmsg stores the information about an exited remote command
// as reported by Wait.
type Waitmsg struct {
status int
signal string
msg string
lang string
}
// ExitStatus returns the exit status of the remote command.
func (w Waitmsg) ExitStatus() int {
return w.status
}
// Signal returns the exit signal of the remote command if
// it was terminated violently.
func (w Waitmsg) Signal() string {
return w.signal
}
// Msg returns the exit message given by the remote command
func (w Waitmsg) Msg() string {
return w.msg
}
// Lang returns the language tag. See RFC 3066
func (w Waitmsg) Lang() string {
return w.lang
}
func (w Waitmsg) String() string {
str := fmt.Sprintf("Process exited with status %v", w.status)
if w.signal != "" {
str += fmt.Sprintf(" from signal %v", w.signal)
}
if w.msg != "" {
str += fmt.Sprintf(". Reason was: %v", w.msg)
}
return str
}
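A caller-side sketch of the Output and exit-error handling described above; client is assumed to be an already-connected *ssh.Client and the command is arbitrary:
package main
import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)
func runUptime(client *ssh.Client) {
	sess, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer sess.Close()
	out, err := sess.Output("uptime")
	switch e := err.(type) {
	case nil:
		fmt.Printf("%s", out)
	case *ssh.ExitError:
		// The command ran but exited non-zero or was killed by a signal.
		log.Printf("remote exit status %d, signal %q", e.ExitStatus(), e.Signal())
	default:
		log.Print(err) // covers *ExitMissingError and transport failures
	}
}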

407
vendor/golang.org/x/crypto/ssh/tcpip.go generated vendored Normal file
View file

@ -0,0 +1,407 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"errors"
"fmt"
"io"
"math/rand"
"net"
"strconv"
"strings"
"sync"
"time"
)
// Listen requests the remote peer open a listening socket on
// addr. Incoming connections will be available by calling Accept on
// the returned net.Listener. The listener must be serviced, or the
// SSH connection may hang.
func (c *Client) Listen(n, addr string) (net.Listener, error) {
laddr, err := net.ResolveTCPAddr(n, addr)
if err != nil {
return nil, err
}
return c.ListenTCP(laddr)
}
// Automatic port allocation is broken with OpenSSH before 6.0. See
// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In
// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0,
// rather than the actual port number. This means you can never open
// two different listeners with auto allocated ports. We work around
// this by trying explicit ports until we succeed.
const openSSHPrefix = "OpenSSH_"
var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano()))
// isBrokenOpenSSHVersion returns true if the given version string
// specifies a version of OpenSSH that is known to have a bug in port
// forwarding.
func isBrokenOpenSSHVersion(versionStr string) bool {
i := strings.Index(versionStr, openSSHPrefix)
if i < 0 {
return false
}
i += len(openSSHPrefix)
j := i
for ; j < len(versionStr); j++ {
if versionStr[j] < '0' || versionStr[j] > '9' {
break
}
}
version, _ := strconv.Atoi(versionStr[i:j])
return version < 6
}
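For illustration, isBrokenOpenSSHVersion("SSH-2.0-OpenSSH_5.9p1 Debian-5") returns true (major version 5 is below 6), while "SSH-2.0-OpenSSH_6.0" and "SSH-2.0-dropbear_2016.74" both return false (6 is not below 6; no "OpenSSH_" prefix at all).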
// autoPortListenWorkaround simulates automatic port allocation by
// trying random ports repeatedly.
func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) {
var sshListener net.Listener
var err error
const tries = 10
for i := 0; i < tries; i++ {
addr := *laddr
addr.Port = 1024 + portRandomizer.Intn(60000)
sshListener, err = c.ListenTCP(&addr)
if err == nil {
laddr.Port = addr.Port
return sshListener, err
}
}
return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err)
}
// RFC 4254 7.1
type channelForwardMsg struct {
addr string
rport uint32
}
// ListenTCP requests the remote peer open a listening socket
// on laddr. Incoming connections will be available by calling
// Accept on the returned net.Listener.
func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) {
if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) {
return c.autoPortListenWorkaround(laddr)
}
m := channelForwardMsg{
laddr.IP.String(),
uint32(laddr.Port),
}
// send message
ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m))
if err != nil {
return nil, err
}
if !ok {
return nil, errors.New("ssh: tcpip-forward request denied by peer")
}
// If the original port was 0, then the remote side will
// supply a real port number in the response.
if laddr.Port == 0 {
var p struct {
Port uint32
}
if err := Unmarshal(resp, &p); err != nil {
return nil, err
}
laddr.Port = int(p.Port)
}
// Register this forward, using the port number we obtained.
ch := c.forwards.add(*laddr)
return &tcpListener{laddr, c, ch}, nil
}
// forwardList stores a mapping between remote
// forward requests and the tcpListeners.
type forwardList struct {
sync.Mutex
entries []forwardEntry
}
// forwardEntry represents an established mapping of a laddr on a
// remote ssh server to a channel connected to a tcpListener.
type forwardEntry struct {
laddr net.TCPAddr
c chan forward
}
// forward represents an incoming forwarded tcpip connection. The
// arguments to add/remove/lookup should be the address as specified in
// the original forward-request.
type forward struct {
newCh NewChannel // the ssh client channel underlying this forward
raddr *net.TCPAddr // the raddr of the incoming connection
}
func (l *forwardList) add(addr net.TCPAddr) chan forward {
l.Lock()
defer l.Unlock()
f := forwardEntry{
addr,
make(chan forward, 1),
}
l.entries = append(l.entries, f)
return f.c
}
// See RFC 4254, section 7.2
type forwardedTCPPayload struct {
Addr string
Port uint32
OriginAddr string
OriginPort uint32
}
// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr.
func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) {
if port == 0 || port > 65535 {
return nil, fmt.Errorf("ssh: port number out of range: %d", port)
}
ip := net.ParseIP(addr)
if ip == nil {
return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr)
}
return &net.TCPAddr{IP: ip, Port: int(port)}, nil
}
func (l *forwardList) handleChannels(in <-chan NewChannel) {
for ch := range in {
var payload forwardedTCPPayload
if err := Unmarshal(ch.ExtraData(), &payload); err != nil {
ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error())
continue
}
// RFC 4254 section 7.2 specifies that incoming
// addresses should list the address, in string
// format. It is implied that this should be an IP
// address, as it would be impossible to connect to it
// otherwise.
laddr, err := parseTCPAddr(payload.Addr, payload.Port)
if err != nil {
ch.Reject(ConnectionFailed, err.Error())
continue
}
raddr, err := parseTCPAddr(payload.OriginAddr, payload.OriginPort)
if err != nil {
ch.Reject(ConnectionFailed, err.Error())
continue
}
if ok := l.forward(*laddr, *raddr, ch); !ok {
// Section 7.2, implementations MUST reject spurious incoming
// connections.
ch.Reject(Prohibited, "no forward for address")
continue
}
}
}
// remove removes the forward entry, and the channel feeding its
// listener.
func (l *forwardList) remove(addr net.TCPAddr) {
l.Lock()
defer l.Unlock()
for i, f := range l.entries {
if addr.IP.Equal(f.laddr.IP) && addr.Port == f.laddr.Port {
l.entries = append(l.entries[:i], l.entries[i+1:]...)
close(f.c)
return
}
}
}
// closeAll closes and clears all forwards.
func (l *forwardList) closeAll() {
l.Lock()
defer l.Unlock()
for _, f := range l.entries {
close(f.c)
}
l.entries = nil
}
func (l *forwardList) forward(laddr, raddr net.TCPAddr, ch NewChannel) bool {
l.Lock()
defer l.Unlock()
for _, f := range l.entries {
if laddr.IP.Equal(f.laddr.IP) && laddr.Port == f.laddr.Port {
f.c <- forward{ch, &raddr}
return true
}
}
return false
}
type tcpListener struct {
laddr *net.TCPAddr
conn *Client
in <-chan forward
}
// Accept waits for and returns the next connection to the listener.
func (l *tcpListener) Accept() (net.Conn, error) {
s, ok := <-l.in
if !ok {
return nil, io.EOF
}
ch, incoming, err := s.newCh.Accept()
if err != nil {
return nil, err
}
go DiscardRequests(incoming)
return &tcpChanConn{
Channel: ch,
laddr: l.laddr,
raddr: s.raddr,
}, nil
}
// Close closes the listener.
func (l *tcpListener) Close() error {
m := channelForwardMsg{
l.laddr.IP.String(),
uint32(l.laddr.Port),
}
// this also closes the listener.
l.conn.forwards.remove(*l.laddr)
ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m))
if err == nil && !ok {
err = errors.New("ssh: cancel-tcpip-forward failed")
}
return err
}
// Addr returns the listener's network address.
func (l *tcpListener) Addr() net.Addr {
return l.laddr
}
// Dial initiates a connection to the addr from the remote host.
// The resulting connection has a zero LocalAddr() and RemoteAddr().
func (c *Client) Dial(n, addr string) (net.Conn, error) {
// Parse the address into host and numeric port.
host, portString, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
}
port, err := strconv.ParseUint(portString, 10, 16)
if err != nil {
return nil, err
}
// Use a zero address for local and remote address.
zeroAddr := &net.TCPAddr{
IP: net.IPv4zero,
Port: 0,
}
ch, err := c.dial(net.IPv4zero.String(), 0, host, int(port))
if err != nil {
return nil, err
}
return &tcpChanConn{
Channel: ch,
laddr: zeroAddr,
raddr: zeroAddr,
}, nil
}
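// exampleDirectTCPIP is an illustrative sketch, not part of the upstream
// file. It assumes an already-established *Client and uses Dial above to
// open a direct-tcpip channel to a hypothetical host reachable from the
// remote side, then speaks plain HTTP/1.0 over it.
func exampleDirectTCPIP(client *Client) error {
	conn, err := client.Dial("tcp", "internal.example.com:80")
	if err != nil {
		return err
	}
	defer conn.Close()
	// The returned net.Conn is backed by an SSH channel, so reads and
	// writes are multiplexed over the existing client connection.
	_, err = io.WriteString(conn, "GET / HTTP/1.0\r\nHost: internal.example.com\r\n\r\n")
	return err
}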
// DialTCP connects to the remote address raddr on the network n,
// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used
// as the local address for the connection.
func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) {
if laddr == nil {
laddr = &net.TCPAddr{
IP: net.IPv4zero,
Port: 0,
}
}
ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port)
if err != nil {
return nil, err
}
return &tcpChanConn{
Channel: ch,
laddr: laddr,
raddr: raddr,
}, nil
}
// RFC 4254 7.2
type channelOpenDirectMsg struct {
raddr string
rport uint32
laddr string
lport uint32
}
func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) {
msg := channelOpenDirectMsg{
raddr: raddr,
rport: uint32(rport),
laddr: laddr,
lport: uint32(lport),
}
ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg))
if err != nil {
return nil, err
}
go DiscardRequests(in)
return ch, err
}
type tcpChan struct {
Channel // the backing channel
}
// tcpChanConn fulfills the net.Conn interface without
// the tcpChan having to hold laddr or raddr directly.
type tcpChanConn struct {
Channel
laddr, raddr net.Addr
}
// LocalAddr returns the local network address.
func (t *tcpChanConn) LocalAddr() net.Addr {
return t.laddr
}
// RemoteAddr returns the remote network address.
func (t *tcpChanConn) RemoteAddr() net.Addr {
return t.raddr
}
// SetDeadline sets the read and write deadlines associated
// with the connection.
func (t *tcpChanConn) SetDeadline(deadline time.Time) error {
if err := t.SetReadDeadline(deadline); err != nil {
return err
}
return t.SetWriteDeadline(deadline)
}
// SetReadDeadline exists to satisfy the net.Conn interface
// but is not implemented by this type. It always returns an error.
func (t *tcpChanConn) SetReadDeadline(deadline time.Time) error {
return errors.New("ssh: tcpChan: deadline not supported")
}
// SetWriteDeadline exists to satisfy the net.Conn interface
// but is not implemented by this type. It always returns an error.
func (t *tcpChanConn) SetWriteDeadline(deadline time.Time) error {
return errors.New("ssh: tcpChan: deadline not supported")
}

333
vendor/golang.org/x/crypto/ssh/transport.go generated vendored Normal file
View file

@@ -0,0 +1,333 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"bufio"
"errors"
"io"
)
const (
gcmCipherID = "aes128-gcm@openssh.com"
aes128cbcID = "aes128-cbc"
tripledescbcID = "3des-cbc"
)
// packetConn represents a transport that implements packet based
// operations.
type packetConn interface {
// Encrypt and send a packet of data to the remote peer.
writePacket(packet []byte) error
// Read a packet from the connection
readPacket() ([]byte, error)
// Close closes the write-side of the connection.
Close() error
}
// transport is the keyingTransport that implements the SSH packet
// protocol.
type transport struct {
reader connectionState
writer connectionState
bufReader *bufio.Reader
bufWriter *bufio.Writer
rand io.Reader
io.Closer
}
// packetCipher represents a combination of SSH encryption and MAC
// algorithms. A single instance should be used for one direction only.
type packetCipher interface {
// writePacket encrypts the packet and writes it to w. The
// contents of the packet slice are generally scrambled in the
// process, so the caller should not reuse it.
writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error
// readPacket reads and decrypts a packet of data. The
// returned packet may be overwritten by future calls of
// readPacket.
readPacket(seqnum uint32, r io.Reader) ([]byte, error)
}
// connectionState represents one side (read or write) of the
// connection. This is necessary because each direction has its own
// keys, and can even have its own algorithms.
type connectionState struct {
packetCipher
seqNum uint32
dir direction
pendingKeyChange chan packetCipher
}
// prepareKeyChange sets up key material for a key change. The key changes in
// both directions are triggered by reading and writing a msgNewKeys packet
// respectively.
func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error {
if ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult); err != nil {
return err
} else {
t.reader.pendingKeyChange <- ciph
}
if ciph, err := newPacketCipher(t.writer.dir, algs.w, kexResult); err != nil {
return err
} else {
t.writer.pendingKeyChange <- ciph
}
return nil
}
// Read and decrypt next packet.
func (t *transport) readPacket() ([]byte, error) {
return t.reader.readPacket(t.bufReader)
}
func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
packet, err := s.packetCipher.readPacket(s.seqNum, r)
s.seqNum++
if err == nil && len(packet) == 0 {
err = errors.New("ssh: zero length packet")
}
if len(packet) > 0 {
switch packet[0] {
case msgNewKeys:
select {
case cipher := <-s.pendingKeyChange:
s.packetCipher = cipher
default:
return nil, errors.New("ssh: got bogus newkeys message.")
}
case msgDisconnect:
// Transform a disconnect message into an
// error. Since this is the lowest level at which
// we interpret message types, doing it here
// ensures that we don't have to handle it
// elsewhere.
var msg disconnectMsg
if err := Unmarshal(packet, &msg); err != nil {
return nil, err
}
return nil, &msg
}
}
// The packet may point to an internal buffer, so copy the
// packet out here.
fresh := make([]byte, len(packet))
copy(fresh, packet)
return fresh, err
}
func (t *transport) writePacket(packet []byte) error {
return t.writer.writePacket(t.bufWriter, t.rand, packet)
}
func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error {
changeKeys := len(packet) > 0 && packet[0] == msgNewKeys
err := s.packetCipher.writePacket(s.seqNum, w, rand, packet)
if err != nil {
return err
}
if err = w.Flush(); err != nil {
return err
}
s.seqNum++
if changeKeys {
select {
case cipher := <-s.pendingKeyChange:
s.packetCipher = cipher
default:
panic("ssh: no key material for msgNewKeys")
}
}
return err
}
func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport {
t := &transport{
bufReader: bufio.NewReader(rwc),
bufWriter: bufio.NewWriter(rwc),
rand: rand,
reader: connectionState{
packetCipher: &streamPacketCipher{cipher: noneCipher{}},
pendingKeyChange: make(chan packetCipher, 1),
},
writer: connectionState{
packetCipher: &streamPacketCipher{cipher: noneCipher{}},
pendingKeyChange: make(chan packetCipher, 1),
},
Closer: rwc,
}
if isClient {
t.reader.dir = serverKeys
t.writer.dir = clientKeys
} else {
t.reader.dir = clientKeys
t.writer.dir = serverKeys
}
return t
}
type direction struct {
ivTag []byte
keyTag []byte
macKeyTag []byte
}
var (
serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}}
clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}}
)
// generateKeys generates key material for IV, MAC and encryption.
func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) {
cipherMode := cipherModes[algs.Cipher]
macMode := macModes[algs.MAC]
iv = make([]byte, cipherMode.ivSize)
key = make([]byte, cipherMode.keySize)
macKey = make([]byte, macMode.keySize)
generateKeyMaterial(iv, d.ivTag, kex)
generateKeyMaterial(key, d.keyTag, kex)
generateKeyMaterial(macKey, d.macKeyTag, kex)
return
}
// newPacketCipher creates a packetCipher from the cipher and MAC keys derived
// from kex.K, kex.H and the session ID, as described in RFC 4253, section 6.4.
// d should be either serverKeys (to set up server->client keys) or clientKeys
// (for client->server keys).
func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
iv, key, macKey := generateKeys(d, algs, kex)
if algs.Cipher == gcmCipherID {
return newGCMCipher(iv, key, macKey)
}
if algs.Cipher == aes128cbcID {
return newAESCBCCipher(iv, key, macKey, algs)
}
if algs.Cipher == tripledescbcID {
return newTripleDESCBCCipher(iv, key, macKey, algs)
}
c := &streamPacketCipher{
mac: macModes[algs.MAC].new(macKey),
}
c.macResult = make([]byte, c.mac.Size())
var err error
c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv)
if err != nil {
return nil, err
}
return c, nil
}
// generateKeyMaterial fills out with key material generated from tag, K, H
// and sessionId, as specified in RFC 4253, section 7.2.
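// Specifically (RFC 4253, section 7.2):
//
//	K1 = HASH(K || H || tag || session_id)
//	Kn = HASH(K || H || K1 || K2 || ... || K(n-1))
//
// and out is filled with K1 || K2 || ... truncated to len(out).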
func generateKeyMaterial(out, tag []byte, r *kexResult) {
var digestsSoFar []byte
h := r.Hash.New()
for len(out) > 0 {
h.Reset()
h.Write(r.K)
h.Write(r.H)
if len(digestsSoFar) == 0 {
h.Write(tag)
h.Write(r.SessionID)
} else {
h.Write(digestsSoFar)
}
digest := h.Sum(nil)
n := copy(out, digest)
out = out[n:]
if len(out) > 0 {
digestsSoFar = append(digestsSoFar, digest...)
}
}
}
const packageVersion = "SSH-2.0-Go"
// exchangeVersions sends our version line and reads the peer's. The
// versionLine string should be US-ASCII, start with "SSH-2.0-", and must not
// include a newline. exchangeVersions returns the other side's version line.
func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) {
// Contrary to the RFC, we do not ignore lines that don't
// start with "SSH-2.0-" to make the library usable with
// nonconforming servers.
for _, c := range versionLine {
// The spec disallows non US-ASCII chars, and
// specifically forbids null chars.
if c < 32 {
return nil, errors.New("ssh: junk character in version line")
}
}
if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil {
return
}
them, err = readVersion(rw)
return them, err
}
// maxVersionStringBytes is the maximum number of bytes that we'll
// accept as a version string. RFC 4253, section 4.2 limits this to 255
// characters.
const maxVersionStringBytes = 255
// Read version string as specified by RFC 4253, section 4.2.
func readVersion(r io.Reader) ([]byte, error) {
versionString := make([]byte, 0, 64)
var ok bool
var buf [1]byte
for len(versionString) < maxVersionStringBytes {
_, err := io.ReadFull(r, buf[:])
if err != nil {
return nil, err
}
// The RFC says that the version should be terminated with \r\n
// but several SSH servers actually only send a \n.
if buf[0] == '\n' {
ok = true
break
}
// Non-ASCII chars are disallowed by the RFC, but we are lenient here,
// since Go doesn't use null-terminated strings.
// The RFC allows a comment after a space; however,
// all of it (version and comments) goes into the
// session hash.
versionString = append(versionString, buf[0])
}
if !ok {
return nil, errors.New("ssh: overflow reading version string")
}
// There might be a '\r' on the end which we should remove.
if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' {
versionString = versionString[:len(versionString)-1]
}
return versionString, nil
}
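// exampleReadVersion is an illustrative sketch, not part of the upstream
// file. It feeds a peer's identification string through an in-memory pipe
// and shows that readVersion strips the trailing CR/LF.
func exampleReadVersion() ([]byte, error) {
	r, w := io.Pipe()
	go func() {
		// RFC 4253, section 4.2: the identification string is
		// terminated by CR LF.
		w.Write([]byte("SSH-2.0-OpenSSH_7.2\r\n"))
		w.Close()
	}()
	return readVersion(r) // yields []byte("SSH-2.0-OpenSSH_7.2"), nil
}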

27
vendor/golang.org/x/net/LICENSE generated vendored Normal file
View file

@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/net/PATENTS generated vendored Normal file
View file

@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

78
vendor/golang.org/x/net/html/atom/atom.go generated vendored Normal file
View file

@@ -0,0 +1,78 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package atom provides integer codes (also known as atoms) for a fixed set of
// frequently occurring HTML strings: tag names and attribute keys such as "p"
// and "id".
//
// Sharing an atom's name between all elements with the same tag can result in
// fewer string allocations when tokenizing and parsing HTML. Integer
// comparisons are also generally faster than string comparisons.
//
// The value of an atom's particular code is not guaranteed to stay the same
// between versions of this package. Neither is any ordering guaranteed:
// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
// be dense. The only guarantees are that e.g. looking up "div" will yield
// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
package atom // import "golang.org/x/net/html/atom"
// Atom is an integer code for a string. The zero value maps to "".
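// An Atom's value packs an offset into atomText in its high bits and the
// string's length in its low 8 bits (gen.go emits off<<8 | len(s)), so
// String below can slice the shared text directly.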
type Atom uint32
// String returns the atom's name.
func (a Atom) String() string {
start := uint32(a >> 8)
n := uint32(a & 0xff)
if start+n > uint32(len(atomText)) {
return ""
}
return atomText[start : start+n]
}
func (a Atom) string() string {
return atomText[a>>8 : a>>8+a&0xff]
}
// fnv computes the FNV hash with an arbitrary starting value h.
func fnv(h uint32, s []byte) uint32 {
for i := range s {
h ^= uint32(s[i])
h *= 16777619
}
return h
}
func match(s string, t []byte) bool {
for i, c := range t {
if s[i] != c {
return false
}
}
return true
}
// Lookup returns the atom whose name is s. It returns zero if there is no
// such atom. The lookup is case sensitive.
func Lookup(s []byte) Atom {
if len(s) == 0 || len(s) > maxAtomLen {
return 0
}
h := fnv(hash0, s)
if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
return a
}
if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
return a
}
return 0
}
// String returns a string whose contents are equal to s. In that sense, it is
// equivalent to string(s) but may be more efficient.
func String(s []byte) string {
if a := Lookup(s); a != 0 {
return a.String()
}
return string(s)
}
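// exampleLookup is an illustrative sketch, not part of the upstream file.
// It demonstrates the guarantees documented in the package comment: looking
// up "div" yields Div, Div.String returns "div", and Div is non-zero.
func exampleLookup() bool {
	a := Lookup([]byte("div"))
	return a == Div && a.String() == "div" && a != 0
}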

648
vendor/golang.org/x/net/html/atom/gen.go generated vendored Normal file
View file

@@ -0,0 +1,648 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
// This program generates table.go and table_test.go.
// Invoke as
//
// go run gen.go |gofmt >table.go
// go run gen.go -test |gofmt >table_test.go
import (
"flag"
"fmt"
"math/rand"
"os"
"sort"
"strings"
)
// identifier converts s to a Go exported identifier.
// It converts "div" to "Div" and "accept-charset" to "AcceptCharset".
func identifier(s string) string {
b := make([]byte, 0, len(s))
cap := true
for _, c := range s {
if c == '-' {
cap = true
continue
}
if cap && 'a' <= c && c <= 'z' {
c -= 'a' - 'A'
}
cap = false
b = append(b, byte(c))
}
return string(b)
}
var test = flag.Bool("test", false, "generate table_test.go")
func main() {
flag.Parse()
var all []string
all = append(all, elements...)
all = append(all, attributes...)
all = append(all, eventHandlers...)
all = append(all, extra...)
sort.Strings(all)
if *test {
fmt.Printf("// generated by go run gen.go -test; DO NOT EDIT\n\n")
fmt.Printf("package atom\n\n")
fmt.Printf("var testAtomList = []string{\n")
for _, s := range all {
fmt.Printf("\t%q,\n", s)
}
fmt.Printf("}\n")
return
}
// uniq - lists have dups
// compute max len too
maxLen := 0
w := 0
for _, s := range all {
if w == 0 || all[w-1] != s {
if maxLen < len(s) {
maxLen = len(s)
}
all[w] = s
w++
}
}
all = all[:w]
// Find hash that minimizes table size.
var best *table
for i := 0; i < 1000000; i++ {
if best != nil && 1<<(best.k-1) < len(all) {
break
}
h := rand.Uint32()
for k := uint(0); k <= 16; k++ {
if best != nil && k >= best.k {
break
}
var t table
if t.init(h, k, all) {
best = &t
break
}
}
}
if best == nil {
fmt.Fprintf(os.Stderr, "failed to construct string table\n")
os.Exit(1)
}
// Lay out strings, using overlaps when possible.
layout := append([]string{}, all...)
// Remove strings that are substrings of other strings
for changed := true; changed; {
changed = false
for i, s := range layout {
if s == "" {
continue
}
for j, t := range layout {
if i != j && t != "" && strings.Contains(s, t) {
changed = true
layout[j] = ""
}
}
}
}
// Join strings where one suffix matches another prefix.
for {
// Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
// maximizing overlap length k.
besti := -1
bestj := -1
bestk := 0
for i, s := range layout {
if s == "" {
continue
}
for j, t := range layout {
if i == j {
continue
}
for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
if s[len(s)-k:] == t[:k] {
besti = i
bestj = j
bestk = k
}
}
}
}
if bestk > 0 {
layout[besti] += layout[bestj][bestk:]
layout[bestj] = ""
continue
}
break
}
text := strings.Join(layout, "")
atom := map[string]uint32{}
for _, s := range all {
off := strings.Index(text, s)
if off < 0 {
panic("lost string " + s)
}
atom[s] = uint32(off<<8 | len(s))
}
// Generate the Go code.
fmt.Printf("// generated by go run gen.go; DO NOT EDIT\n\n")
fmt.Printf("package atom\n\nconst (\n")
for _, s := range all {
fmt.Printf("\t%s Atom = %#x\n", identifier(s), atom[s])
}
fmt.Printf(")\n\n")
fmt.Printf("const hash0 = %#x\n\n", best.h0)
fmt.Printf("const maxAtomLen = %d\n\n", maxLen)
fmt.Printf("var table = [1<<%d]Atom{\n", best.k)
for i, s := range best.tab {
if s == "" {
continue
}
fmt.Printf("\t%#x: %#x, // %s\n", i, atom[s], s)
}
fmt.Printf("}\n")
datasize := (1 << best.k) * 4
fmt.Printf("const atomText =\n")
textsize := len(text)
for len(text) > 60 {
fmt.Printf("\t%q +\n", text[:60])
text = text[60:]
}
fmt.Printf("\t%q\n\n", text)
fmt.Fprintf(os.Stderr, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
}
type byLen []string
func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byLen) Len() int { return len(x) }
// fnv computes the FNV hash with an arbitrary starting value h.
func fnv(h uint32, s string) uint32 {
for i := 0; i < len(s); i++ {
h ^= uint32(s[i])
h *= 16777619
}
return h
}
// A table represents an attempt at constructing the lookup table.
// The lookup table uses cuckoo hashing, meaning that each string
// can be found in one of two positions.
type table struct {
h0 uint32
k uint
mask uint32
tab []string
}
// hash returns the two hashes for s.
func (t *table) hash(s string) (h1, h2 uint32) {
h := fnv(t.h0, s)
h1 = h & t.mask
h2 = (h >> 16) & t.mask
return
}
// init initializes the table with the given parameters.
// h0 is the initial hash value,
// k is the number of bits of hash value to use, and
// x is the list of strings to store in the table.
// init returns false if the table cannot be constructed.
func (t *table) init(h0 uint32, k uint, x []string) bool {
t.h0 = h0
t.k = k
t.tab = make([]string, 1<<k)
t.mask = 1<<k - 1
for _, s := range x {
if !t.insert(s) {
return false
}
}
return true
}
// insert inserts s in the table.
func (t *table) insert(s string) bool {
h1, h2 := t.hash(s)
if t.tab[h1] == "" {
t.tab[h1] = s
return true
}
if t.tab[h2] == "" {
t.tab[h2] = s
return true
}
if t.push(h1, 0) {
t.tab[h1] = s
return true
}
if t.push(h2, 0) {
t.tab[h2] = s
return true
}
return false
}
// push attempts to push aside the entry in slot i.
func (t *table) push(i uint32, depth int) bool {
if depth > len(t.tab) {
return false
}
s := t.tab[i]
h1, h2 := t.hash(s)
j := h1 + h2 - i
if t.tab[j] != "" && !t.push(j, depth+1) {
return false
}
t.tab[j] = s
return true
}
// The lists of element names and attribute keys were taken from
// https://html.spec.whatwg.org/multipage/indices.html#index
// as of the "HTML Living Standard - Last Updated 21 February 2015" version.
var elements = []string{
"a",
"abbr",
"address",
"area",
"article",
"aside",
"audio",
"b",
"base",
"bdi",
"bdo",
"blockquote",
"body",
"br",
"button",
"canvas",
"caption",
"cite",
"code",
"col",
"colgroup",
"command",
"data",
"datalist",
"dd",
"del",
"details",
"dfn",
"dialog",
"div",
"dl",
"dt",
"em",
"embed",
"fieldset",
"figcaption",
"figure",
"footer",
"form",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"head",
"header",
"hgroup",
"hr",
"html",
"i",
"iframe",
"img",
"input",
"ins",
"kbd",
"keygen",
"label",
"legend",
"li",
"link",
"map",
"mark",
"menu",
"menuitem",
"meta",
"meter",
"nav",
"noscript",
"object",
"ol",
"optgroup",
"option",
"output",
"p",
"param",
"pre",
"progress",
"q",
"rp",
"rt",
"ruby",
"s",
"samp",
"script",
"section",
"select",
"small",
"source",
"span",
"strong",
"style",
"sub",
"summary",
"sup",
"table",
"tbody",
"td",
"template",
"textarea",
"tfoot",
"th",
"thead",
"time",
"title",
"tr",
"track",
"u",
"ul",
"var",
"video",
"wbr",
}
// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
var attributes = []string{
"abbr",
"accept",
"accept-charset",
"accesskey",
"action",
"alt",
"async",
"autocomplete",
"autofocus",
"autoplay",
"challenge",
"charset",
"checked",
"cite",
"class",
"cols",
"colspan",
"command",
"content",
"contenteditable",
"contextmenu",
"controls",
"coords",
"crossorigin",
"data",
"datetime",
"default",
"defer",
"dir",
"dirname",
"disabled",
"download",
"draggable",
"dropzone",
"enctype",
"for",
"form",
"formaction",
"formenctype",
"formmethod",
"formnovalidate",
"formtarget",
"headers",
"height",
"hidden",
"high",
"href",
"hreflang",
"http-equiv",
"icon",
"id",
"inputmode",
"ismap",
"itemid",
"itemprop",
"itemref",
"itemscope",
"itemtype",
"keytype",
"kind",
"label",
"lang",
"list",
"loop",
"low",
"manifest",
"max",
"maxlength",
"media",
"mediagroup",
"method",
"min",
"minlength",
"multiple",
"muted",
"name",
"novalidate",
"open",
"optimum",
"pattern",
"ping",
"placeholder",
"poster",
"preload",
"radiogroup",
"readonly",
"rel",
"required",
"reversed",
"rows",
"rowspan",
"sandbox",
"spellcheck",
"scope",
"scoped",
"seamless",
"selected",
"shape",
"size",
"sizes",
"sortable",
"sorted",
"span",
"src",
"srcdoc",
"srclang",
"start",
"step",
"style",
"tabindex",
"target",
"title",
"translate",
"type",
"typemustmatch",
"usemap",
"value",
"width",
"wrap",
}
var eventHandlers = []string{
"onabort",
"onautocomplete",
"onautocompleteerror",
"onafterprint",
"onbeforeprint",
"onbeforeunload",
"onblur",
"oncancel",
"oncanplay",
"oncanplaythrough",
"onchange",
"onclick",
"onclose",
"oncontextmenu",
"oncuechange",
"ondblclick",
"ondrag",
"ondragend",
"ondragenter",
"ondragleave",
"ondragover",
"ondragstart",
"ondrop",
"ondurationchange",
"onemptied",
"onended",
"onerror",
"onfocus",
"onhashchange",
"oninput",
"oninvalid",
"onkeydown",
"onkeypress",
"onkeyup",
"onlanguagechange",
"onload",
"onloadeddata",
"onloadedmetadata",
"onloadstart",
"onmessage",
"onmousedown",
"onmousemove",
"onmouseout",
"onmouseover",
"onmouseup",
"onmousewheel",
"onoffline",
"ononline",
"onpagehide",
"onpageshow",
"onpause",
"onplay",
"onplaying",
"onpopstate",
"onprogress",
"onratechange",
"onreset",
"onresize",
"onscroll",
"onseeked",
"onseeking",
"onselect",
"onshow",
"onsort",
"onstalled",
"onstorage",
"onsubmit",
"onsuspend",
"ontimeupdate",
"ontoggle",
"onunload",
"onvolumechange",
"onwaiting",
}
// extra are ad-hoc values not covered by any of the lists above.
var extra = []string{
"align",
"annotation",
"annotation-xml",
"applet",
"basefont",
"bgsound",
"big",
"blink",
"center",
"color",
"desc",
"face",
"font",
"foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
"foreignobject",
"frame",
"frameset",
"image",
"isindex",
"listing",
"malignmark",
"marquee",
"math",
"mglyph",
"mi",
"mn",
"mo",
"ms",
"mtext",
"nobr",
"noembed",
"noframes",
"plaintext",
"prompt",
"public",
"spacer",
"strike",
"svg",
"system",
"tt",
"xmp",
}

713
vendor/golang.org/x/net/html/atom/table.go generated vendored Normal file
View file

@@ -0,0 +1,713 @@
// generated by go run gen.go; DO NOT EDIT
package atom
const (
A Atom = 0x1
Abbr Atom = 0x4
Accept Atom = 0x2106
AcceptCharset Atom = 0x210e
Accesskey Atom = 0x3309
Action Atom = 0x1f606
Address Atom = 0x4f307
Align Atom = 0x1105
Alt Atom = 0x4503
Annotation Atom = 0x1670a
AnnotationXml Atom = 0x1670e
Applet Atom = 0x2b306
Area Atom = 0x2fa04
Article Atom = 0x38807
Aside Atom = 0x8305
Async Atom = 0x7b05
Audio Atom = 0xa605
Autocomplete Atom = 0x1fc0c
Autofocus Atom = 0xb309
Autoplay Atom = 0xce08
B Atom = 0x101
Base Atom = 0xd604
Basefont Atom = 0xd608
Bdi Atom = 0x1a03
Bdo Atom = 0xe703
Bgsound Atom = 0x11807
Big Atom = 0x12403
Blink Atom = 0x12705
Blockquote Atom = 0x12c0a
Body Atom = 0x2f04
Br Atom = 0x202
Button Atom = 0x13606
Canvas Atom = 0x7f06
Caption Atom = 0x1bb07
Center Atom = 0x5b506
Challenge Atom = 0x21f09
Charset Atom = 0x2807
Checked Atom = 0x32807
Cite Atom = 0x3c804
Class Atom = 0x4de05
Code Atom = 0x14904
Col Atom = 0x15003
Colgroup Atom = 0x15008
Color Atom = 0x15d05
Cols Atom = 0x16204
Colspan Atom = 0x16207
Command Atom = 0x17507
Content Atom = 0x42307
Contenteditable Atom = 0x4230f
Contextmenu Atom = 0x3310b
Controls Atom = 0x18808
Coords Atom = 0x19406
Crossorigin Atom = 0x19f0b
Data Atom = 0x44a04
Datalist Atom = 0x44a08
Datetime Atom = 0x23c08
Dd Atom = 0x26702
Default Atom = 0x8607
Defer Atom = 0x14b05
Del Atom = 0x3ef03
Desc Atom = 0x4db04
Details Atom = 0x4807
Dfn Atom = 0x6103
Dialog Atom = 0x1b06
Dir Atom = 0x6903
Dirname Atom = 0x6907
Disabled Atom = 0x10c08
Div Atom = 0x11303
Dl Atom = 0x11e02
Download Atom = 0x40008
Draggable Atom = 0x17b09
Dropzone Atom = 0x39108
Dt Atom = 0x50902
Em Atom = 0x6502
Embed Atom = 0x6505
Enctype Atom = 0x21107
Face Atom = 0x5b304
Fieldset Atom = 0x1b008
Figcaption Atom = 0x1b80a
Figure Atom = 0x1cc06
Font Atom = 0xda04
Footer Atom = 0x8d06
For Atom = 0x1d803
ForeignObject Atom = 0x1d80d
Foreignobject Atom = 0x1e50d
Form Atom = 0x1f204
Formaction Atom = 0x1f20a
Formenctype Atom = 0x20d0b
Formmethod Atom = 0x2280a
Formnovalidate Atom = 0x2320e
Formtarget Atom = 0x2470a
Frame Atom = 0x9a05
Frameset Atom = 0x9a08
H1 Atom = 0x26e02
H2 Atom = 0x29402
H3 Atom = 0x2a702
H4 Atom = 0x2e902
H5 Atom = 0x2f302
H6 Atom = 0x50b02
Head Atom = 0x2d504
Header Atom = 0x2d506
Headers Atom = 0x2d507
Height Atom = 0x25106
Hgroup Atom = 0x25906
Hidden Atom = 0x26506
High Atom = 0x26b04
Hr Atom = 0x27002
Href Atom = 0x27004
Hreflang Atom = 0x27008
Html Atom = 0x25504
HttpEquiv Atom = 0x2780a
I Atom = 0x601
Icon Atom = 0x42204
Id Atom = 0x8502
Iframe Atom = 0x29606
Image Atom = 0x29c05
Img Atom = 0x2a103
Input Atom = 0x3e805
Inputmode Atom = 0x3e809
Ins Atom = 0x1a803
Isindex Atom = 0x2a907
Ismap Atom = 0x2b005
Itemid Atom = 0x33c06
Itemprop Atom = 0x3c908
Itemref Atom = 0x5ad07
Itemscope Atom = 0x2b909
Itemtype Atom = 0x2c308
Kbd Atom = 0x1903
Keygen Atom = 0x3906
Keytype Atom = 0x53707
Kind Atom = 0x10904
Label Atom = 0xf005
Lang Atom = 0x27404
Legend Atom = 0x18206
Li Atom = 0x1202
Link Atom = 0x12804
List Atom = 0x44e04
Listing Atom = 0x44e07
Loop Atom = 0xf404
Low Atom = 0x11f03
Malignmark Atom = 0x100a
Manifest Atom = 0x5f108
Map Atom = 0x2b203
Mark Atom = 0x1604
Marquee Atom = 0x2cb07
Math Atom = 0x2d204
Max Atom = 0x2e103
Maxlength Atom = 0x2e109
Media Atom = 0x6e05
Mediagroup Atom = 0x6e0a
Menu Atom = 0x33804
Menuitem Atom = 0x33808
Meta Atom = 0x45d04
Meter Atom = 0x24205
Method Atom = 0x22c06
Mglyph Atom = 0x2a206
Mi Atom = 0x2eb02
Min Atom = 0x2eb03
Minlength Atom = 0x2eb09
Mn Atom = 0x23502
Mo Atom = 0x3ed02
Ms Atom = 0x2bc02
Mtext Atom = 0x2f505
Multiple Atom = 0x30308
Muted Atom = 0x30b05
Name Atom = 0x6c04
Nav Atom = 0x3e03
Nobr Atom = 0x5704
Noembed Atom = 0x6307
Noframes Atom = 0x9808
Noscript Atom = 0x3d208
Novalidate Atom = 0x2360a
Object Atom = 0x1ec06
Ol Atom = 0xc902
Onabort Atom = 0x13a07
Onafterprint Atom = 0x1c00c
Onautocomplete Atom = 0x1fa0e
Onautocompleteerror Atom = 0x1fa13
Onbeforeprint Atom = 0x6040d
Onbeforeunload Atom = 0x4e70e
Onblur Atom = 0xaa06
Oncancel Atom = 0xe908
Oncanplay Atom = 0x28509
Oncanplaythrough Atom = 0x28510
Onchange Atom = 0x3a708
Onclick Atom = 0x31007
Onclose Atom = 0x31707
Oncontextmenu Atom = 0x32f0d
Oncuechange Atom = 0x3420b
Ondblclick Atom = 0x34d0a
Ondrag Atom = 0x35706
Ondragend Atom = 0x35709
Ondragenter Atom = 0x3600b
Ondragleave Atom = 0x36b0b
Ondragover Atom = 0x3760a
Ondragstart Atom = 0x3800b
Ondrop Atom = 0x38f06
Ondurationchange Atom = 0x39f10
Onemptied Atom = 0x39609
Onended Atom = 0x3af07
Onerror Atom = 0x3b607
Onfocus Atom = 0x3bd07
Onhashchange Atom = 0x3da0c
Oninput Atom = 0x3e607
Oninvalid Atom = 0x3f209
Onkeydown Atom = 0x3fb09
Onkeypress Atom = 0x4080a
Onkeyup Atom = 0x41807
Onlanguagechange Atom = 0x43210
Onload Atom = 0x44206
Onloadeddata Atom = 0x4420c
Onloadedmetadata Atom = 0x45510
Onloadstart Atom = 0x46b0b
Onmessage Atom = 0x47609
Onmousedown Atom = 0x47f0b
Onmousemove Atom = 0x48a0b
Onmouseout Atom = 0x4950a
Onmouseover Atom = 0x4a20b
Onmouseup Atom = 0x4ad09
Onmousewheel Atom = 0x4b60c
Onoffline Atom = 0x4c209
Ononline Atom = 0x4cb08
Onpagehide Atom = 0x4d30a
Onpageshow Atom = 0x4fe0a
Onpause Atom = 0x50d07
Onplay Atom = 0x51706
Onplaying Atom = 0x51709
Onpopstate Atom = 0x5200a
Onprogress Atom = 0x52a0a
Onratechange Atom = 0x53e0c
Onreset Atom = 0x54a07
Onresize Atom = 0x55108
Onscroll Atom = 0x55f08
Onseeked Atom = 0x56708
Onseeking Atom = 0x56f09
Onselect Atom = 0x57808
Onshow Atom = 0x58206
Onsort Atom = 0x58b06
Onstalled Atom = 0x59509
Onstorage Atom = 0x59e09
Onsubmit Atom = 0x5a708
Onsuspend Atom = 0x5bb09
Ontimeupdate Atom = 0xdb0c
Ontoggle Atom = 0x5c408
Onunload Atom = 0x5cc08
Onvolumechange Atom = 0x5d40e
Onwaiting Atom = 0x5e209
Open Atom = 0x3cf04
Optgroup Atom = 0xf608
Optimum Atom = 0x5eb07
Option Atom = 0x60006
Output Atom = 0x49c06
P Atom = 0xc01
Param Atom = 0xc05
Pattern Atom = 0x5107
Ping Atom = 0x7704
Placeholder Atom = 0xc30b
Plaintext Atom = 0xfd09
Poster Atom = 0x15706
Pre Atom = 0x25e03
Preload Atom = 0x25e07
Progress Atom = 0x52c08
Prompt Atom = 0x5fa06
Public Atom = 0x41e06
Q Atom = 0x13101
Radiogroup Atom = 0x30a
Readonly Atom = 0x2fb08
Rel Atom = 0x25f03
Required Atom = 0x1d008
Reversed Atom = 0x5a08
Rows Atom = 0x9204
Rowspan Atom = 0x9207
Rp Atom = 0x1c602
Rt Atom = 0x13f02
Ruby Atom = 0xaf04
S Atom = 0x2c01
Samp Atom = 0x4e04
Sandbox Atom = 0xbb07
Scope Atom = 0x2bd05
Scoped Atom = 0x2bd06
Script Atom = 0x3d406
Seamless Atom = 0x31c08
Section Atom = 0x4e207
Select Atom = 0x57a06
Selected Atom = 0x57a08
Shape Atom = 0x4f905
Size Atom = 0x55504
Sizes Atom = 0x55505
Small Atom = 0x18f05
Sortable Atom = 0x58d08
Sorted Atom = 0x19906
Source Atom = 0x1aa06
Spacer Atom = 0x2db06
Span Atom = 0x9504
Spellcheck Atom = 0x3230a
Src Atom = 0x3c303
Srcdoc Atom = 0x3c306
Srclang Atom = 0x41107
Start Atom = 0x38605
Step Atom = 0x5f704
Strike Atom = 0x53306
Strong Atom = 0x55906
Style Atom = 0x61105
Sub Atom = 0x5a903
Summary Atom = 0x61607
Sup Atom = 0x61d03
Svg Atom = 0x62003
System Atom = 0x62306
Tabindex Atom = 0x46308
Table Atom = 0x42d05
Target Atom = 0x24b06
Tbody Atom = 0x2e05
Td Atom = 0x4702
Template Atom = 0x62608
Textarea Atom = 0x2f608
Tfoot Atom = 0x8c05
Th Atom = 0x22e02
Thead Atom = 0x2d405
Time Atom = 0xdd04
Title Atom = 0xa105
Tr Atom = 0x10502
Track Atom = 0x10505
Translate Atom = 0x14009
Tt Atom = 0x5302
Type Atom = 0x21404
Typemustmatch Atom = 0x2140d
U Atom = 0xb01
Ul Atom = 0x8a02
Usemap Atom = 0x51106
Value Atom = 0x4005
Var Atom = 0x11503
Video Atom = 0x28105
Wbr Atom = 0x12103
Width Atom = 0x50705
Wrap Atom = 0x58704
Xmp Atom = 0xc103
)
const hash0 = 0xc17da63e
const maxAtomLen = 19
var table = [1 << 9]Atom{
0x1: 0x48a0b, // onmousemove
0x2: 0x5e209, // onwaiting
0x3: 0x1fa13, // onautocompleteerror
0x4: 0x5fa06, // prompt
0x7: 0x5eb07, // optimum
0x8: 0x1604, // mark
0xa: 0x5ad07, // itemref
0xb: 0x4fe0a, // onpageshow
0xc: 0x57a06, // select
0xd: 0x17b09, // draggable
0xe: 0x3e03, // nav
0xf: 0x17507, // command
0x11: 0xb01, // u
0x14: 0x2d507, // headers
0x15: 0x44a08, // datalist
0x17: 0x4e04, // samp
0x1a: 0x3fb09, // onkeydown
0x1b: 0x55f08, // onscroll
0x1c: 0x15003, // col
0x20: 0x3c908, // itemprop
0x21: 0x2780a, // http-equiv
0x22: 0x61d03, // sup
0x24: 0x1d008, // required
0x2b: 0x25e07, // preload
0x2c: 0x6040d, // onbeforeprint
0x2d: 0x3600b, // ondragenter
0x2e: 0x50902, // dt
0x2f: 0x5a708, // onsubmit
0x30: 0x27002, // hr
0x31: 0x32f0d, // oncontextmenu
0x33: 0x29c05, // image
0x34: 0x50d07, // onpause
0x35: 0x25906, // hgroup
0x36: 0x7704, // ping
0x37: 0x57808, // onselect
0x3a: 0x11303, // div
0x3b: 0x1fa0e, // onautocomplete
0x40: 0x2eb02, // mi
0x41: 0x31c08, // seamless
0x42: 0x2807, // charset
0x43: 0x8502, // id
0x44: 0x5200a, // onpopstate
0x45: 0x3ef03, // del
0x46: 0x2cb07, // marquee
0x47: 0x3309, // accesskey
0x49: 0x8d06, // footer
0x4a: 0x44e04, // list
0x4b: 0x2b005, // ismap
0x51: 0x33804, // menu
0x52: 0x2f04, // body
0x55: 0x9a08, // frameset
0x56: 0x54a07, // onreset
0x57: 0x12705, // blink
0x58: 0xa105, // title
0x59: 0x38807, // article
0x5b: 0x22e02, // th
0x5d: 0x13101, // q
0x5e: 0x3cf04, // open
0x5f: 0x2fa04, // area
0x61: 0x44206, // onload
0x62: 0xda04, // font
0x63: 0xd604, // base
0x64: 0x16207, // colspan
0x65: 0x53707, // keytype
0x66: 0x11e02, // dl
0x68: 0x1b008, // fieldset
0x6a: 0x2eb03, // min
0x6b: 0x11503, // var
0x6f: 0x2d506, // header
0x70: 0x13f02, // rt
0x71: 0x15008, // colgroup
0x72: 0x23502, // mn
0x74: 0x13a07, // onabort
0x75: 0x3906, // keygen
0x76: 0x4c209, // onoffline
0x77: 0x21f09, // challenge
0x78: 0x2b203, // map
0x7a: 0x2e902, // h4
0x7b: 0x3b607, // onerror
0x7c: 0x2e109, // maxlength
0x7d: 0x2f505, // mtext
0x7e: 0xbb07, // sandbox
0x7f: 0x58b06, // onsort
0x80: 0x100a, // malignmark
0x81: 0x45d04, // meta
0x82: 0x7b05, // async
0x83: 0x2a702, // h3
0x84: 0x26702, // dd
0x85: 0x27004, // href
0x86: 0x6e0a, // mediagroup
0x87: 0x19406, // coords
0x88: 0x41107, // srclang
0x89: 0x34d0a, // ondblclick
0x8a: 0x4005, // value
0x8c: 0xe908, // oncancel
0x8e: 0x3230a, // spellcheck
0x8f: 0x9a05, // frame
0x91: 0x12403, // big
0x94: 0x1f606, // action
0x95: 0x6903, // dir
0x97: 0x2fb08, // readonly
0x99: 0x42d05, // table
0x9a: 0x61607, // summary
0x9b: 0x12103, // wbr
0x9c: 0x30a, // radiogroup
0x9d: 0x6c04, // name
0x9f: 0x62306, // system
0xa1: 0x15d05, // color
0xa2: 0x7f06, // canvas
0xa3: 0x25504, // html
0xa5: 0x56f09, // onseeking
0xac: 0x4f905, // shape
0xad: 0x25f03, // rel
0xae: 0x28510, // oncanplaythrough
0xaf: 0x3760a, // ondragover
0xb0: 0x62608, // template
0xb1: 0x1d80d, // foreignObject
0xb3: 0x9204, // rows
0xb6: 0x44e07, // listing
0xb7: 0x49c06, // output
0xb9: 0x3310b, // contextmenu
0xbb: 0x11f03, // low
0xbc: 0x1c602, // rp
0xbd: 0x5bb09, // onsuspend
0xbe: 0x13606, // button
0xbf: 0x4db04, // desc
0xc1: 0x4e207, // section
0xc2: 0x52a0a, // onprogress
0xc3: 0x59e09, // onstorage
0xc4: 0x2d204, // math
0xc5: 0x4503, // alt
0xc7: 0x8a02, // ul
0xc8: 0x5107, // pattern
0xc9: 0x4b60c, // onmousewheel
0xca: 0x35709, // ondragend
0xcb: 0xaf04, // ruby
0xcc: 0xc01, // p
0xcd: 0x31707, // onclose
0xce: 0x24205, // meter
0xcf: 0x11807, // bgsound
0xd2: 0x25106, // height
0xd4: 0x101, // b
0xd5: 0x2c308, // itemtype
0xd8: 0x1bb07, // caption
0xd9: 0x10c08, // disabled
0xdb: 0x33808, // menuitem
0xdc: 0x62003, // svg
0xdd: 0x18f05, // small
0xde: 0x44a04, // data
0xe0: 0x4cb08, // ononline
0xe1: 0x2a206, // mglyph
0xe3: 0x6505, // embed
0xe4: 0x10502, // tr
0xe5: 0x46b0b, // onloadstart
0xe7: 0x3c306, // srcdoc
0xeb: 0x5c408, // ontoggle
0xed: 0xe703, // bdo
0xee: 0x4702, // td
0xef: 0x8305, // aside
0xf0: 0x29402, // h2
0xf1: 0x52c08, // progress
0xf2: 0x12c0a, // blockquote
0xf4: 0xf005, // label
0xf5: 0x601, // i
0xf7: 0x9207, // rowspan
0xfb: 0x51709, // onplaying
0xfd: 0x2a103, // img
0xfe: 0xf608, // optgroup
0xff: 0x42307, // content
0x101: 0x53e0c, // onratechange
0x103: 0x3da0c, // onhashchange
0x104: 0x4807, // details
0x106: 0x40008, // download
0x109: 0x14009, // translate
0x10b: 0x4230f, // contenteditable
0x10d: 0x36b0b, // ondragleave
0x10e: 0x2106, // accept
0x10f: 0x57a08, // selected
0x112: 0x1f20a, // formaction
0x113: 0x5b506, // center
0x115: 0x45510, // onloadedmetadata
0x116: 0x12804, // link
0x117: 0xdd04, // time
0x118: 0x19f0b, // crossorigin
0x119: 0x3bd07, // onfocus
0x11a: 0x58704, // wrap
0x11b: 0x42204, // icon
0x11d: 0x28105, // video
0x11e: 0x4de05, // class
0x121: 0x5d40e, // onvolumechange
0x122: 0xaa06, // onblur
0x123: 0x2b909, // itemscope
0x124: 0x61105, // style
0x127: 0x41e06, // public
0x129: 0x2320e, // formnovalidate
0x12a: 0x58206, // onshow
0x12c: 0x51706, // onplay
0x12d: 0x3c804, // cite
0x12e: 0x2bc02, // ms
0x12f: 0xdb0c, // ontimeupdate
0x130: 0x10904, // kind
0x131: 0x2470a, // formtarget
0x135: 0x3af07, // onended
0x136: 0x26506, // hidden
0x137: 0x2c01, // s
0x139: 0x2280a, // formmethod
0x13a: 0x3e805, // input
0x13c: 0x50b02, // h6
0x13d: 0xc902, // ol
0x13e: 0x3420b, // oncuechange
0x13f: 0x1e50d, // foreignobject
0x143: 0x4e70e, // onbeforeunload
0x144: 0x2bd05, // scope
0x145: 0x39609, // onemptied
0x146: 0x14b05, // defer
0x147: 0xc103, // xmp
0x148: 0x39f10, // ondurationchange
0x149: 0x1903, // kbd
0x14c: 0x47609, // onmessage
0x14d: 0x60006, // option
0x14e: 0x2eb09, // minlength
0x14f: 0x32807, // checked
0x150: 0xce08, // autoplay
0x152: 0x202, // br
0x153: 0x2360a, // novalidate
0x156: 0x6307, // noembed
0x159: 0x31007, // onclick
0x15a: 0x47f0b, // onmousedown
0x15b: 0x3a708, // onchange
0x15e: 0x3f209, // oninvalid
0x15f: 0x2bd06, // scoped
0x160: 0x18808, // controls
0x161: 0x30b05, // muted
0x162: 0x58d08, // sortable
0x163: 0x51106, // usemap
0x164: 0x1b80a, // figcaption
0x165: 0x35706, // ondrag
0x166: 0x26b04, // high
0x168: 0x3c303, // src
0x169: 0x15706, // poster
0x16b: 0x1670e, // annotation-xml
0x16c: 0x5f704, // step
0x16d: 0x4, // abbr
0x16e: 0x1b06, // dialog
0x170: 0x1202, // li
0x172: 0x3ed02, // mo
0x175: 0x1d803, // for
0x176: 0x1a803, // ins
0x178: 0x55504, // size
0x179: 0x43210, // onlanguagechange
0x17a: 0x8607, // default
0x17b: 0x1a03, // bdi
0x17c: 0x4d30a, // onpagehide
0x17d: 0x6907, // dirname
0x17e: 0x21404, // type
0x17f: 0x1f204, // form
0x181: 0x28509, // oncanplay
0x182: 0x6103, // dfn
0x183: 0x46308, // tabindex
0x186: 0x6502, // em
0x187: 0x27404, // lang
0x189: 0x39108, // dropzone
0x18a: 0x4080a, // onkeypress
0x18b: 0x23c08, // datetime
0x18c: 0x16204, // cols
0x18d: 0x1, // a
0x18e: 0x4420c, // onloadeddata
0x190: 0xa605, // audio
0x192: 0x2e05, // tbody
0x193: 0x22c06, // method
0x195: 0xf404, // loop
0x196: 0x29606, // iframe
0x198: 0x2d504, // head
0x19e: 0x5f108, // manifest
0x19f: 0xb309, // autofocus
0x1a0: 0x14904, // code
0x1a1: 0x55906, // strong
0x1a2: 0x30308, // multiple
0x1a3: 0xc05, // param
0x1a6: 0x21107, // enctype
0x1a7: 0x5b304, // face
0x1a8: 0xfd09, // plaintext
0x1a9: 0x26e02, // h1
0x1aa: 0x59509, // onstalled
0x1ad: 0x3d406, // script
0x1ae: 0x2db06, // spacer
0x1af: 0x55108, // onresize
0x1b0: 0x4a20b, // onmouseover
0x1b1: 0x5cc08, // onunload
0x1b2: 0x56708, // onseeked
0x1b4: 0x2140d, // typemustmatch
0x1b5: 0x1cc06, // figure
0x1b6: 0x4950a, // onmouseout
0x1b7: 0x25e03, // pre
0x1b8: 0x50705, // width
0x1b9: 0x19906, // sorted
0x1bb: 0x5704, // nobr
0x1be: 0x5302, // tt
0x1bf: 0x1105, // align
0x1c0: 0x3e607, // oninput
0x1c3: 0x41807, // onkeyup
0x1c6: 0x1c00c, // onafterprint
0x1c7: 0x210e, // accept-charset
0x1c8: 0x33c06, // itemid
0x1c9: 0x3e809, // inputmode
0x1cb: 0x53306, // strike
0x1cc: 0x5a903, // sub
0x1cd: 0x10505, // track
0x1ce: 0x38605, // start
0x1d0: 0xd608, // basefont
0x1d6: 0x1aa06, // source
0x1d7: 0x18206, // legend
0x1d8: 0x2d405, // thead
0x1da: 0x8c05, // tfoot
0x1dd: 0x1ec06, // object
0x1de: 0x6e05, // media
0x1df: 0x1670a, // annotation
0x1e0: 0x20d0b, // formenctype
0x1e2: 0x3d208, // noscript
0x1e4: 0x55505, // sizes
0x1e5: 0x1fc0c, // autocomplete
0x1e6: 0x9504, // span
0x1e7: 0x9808, // noframes
0x1e8: 0x24b06, // target
0x1e9: 0x38f06, // ondrop
0x1ea: 0x2b306, // applet
0x1ec: 0x5a08, // reversed
0x1f0: 0x2a907, // isindex
0x1f3: 0x27008, // hreflang
0x1f5: 0x2f302, // h5
0x1f6: 0x4f307, // address
0x1fa: 0x2e103, // max
0x1fb: 0xc30b, // placeholder
0x1fc: 0x2f608, // textarea
0x1fe: 0x4ad09, // onmouseup
0x1ff: 0x3800b, // ondragstart
}
const atomText = "abbradiogrouparamalignmarkbdialogaccept-charsetbodyaccesskey" +
"genavaluealtdetailsampatternobreversedfnoembedirnamediagroup" +
"ingasyncanvasidefaultfooterowspanoframesetitleaudionblurubya" +
"utofocusandboxmplaceholderautoplaybasefontimeupdatebdoncance" +
"labelooptgrouplaintextrackindisabledivarbgsoundlowbrbigblink" +
"blockquotebuttonabortranslatecodefercolgroupostercolorcolspa" +
"nnotation-xmlcommandraggablegendcontrolsmallcoordsortedcross" +
"originsourcefieldsetfigcaptionafterprintfigurequiredforeignO" +
"bjectforeignobjectformactionautocompleteerrorformenctypemust" +
"matchallengeformmethodformnovalidatetimeterformtargetheightm" +
"lhgroupreloadhiddenhigh1hreflanghttp-equivideoncanplaythroug" +
"h2iframeimageimglyph3isindexismappletitemscopeditemtypemarqu" +
"eematheaderspacermaxlength4minlength5mtextareadonlymultiplem" +
"utedonclickoncloseamlesspellcheckedoncontextmenuitemidoncuec" +
"hangeondblclickondragendondragenterondragleaveondragoverondr" +
"agstarticleondropzonemptiedondurationchangeonendedonerroronf" +
"ocusrcdocitempropenoscriptonhashchangeoninputmodeloninvalido" +
"nkeydownloadonkeypressrclangonkeyupublicontenteditableonlang" +
"uagechangeonloadeddatalistingonloadedmetadatabindexonloadsta" +
"rtonmessageonmousedownonmousemoveonmouseoutputonmouseoveronm" +
"ouseuponmousewheelonofflineononlineonpagehidesclassectionbef" +
"oreunloaddresshapeonpageshowidth6onpausemaponplayingonpopsta" +
"teonprogresstrikeytypeonratechangeonresetonresizestrongonscr" +
"ollonseekedonseekingonselectedonshowraponsortableonstalledon" +
"storageonsubmitemrefacenteronsuspendontoggleonunloadonvolume" +
"changeonwaitingoptimumanifestepromptoptionbeforeprintstylesu" +
"mmarysupsvgsystemplate"

257
vendor/golang.org/x/net/html/charset/charset.go generated vendored Normal file
View file

@@ -0,0 +1,257 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package charset provides common text encodings for HTML documents.
//
// The mapping from encoding labels to encodings is defined at
// https://encoding.spec.whatwg.org/.
package charset // import "golang.org/x/net/html/charset"
import (
"bytes"
"fmt"
"io"
"mime"
"strings"
"unicode/utf8"
"golang.org/x/net/html"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/encoding/htmlindex"
"golang.org/x/text/transform"
)
// Lookup returns the encoding with the specified label, and its canonical
// name. It returns nil and the empty string if label is not one of the
// standard encodings for HTML. Matching is case-insensitive and ignores
// leading and trailing whitespace. Encoders will use HTML escape sequences for
// runes that are not supported by the character set.
func Lookup(label string) (e encoding.Encoding, name string) {
e, err := htmlindex.Get(label)
if err != nil {
return nil, ""
}
name, _ = htmlindex.Name(e)
return &htmlEncoding{e}, name
}
type htmlEncoding struct{ encoding.Encoding }
func (h *htmlEncoding) NewEncoder() *encoding.Encoder {
// HTML requires a non-terminating legacy encoder. We use HTML escapes to
// substitute unsupported code points.
return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder())
}
// DetermineEncoding determines the encoding of an HTML document by examining
// up to the first 1024 bytes of content and the declared Content-Type.
//
// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding
func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) {
if len(content) > 1024 {
content = content[:1024]
}
for _, b := range boms {
if bytes.HasPrefix(content, b.bom) {
e, name = Lookup(b.enc)
return e, name, true
}
}
if _, params, err := mime.ParseMediaType(contentType); err == nil {
if cs, ok := params["charset"]; ok {
if e, name = Lookup(cs); e != nil {
return e, name, true
}
}
}
if len(content) > 0 {
e, name = prescan(content)
if e != nil {
return e, name, false
}
}
// Try to detect UTF-8.
// First eliminate any partial rune at the end.
for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- {
b := content[i]
if b < 0x80 {
break
}
if utf8.RuneStart(b) {
content = content[:i]
break
}
}
hasHighBit := false
for _, c := range content {
if c >= 0x80 {
hasHighBit = true
break
}
}
if hasHighBit && utf8.Valid(content) {
return encoding.Nop, "utf-8", false
}
// TODO: change default depending on user's locale?
return charmap.Windows1252, "windows-1252", false
}
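// exampleDetermineEncoding is an illustrative sketch, not part of the
// upstream file. A <meta charset> declaration inside the first 1024 bytes is
// honored by the prescan, but is never reported as certain, since only a
// BOM or the Content-Type header gives certainty.
func exampleDetermineEncoding() {
	content := []byte(`<html><head><meta charset="utf-8"><body>hello`)
	_, name, certain := DetermineEncoding(content, "")
	fmt.Println(name, certain) // e.g. "utf-8 false"
}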
// NewReader returns an io.Reader that converts the content of r to UTF-8.
// It calls DetermineEncoding to find out what r's encoding is.
func NewReader(r io.Reader, contentType string) (io.Reader, error) {
preview := make([]byte, 1024)
n, err := io.ReadFull(r, preview)
switch {
case err == io.ErrUnexpectedEOF:
preview = preview[:n]
r = bytes.NewReader(preview)
case err != nil:
return nil, err
default:
r = io.MultiReader(bytes.NewReader(preview), r)
}
if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop {
r = transform.NewReader(r, e.NewDecoder())
}
return r, nil
}
// NewReaderLabel returns a reader that converts from the specified charset to
// UTF-8. It uses Lookup to find the encoding that corresponds to label, and
// returns an error if Lookup returns nil. It is suitable for use as
// encoding/xml.Decoder's CharsetReader function.
func NewReaderLabel(label string, input io.Reader) (io.Reader, error) {
e, _ := Lookup(label)
if e == nil {
return nil, fmt.Errorf("unsupported charset: %q", label)
}
return transform.NewReader(input, e.NewDecoder()), nil
}
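// exampleNewReader is an illustrative sketch, not part of the upstream file.
// The raw bytes are windows-1252 (0xe9 is 'é'), the Content-Type header
// declares the charset, and NewReader transparently decodes to UTF-8.
func exampleNewReader() (string, error) {
	raw := []byte("caf\xe9") // "café" encoded as windows-1252 / ISO-8859-1
	r, err := NewReader(bytes.NewReader(raw), "text/html; charset=iso-8859-1")
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, r); err != nil {
		return "", err
	}
	return buf.String(), nil // "café" in UTF-8
}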
func prescan(content []byte) (e encoding.Encoding, name string) {
z := html.NewTokenizer(bytes.NewReader(content))
for {
switch z.Next() {
case html.ErrorToken:
return nil, ""
case html.StartTagToken, html.SelfClosingTagToken:
tagName, hasAttr := z.TagName()
if !bytes.Equal(tagName, []byte("meta")) {
continue
}
attrList := make(map[string]bool)
gotPragma := false
const (
dontKnow = iota
doNeedPragma
doNotNeedPragma
)
needPragma := dontKnow
name = ""
e = nil
for hasAttr {
var key, val []byte
key, val, hasAttr = z.TagAttr()
ks := string(key)
if attrList[ks] {
continue
}
attrList[ks] = true
for i, c := range val {
if 'A' <= c && c <= 'Z' {
val[i] = c + 0x20
}
}
switch ks {
case "http-equiv":
if bytes.Equal(val, []byte("content-type")) {
gotPragma = true
}
case "content":
if e == nil {
name = fromMetaElement(string(val))
if name != "" {
e, name = Lookup(name)
if e != nil {
needPragma = doNeedPragma
}
}
}
case "charset":
e, name = Lookup(string(val))
needPragma = doNotNeedPragma
}
}
if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma {
continue
}
if strings.HasPrefix(name, "utf-16") {
name = "utf-8"
e = encoding.Nop
}
if e != nil {
return e, name
}
}
}
}
func fromMetaElement(s string) string {
for s != "" {
csLoc := strings.Index(s, "charset")
if csLoc == -1 {
return ""
}
s = s[csLoc+len("charset"):]
s = strings.TrimLeft(s, " \t\n\f\r")
if !strings.HasPrefix(s, "=") {
continue
}
s = s[1:]
s = strings.TrimLeft(s, " \t\n\f\r")
if s == "" {
return ""
}
if q := s[0]; q == '"' || q == '\'' {
s = s[1:]
closeQuote := strings.IndexRune(s, rune(q))
if closeQuote == -1 {
return ""
}
return s[:closeQuote]
}
end := strings.IndexAny(s, "; \t\n\f\r")
if end == -1 {
end = len(s)
}
return s[:end]
}
return ""
}
var boms = []struct {
bom []byte
enc string
}{
{[]byte{0xfe, 0xff}, "utf-16be"},
{[]byte{0xff, 0xfe}, "utf-16le"},
{[]byte{0xef, 0xbb, 0xbf}, "utf-8"},
}

102
vendor/golang.org/x/net/html/const.go generated vendored Normal file
View file

@@ -0,0 +1,102 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
// Section 12.2.3.2 of the HTML5 specification says "The following elements
// have varying levels of special parsing rules".
// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements
var isSpecialElementMap = map[string]bool{
"address": true,
"applet": true,
"area": true,
"article": true,
"aside": true,
"base": true,
"basefont": true,
"bgsound": true,
"blockquote": true,
"body": true,
"br": true,
"button": true,
"caption": true,
"center": true,
"col": true,
"colgroup": true,
"dd": true,
"details": true,
"dir": true,
"div": true,
"dl": true,
"dt": true,
"embed": true,
"fieldset": true,
"figcaption": true,
"figure": true,
"footer": true,
"form": true,
"frame": true,
"frameset": true,
"h1": true,
"h2": true,
"h3": true,
"h4": true,
"h5": true,
"h6": true,
"head": true,
"header": true,
"hgroup": true,
"hr": true,
"html": true,
"iframe": true,
"img": true,
"input": true,
"isindex": true,
"li": true,
"link": true,
"listing": true,
"marquee": true,
"menu": true,
"meta": true,
"nav": true,
"noembed": true,
"noframes": true,
"noscript": true,
"object": true,
"ol": true,
"p": true,
"param": true,
"plaintext": true,
"pre": true,
"script": true,
"section": true,
"select": true,
"source": true,
"style": true,
"summary": true,
"table": true,
"tbody": true,
"td": true,
"template": true,
"textarea": true,
"tfoot": true,
"th": true,
"thead": true,
"title": true,
"tr": true,
"track": true,
"ul": true,
"wbr": true,
"xmp": true,
}
func isSpecialElement(element *Node) bool {
switch element.Namespace {
case "", "html":
return isSpecialElementMap[element.Data]
case "svg":
return element.Data == "foreignObject"
}
return false
}

106
vendor/golang.org/x/net/html/doc.go generated vendored Normal file
View file

@@ -0,0 +1,106 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package html implements an HTML5-compliant tokenizer and parser.
Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
caller's responsibility to ensure that r provides UTF-8 encoded HTML.
z := html.NewTokenizer(r)
Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
which parses the next token and returns its type, or an error:
for {
tt := z.Next()
if tt == html.ErrorToken {
// ...
return ...
}
// Process the current token.
}
There are two APIs for retrieving the current token. The high-level API is to
call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
allow optionally calling Raw after Next but before Token, Text, TagName, or
TagAttr. In EBNF notation, the valid call sequence per token is:
Next {Raw} [ Token | Text | TagName {TagAttr} ]
Token returns an independent data structure that completely describes a token.
Entities (such as "&lt;") are unescaped, tag names and attribute keys are
lower-cased, and attributes are collected into a []Attribute. For example:
for {
if z.Next() == html.ErrorToken {
// Returning io.EOF indicates success.
return z.Err()
}
emitToken(z.Token())
}
The low-level API performs fewer allocations and copies, but the contents of
the []byte values returned by Text, TagName and TagAttr may change on the next
call to Next. For example, to extract an HTML page's anchor text:
depth := 0
for {
tt := z.Next()
switch tt {
case ErrorToken:
return z.Err()
case TextToken:
if depth > 0 {
// emitBytes should copy the []byte it receives,
// if it doesn't process it immediately.
emitBytes(z.Text())
}
case StartTagToken, EndTagToken:
tn, _ := z.TagName()
if len(tn) == 1 && tn[0] == 'a' {
if tt == StartTagToken {
depth++
} else {
depth--
}
}
}
}
Parsing is done by calling Parse with an io.Reader, which returns the root of
the parse tree (the document element) as a *Node. It is the caller's
responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
example, to process each anchor node in depth-first order:
doc, err := html.Parse(r)
if err != nil {
// ...
}
var f func(*html.Node)
f = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
// Do something with n...
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
f(c)
}
}
f(doc)
The relevant specifications include:
https://html.spec.whatwg.org/multipage/syntax.html and
https://html.spec.whatwg.org/multipage/syntax.html#tokenization
*/
package html // import "golang.org/x/net/html"
// The tokenization algorithm implemented by this package is not a line-by-line
// transliteration of the relatively verbose state-machine in the WHATWG
// specification. A more direct approach is used instead, where the program
// counter implies the state, such as whether it is tokenizing a tag or a text
// node. Specification compliance is verified by checking expected and actual
// outputs over a test suite rather than aiming for algorithmic fidelity.
// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
// TODO(nigeltao): How does parsing interact with a JavaScript engine?

156
vendor/golang.org/x/net/html/doctype.go generated vendored Normal file
View file

@@ -0,0 +1,156 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
import (
"strings"
)
// parseDoctype parses the data from a DoctypeToken into a name,
// public identifier, and system identifier. It returns a Node whose Type
// is DoctypeNode, whose Data is the name, and which has attributes
// named "system" and "public" for the two identifiers if they were present.
// quirks is whether the document should be parsed in "quirks mode".
func parseDoctype(s string) (n *Node, quirks bool) {
n = &Node{Type: DoctypeNode}
// Find the name.
space := strings.IndexAny(s, whitespace)
if space == -1 {
space = len(s)
}
n.Data = s[:space]
// The comparison to "html" is case-sensitive.
if n.Data != "html" {
quirks = true
}
n.Data = strings.ToLower(n.Data)
s = strings.TrimLeft(s[space:], whitespace)
if len(s) < 6 {
// It can't start with "PUBLIC" or "SYSTEM".
// Ignore the rest of the string.
return n, quirks || s != ""
}
key := strings.ToLower(s[:6])
s = s[6:]
for key == "public" || key == "system" {
s = strings.TrimLeft(s, whitespace)
if s == "" {
break
}
quote := s[0]
if quote != '"' && quote != '\'' {
break
}
s = s[1:]
q := strings.IndexRune(s, rune(quote))
var id string
if q == -1 {
id = s
s = ""
} else {
id = s[:q]
s = s[q+1:]
}
n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
if key == "public" {
key = "system"
} else {
key = ""
}
}
if key != "" || s != "" {
quirks = true
} else if len(n.Attr) > 0 {
if n.Attr[0].Key == "public" {
public := strings.ToLower(n.Attr[0].Val)
switch public {
case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html":
quirks = true
default:
for _, q := range quirkyIDs {
if strings.HasPrefix(public, q) {
quirks = true
break
}
}
}
// The following two public IDs only cause quirks mode if there is no system ID.
if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
quirks = true
}
}
if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
quirks = true
}
}
return n, quirks
}
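parseDoctype is unexported, but it is exercised through the exported Parse entry point. A hypothetical, self-contained sketch (not part of the vendored files) showing the DoctypeNode produced for a common non-quirky doctype, with its "public" and "system" attributes:
package main

import (
    "fmt"
    "strings"

    "golang.org/x/net/html"
)

func main() {
    const page = `<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"><p>hi`
    doc, err := html.Parse(strings.NewReader(page))
    if err != nil {
        fmt.Println(err)
        return
    }
    for c := doc.FirstChild; c != nil; c = c.NextSibling {
        if c.Type == html.DoctypeNode {
            fmt.Println(c.Data) // html
            for _, a := range c.Attr {
                fmt.Println(a.Key, "=", a.Val) // the public, then the system, identifier
            }
        }
    }
}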
// quirkyIDs is a list of public doctype identifiers that cause a document
// to be interpreted in quirks mode. The identifiers should be in lower case.
var quirkyIDs = []string{
"+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//",
}

2253
vendor/golang.org/x/net/html/entity.go generated vendored Normal file

File diff suppressed because it is too large Load diff

258
vendor/golang.org/x/net/html/escape.go generated vendored Normal file
View file

@ -0,0 +1,258 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
import (
"bytes"
"strings"
"unicode/utf8"
)
// These replacements permit compatibility with old numeric entities that
// assumed Windows-1252 encoding.
// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
var replacementTable = [...]rune{
'\u20AC', // First entry is what 0x80 should be replaced with.
'\u0081',
'\u201A',
'\u0192',
'\u201E',
'\u2026',
'\u2020',
'\u2021',
'\u02C6',
'\u2030',
'\u0160',
'\u2039',
'\u0152',
'\u008D',
'\u017D',
'\u008F',
'\u0090',
'\u2018',
'\u2019',
'\u201C',
'\u201D',
'\u2022',
'\u2013',
'\u2014',
'\u02DC',
'\u2122',
'\u0161',
'\u203A',
'\u0153',
'\u009D',
'\u017E',
'\u0178', // Last entry is 0x9F.
// 0x00->'\uFFFD' is handled programmatically.
// 0x0D->'\u000D' is a no-op.
}
// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the
// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
// Precondition: b[src] == '&' && dst <= src.
// attribute should be true if parsing an attribute value.
func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
// i starts at 1 because we already know that s[0] == '&'.
i, s := 1, b[src:]
if len(s) <= 1 {
b[dst] = b[src]
return dst + 1, src + 1
}
if s[i] == '#' {
if len(s) <= 3 { // We need to have at least "&#.".
b[dst] = b[src]
return dst + 1, src + 1
}
i++
c := s[i]
hex := false
if c == 'x' || c == 'X' {
hex = true
i++
}
x := '\x00'
for i < len(s) {
c = s[i]
i++
if hex {
if '0' <= c && c <= '9' {
x = 16*x + rune(c) - '0'
continue
} else if 'a' <= c && c <= 'f' {
x = 16*x + rune(c) - 'a' + 10
continue
} else if 'A' <= c && c <= 'F' {
x = 16*x + rune(c) - 'A' + 10
continue
}
} else if '0' <= c && c <= '9' {
x = 10*x + rune(c) - '0'
continue
}
if c != ';' {
i--
}
break
}
if i <= 3 { // No characters matched.
b[dst] = b[src]
return dst + 1, src + 1
}
if 0x80 <= x && x <= 0x9F {
// Replace characters from Windows-1252 with UTF-8 equivalents.
x = replacementTable[x-0x80]
} else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
// Replace invalid characters with the replacement character.
x = '\uFFFD'
}
return dst + utf8.EncodeRune(b[dst:], x), src + i
}
// Consume the maximum number of characters possible, with the
// consumed characters matching one of the named references.
for i < len(s) {
c := s[i]
i++
// Lower-cased characters are more common in entities, so we check for them first.
if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
continue
}
if c != ';' {
i--
}
break
}
entityName := string(s[1:i])
if entityName == "" {
// No-op.
} else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
// No-op.
} else if x := entity[entityName]; x != 0 {
return dst + utf8.EncodeRune(b[dst:], x), src + i
} else if x := entity2[entityName]; x[0] != 0 {
dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
} else if !attribute {
maxLen := len(entityName) - 1
if maxLen > longestEntityWithoutSemicolon {
maxLen = longestEntityWithoutSemicolon
}
for j := maxLen; j > 1; j-- {
if x := entity[entityName[:j]]; x != 0 {
return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
}
}
}
dst1, src1 = dst+i, src+i
copy(b[dst:dst1], b[src:src1])
return dst1, src1
}
// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
// attribute should be true if parsing an attribute value.
func unescape(b []byte, attribute bool) []byte {
for i, c := range b {
if c == '&' {
dst, src := unescapeEntity(b, i, i, attribute)
for src < len(b) {
c := b[src]
if c == '&' {
dst, src = unescapeEntity(b, dst, src, attribute)
} else {
b[dst] = c
dst, src = dst+1, src+1
}
}
return b[0:dst]
}
}
return b
}
// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
func lower(b []byte) []byte {
for i, c := range b {
if 'A' <= c && c <= 'Z' {
b[i] = c + 'a' - 'A'
}
}
return b
}
const escapedChars = "&'<>\"\r"
func escape(w writer, s string) error {
i := strings.IndexAny(s, escapedChars)
for i != -1 {
if _, err := w.WriteString(s[:i]); err != nil {
return err
}
var esc string
switch s[i] {
case '&':
esc = "&amp;"
case '\'':
// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
esc = "&#39;"
case '<':
esc = "&lt;"
case '>':
esc = "&gt;"
case '"':
// "&#34;" is shorter than "&quot;".
esc = "&#34;"
case '\r':
esc = "&#13;"
default:
panic("unrecognized escape character")
}
s = s[i+1:]
if _, err := w.WriteString(esc); err != nil {
return err
}
i = strings.IndexAny(s, escapedChars)
}
_, err := w.WriteString(s)
return err
}
// EscapeString escapes special characters like "<" to become "&lt;". It
// escapes only five such characters: <, >, &, ' and ".
// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
// always true.
func EscapeString(s string) string {
if strings.IndexAny(s, escapedChars) == -1 {
return s
}
var buf bytes.Buffer
escape(&buf, s)
return buf.String()
}
// UnescapeString unescapes entities like "&lt;" to become "<". It unescapes a
// larger range of entities than EscapeString escapes. For example, "&aacute;"
// unescapes to "á", as does "&#225;" and "&xE1;".
// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
// always true.
func UnescapeString(s string) string {
for _, c := range s {
if c == '&' {
return string(unescape([]byte(s), false))
}
}
return s
}
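A hypothetical round-trip sketch (not part of the vendored files) for the two exported helpers above, including the asymmetry noted in their comments:
package main

import (
    "fmt"

    "golang.org/x/net/html"
)

func main() {
    s := `Fish & Chips <"special">`
    e := html.EscapeString(s)
    fmt.Println(e)                      // Fish &amp; Chips &lt;&#34;special&#34;&gt;
    fmt.Println(html.UnescapeString(e)) // Fish & Chips <"special"> (round-trips exactly)

    // UnescapeString handles far more entities than EscapeString produces.
    fmt.Println(html.UnescapeString("&aacute; &#225; &#xE1;")) // á á á
}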

226
vendor/golang.org/x/net/html/foreign.go generated vendored Normal file
View file

@ -0,0 +1,226 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
import (
"strings"
)
func adjustAttributeNames(aa []Attribute, nameMap map[string]string) {
for i := range aa {
if newName, ok := nameMap[aa[i].Key]; ok {
aa[i].Key = newName
}
}
}
func adjustForeignAttributes(aa []Attribute) {
for i, a := range aa {
if a.Key == "" || a.Key[0] != 'x' {
continue
}
switch a.Key {
case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show",
"xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink":
j := strings.Index(a.Key, ":")
aa[i].Namespace = a.Key[:j]
aa[i].Key = a.Key[j+1:]
}
}
}
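A hypothetical in-package sketch (illustration only, not part of the vendored file) of the key splitting performed above:
func sketchAdjustForeignAttributes() {
    aa := []Attribute{{Key: "xlink:href", Val: "#icon"}}
    adjustForeignAttributes(aa)
    // aa[0] is now Attribute{Namespace: "xlink", Key: "href", Val: "#icon"};
    // keys not listed in the switch above are left untouched.
    _ = aa
}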
func htmlIntegrationPoint(n *Node) bool {
if n.Type != ElementNode {
return false
}
switch n.Namespace {
case "math":
if n.Data == "annotation-xml" {
for _, a := range n.Attr {
if a.Key == "encoding" {
val := strings.ToLower(a.Val)
if val == "text/html" || val == "application/xhtml+xml" {
return true
}
}
}
}
case "svg":
switch n.Data {
case "desc", "foreignObject", "title":
return true
}
}
return false
}
func mathMLTextIntegrationPoint(n *Node) bool {
if n.Namespace != "math" {
return false
}
switch n.Data {
case "mi", "mo", "mn", "ms", "mtext":
return true
}
return false
}
// Section 12.2.5.5.
var breakout = map[string]bool{
"b": true,
"big": true,
"blockquote": true,
"body": true,
"br": true,
"center": true,
"code": true,
"dd": true,
"div": true,
"dl": true,
"dt": true,
"em": true,
"embed": true,
"h1": true,
"h2": true,
"h3": true,
"h4": true,
"h5": true,
"h6": true,
"head": true,
"hr": true,
"i": true,
"img": true,
"li": true,
"listing": true,
"menu": true,
"meta": true,
"nobr": true,
"ol": true,
"p": true,
"pre": true,
"ruby": true,
"s": true,
"small": true,
"span": true,
"strong": true,
"strike": true,
"sub": true,
"sup": true,
"table": true,
"tt": true,
"u": true,
"ul": true,
"var": true,
}
// Section 12.2.5.5.
var svgTagNameAdjustments = map[string]string{
"altglyph": "altGlyph",
"altglyphdef": "altGlyphDef",
"altglyphitem": "altGlyphItem",
"animatecolor": "animateColor",
"animatemotion": "animateMotion",
"animatetransform": "animateTransform",
"clippath": "clipPath",
"feblend": "feBlend",
"fecolormatrix": "feColorMatrix",
"fecomponenttransfer": "feComponentTransfer",
"fecomposite": "feComposite",
"feconvolvematrix": "feConvolveMatrix",
"fediffuselighting": "feDiffuseLighting",
"fedisplacementmap": "feDisplacementMap",
"fedistantlight": "feDistantLight",
"feflood": "feFlood",
"fefunca": "feFuncA",
"fefuncb": "feFuncB",
"fefuncg": "feFuncG",
"fefuncr": "feFuncR",
"fegaussianblur": "feGaussianBlur",
"feimage": "feImage",
"femerge": "feMerge",
"femergenode": "feMergeNode",
"femorphology": "feMorphology",
"feoffset": "feOffset",
"fepointlight": "fePointLight",
"fespecularlighting": "feSpecularLighting",
"fespotlight": "feSpotLight",
"fetile": "feTile",
"feturbulence": "feTurbulence",
"foreignobject": "foreignObject",
"glyphref": "glyphRef",
"lineargradient": "linearGradient",
"radialgradient": "radialGradient",
"textpath": "textPath",
}
// Section 12.2.5.1
var mathMLAttributeAdjustments = map[string]string{
"definitionurl": "definitionURL",
}
var svgAttributeAdjustments = map[string]string{
"attributename": "attributeName",
"attributetype": "attributeType",
"basefrequency": "baseFrequency",
"baseprofile": "baseProfile",
"calcmode": "calcMode",
"clippathunits": "clipPathUnits",
"contentscripttype": "contentScriptType",
"contentstyletype": "contentStyleType",
"diffuseconstant": "diffuseConstant",
"edgemode": "edgeMode",
"externalresourcesrequired": "externalResourcesRequired",
"filterres": "filterRes",
"filterunits": "filterUnits",
"glyphref": "glyphRef",
"gradienttransform": "gradientTransform",
"gradientunits": "gradientUnits",
"kernelmatrix": "kernelMatrix",
"kernelunitlength": "kernelUnitLength",
"keypoints": "keyPoints",
"keysplines": "keySplines",
"keytimes": "keyTimes",
"lengthadjust": "lengthAdjust",
"limitingconeangle": "limitingConeAngle",
"markerheight": "markerHeight",
"markerunits": "markerUnits",
"markerwidth": "markerWidth",
"maskcontentunits": "maskContentUnits",
"maskunits": "maskUnits",
"numoctaves": "numOctaves",
"pathlength": "pathLength",
"patterncontentunits": "patternContentUnits",
"patterntransform": "patternTransform",
"patternunits": "patternUnits",
"pointsatx": "pointsAtX",
"pointsaty": "pointsAtY",
"pointsatz": "pointsAtZ",
"preservealpha": "preserveAlpha",
"preserveaspectratio": "preserveAspectRatio",
"primitiveunits": "primitiveUnits",
"refx": "refX",
"refy": "refY",
"repeatcount": "repeatCount",
"repeatdur": "repeatDur",
"requiredextensions": "requiredExtensions",
"requiredfeatures": "requiredFeatures",
"specularconstant": "specularConstant",
"specularexponent": "specularExponent",
"spreadmethod": "spreadMethod",
"startoffset": "startOffset",
"stddeviation": "stdDeviation",
"stitchtiles": "stitchTiles",
"surfacescale": "surfaceScale",
"systemlanguage": "systemLanguage",
"tablevalues": "tableValues",
"targetx": "targetX",
"targety": "targetY",
"textlength": "textLength",
"viewbox": "viewBox",
"viewtarget": "viewTarget",
"xchannelselector": "xChannelSelector",
"ychannelselector": "yChannelSelector",
"zoomandpan": "zoomAndPan",
}

193
vendor/golang.org/x/net/html/node.go generated vendored Normal file
View file

@ -0,0 +1,193 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
import (
"golang.org/x/net/html/atom"
)
// A NodeType is the type of a Node.
type NodeType uint32
const (
ErrorNode NodeType = iota
TextNode
DocumentNode
ElementNode
CommentNode
DoctypeNode
scopeMarkerNode
)
// Section 12.2.3.3 says "scope markers are inserted when entering applet
// elements, buttons, object elements, marquees, table cells, and table
// captions, and are used to prevent formatting from 'leaking'".
var scopeMarker = Node{Type: scopeMarkerNode}
// A Node consists of a NodeType and some Data (tag name for element nodes,
// content for text) and are part of a tree of Nodes. Element nodes may also
// have a Namespace and contain a slice of Attributes. Data is unescaped, so
// that it looks like "a<b" rather than "a&lt;b". For element nodes, DataAtom
// is the atom for Data, or zero if Data is not a known tag name.
//
// An empty Namespace implies a "http://www.w3.org/1999/xhtml" namespace.
// Similarly, "math" is short for "http://www.w3.org/1998/Math/MathML", and
// "svg" is short for "http://www.w3.org/2000/svg".
type Node struct {
Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
Type NodeType
DataAtom atom.Atom
Data string
Namespace string
Attr []Attribute
}
// InsertBefore inserts newChild as a child of n, immediately before oldChild
// in the sequence of n's children. oldChild may be nil, in which case newChild
// is appended to the end of n's children.
//
// It will panic if newChild already has a parent or siblings.
func (n *Node) InsertBefore(newChild, oldChild *Node) {
if newChild.Parent != nil || newChild.PrevSibling != nil || newChild.NextSibling != nil {
panic("html: InsertBefore called for an attached child Node")
}
var prev, next *Node
if oldChild != nil {
prev, next = oldChild.PrevSibling, oldChild
} else {
prev = n.LastChild
}
if prev != nil {
prev.NextSibling = newChild
} else {
n.FirstChild = newChild
}
if next != nil {
next.PrevSibling = newChild
} else {
n.LastChild = newChild
}
newChild.Parent = n
newChild.PrevSibling = prev
newChild.NextSibling = next
}
// AppendChild adds a node c as a child of n.
//
// It will panic if c already has a parent or siblings.
func (n *Node) AppendChild(c *Node) {
if c.Parent != nil || c.PrevSibling != nil || c.NextSibling != nil {
panic("html: AppendChild called for an attached child Node")
}
last := n.LastChild
if last != nil {
last.NextSibling = c
} else {
n.FirstChild = c
}
n.LastChild = c
c.Parent = n
c.PrevSibling = last
}
// RemoveChild removes a node c that is a child of n. Afterwards, c will have
// no parent and no siblings.
//
// It will panic if c's parent is not n.
func (n *Node) RemoveChild(c *Node) {
if c.Parent != n {
panic("html: RemoveChild called for a non-child Node")
}
if n.FirstChild == c {
n.FirstChild = c.NextSibling
}
if c.NextSibling != nil {
c.NextSibling.PrevSibling = c.PrevSibling
}
if n.LastChild == c {
n.LastChild = c.PrevSibling
}
if c.PrevSibling != nil {
c.PrevSibling.NextSibling = c.NextSibling
}
c.Parent = nil
c.PrevSibling = nil
c.NextSibling = nil
}
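A hypothetical caller sketch (not part of the vendored files) that builds a tiny fragment with AppendChild and InsertBefore and prints it with this package's Render function:
package main

import (
    "os"

    "golang.org/x/net/html"
    "golang.org/x/net/html/atom"
)

func main() {
    p := &html.Node{Type: html.ElementNode, DataAtom: atom.P, Data: "p"}
    hello := &html.Node{Type: html.TextNode, Data: "hello"}
    p.AppendChild(hello)
    p.InsertBefore(&html.Node{Type: html.TextNode, Data: "say "}, hello)
    html.Render(os.Stdout, p) // <p>say hello</p>
}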
// reparentChildren reparents all of src's child nodes to dst.
func reparentChildren(dst, src *Node) {
for {
child := src.FirstChild
if child == nil {
break
}
src.RemoveChild(child)
dst.AppendChild(child)
}
}
// clone returns a new node with the same type, data and attributes.
// The clone has no parent, no siblings and no children.
func (n *Node) clone() *Node {
m := &Node{
Type: n.Type,
DataAtom: n.DataAtom,
Data: n.Data,
Attr: make([]Attribute, len(n.Attr)),
}
copy(m.Attr, n.Attr)
return m
}
// nodeStack is a stack of nodes.
type nodeStack []*Node
// pop pops the stack. It will panic if s is empty.
func (s *nodeStack) pop() *Node {
i := len(*s)
n := (*s)[i-1]
*s = (*s)[:i-1]
return n
}
// top returns the most recently pushed node, or nil if s is empty.
func (s *nodeStack) top() *Node {
if i := len(*s); i > 0 {
return (*s)[i-1]
}
return nil
}
// index returns the index of the top-most occurrence of n in the stack, or -1
// if n is not present.
func (s *nodeStack) index(n *Node) int {
for i := len(*s) - 1; i >= 0; i-- {
if (*s)[i] == n {
return i
}
}
return -1
}
// insert inserts a node at the given index.
func (s *nodeStack) insert(i int, n *Node) {
(*s) = append(*s, nil)
copy((*s)[i+1:], (*s)[i:])
(*s)[i] = n
}
// remove removes a node from the stack. It is a no-op if n is not present.
func (s *nodeStack) remove(n *Node) {
i := s.index(n)
if i == -1 {
return
}
copy((*s)[i:], (*s)[i+1:])
j := len(*s) - 1
(*s)[j] = nil
*s = (*s)[:j]
}

2094
vendor/golang.org/x/net/html/parse.go generated vendored Normal file

File diff suppressed because it is too large Load diff

271
vendor/golang.org/x/net/html/render.go generated vendored Normal file
View file

@ -0,0 +1,271 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
import (
"bufio"
"errors"
"fmt"
"io"
"strings"
)
type writer interface {
io.Writer
io.ByteWriter
WriteString(string) (int, error)
}
// Render renders the parse tree n to the given writer.
//
// Rendering is done on a 'best effort' basis: calling Parse on the output of
// Render will always result in something similar to the original tree, but it
// is not necessarily an exact clone unless the original tree was 'well-formed'.
// 'Well-formed' is not easily specified; the HTML5 specification is
// complicated.
//
// Calling Parse on arbitrary input typically results in a 'well-formed' parse
// tree. However, it is possible for Parse to yield a 'badly-formed' parse tree.
// For example, in a 'well-formed' parse tree, no <a> element is a child of
// another <a> element: parsing "<a><a>" results in two sibling elements.
// Similarly, in a 'well-formed' parse tree, no <a> element is a child of a
// <table> element: parsing "<p><table><a>" results in a <p> with two sibling
// children; the <a> is reparented to the <table>'s parent. However, calling
// Parse on "<a><table><a>" does not return an error, but the result has an <a>
// element with an <a> child, and is therefore not 'well-formed'.
//
// Programmatically constructed trees are typically also 'well-formed', but it
// is possible to construct a tree that looks innocuous but, when rendered and
// re-parsed, results in a different tree. A simple example is that a solitary
// text node would become a tree containing <html>, <head> and <body> elements.
// Another example is that the programmatic equivalent of "a<head>b</head>c"
// becomes "<html><head><head/><body>abc</body></html>".
func Render(w io.Writer, n *Node) error {
if x, ok := w.(writer); ok {
return render(x, n)
}
buf := bufio.NewWriter(w)
if err := render(buf, n); err != nil {
return err
}
return buf.Flush()
}
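A hypothetical sketch (not part of the vendored files) of the parse-then-render normalization described in the comment above:
package main

import (
    "log"
    "os"
    "strings"

    "golang.org/x/net/html"
)

func main() {
    doc, err := html.Parse(strings.NewReader("a<head>b</head>c"))
    if err != nil {
        log.Fatal(err)
    }
    // Prints, roughly: <html><head></head><body>abc</body></html>
    if err := html.Render(os.Stdout, doc); err != nil {
        log.Fatal(err)
    }
}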
// plaintextAbort is returned from render1 when a <plaintext> element
// has been rendered. No more end tags should be rendered after that.
var plaintextAbort = errors.New("html: internal error (plaintext abort)")
func render(w writer, n *Node) error {
err := render1(w, n)
if err == plaintextAbort {
err = nil
}
return err
}
func render1(w writer, n *Node) error {
// Render non-element nodes; these are the easy cases.
switch n.Type {
case ErrorNode:
return errors.New("html: cannot render an ErrorNode node")
case TextNode:
return escape(w, n.Data)
case DocumentNode:
for c := n.FirstChild; c != nil; c = c.NextSibling {
if err := render1(w, c); err != nil {
return err
}
}
return nil
case ElementNode:
// No-op.
case CommentNode:
if _, err := w.WriteString("<!--"); err != nil {
return err
}
if _, err := w.WriteString(n.Data); err != nil {
return err
}
if _, err := w.WriteString("-->"); err != nil {
return err
}
return nil
case DoctypeNode:
if _, err := w.WriteString("<!DOCTYPE "); err != nil {
return err
}
if _, err := w.WriteString(n.Data); err != nil {
return err
}
if n.Attr != nil {
var p, s string
for _, a := range n.Attr {
switch a.Key {
case "public":
p = a.Val
case "system":
s = a.Val
}
}
if p != "" {
if _, err := w.WriteString(" PUBLIC "); err != nil {
return err
}
if err := writeQuoted(w, p); err != nil {
return err
}
if s != "" {
if err := w.WriteByte(' '); err != nil {
return err
}
if err := writeQuoted(w, s); err != nil {
return err
}
}
} else if s != "" {
if _, err := w.WriteString(" SYSTEM "); err != nil {
return err
}
if err := writeQuoted(w, s); err != nil {
return err
}
}
}
return w.WriteByte('>')
default:
return errors.New("html: unknown node type")
}
// Render the <xxx> opening tag.
if err := w.WriteByte('<'); err != nil {
return err
}
if _, err := w.WriteString(n.Data); err != nil {
return err
}
for _, a := range n.Attr {
if err := w.WriteByte(' '); err != nil {
return err
}
if a.Namespace != "" {
if _, err := w.WriteString(a.Namespace); err != nil {
return err
}
if err := w.WriteByte(':'); err != nil {
return err
}
}
if _, err := w.WriteString(a.Key); err != nil {
return err
}
if _, err := w.WriteString(`="`); err != nil {
return err
}
if err := escape(w, a.Val); err != nil {
return err
}
if err := w.WriteByte('"'); err != nil {
return err
}
}
if voidElements[n.Data] {
if n.FirstChild != nil {
return fmt.Errorf("html: void element <%s> has child nodes", n.Data)
}
_, err := w.WriteString("/>")
return err
}
if err := w.WriteByte('>'); err != nil {
return err
}
// Add initial newline where there is danger of a newline being ignored.
if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
switch n.Data {
case "pre", "listing", "textarea":
if err := w.WriteByte('\n'); err != nil {
return err
}
}
}
// Render any child nodes.
switch n.Data {
case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
for c := n.FirstChild; c != nil; c = c.NextSibling {
if c.Type == TextNode {
if _, err := w.WriteString(c.Data); err != nil {
return err
}
} else {
if err := render1(w, c); err != nil {
return err
}
}
}
if n.Data == "plaintext" {
// Don't render anything else. <plaintext> must be the
// last element in the file, with no closing tag.
return plaintextAbort
}
default:
for c := n.FirstChild; c != nil; c = c.NextSibling {
if err := render1(w, c); err != nil {
return err
}
}
}
// Render the </xxx> closing tag.
if _, err := w.WriteString("</"); err != nil {
return err
}
if _, err := w.WriteString(n.Data); err != nil {
return err
}
return w.WriteByte('>')
}
// writeQuoted writes s to w surrounded by quotes. Normally it will use double
// quotes, but if s contains a double quote, it will use single quotes.
// It is used for writing the identifiers in a doctype declaration.
// In valid HTML, they can't contain both types of quotes.
func writeQuoted(w writer, s string) error {
var q byte = '"'
if strings.Contains(s, `"`) {
q = '\''
}
if err := w.WriteByte(q); err != nil {
return err
}
if _, err := w.WriteString(s); err != nil {
return err
}
if err := w.WriteByte(q); err != nil {
return err
}
return nil
}
// Section 12.1.2, "Elements", gives this list of void elements. Void elements
// are those that can't have any contents.
var voidElements = map[string]bool{
"area": true,
"base": true,
"br": true,
"col": true,
"command": true,
"embed": true,
"hr": true,
"img": true,
"input": true,
"keygen": true,
"link": true,
"meta": true,
"param": true,
"source": true,
"track": true,
"wbr": true,
}

1219
vendor/golang.org/x/net/html/token.go generated vendored Normal file

File diff suppressed because it is too large Load diff

27
vendor/golang.org/x/text/LICENSE generated vendored Normal file
View file

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/text/PATENTS generated vendored Normal file
View file

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

209
vendor/golang.org/x/text/encoding/charmap/charmap.go generated vendored Normal file
View file

@ -0,0 +1,209 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run maketables.go
// Package charmap provides simple character encodings such as IBM Code Page 437
// and Windows 1252.
package charmap // import "golang.org/x/text/encoding/charmap"
import (
"unicode/utf8"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/transform"
)
// These encodings vary only in the way clients should interpret them. Their
// coded character set is identical and a single implementation can be shared.
var (
// ISO8859_6E is the ISO 8859-6E encoding.
ISO8859_6E encoding.Encoding = &iso8859_6E
// ISO8859_6I is the ISO 8859-6I encoding.
ISO8859_6I encoding.Encoding = &iso8859_6I
// ISO8859_8E is the ISO 8859-8E encoding.
ISO8859_8E encoding.Encoding = &iso8859_8E
// ISO8859_8I is the ISO 8859-8I encoding.
ISO8859_8I encoding.Encoding = &iso8859_8I
iso8859_6E = internal.Encoding{
ISO8859_6,
"ISO-8859-6E",
identifier.ISO88596E,
}
iso8859_6I = internal.Encoding{
ISO8859_6,
"ISO-8859-6I",
identifier.ISO88596I,
}
iso8859_8E = internal.Encoding{
ISO8859_8,
"ISO-8859-8E",
identifier.ISO88598E,
}
iso8859_8I = internal.Encoding{
ISO8859_8,
"ISO-8859-8I",
identifier.ISO88598I,
}
)
// All is a list of all defined encodings in this package.
var All = listAll
// TODO: implement these encodings, in order of importance.
// ASCII, ISO8859_1: Rather common. Close to Windows 1252.
// ISO8859_9: Close to Windows 1254.
// utf8Enc holds a rune's UTF-8 encoding in data[:len].
type utf8Enc struct {
len uint8
data [3]byte
}
// charmap describes an 8-bit character set encoding.
type charmap struct {
// name is the encoding's name.
name string
// mib is the encoding type of this encoder.
mib identifier.MIB
// asciiSuperset states whether the encoding is a superset of ASCII.
asciiSuperset bool
// low is the lower bound of the encoded byte for a non-ASCII rune. If
// charmap.asciiSuperset is true then this will be 0x80, otherwise 0x00.
low uint8
// replacement is the encoded replacement character.
replacement byte
// decode is the map from encoded byte to UTF-8.
decode [256]utf8Enc
// encoding is the map from runes to encoded bytes. Each entry is a
// uint32: the high 8 bits are the encoded byte and the low 24 bits are
// the rune. The table entries are sorted by ascending rune.
encode [256]uint32
}
func (m *charmap) NewDecoder() *encoding.Decoder {
return &encoding.Decoder{Transformer: charmapDecoder{charmap: m}}
}
func (m *charmap) NewEncoder() *encoding.Encoder {
return &encoding.Encoder{Transformer: charmapEncoder{charmap: m}}
}
func (m *charmap) String() string {
return m.name
}
func (m *charmap) ID() (mib identifier.MIB, other string) {
return m.mib, ""
}
// charmapDecoder implements transform.Transformer by decoding to UTF-8.
type charmapDecoder struct {
transform.NopResetter
charmap *charmap
}
func (m charmapDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for i, c := range src {
if m.charmap.asciiSuperset && c < utf8.RuneSelf {
if nDst >= len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = c
nDst++
nSrc = i + 1
continue
}
decode := &m.charmap.decode[c]
n := int(decode.len)
if nDst+n > len(dst) {
err = transform.ErrShortDst
break
}
// It's 15% faster to avoid calling copy for these tiny slices.
for j := 0; j < n; j++ {
dst[nDst] = decode.data[j]
nDst++
}
nSrc = i + 1
}
return nDst, nSrc, err
}
// charmapEncoder implements transform.Transformer by encoding from UTF-8.
type charmapEncoder struct {
transform.NopResetter
charmap *charmap
}
func (m charmapEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
loop:
for nSrc < len(src) {
if nDst >= len(dst) {
err = transform.ErrShortDst
break
}
r = rune(src[nSrc])
// Decode a 1-byte rune.
if r < utf8.RuneSelf {
if m.charmap.asciiSuperset {
nSrc++
dst[nDst] = uint8(r)
nDst++
continue
}
size = 1
} else {
// Decode a multi-byte rune.
r, size = utf8.DecodeRune(src[nSrc:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
} else {
err = internal.RepertoireError(m.charmap.replacement)
}
break
}
}
// Binary search in [low, high) for that rune in the m.charmap.encode table.
for low, high := int(m.charmap.low), 0x100; ; {
if low >= high {
err = internal.RepertoireError(m.charmap.replacement)
break loop
}
mid := (low + high) / 2
got := m.charmap.encode[mid]
gotRune := rune(got & (1<<24 - 1))
if gotRune < r {
low = mid + 1
} else if gotRune > r {
high = mid
} else {
dst[nDst] = byte(got >> 24)
nDst++
break
}
}
nSrc += size
}
return nDst, nSrc, err
}
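A hypothetical caller sketch (not part of the vendored files): Windows1252 and the other concrete encodings are declared in the generated tables.go, which is collapsed in this diff, and they expose the NewDecoder/NewEncoder methods defined above:
package main

import (
    "fmt"

    "golang.org/x/text/encoding/charmap"
)

func main() {
    dec := charmap.Windows1252.NewDecoder()
    s, err := dec.String("caf\xe9") // 0xE9 is 'é' in Windows-1252
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(s) // café

    enc := charmap.Windows1252.NewEncoder()
    b, _ := enc.Bytes([]byte("café"))
    fmt.Printf("% x\n", b) // 63 61 66 e9
}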

524
vendor/golang.org/x/text/encoding/charmap/maketables.go generated vendored Normal file
View file

@ -0,0 +1,524 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"bufio"
"fmt"
"log"
"net/http"
"sort"
"strings"
"unicode/utf8"
"golang.org/x/text/encoding"
"golang.org/x/text/internal/gen"
)
const ascii = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" +
"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
` !"#$%&'()*+,-./0123456789:;<=>?` +
`@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_` +
"`abcdefghijklmnopqrstuvwxyz{|}~\u007f"
var encodings = []struct {
name string
mib string
comment string
varName string
replacement byte
mapping string
}{
{
"IBM Code Page 437",
"PC8CodePage437",
"",
"CodePage437",
encoding.ASCIISub,
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM437-2.1.2.ucm",
},
{
"IBM Code Page 850",
"PC850Multilingual",
"",
"CodePage850",
encoding.ASCIISub,
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM850-2.1.2.ucm",
},
{
"IBM Code Page 852",
"PCp852",
"",
"CodePage852",
encoding.ASCIISub,
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM852-2.1.2.ucm",
},
{
"IBM Code Page 855",
"IBM855",
"",
"CodePage855",
encoding.ASCIISub,
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM855-2.1.2.ucm",
},
{
"Windows Code Page 858", // PC latin1 with Euro
"IBM00858",
"",
"CodePage858",
encoding.ASCIISub,
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/windows-858-2000.ucm",
},
{
"IBM Code Page 860",
"IBM860",
"",
"CodePage860",
encoding.ASCIISub,
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM860-2.1.2.ucm",
},
{
"IBM Code Page 862",
"PC862LatinHebrew",
"",
"CodePage862",
encoding.ASCIISub,
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM862-2.1.2.ucm",
},
{
"IBM Code Page 863",
"IBM863",
"",
"CodePage863",
encoding.ASCIISub,
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM863-2.1.2.ucm",
},
{
"IBM Code Page 865",
"IBM865",
"",
"CodePage865",
encoding.ASCIISub,
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM865-2.1.2.ucm",
},
{
"IBM Code Page 866",
"IBM866",
"",
"CodePage866",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-ibm866.txt",
},
{
"ISO 8859-1",
"ISOLatin1",
"",
"ISO8859_1",
encoding.ASCIISub,
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_1-1998.ucm",
},
{
"ISO 8859-2",
"ISOLatin2",
"",
"ISO8859_2",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-2.txt",
},
{
"ISO 8859-3",
"ISOLatin3",
"",
"ISO8859_3",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-3.txt",
},
{
"ISO 8859-4",
"ISOLatin4",
"",
"ISO8859_4",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-4.txt",
},
{
"ISO 8859-5",
"ISOLatinCyrillic",
"",
"ISO8859_5",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-5.txt",
},
{
"ISO 8859-6",
"ISOLatinArabic",
"",
"ISO8859_6,ISO8859_6E,ISO8859_6I",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-6.txt",
},
{
"ISO 8859-7",
"ISOLatinGreek",
"",
"ISO8859_7",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-7.txt",
},
{
"ISO 8859-8",
"ISOLatinHebrew",
"",
"ISO8859_8,ISO8859_8E,ISO8859_8I",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-8.txt",
},
{
"ISO 8859-10",
"ISOLatin6",
"",
"ISO8859_10",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-10.txt",
},
{
"ISO 8859-13",
"ISO885913",
"",
"ISO8859_13",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-13.txt",
},
{
"ISO 8859-14",
"ISO885914",
"",
"ISO8859_14",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-14.txt",
},
{
"ISO 8859-15",
"ISO885915",
"",
"ISO8859_15",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-15.txt",
},
{
"ISO 8859-16",
"ISO885916",
"",
"ISO8859_16",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-16.txt",
},
{
"KOI8-R",
"KOI8R",
"",
"KOI8R",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-koi8-r.txt",
},
{
"KOI8-U",
"KOI8U",
"",
"KOI8U",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-koi8-u.txt",
},
{
"Macintosh",
"Macintosh",
"",
"Macintosh",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-macintosh.txt",
},
{
"Macintosh Cyrillic",
"MacintoshCyrillic",
"",
"MacintoshCyrillic",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-x-mac-cyrillic.txt",
},
{
"Windows 874",
"Windows874",
"",
"Windows874",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-874.txt",
},
{
"Windows 1250",
"Windows1250",
"",
"Windows1250",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1250.txt",
},
{
"Windows 1251",
"Windows1251",
"",
"Windows1251",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1251.txt",
},
{
"Windows 1252",
"Windows1252",
"",
"Windows1252",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1252.txt",
},
{
"Windows 1253",
"Windows1253",
"",
"Windows1253",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1253.txt",
},
{
"Windows 1254",
"Windows1254",
"",
"Windows1254",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1254.txt",
},
{
"Windows 1255",
"Windows1255",
"",
"Windows1255",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1255.txt",
},
{
"Windows 1256",
"Windows1256",
"",
"Windows1256",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1256.txt",
},
{
"Windows 1257",
"Windows1257",
"",
"Windows1257",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1257.txt",
},
{
"Windows 1258",
"Windows1258",
"",
"Windows1258",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1258.txt",
},
{
"X-User-Defined",
"XUserDefined",
"It is defined at http://encoding.spec.whatwg.org/#x-user-defined",
"XUserDefined",
encoding.ASCIISub,
ascii +
"\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787" +
"\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f" +
"\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797" +
"\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f" +
"\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7" +
"\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af" +
"\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7" +
"\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf" +
"\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7" +
"\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf" +
"\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7" +
"\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df" +
"\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7" +
"\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef" +
"\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7" +
"\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff",
},
}
func getWHATWG(url string) string {
res, err := http.Get(url)
if err != nil {
log.Fatalf("%q: Get: %v", url, err)
}
defer res.Body.Close()
mapping := make([]rune, 128)
for i := range mapping {
mapping[i] = '\ufffd'
}
scanner := bufio.NewScanner(res.Body)
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
if s == "" || s[0] == '#' {
continue
}
x, y := 0, 0
if _, err := fmt.Sscanf(s, "%d\t0x%x", &x, &y); err != nil {
log.Fatalf("could not parse %q", s)
}
if x < 0 || 128 <= x {
log.Fatalf("code %d is out of range", x)
}
if 0x80 <= y && y < 0xa0 {
// We diverge from the WHATWG spec by mapping control characters
// in the range [0x80, 0xa0) to U+FFFD.
continue
}
mapping[x] = rune(y)
}
return ascii + string(mapping)
}
func getUCM(url string) string {
res, err := http.Get(url)
if err != nil {
log.Fatalf("%q: Get: %v", url, err)
}
defer res.Body.Close()
mapping := make([]rune, 256)
for i := range mapping {
mapping[i] = '\ufffd'
}
charsFound := 0
scanner := bufio.NewScanner(res.Body)
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
if s == "" || s[0] == '#' {
continue
}
var c byte
var r rune
if _, err := fmt.Sscanf(s, `<U%x> \x%x |0`, &r, &c); err != nil {
continue
}
mapping[c] = r
charsFound++
}
if charsFound < 200 {
log.Fatalf("%q: only %d characters found (wrong page format?)", url, charsFound)
}
return string(mapping)
}
func main() {
mibs := map[string]bool{}
all := []string{}
w := gen.NewCodeWriter()
defer w.WriteGoFile("tables.go", "charmap")
printf := func(s string, a ...interface{}) { fmt.Fprintf(w, s, a...) }
printf("import (\n")
printf("\t\"golang.org/x/text/encoding\"\n")
printf("\t\"golang.org/x/text/encoding/internal/identifier\"\n")
printf(")\n\n")
for _, e := range encodings {
varNames := strings.Split(e.varName, ",")
all = append(all, varNames...)
varName := varNames[0]
switch {
case strings.HasPrefix(e.mapping, "http://encoding.spec.whatwg.org/"):
e.mapping = getWHATWG(e.mapping)
case strings.HasPrefix(e.mapping, "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/"):
e.mapping = getUCM(e.mapping)
}
asciiSuperset, low := strings.HasPrefix(e.mapping, ascii), 0x00
if asciiSuperset {
low = 0x80
}
lvn := 1
if strings.HasPrefix(varName, "ISO") || strings.HasPrefix(varName, "KOI") {
lvn = 3
}
lowerVarName := strings.ToLower(varName[:lvn]) + varName[lvn:]
printf("// %s is the %s encoding.\n", varName, e.name)
if e.comment != "" {
printf("//\n// %s\n", e.comment)
}
printf("var %s encoding.Encoding = &%s\n\nvar %s = charmap{\nname: %q,\n",
varName, lowerVarName, lowerVarName, e.name)
if mibs[e.mib] {
log.Fatalf("MIB type %q declared multiple times.", e.mib)
}
printf("mib: identifier.%s,\n", e.mib)
printf("asciiSuperset: %t,\n", asciiSuperset)
printf("low: 0x%02x,\n", low)
printf("replacement: 0x%02x,\n", e.replacement)
printf("decode: [256]utf8Enc{\n")
i, backMapping := 0, map[rune]byte{}
for _, c := range e.mapping {
if _, ok := backMapping[c]; !ok && c != utf8.RuneError {
backMapping[c] = byte(i)
}
var buf [8]byte
n := utf8.EncodeRune(buf[:], c)
if n > 3 {
panic(fmt.Sprintf("rune %q (%U) is too long", c, c))
}
printf("{%d,[3]byte{0x%02x,0x%02x,0x%02x}},", n, buf[0], buf[1], buf[2])
if i%2 == 1 {
printf("\n")
}
i++
}
printf("},\n")
printf("encode: [256]uint32{\n")
encode := make([]uint32, 0, 256)
for c, i := range backMapping {
encode = append(encode, uint32(i)<<24|uint32(c))
}
sort.Sort(byRune(encode))
for len(encode) < cap(encode) {
encode = append(encode, encode[len(encode)-1])
}
for i, enc := range encode {
printf("0x%08x,", enc)
if i%8 == 7 {
printf("\n")
}
}
printf("},\n}\n")
// Add an estimate of the size of a single charmap{} struct value, which
// includes two 256 elem arrays of 4 bytes and some extra fields, which
// align to 3 uint64s on 64-bit architectures.
w.Size += 2*4*256 + 3*8
}
// TODO: add proper line breaking.
printf("var listAll = []encoding.Encoding{\n%s,\n}\n\n", strings.Join(all, ",\n"))
}
type byRune []uint32
func (b byRune) Len() int { return len(b) }
func (b byRune) Less(i, j int) bool { return b[i]&0xffffff < b[j]&0xffffff }
func (b byRune) Swap(i, j int) { b[i], b[j] = b[j], b[i] }

6706
vendor/golang.org/x/text/encoding/charmap/tables.go generated vendored Normal file

File diff suppressed because it is too large Load diff

335
vendor/golang.org/x/text/encoding/encoding.go generated vendored Normal file
View file

@ -0,0 +1,335 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package encoding defines an interface for character encodings, such as Shift
// JIS and Windows 1252, that can convert to and from UTF-8.
//
// Encoding implementations are provided in other packages, such as
// golang.org/x/text/encoding/charmap and
// golang.org/x/text/encoding/japanese.
package encoding // import "golang.org/x/text/encoding"
import (
"errors"
"io"
"strconv"
"unicode/utf8"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/transform"
)
// TODO:
// - There seems to be some inconsistency in when decoders return errors
// and when not. Also documentation seems to suggest they shouldn't return
// errors at all (except for UTF-16).
// - Encoders seem to rely on or at least benefit from the input being in NFC
// normal form. Perhaps add an example how users could prepare their output.
// Encoding is a character set encoding that can be transformed to and from
// UTF-8.
type Encoding interface {
// NewDecoder returns a Decoder.
NewDecoder() *Decoder
// NewEncoder returns an Encoder.
NewEncoder() *Encoder
}
// A Decoder converts bytes to UTF-8. It implements transform.Transformer.
//
// Transforming source bytes that are not of that encoding will not result in an
// error per se. Each byte that cannot be transcoded will be represented in the
// output by the UTF-8 encoding of '\uFFFD', the replacement rune.
type Decoder struct {
transform.Transformer
// This forces external creators of Decoders to use names in struct
// initializers, allowing for future extensibility without having to break
// code.
_ struct{}
}
// Bytes converts the given encoded bytes to UTF-8. It returns the converted
// bytes or nil, err if any error occurred.
func (d *Decoder) Bytes(b []byte) ([]byte, error) {
b, _, err := transform.Bytes(d, b)
if err != nil {
return nil, err
}
return b, nil
}
// String converts the given encoded string to UTF-8. It returns the converted
// string or "", err if any error occurred.
func (d *Decoder) String(s string) (string, error) {
s, _, err := transform.String(d, s)
if err != nil {
return "", err
}
return s, nil
}
// Reader wraps another Reader to decode its bytes.
//
// The Decoder may not be used for any other operation as long as the returned
// Reader is in use.
func (d *Decoder) Reader(r io.Reader) io.Reader {
return transform.NewReader(r, d)
}
// An Encoder converts bytes from UTF-8. It implements transform.Transformer.
//
// Each rune that cannot be transcoded will result in an error. In this case,
// the transform will consume all source bytes up to, but not including, the
// offending rune. Source bytes that are not valid UTF-8 are replaced by
// `\uFFFD`. To return early with an error instead, use transform.Chain to
// preprocess the data with a UTF8Validator.
type Encoder struct {
transform.Transformer
// This forces external creators of Encoders to use names in struct
// initializers, allowing for future extensibility without having to break
// code.
_ struct{}
}
// Bytes converts bytes from UTF-8. It returns the converted bytes or nil, err if
// any error occurred.
func (e *Encoder) Bytes(b []byte) ([]byte, error) {
b, _, err := transform.Bytes(e, b)
if err != nil {
return nil, err
}
return b, nil
}
// String converts a string from UTF-8. It returns the converted string or
// "", err if any error occurred.
func (e *Encoder) String(s string) (string, error) {
s, _, err := transform.String(e, s)
if err != nil {
return "", err
}
return s, nil
}
// Writer wraps another Writer to encode its UTF-8 output.
//
// The Encoder may not be used for any other operation as long as the returned
// Writer is in use.
func (e *Encoder) Writer(w io.Writer) io.Writer {
return transform.NewWriter(w, e)
}
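A hypothetical streaming sketch (not part of the vendored files) for the Reader and Writer wrappers above, using an Encoding from the charmap package:
package main

import (
    "fmt"
    "io"
    "strings"

    "golang.org/x/text/encoding/charmap"
)

func main() {
    // Decode: reads from r yield UTF-8 even though src is Windows-1252.
    src := strings.NewReader("na\xefve") // 0xEF is 'ï' in Windows-1252
    r := charmap.Windows1252.NewDecoder().Reader(src)
    utf8Bytes, _ := io.ReadAll(r)
    fmt.Println(string(utf8Bytes)) // naïve

    // Encode: UTF-8 writes come out the other side as Windows-1252.
    var sb strings.Builder
    w := charmap.Windows1252.NewEncoder().Writer(&sb)
    w.Write([]byte("naïve"))
    fmt.Printf("% x\n", sb.String()) // 6e 61 ef 76 65
}
Since Writer delegates to transform.NewWriter (see above), code handling long-lived streams would also close the returned writer, which implements io.WriteCloser, to flush any buffered partial rune.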
// ASCIISub is the ASCII substitute character, as recommended by
// http://unicode.org/reports/tr36/#Text_Comparison
const ASCIISub = '\x1a'
// Nop is the nop encoding. Its transformed bytes are the same as the source
// bytes; it does not replace invalid UTF-8 sequences.
var Nop Encoding = nop{}
type nop struct{}
func (nop) NewDecoder() *Decoder {
return &Decoder{Transformer: transform.Nop}
}
func (nop) NewEncoder() *Encoder {
return &Encoder{Transformer: transform.Nop}
}
// Replacement is the replacement encoding. Decoding from the replacement
// encoding yields a single '\uFFFD' replacement rune. Encoding from UTF-8 to
// the replacement encoding yields the same as the source bytes except that
// invalid UTF-8 is converted to '\uFFFD'.
//
// It is defined at http://encoding.spec.whatwg.org/#replacement
var Replacement Encoding = replacement{}
type replacement struct{}
func (replacement) NewDecoder() *Decoder {
return &Decoder{Transformer: replacementDecoder{}}
}
func (replacement) NewEncoder() *Encoder {
return &Encoder{Transformer: replacementEncoder{}}
}
func (replacement) ID() (mib identifier.MIB, other string) {
return identifier.Replacement, ""
}
type replacementDecoder struct{ transform.NopResetter }
func (replacementDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if len(dst) < 3 {
return 0, 0, transform.ErrShortDst
}
if atEOF {
const fffd = "\ufffd"
dst[0] = fffd[0]
dst[1] = fffd[1]
dst[2] = fffd[2]
nDst = 3
}
return nDst, len(src), nil
}
type replacementEncoder struct{ transform.NopResetter }
func (replacementEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
for ; nSrc < len(src); nSrc += size {
r = rune(src[nSrc])
// Decode a 1-byte rune.
if r < utf8.RuneSelf {
size = 1
} else {
// Decode a multi-byte rune.
r, size = utf8.DecodeRune(src[nSrc:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
r = '\ufffd'
}
}
if nDst+utf8.RuneLen(r) > len(dst) {
err = transform.ErrShortDst
break
}
nDst += utf8.EncodeRune(dst[nDst:], r)
}
return nDst, nSrc, err
}
// HTMLEscapeUnsupported wraps encoders to replace source runes outside the
// repertoire of the destination encoding with HTML escape sequences.
//
// This wrapper exists to comply with URL and HTML forms requiring a
// non-terminating legacy encoder. The produced sequences may lead to data
// loss as they are indistinguishable from legitimate input. To avoid this
// issue, use UTF-8 encodings whenever possible.
func HTMLEscapeUnsupported(e *Encoder) *Encoder {
return &Encoder{Transformer: &errorHandler{e, errorToHTML}}
}
// ReplaceUnsupported wraps encoders to replace source runes outside the
// repertoire of the destination encoding with an encoding-specific
// replacement.
//
// This wrapper is only provided for backwards compatibility and legacy
// handling. Its use is strongly discouraged. Use UTF-8 whenever possible.
func ReplaceUnsupported(e *Encoder) *Encoder {
return &Encoder{Transformer: &errorHandler{e, errorToReplacement}}
}
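A hypothetical sketch (not part of the vendored files) contrasting a bare encoder with the two wrappers above for a rune outside the target repertoire ('€' is not in ISO 8859-1):
package main

import (
    "fmt"

    "golang.org/x/text/encoding"
    "golang.org/x/text/encoding/charmap"
)

func main() {
    _, err := charmap.ISO8859_1.NewEncoder().String("price: €5")
    fmt.Println(err != nil) // true: the bare encoder stops with an error

    s, _ := encoding.HTMLEscapeUnsupported(charmap.ISO8859_1.NewEncoder()).String("price: €5")
    fmt.Println(s) // price: &#8364;5

    s, _ = encoding.ReplaceUnsupported(charmap.ISO8859_1.NewEncoder()).String("price: €5")
    fmt.Printf("%q\n", s) // "price: \x1a5": the encoding's replacement byte (ASCII SUB)
}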
type errorHandler struct {
*Encoder
handler func(dst []byte, r rune, err repertoireError) (n int, ok bool)
}
// TODO: consider making this error public in some form.
type repertoireError interface {
Replacement() byte
}
func (h errorHandler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
nDst, nSrc, err = h.Transformer.Transform(dst, src, atEOF)
for err != nil {
rerr, ok := err.(repertoireError)
if !ok {
return nDst, nSrc, err
}
r, sz := utf8.DecodeRune(src[nSrc:])
n, ok := h.handler(dst[nDst:], r, rerr)
if !ok {
return nDst, nSrc, transform.ErrShortDst
}
err = nil
nDst += n
if nSrc += sz; nSrc < len(src) {
var dn, sn int
dn, sn, err = h.Transformer.Transform(dst[nDst:], src[nSrc:], atEOF)
nDst += dn
nSrc += sn
}
}
return nDst, nSrc, err
}
func errorToHTML(dst []byte, r rune, err repertoireError) (n int, ok bool) {
buf := [8]byte{}
b := strconv.AppendUint(buf[:0], uint64(r), 10)
if n = len(b) + len("&#;"); n >= len(dst) {
return 0, false
}
dst[0] = '&'
dst[1] = '#'
dst[copy(dst[2:], b)+2] = ';'
return n, true
}
func errorToReplacement(dst []byte, r rune, err repertoireError) (n int, ok bool) {
if len(dst) == 0 {
return 0, false
}
dst[0] = err.Replacement()
return 1, true
}
// ErrInvalidUTF8 means that a transformer encountered invalid UTF-8.
var ErrInvalidUTF8 = errors.New("encoding: invalid UTF-8")
// UTF8Validator is a transformer that returns ErrInvalidUTF8 on the first
// input byte that is not valid UTF-8.
var UTF8Validator transform.Transformer = utf8Validator{}
type utf8Validator struct{ transform.NopResetter }
func (utf8Validator) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
n := len(src)
if n > len(dst) {
n = len(dst)
}
for i := 0; i < n; {
if c := src[i]; c < utf8.RuneSelf {
dst[i] = c
i++
continue
}
_, size := utf8.DecodeRune(src[i:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
err = ErrInvalidUTF8
if !atEOF && !utf8.FullRune(src[i:]) {
err = transform.ErrShortSrc
}
return i, i, err
}
if i+size > len(dst) {
return i, i, transform.ErrShortDst
}
for ; size > 0; size-- {
dst[i] = src[i]
i++
}
}
if len(src) > len(dst) {
err = transform.ErrShortDst
}
return n, n, err
}
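A hypothetical sketch (not part of the vendored files) of the usage hinted at in the Encoder comment earlier: chaining UTF8Validator in front of an encoder so that invalid UTF-8 aborts with ErrInvalidUTF8 instead of being substituted:
package main

import (
    "fmt"

    "golang.org/x/text/encoding"
    "golang.org/x/text/encoding/charmap"
    "golang.org/x/text/transform"
)

func main() {
    t := transform.Chain(encoding.UTF8Validator, charmap.Windows1252.NewEncoder())
    _, _, err := transform.String(t, "ok \xff not ok") // 0xFF is not valid UTF-8
    fmt.Println(err) // encoding: invalid UTF-8
}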

167
vendor/golang.org/x/text/encoding/htmlindex/gen.go generated vendored Normal file
View file

@ -0,0 +1,167 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"bytes"
"encoding/json"
"fmt"
"log"
"strings"
"golang.org/x/text/internal/gen"
)
type group struct {
Encodings []struct {
Labels []string
Name string
}
}
func main() {
gen.Init()
r := gen.Open("http://www.w3.org/TR", "w3", "encoding/indexes/encodings.json")
var groups []group
if err := json.NewDecoder(r).Decode(&groups); err != nil {
log.Fatalf("Error reading encodings.json: %v", err)
}
w := &bytes.Buffer{}
fmt.Fprintln(w, "type htmlEncoding byte")
fmt.Fprintln(w, "const (")
for i, g := range groups {
for _, e := range g.Encodings {
name := consts[e.Name]
if name == "" {
log.Fatalf("No const defined for %s.", e.Name)
}
if i == 0 {
fmt.Fprintf(w, "%s htmlEncoding = iota\n", name)
} else {
fmt.Fprintf(w, "%s\n", name)
}
}
}
fmt.Fprintln(w, "numEncodings")
fmt.Fprint(w, ")\n\n")
fmt.Fprintln(w, "var canonical = [numEncodings]string{")
for _, g := range groups {
for _, e := range g.Encodings {
fmt.Fprintf(w, "%q,\n", e.Name)
}
}
fmt.Fprint(w, "}\n\n")
fmt.Fprintln(w, "var nameMap = map[string]htmlEncoding{")
for _, g := range groups {
for _, e := range g.Encodings {
for _, l := range e.Labels {
fmt.Fprintf(w, "%q: %s,\n", l, consts[e.Name])
}
}
}
fmt.Fprint(w, "}\n\n")
var tags []string
fmt.Fprintln(w, "var localeMap = []htmlEncoding{")
for _, loc := range locales {
tags = append(tags, loc.tag)
fmt.Fprintf(w, "%s, // %s \n", consts[loc.name], loc.tag)
}
fmt.Fprint(w, "}\n\n")
fmt.Fprintf(w, "const locales = %q\n", strings.Join(tags, " "))
gen.WriteGoFile("tables.go", "htmlindex", w.Bytes())
}
// consts maps canonical encoding name to internal constant.
var consts = map[string]string{
"utf-8": "utf8",
"ibm866": "ibm866",
"iso-8859-2": "iso8859_2",
"iso-8859-3": "iso8859_3",
"iso-8859-4": "iso8859_4",
"iso-8859-5": "iso8859_5",
"iso-8859-6": "iso8859_6",
"iso-8859-7": "iso8859_7",
"iso-8859-8": "iso8859_8",
"iso-8859-8-i": "iso8859_8I",
"iso-8859-10": "iso8859_10",
"iso-8859-13": "iso8859_13",
"iso-8859-14": "iso8859_14",
"iso-8859-15": "iso8859_15",
"iso-8859-16": "iso8859_16",
"koi8-r": "koi8r",
"koi8-u": "koi8u",
"macintosh": "macintosh",
"windows-874": "windows874",
"windows-1250": "windows1250",
"windows-1251": "windows1251",
"windows-1252": "windows1252",
"windows-1253": "windows1253",
"windows-1254": "windows1254",
"windows-1255": "windows1255",
"windows-1256": "windows1256",
"windows-1257": "windows1257",
"windows-1258": "windows1258",
"x-mac-cyrillic": "macintoshCyrillic",
"gbk": "gbk",
"gb18030": "gb18030",
// "hz-gb-2312": "hzgb2312", // Was removed from WhatWG
"big5": "big5",
"euc-jp": "eucjp",
"iso-2022-jp": "iso2022jp",
"shift_jis": "shiftJIS",
"euc-kr": "euckr",
"replacement": "replacement",
"utf-16be": "utf16be",
"utf-16le": "utf16le",
"x-user-defined": "xUserDefined",
}
// locales is taken from
// https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm.
var locales = []struct{ tag, name string }{
{"und", "windows-1252"}, // The default value.
{"ar", "windows-1256"},
{"ba", "windows-1251"},
{"be", "windows-1251"},
{"bg", "windows-1251"},
{"cs", "windows-1250"},
{"el", "iso-8859-7"},
{"et", "windows-1257"},
{"fa", "windows-1256"},
{"he", "windows-1255"},
{"hr", "windows-1250"},
{"hu", "iso-8859-2"},
{"ja", "shift_jis"},
{"kk", "windows-1251"},
{"ko", "euc-kr"},
{"ku", "windows-1254"},
{"ky", "windows-1251"},
{"lt", "windows-1257"},
{"lv", "windows-1257"},
{"mk", "windows-1251"},
{"pl", "iso-8859-2"},
{"ru", "windows-1251"},
{"sah", "windows-1251"},
{"sk", "windows-1250"},
{"sl", "iso-8859-2"},
{"sr", "windows-1251"},
{"tg", "windows-1251"},
{"th", "windows-874"},
{"tr", "windows-1254"},
{"tt", "windows-1251"},
{"uk", "windows-1251"},
{"vi", "windows-1258"},
{"zh-hans", "gb18030"},
{"zh-hant", "big5"},
}


@ -0,0 +1,86 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go
// Package htmlindex maps character set encoding names to Encodings as
// recommended by the W3C for use in HTML 5. See http://www.w3.org/TR/encoding.
package htmlindex
// TODO: perhaps have a "bare" version of the index (used by this package) that
// is not pre-loaded with all encodings. Global variables in encodings prevent
// the linker from being able to purge unneeded tables. This means that
// referencing all encodings, as this package does for the default index, links
// in all encodings unconditionally.
//
// This issue can be solved by either solving the linking issue (see
// https://github.com/golang/go/issues/6330) or refactoring the encoding tables
// (e.g. moving the tables to internal packages that do not use global
// variables).
// TODO: allow canonicalizing names
import (
"errors"
"strings"
"sync"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/language"
)
var (
errInvalidName = errors.New("htmlindex: invalid encoding name")
errUnknown = errors.New("htmlindex: unknown Encoding")
errUnsupported = errors.New("htmlindex: this encoding is not supported")
)
var (
matcherOnce sync.Once
matcher language.Matcher
)
// LanguageDefault returns the canonical name of the default encoding for a
// given language.
func LanguageDefault(tag language.Tag) string {
matcherOnce.Do(func() {
tags := []language.Tag{}
for _, t := range strings.Split(locales, " ") {
tags = append(tags, language.MustParse(t))
}
matcher = language.NewMatcher(tags)
})
_, i, _ := matcher.Match(tag)
return canonical[localeMap[i]] // Default is Windows-1252.
}
// Get returns an Encoding for one of the names listed in
// http://www.w3.org/TR/encoding using the Default Index. Matching is case-
// insensitive.
func Get(name string) (encoding.Encoding, error) {
x, ok := nameMap[strings.ToLower(strings.TrimSpace(name))]
if !ok {
return nil, errInvalidName
}
return encodings[x], nil
}
// Name reports the canonical name of the given Encoding. It will return
// an error if e is not associated with a supported encoding scheme.
func Name(e encoding.Encoding) (string, error) {
id, ok := e.(identifier.Interface)
if !ok {
return "", errUnknown
}
mib, _ := id.ID()
if mib == 0 {
return "", errUnknown
}
v, ok := mibMap[mib]
if !ok {
return "", errUnsupported
}
return canonical[v], nil
}
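Get, Name and LanguageDefault are the whole public surface of htmlindex. A short hedged sketch of typical usage (the label and language tag are chosen for illustration):

package main

import (
	"fmt"
	"log"

	"golang.org/x/text/encoding/htmlindex"
	"golang.org/x/text/language"
)

func main() {
	// Matching is case-insensitive and ignores surrounding space, so the
	// HTML label "Latin1" resolves to the windows-1252 encoding.
	e, err := htmlindex.Get(" Latin1 ")
	if err != nil {
		log.Fatal(err)
	}

	// Name reports the canonical W3C name of the encoding just looked up.
	name, err := htmlindex.Name(e)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(name) // windows-1252

	// LanguageDefault gives the legacy default encoding for a language tag,
	// per the encoding-sniffing table.
	fmt.Println(htmlindex.LanguageDefault(language.Japanese)) // shift_jis
}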

105
vendor/golang.org/x/text/encoding/htmlindex/map.go generated vendored Normal file

@ -0,0 +1,105 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package htmlindex
import (
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/encoding/japanese"
"golang.org/x/text/encoding/korean"
"golang.org/x/text/encoding/simplifiedchinese"
"golang.org/x/text/encoding/traditionalchinese"
"golang.org/x/text/encoding/unicode"
)
// mibMap maps a MIB identifier to an htmlEncoding index.
var mibMap = map[identifier.MIB]htmlEncoding{
identifier.UTF8: utf8,
identifier.UTF16BE: utf16be,
identifier.UTF16LE: utf16le,
identifier.IBM866: ibm866,
identifier.ISOLatin2: iso8859_2,
identifier.ISOLatin3: iso8859_3,
identifier.ISOLatin4: iso8859_4,
identifier.ISOLatinCyrillic: iso8859_5,
identifier.ISOLatinArabic: iso8859_6,
identifier.ISOLatinGreek: iso8859_7,
identifier.ISOLatinHebrew: iso8859_8,
identifier.ISO88598I: iso8859_8I,
identifier.ISOLatin6: iso8859_10,
identifier.ISO885913: iso8859_13,
identifier.ISO885914: iso8859_14,
identifier.ISO885915: iso8859_15,
identifier.ISO885916: iso8859_16,
identifier.KOI8R: koi8r,
identifier.KOI8U: koi8u,
identifier.Macintosh: macintosh,
identifier.MacintoshCyrillic: macintoshCyrillic,
identifier.Windows874: windows874,
identifier.Windows1250: windows1250,
identifier.Windows1251: windows1251,
identifier.Windows1252: windows1252,
identifier.Windows1253: windows1253,
identifier.Windows1254: windows1254,
identifier.Windows1255: windows1255,
identifier.Windows1256: windows1256,
identifier.Windows1257: windows1257,
identifier.Windows1258: windows1258,
identifier.XUserDefined: xUserDefined,
identifier.GBK: gbk,
identifier.GB18030: gb18030,
identifier.Big5: big5,
identifier.EUCPkdFmtJapanese: eucjp,
identifier.ISO2022JP: iso2022jp,
identifier.ShiftJIS: shiftJIS,
identifier.EUCKR: euckr,
identifier.Replacement: replacement,
}
// encodings maps the internal htmlEncoding to an Encoding.
// TODO: consider using a reusable index in encoding/internal.
var encodings = [numEncodings]encoding.Encoding{
utf8: unicode.UTF8,
ibm866: charmap.CodePage866,
iso8859_2: charmap.ISO8859_2,
iso8859_3: charmap.ISO8859_3,
iso8859_4: charmap.ISO8859_4,
iso8859_5: charmap.ISO8859_5,
iso8859_6: charmap.ISO8859_6,
iso8859_7: charmap.ISO8859_7,
iso8859_8: charmap.ISO8859_8,
iso8859_8I: charmap.ISO8859_8I,
iso8859_10: charmap.ISO8859_10,
iso8859_13: charmap.ISO8859_13,
iso8859_14: charmap.ISO8859_14,
iso8859_15: charmap.ISO8859_15,
iso8859_16: charmap.ISO8859_16,
koi8r: charmap.KOI8R,
koi8u: charmap.KOI8U,
macintosh: charmap.Macintosh,
windows874: charmap.Windows874,
windows1250: charmap.Windows1250,
windows1251: charmap.Windows1251,
windows1252: charmap.Windows1252,
windows1253: charmap.Windows1253,
windows1254: charmap.Windows1254,
windows1255: charmap.Windows1255,
windows1256: charmap.Windows1256,
windows1257: charmap.Windows1257,
windows1258: charmap.Windows1258,
macintoshCyrillic: charmap.MacintoshCyrillic,
gbk: simplifiedchinese.GBK,
gb18030: simplifiedchinese.GB18030,
big5: traditionalchinese.Big5,
eucjp: japanese.EUCJP,
iso2022jp: japanese.ISO2022JP,
shiftJIS: japanese.ShiftJIS,
euckr: korean.EUCKR,
replacement: encoding.Replacement,
utf16be: unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM),
utf16le: unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM),
xUserDefined: charmap.XUserDefined,
}

352
vendor/golang.org/x/text/encoding/htmlindex/tables.go generated vendored Normal file

@ -0,0 +1,352 @@
// This file was generated by go generate; DO NOT EDIT
package htmlindex
type htmlEncoding byte
const (
utf8 htmlEncoding = iota
ibm866
iso8859_2
iso8859_3
iso8859_4
iso8859_5
iso8859_6
iso8859_7
iso8859_8
iso8859_8I
iso8859_10
iso8859_13
iso8859_14
iso8859_15
iso8859_16
koi8r
koi8u
macintosh
windows874
windows1250
windows1251
windows1252
windows1253
windows1254
windows1255
windows1256
windows1257
windows1258
macintoshCyrillic
gbk
gb18030
big5
eucjp
iso2022jp
shiftJIS
euckr
replacement
utf16be
utf16le
xUserDefined
numEncodings
)
var canonical = [numEncodings]string{
"utf-8",
"ibm866",
"iso-8859-2",
"iso-8859-3",
"iso-8859-4",
"iso-8859-5",
"iso-8859-6",
"iso-8859-7",
"iso-8859-8",
"iso-8859-8-i",
"iso-8859-10",
"iso-8859-13",
"iso-8859-14",
"iso-8859-15",
"iso-8859-16",
"koi8-r",
"koi8-u",
"macintosh",
"windows-874",
"windows-1250",
"windows-1251",
"windows-1252",
"windows-1253",
"windows-1254",
"windows-1255",
"windows-1256",
"windows-1257",
"windows-1258",
"x-mac-cyrillic",
"gbk",
"gb18030",
"big5",
"euc-jp",
"iso-2022-jp",
"shift_jis",
"euc-kr",
"replacement",
"utf-16be",
"utf-16le",
"x-user-defined",
}
var nameMap = map[string]htmlEncoding{
"unicode-1-1-utf-8": utf8,
"utf-8": utf8,
"utf8": utf8,
"866": ibm866,
"cp866": ibm866,
"csibm866": ibm866,
"ibm866": ibm866,
"csisolatin2": iso8859_2,
"iso-8859-2": iso8859_2,
"iso-ir-101": iso8859_2,
"iso8859-2": iso8859_2,
"iso88592": iso8859_2,
"iso_8859-2": iso8859_2,
"iso_8859-2:1987": iso8859_2,
"l2": iso8859_2,
"latin2": iso8859_2,
"csisolatin3": iso8859_3,
"iso-8859-3": iso8859_3,
"iso-ir-109": iso8859_3,
"iso8859-3": iso8859_3,
"iso88593": iso8859_3,
"iso_8859-3": iso8859_3,
"iso_8859-3:1988": iso8859_3,
"l3": iso8859_3,
"latin3": iso8859_3,
"csisolatin4": iso8859_4,
"iso-8859-4": iso8859_4,
"iso-ir-110": iso8859_4,
"iso8859-4": iso8859_4,
"iso88594": iso8859_4,
"iso_8859-4": iso8859_4,
"iso_8859-4:1988": iso8859_4,
"l4": iso8859_4,
"latin4": iso8859_4,
"csisolatincyrillic": iso8859_5,
"cyrillic": iso8859_5,
"iso-8859-5": iso8859_5,
"iso-ir-144": iso8859_5,
"iso8859-5": iso8859_5,
"iso88595": iso8859_5,
"iso_8859-5": iso8859_5,
"iso_8859-5:1988": iso8859_5,
"arabic": iso8859_6,
"asmo-708": iso8859_6,
"csiso88596e": iso8859_6,
"csiso88596i": iso8859_6,
"csisolatinarabic": iso8859_6,
"ecma-114": iso8859_6,
"iso-8859-6": iso8859_6,
"iso-8859-6-e": iso8859_6,
"iso-8859-6-i": iso8859_6,
"iso-ir-127": iso8859_6,
"iso8859-6": iso8859_6,
"iso88596": iso8859_6,
"iso_8859-6": iso8859_6,
"iso_8859-6:1987": iso8859_6,
"csisolatingreek": iso8859_7,
"ecma-118": iso8859_7,
"elot_928": iso8859_7,
"greek": iso8859_7,
"greek8": iso8859_7,
"iso-8859-7": iso8859_7,
"iso-ir-126": iso8859_7,
"iso8859-7": iso8859_7,
"iso88597": iso8859_7,
"iso_8859-7": iso8859_7,
"iso_8859-7:1987": iso8859_7,
"sun_eu_greek": iso8859_7,
"csiso88598e": iso8859_8,
"csisolatinhebrew": iso8859_8,
"hebrew": iso8859_8,
"iso-8859-8": iso8859_8,
"iso-8859-8-e": iso8859_8,
"iso-ir-138": iso8859_8,
"iso8859-8": iso8859_8,
"iso88598": iso8859_8,
"iso_8859-8": iso8859_8,
"iso_8859-8:1988": iso8859_8,
"visual": iso8859_8,
"csiso88598i": iso8859_8I,
"iso-8859-8-i": iso8859_8I,
"logical": iso8859_8I,
"csisolatin6": iso8859_10,
"iso-8859-10": iso8859_10,
"iso-ir-157": iso8859_10,
"iso8859-10": iso8859_10,
"iso885910": iso8859_10,
"l6": iso8859_10,
"latin6": iso8859_10,
"iso-8859-13": iso8859_13,
"iso8859-13": iso8859_13,
"iso885913": iso8859_13,
"iso-8859-14": iso8859_14,
"iso8859-14": iso8859_14,
"iso885914": iso8859_14,
"csisolatin9": iso8859_15,
"iso-8859-15": iso8859_15,
"iso8859-15": iso8859_15,
"iso885915": iso8859_15,
"iso_8859-15": iso8859_15,
"l9": iso8859_15,
"iso-8859-16": iso8859_16,
"cskoi8r": koi8r,
"koi": koi8r,
"koi8": koi8r,
"koi8-r": koi8r,
"koi8_r": koi8r,
"koi8-ru": koi8u,
"koi8-u": koi8u,
"csmacintosh": macintosh,
"mac": macintosh,
"macintosh": macintosh,
"x-mac-roman": macintosh,
"dos-874": windows874,
"iso-8859-11": windows874,
"iso8859-11": windows874,
"iso885911": windows874,
"tis-620": windows874,
"windows-874": windows874,
"cp1250": windows1250,
"windows-1250": windows1250,
"x-cp1250": windows1250,
"cp1251": windows1251,
"windows-1251": windows1251,
"x-cp1251": windows1251,
"ansi_x3.4-1968": windows1252,
"ascii": windows1252,
"cp1252": windows1252,
"cp819": windows1252,
"csisolatin1": windows1252,
"ibm819": windows1252,
"iso-8859-1": windows1252,
"iso-ir-100": windows1252,
"iso8859-1": windows1252,
"iso88591": windows1252,
"iso_8859-1": windows1252,
"iso_8859-1:1987": windows1252,
"l1": windows1252,
"latin1": windows1252,
"us-ascii": windows1252,
"windows-1252": windows1252,
"x-cp1252": windows1252,
"cp1253": windows1253,
"windows-1253": windows1253,
"x-cp1253": windows1253,
"cp1254": windows1254,
"csisolatin5": windows1254,
"iso-8859-9": windows1254,
"iso-ir-148": windows1254,
"iso8859-9": windows1254,
"iso88599": windows1254,
"iso_8859-9": windows1254,
"iso_8859-9:1989": windows1254,
"l5": windows1254,
"latin5": windows1254,
"windows-1254": windows1254,
"x-cp1254": windows1254,
"cp1255": windows1255,
"windows-1255": windows1255,
"x-cp1255": windows1255,
"cp1256": windows1256,
"windows-1256": windows1256,
"x-cp1256": windows1256,
"cp1257": windows1257,
"windows-1257": windows1257,
"x-cp1257": windows1257,
"cp1258": windows1258,
"windows-1258": windows1258,
"x-cp1258": windows1258,
"x-mac-cyrillic": macintoshCyrillic,
"x-mac-ukrainian": macintoshCyrillic,
"chinese": gbk,
"csgb2312": gbk,
"csiso58gb231280": gbk,
"gb2312": gbk,
"gb_2312": gbk,
"gb_2312-80": gbk,
"gbk": gbk,
"iso-ir-58": gbk,
"x-gbk": gbk,
"gb18030": gb18030,
"big5": big5,
"big5-hkscs": big5,
"cn-big5": big5,
"csbig5": big5,
"x-x-big5": big5,
"cseucpkdfmtjapanese": eucjp,
"euc-jp": eucjp,
"x-euc-jp": eucjp,
"csiso2022jp": iso2022jp,
"iso-2022-jp": iso2022jp,
"csshiftjis": shiftJIS,
"ms932": shiftJIS,
"ms_kanji": shiftJIS,
"shift-jis": shiftJIS,
"shift_jis": shiftJIS,
"sjis": shiftJIS,
"windows-31j": shiftJIS,
"x-sjis": shiftJIS,
"cseuckr": euckr,
"csksc56011987": euckr,
"euc-kr": euckr,
"iso-ir-149": euckr,
"korean": euckr,
"ks_c_5601-1987": euckr,
"ks_c_5601-1989": euckr,
"ksc5601": euckr,
"ksc_5601": euckr,
"windows-949": euckr,
"csiso2022kr": replacement,
"hz-gb-2312": replacement,
"iso-2022-cn": replacement,
"iso-2022-cn-ext": replacement,
"iso-2022-kr": replacement,
"utf-16be": utf16be,
"utf-16": utf16le,
"utf-16le": utf16le,
"x-user-defined": xUserDefined,
}
var localeMap = []htmlEncoding{
windows1252, // und
windows1256, // ar
windows1251, // ba
windows1251, // be
windows1251, // bg
windows1250, // cs
iso8859_7, // el
windows1257, // et
windows1256, // fa
windows1255, // he
windows1250, // hr
iso8859_2, // hu
shiftJIS, // ja
windows1251, // kk
euckr, // ko
windows1254, // ku
windows1251, // ky
windows1257, // lt
windows1257, // lv
windows1251, // mk
iso8859_2, // pl
windows1251, // ru
windows1251, // sah
windows1250, // sk
iso8859_2, // sl
windows1251, // sr
windows1251, // tg
windows874, // th
windows1254, // tr
windows1251, // tt
windows1251, // uk
windows1258, // vi
gb18030, // zh-hans
big5, // zh-hant
}
const locales = "und ar ba be bg cs el et fa he hr hu ja kk ko ku ky lt lv mk pl ru sah sk sl sr tg th tr tt uk vi zh-hans zh-hant"


@ -0,0 +1,137 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"log"
"strings"
"golang.org/x/text/internal/gen"
)
type registry struct {
XMLName xml.Name `xml:"registry"`
Updated string `xml:"updated"`
Registry []struct {
ID string `xml:"id,attr"`
Record []struct {
Name string `xml:"name"`
Xref []struct {
Type string `xml:"type,attr"`
Data string `xml:"data,attr"`
} `xml:"xref"`
Desc struct {
Data string `xml:",innerxml"`
// Any []struct {
// Data string `xml:",chardata"`
// } `xml:",any"`
// Data string `xml:",chardata"`
} `xml:"description,"`
MIB string `xml:"value"`
Alias []string `xml:"alias"`
MIME string `xml:"preferred_alias"`
} `xml:"record"`
} `xml:"registry"`
}
func main() {
r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml")
reg := &registry{}
if err := xml.NewDecoder(r).Decode(&reg); err != nil && err != io.EOF {
log.Fatalf("Error decoding charset registry: %v", err)
}
if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" {
log.Fatalf("Unexpected ID %s", reg.Registry[0].ID)
}
w := &bytes.Buffer{}
fmt.Fprintf(w, "const (\n")
for _, rec := range reg.Registry[0].Record {
constName := ""
for _, a := range rec.Alias {
if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 {
// Some of the constant definitions have comments in them. Strip those.
constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0])
}
}
if constName == "" {
switch rec.MIB {
case "2085":
constName = "HZGB2312" // Not listed as alias for some reason.
default:
log.Fatalf("No cs alias defined for %s.", rec.MIB)
}
}
if rec.MIME != "" {
rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME)
}
fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME)
if len(rec.Desc.Data) > 0 {
fmt.Fprint(w, "// ")
d := xml.NewDecoder(strings.NewReader(rec.Desc.Data))
inElem := true
attr := ""
for {
t, err := d.Token()
if err != nil {
if err != io.EOF {
log.Fatal(err)
}
break
}
switch x := t.(type) {
case xml.CharData:
attr = "" // Don't need attribute info.
a := bytes.Split([]byte(x), []byte("\n"))
for i, b := range a {
if b = bytes.TrimSpace(b); len(b) != 0 {
if !inElem && i > 0 {
fmt.Fprint(w, "\n// ")
}
inElem = false
fmt.Fprintf(w, "%s ", string(b))
}
}
case xml.StartElement:
if x.Name.Local == "xref" {
inElem = true
use := false
for _, a := range x.Attr {
if a.Name.Local == "type" {
use = use || a.Value != "person"
}
if a.Name.Local == "data" && use {
attr = a.Value + " "
}
}
}
case xml.EndElement:
inElem = false
fmt.Fprint(w, attr)
}
}
fmt.Fprint(w, "\n")
}
for _, x := range rec.Xref {
switch x.Type {
case "rfc":
fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data))
case "uri":
fmt.Fprintf(w, "// Reference: %s\n", x.Data)
}
}
fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB)
fmt.Fprintln(w)
}
fmt.Fprintln(w, ")")
gen.WriteGoFile("mib.go", "identifier", w.Bytes())
}


@ -0,0 +1,81 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go
// Package identifier defines the contract between implementations of Encoding
// and Index by defining identifiers that uniquely identify standardized coded
// character sets (CCS) and character encoding schemes (CES), which we will
// together refer to as encodings, for which Encoding implementations provide
// converters to and from UTF-8. This package is typically only of concern to
// implementers of Indexes and Encodings.
//
// One part of the identifier is the MIB code, which is defined by IANA and
// uniquely identifies a CCS or CES. Each code is associated with data that
// references authorities, official documentation as well as aliases and MIME
// names.
//
// Not all CESs are covered by the IANA registry. The "other" string that is
// returned by ID can be used to identify other character sets or versions of
// existing ones.
//
// It is recommended that each package that provides a set of Encodings provide
// the All and Common variables to reference all supported encodings and
// commonly used subset. This allows Index implementations to include all
// available encodings without explicitly referencing or knowing about them.
package identifier
// Note: this package is internal, but could be made public if there is a need
// for writing third-party Indexes and Encodings.
// References:
// - http://source.icu-project.org/repos/icu/icu/trunk/source/data/mappings/convrtrs.txt
// - http://www.iana.org/assignments/character-sets/character-sets.xhtml
// - http://www.iana.org/assignments/ianacharset-mib/ianacharset-mib
// - http://www.ietf.org/rfc/rfc2978.txt
// - http://www.unicode.org/reports/tr22/
// - http://www.w3.org/TR/encoding/
// - http://www.w3.org/TR/encoding/indexes/encodings.json
// - https://encoding.spec.whatwg.org/
// - https://tools.ietf.org/html/rfc6657#section-5
// Interface can be implemented by Encodings to define the CCS or CES for which
// it implements conversions.
type Interface interface {
// ID returns an encoding identifier. Exactly one of the mib and other
// values should be non-zero.
//
// In the usual case it is only necessary to indicate the MIB code. The
// other string can be used to specify encodings for which there is no MIB,
// such as "x-mac-dingbat".
//
// The other string may only contain the characters a-z, A-Z, 0-9, - and _.
ID() (mib MIB, other string)
// NOTE: the restrictions on the encoding are to allow extending the syntax
// with additional information such as versions, vendors and other variants.
}
// A MIB identifies an encoding. It is derived from the IANA MIB codes and adds
// some identifiers for some encodings that are not covered by the IANA
// standard.
//
// See http://www.iana.org/assignments/ianacharset-mib.
type MIB uint16
// These additional MIB types are not defined in IANA. They are added because
// they are common and defined within the text repo.
const (
// Unofficial marks the start of encodings not registered by IANA.
Unofficial MIB = 10000 + iota
// Replacement is the WhatWG replacement encoding.
Replacement
// XUserDefined is the code for x-user-defined.
XUserDefined
// MacintoshCyrillic is the code for x-mac-cyrillic.
MacintoshCyrillic
)
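Consumers discover an encoding's MIB code by asserting to identifier.Interface, exactly as htmlindex.Name does earlier in this commit. The sketch below illustrates that pattern; note that identifier is an internal package, so this only compiles from inside the golang.org/x/text tree, and the concrete MIB value in the comment is the IANA MIBenum the generated mib.go is expected to carry:

package main

import (
	"fmt"

	"golang.org/x/text/encoding"
	"golang.org/x/text/encoding/charmap"
	"golang.org/x/text/encoding/internal/identifier"
)

// mibOf reports the MIB identifier of e, or 0 if e does not expose one.
func mibOf(e encoding.Encoding) identifier.MIB {
	if id, ok := e.(identifier.Interface); ok {
		mib, _ := id.ID()
		return mib
	}
	return 0
}

func main() {
	fmt.Println(mibOf(charmap.Windows1252)) // 2252, the IANA MIBenum for windows-1252

	// A wrapper that hides the ID method no longer reports a code.
	type opaque struct{ encoding.Encoding }
	fmt.Println(mibOf(opaque{charmap.Windows1252})) // 0
}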

File diff suppressed because it is too large

75
vendor/golang.org/x/text/encoding/internal/internal.go generated vendored Normal file

@ -0,0 +1,75 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package internal contains code that is shared among encoding implementations.
package internal
import (
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/transform"
)
// Encoding is an implementation of the Encoding interface that adds the String
// and ID methods to an existing encoding.
type Encoding struct {
encoding.Encoding
Name string
MIB identifier.MIB
}
// _ verifies that Encoding implements identifier.Interface.
var _ identifier.Interface = (*Encoding)(nil)
func (e *Encoding) String() string {
return e.Name
}
func (e *Encoding) ID() (mib identifier.MIB, other string) {
return e.MIB, ""
}
// SimpleEncoding is an Encoding that combines two Transformers.
type SimpleEncoding struct {
Decoder transform.Transformer
Encoder transform.Transformer
}
func (e *SimpleEncoding) NewDecoder() *encoding.Decoder {
return &encoding.Decoder{Transformer: e.Decoder}
}
func (e *SimpleEncoding) NewEncoder() *encoding.Encoder {
return &encoding.Encoder{Transformer: e.Encoder}
}
// FuncEncoding is an Encoding that combines two functions returning a new
// Transformer.
type FuncEncoding struct {
Decoder func() transform.Transformer
Encoder func() transform.Transformer
}
func (e FuncEncoding) NewDecoder() *encoding.Decoder {
return &encoding.Decoder{Transformer: e.Decoder()}
}
func (e FuncEncoding) NewEncoder() *encoding.Encoder {
return &encoding.Encoder{Transformer: e.Encoder()}
}
// A RepertoireError indicates a rune is not in the repertoire of a destination
// encoding. It is associated with an encoding-specific suggested replacement
// byte.
type RepertoireError byte
// Error implements the error interface.
func (r RepertoireError) Error() string {
return "encoding: rune not supported by encoding."
}
// Replacement returns the replacement string associated with this error.
func (r RepertoireError) Replacement() byte { return byte(r) }
var ErrASCIIReplacement = RepertoireError(encoding.ASCIISub)
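A RepertoireError is what an encoder returns when a rune has no mapping in the target charset; the errorToHTML and errorToReplacement handlers shown earlier are the two standard ways of recovering from it. A small sketch of the difference, assuming the encoding.ReplaceUnsupported and encoding.HTMLEscapeUnsupported wrappers that install those handlers (the input string is illustrative):

package main

import (
	"fmt"

	"golang.org/x/text/encoding"
	"golang.org/x/text/encoding/charmap"
	"golang.org/x/text/transform"
)

func main() {
	const s = "10円" // '円' is outside the Windows-1252 repertoire

	// ReplaceUnsupported installs errorToReplacement, so the unsupported rune
	// becomes the charmap's suggested replacement byte (ASCIISub, 0x1a).
	repl := encoding.ReplaceUnsupported(charmap.Windows1252.NewEncoder())
	b, _, _ := transform.Bytes(repl, []byte(s))
	fmt.Printf("%q\n", b) // "10\x1a"

	// HTMLEscapeUnsupported installs errorToHTML, which writes a numeric
	// character reference for the rune instead.
	esc := encoding.HTMLEscapeUnsupported(charmap.Windows1252.NewEncoder())
	b, _, _ = transform.Bytes(esc, []byte(s))
	fmt.Printf("%s\n", b) // 10&#20870;
}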

12
vendor/golang.org/x/text/encoding/japanese/all.go generated vendored Normal file

@ -0,0 +1,12 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package japanese
import (
"golang.org/x/text/encoding"
)
// All is a list of all defined encodings in this package.
var All = []encoding.Encoding{EUCJP, ISO2022JP, ShiftJIS}

211
vendor/golang.org/x/text/encoding/japanese/eucjp.go generated vendored Normal file

@ -0,0 +1,211 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package japanese
import (
"errors"
"unicode/utf8"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/transform"
)
// EUCJP is the EUC-JP encoding.
var EUCJP encoding.Encoding = &eucJP
var eucJP = internal.Encoding{
&internal.SimpleEncoding{eucJPDecoder{}, eucJPEncoder{}},
"EUC-JP",
identifier.EUCPkdFmtJapanese,
}
var errInvalidEUCJP = errors.New("japanese: invalid EUC-JP encoding")
type eucJPDecoder struct{ transform.NopResetter }
func (eucJPDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
loop:
for ; nSrc < len(src); nSrc += size {
switch c0 := src[nSrc]; {
case c0 < utf8.RuneSelf:
r, size = rune(c0), 1
case c0 == 0x8e:
if nSrc+1 >= len(src) {
err = transform.ErrShortSrc
break loop
}
c1 := src[nSrc+1]
if c1 < 0xa1 || 0xdf < c1 {
err = errInvalidEUCJP
break loop
}
r, size = rune(c1)+(0xff61-0xa1), 2
case c0 == 0x8f:
if nSrc+2 >= len(src) {
err = transform.ErrShortSrc
break loop
}
c1 := src[nSrc+1]
if c1 < 0xa1 || 0xfe < c1 {
err = errInvalidEUCJP
break loop
}
c2 := src[nSrc+2]
if c2 < 0xa1 || 0xfe < c2 {
err = errInvalidEUCJP
break loop
}
r, size = '\ufffd', 3
if i := int(c1-0xa1)*94 + int(c2-0xa1); i < len(jis0212Decode) {
r = rune(jis0212Decode[i])
if r == 0 {
r = '\ufffd'
}
}
case 0xa1 <= c0 && c0 <= 0xfe:
if nSrc+1 >= len(src) {
err = transform.ErrShortSrc
break loop
}
c1 := src[nSrc+1]
if c1 < 0xa1 || 0xfe < c1 {
err = errInvalidEUCJP
break loop
}
r, size = '\ufffd', 2
if i := int(c0-0xa1)*94 + int(c1-0xa1); i < len(jis0208Decode) {
r = rune(jis0208Decode[i])
if r == 0 {
r = '\ufffd'
}
}
default:
err = errInvalidEUCJP
break loop
}
if nDst+utf8.RuneLen(r) > len(dst) {
err = transform.ErrShortDst
break loop
}
nDst += utf8.EncodeRune(dst[nDst:], r)
}
if atEOF && err == transform.ErrShortSrc {
err = errInvalidEUCJP
}
return nDst, nSrc, err
}
type eucJPEncoder struct{ transform.NopResetter }
func (eucJPEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
for ; nSrc < len(src); nSrc += size {
r = rune(src[nSrc])
// Decode a 1-byte rune.
if r < utf8.RuneSelf {
size = 1
} else {
// Decode a multi-byte rune.
r, size = utf8.DecodeRune(src[nSrc:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
}
// func init checks that the switch covers all tables.
switch {
case encode0Low <= r && r < encode0High:
if r = rune(encode0[r-encode0Low]); r != 0 {
goto write2or3
}
case encode1Low <= r && r < encode1High:
if r = rune(encode1[r-encode1Low]); r != 0 {
goto write2or3
}
case encode2Low <= r && r < encode2High:
if r = rune(encode2[r-encode2Low]); r != 0 {
goto write2or3
}
case encode3Low <= r && r < encode3High:
if r = rune(encode3[r-encode3Low]); r != 0 {
goto write2or3
}
case encode4Low <= r && r < encode4High:
if r = rune(encode4[r-encode4Low]); r != 0 {
goto write2or3
}
case encode5Low <= r && r < encode5High:
if 0xff61 <= r && r < 0xffa0 {
goto write2
}
if r = rune(encode5[r-encode5Low]); r != 0 {
goto write2or3
}
}
err = internal.ErrASCIIReplacement
break
}
if nDst >= len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = uint8(r)
nDst++
continue
write2or3:
if r>>tableShift == jis0208 {
if nDst+2 > len(dst) {
err = transform.ErrShortDst
break
}
} else {
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = 0x8f
nDst++
}
dst[nDst+0] = 0xa1 + uint8(r>>codeShift)&codeMask
dst[nDst+1] = 0xa1 + uint8(r)&codeMask
nDst += 2
continue
write2:
if nDst+2 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = 0x8e
dst[nDst+1] = uint8(r - (0xff61 - 0xa1))
nDst += 2
continue
}
return nDst, nSrc, err
}
func init() {
// Check that the hard-coded encode switch covers all tables.
if numEncodeTables != 6 {
panic("bad numEncodeTables")
}
}

296
vendor/golang.org/x/text/encoding/japanese/iso2022jp.go generated vendored Normal file

@ -0,0 +1,296 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package japanese
import (
"errors"
"unicode/utf8"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/transform"
)
// ISO2022JP is the ISO-2022-JP encoding.
var ISO2022JP encoding.Encoding = &iso2022JP
var iso2022JP = internal.Encoding{
internal.FuncEncoding{iso2022JPNewDecoder, iso2022JPNewEncoder},
"ISO-2022-JP",
identifier.ISO2022JP,
}
func iso2022JPNewDecoder() transform.Transformer {
return new(iso2022JPDecoder)
}
func iso2022JPNewEncoder() transform.Transformer {
return new(iso2022JPEncoder)
}
var errInvalidISO2022JP = errors.New("japanese: invalid ISO-2022-JP encoding")
const (
asciiState = iota
katakanaState
jis0208State
jis0212State
)
const asciiEsc = 0x1b
type iso2022JPDecoder int
func (d *iso2022JPDecoder) Reset() {
*d = asciiState
}
func (d *iso2022JPDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
loop:
for ; nSrc < len(src); nSrc += size {
c0 := src[nSrc]
if c0 >= utf8.RuneSelf {
err = errInvalidISO2022JP
break loop
}
if c0 == asciiEsc {
if nSrc+2 >= len(src) {
err = transform.ErrShortSrc
break loop
}
size = 3
c1 := src[nSrc+1]
c2 := src[nSrc+2]
switch {
case c1 == '$' && (c2 == '@' || c2 == 'B'):
*d = jis0208State
continue
case c1 == '$' && c2 == '(':
if nSrc+3 >= len(src) {
err = transform.ErrShortSrc
break loop
}
size = 4
if src[nSrc+3] == 'D' { // ESC $ ( D switches to JIS X 0212.
*d = jis0212State
continue
}
case c1 == '(' && (c2 == 'B' || c2 == 'J'):
*d = asciiState
continue
case c1 == '(' && c2 == 'I':
*d = katakanaState
continue
}
err = errInvalidISO2022JP
break loop
}
switch *d {
case asciiState:
r, size = rune(c0), 1
case katakanaState:
if c0 < 0x21 || 0x60 <= c0 {
err = errInvalidISO2022JP
break loop
}
r, size = rune(c0)+(0xff61-0x21), 1
default:
if c0 == 0x0a {
*d = asciiState
r, size = rune(c0), 1
break
}
if nSrc+1 >= len(src) {
err = transform.ErrShortSrc
break loop
}
size = 2
c1 := src[nSrc+1]
i := int(c0-0x21)*94 + int(c1-0x21)
if *d == jis0208State && i < len(jis0208Decode) {
r = rune(jis0208Decode[i])
} else if *d == jis0212State && i < len(jis0212Decode) {
r = rune(jis0212Decode[i])
} else {
r = '\ufffd'
break
}
if r == 0 {
r = '\ufffd'
}
}
if nDst+utf8.RuneLen(r) > len(dst) {
err = transform.ErrShortDst
break loop
}
nDst += utf8.EncodeRune(dst[nDst:], r)
}
if atEOF && err == transform.ErrShortSrc {
err = errInvalidISO2022JP
}
return nDst, nSrc, err
}
type iso2022JPEncoder int
func (e *iso2022JPEncoder) Reset() {
*e = asciiState
}
func (e *iso2022JPEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
for ; nSrc < len(src); nSrc += size {
r = rune(src[nSrc])
// Decode a 1-byte rune.
if r < utf8.RuneSelf {
size = 1
} else {
// Decode a multi-byte rune.
r, size = utf8.DecodeRune(src[nSrc:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
}
// func init checks that the switch covers all tables.
//
// http://encoding.spec.whatwg.org/#iso-2022-jp says that "the index jis0212
// is not used by the iso-2022-jp encoder due to lack of widespread support".
//
// TODO: do we have to special-case U+00A5 and U+203E, as per
// http://encoding.spec.whatwg.org/#iso-2022-jp
// Doing so would mean that "\u00a5" would not be preserved
// after an encode-decode round trip.
switch {
case encode0Low <= r && r < encode0High:
if r = rune(encode0[r-encode0Low]); r>>tableShift == jis0208 {
goto writeJIS
}
case encode1Low <= r && r < encode1High:
if r = rune(encode1[r-encode1Low]); r>>tableShift == jis0208 {
goto writeJIS
}
case encode2Low <= r && r < encode2High:
if r = rune(encode2[r-encode2Low]); r>>tableShift == jis0208 {
goto writeJIS
}
case encode3Low <= r && r < encode3High:
if r = rune(encode3[r-encode3Low]); r>>tableShift == jis0208 {
goto writeJIS
}
case encode4Low <= r && r < encode4High:
if r = rune(encode4[r-encode4Low]); r>>tableShift == jis0208 {
goto writeJIS
}
case encode5Low <= r && r < encode5High:
if 0xff61 <= r && r < 0xffa0 {
goto writeKatakana
}
if r = rune(encode5[r-encode5Low]); r>>tableShift == jis0208 {
goto writeJIS
}
}
// Switch back to ASCII state in case of error so that an ASCII
// replacement character can be written in the correct state.
if *e != asciiState {
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
*e = asciiState
dst[nDst+0] = asciiEsc
dst[nDst+1] = '('
dst[nDst+2] = 'B'
nDst += 3
}
err = internal.ErrASCIIReplacement
break
}
if *e != asciiState {
if nDst+4 > len(dst) {
err = transform.ErrShortDst
break
}
*e = asciiState
dst[nDst+0] = asciiEsc
dst[nDst+1] = '('
dst[nDst+2] = 'B'
nDst += 3
} else if nDst >= len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = uint8(r)
nDst++
continue
writeJIS:
if *e != jis0208State {
if nDst+5 > len(dst) {
err = transform.ErrShortDst
break
}
*e = jis0208State
dst[nDst+0] = asciiEsc
dst[nDst+1] = '$'
dst[nDst+2] = 'B'
nDst += 3
} else if nDst+2 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = 0x21 + uint8(r>>codeShift)&codeMask
dst[nDst+1] = 0x21 + uint8(r)&codeMask
nDst += 2
continue
writeKatakana:
if *e != katakanaState {
if nDst+4 > len(dst) {
err = transform.ErrShortDst
break
}
*e = katakanaState
dst[nDst+0] = asciiEsc
dst[nDst+1] = '('
dst[nDst+2] = 'I'
nDst += 3
} else if nDst >= len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = uint8(r - (0xff61 - 0x21))
nDst++
continue
}
if atEOF && err == nil && *e != asciiState {
if nDst+3 > len(dst) {
err = transform.ErrShortDst
} else {
*e = asciiState
dst[nDst+0] = asciiEsc
dst[nDst+1] = '('
dst[nDst+2] = 'B'
nDst += 3
}
}
return nDst, nSrc, err
}
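The encoder above is stateful: it emits an escape sequence whenever it changes character sets and always returns to ASCII state at EOF. A round-trip sketch (the exact JIS bytes are not spelled out, only the overall shape):

package main

import (
	"fmt"

	"golang.org/x/text/encoding/japanese"
	"golang.org/x/text/transform"
)

func main() {
	// Encode mixed ASCII and kanji text to ISO-2022-JP.
	b, _, err := transform.Bytes(japanese.ISO2022JP.NewEncoder(), []byte("Go言語"))
	fmt.Printf("%q %v\n", b, err)
	// Expected shape: "Go", ESC $ B, two bytes per kanji, then ESC ( B at EOF.

	// Decoding restores the original UTF-8 text.
	s, _, _ := transform.String(japanese.ISO2022JP.NewDecoder(), string(b))
	fmt.Println(s) // Go言語
}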


@ -0,0 +1,161 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
// This program generates tables.go:
// go run maketables.go | gofmt > tables.go
// TODO: Emoji extensions?
// http://www.unicode.org/faq/emoji_dingbats.html
// http://www.unicode.org/Public/UNIDATA/EmojiSources.txt
import (
"bufio"
"fmt"
"log"
"net/http"
"sort"
"strings"
)
type entry struct {
jisCode, table int
}
func main() {
fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
fmt.Printf("// Package japanese provides Japanese encodings such as EUC-JP and Shift JIS.\n")
fmt.Printf(`package japanese // import "golang.org/x/text/encoding/japanese"` + "\n\n")
reverse := [65536]entry{}
for i := range reverse {
reverse[i].table = -1
}
tables := []struct {
url string
name string
}{
{"http://encoding.spec.whatwg.org/index-jis0208.txt", "0208"},
{"http://encoding.spec.whatwg.org/index-jis0212.txt", "0212"},
}
for i, table := range tables {
res, err := http.Get(table.url)
if err != nil {
log.Fatalf("%q: Get: %v", table.url, err)
}
defer res.Body.Close()
mapping := [65536]uint16{}
scanner := bufio.NewScanner(res.Body)
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
if s == "" || s[0] == '#' {
continue
}
x, y := 0, uint16(0)
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
log.Fatalf("%q: could not parse %q", table.url, s)
}
if x < 0 || 120*94 <= x {
log.Fatalf("%q: JIS code %d is out of range", table.url, x)
}
mapping[x] = y
if reverse[y].table == -1 {
reverse[y] = entry{jisCode: x, table: i}
}
}
if err := scanner.Err(); err != nil {
log.Fatalf("%q: scanner error: %v", table.url, err)
}
fmt.Printf("// jis%sDecode is the decoding table from JIS %s code to Unicode.\n// It is defined at %s\n",
table.name, table.name, table.url)
fmt.Printf("var jis%sDecode = [...]uint16{\n", table.name)
for i, m := range mapping {
if m != 0 {
fmt.Printf("\t%d: 0x%04X,\n", i, m)
}
}
fmt.Printf("}\n\n")
}
// Any run of at least separation continuous zero entries in the reverse map will
// be a separate encode table.
const separation = 1024
intervals := []interval(nil)
low, high := -1, -1
for i, v := range reverse {
if v.table == -1 {
continue
}
if low < 0 {
low = i
} else if i-high >= separation {
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
low = i
}
high = i + 1
}
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
sort.Sort(byDecreasingLength(intervals))
fmt.Printf("const (\n")
fmt.Printf("\tjis0208 = 1\n")
fmt.Printf("\tjis0212 = 2\n")
fmt.Printf("\tcodeMask = 0x7f\n")
fmt.Printf("\tcodeShift = 7\n")
fmt.Printf("\ttableShift = 14\n")
fmt.Printf(")\n\n")
fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
fmt.Printf("// encodeX are the encoding tables from Unicode to JIS code,\n")
fmt.Printf("// sorted by decreasing length.\n")
for i, v := range intervals {
fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
}
fmt.Printf("//\n")
fmt.Printf("// The high two bits of the value record whether the JIS code comes from the\n")
fmt.Printf("// JIS0208 table (high bits == 1) or the JIS0212 table (high bits == 2).\n")
fmt.Printf("// The low 14 bits are two 7-bit unsigned integers j1 and j2 that form the\n")
fmt.Printf("// JIS code (94*j1 + j2) within that table.\n")
fmt.Printf("\n")
for i, v := range intervals {
fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
fmt.Printf("var encode%d = [...]uint16{\n", i)
for j := v.low; j < v.high; j++ {
x := reverse[j]
if x.table == -1 {
continue
}
fmt.Printf("\t%d - %d: jis%s<<14 | 0x%02X<<7 | 0x%02X,\n",
j, v.low, tables[x.table].name, x.jisCode/94, x.jisCode%94)
}
fmt.Printf("}\n\n")
}
}
// interval is a half-open interval [low, high).
type interval struct {
low, high int
}
func (i interval) len() int { return i.high - i.low }
// byDecreasingLength sorts intervals by decreasing length.
type byDecreasingLength []interval
func (b byDecreasingLength) Len() int { return len(b) }
func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
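The packed table format described in the comments above (a table selector in the top two bits, then j1 and j2 as 7-bit fields) is what the eucJPEncoder and shiftJISEncoder unpack with tableShift, codeShift and codeMask. A small standalone sketch of that arithmetic on a hypothetical packed value:

package main

import "fmt"

// Constants mirroring the ones emitted into the generated tables.go.
const (
	jis0208    = 1
	jis0212    = 2
	codeMask   = 0x7f
	codeShift  = 7
	tableShift = 14
)

func main() {
	// A hypothetical packed entry: JIS0208 table, j1 = 0x21, j2 = 0x5a.
	v := uint16(jis0208<<tableShift | 0x21<<codeShift | 0x5a)

	table := v >> tableShift             // 1, i.e. the JIS0208 table
	j1 := uint8(v>>codeShift) & codeMask // 0x21
	j2 := uint8(v) & codeMask            // 0x5a
	fmt.Println(table, j1, j2)

	// EUC-JP then writes 0xa1+j1, 0xa1+j2 (ISO-2022-JP writes 0x21+j1, 0x21+j2),
	// i.e. the JIS code 94*j1 + j2 shifted into that encoding's byte range.
	fmt.Printf("EUC-JP bytes: %#x %#x\n", 0xa1+j1, 0xa1+j2)
}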

189
vendor/golang.org/x/text/encoding/japanese/shiftjis.go generated vendored Normal file

@ -0,0 +1,189 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package japanese
import (
"errors"
"unicode/utf8"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/transform"
)
// ShiftJIS is the Shift JIS encoding, also known as Code Page 932 and
// Windows-31J.
var ShiftJIS encoding.Encoding = &shiftJIS
var shiftJIS = internal.Encoding{
&internal.SimpleEncoding{shiftJISDecoder{}, shiftJISEncoder{}},
"Shift JIS",
identifier.ShiftJIS,
}
var errInvalidShiftJIS = errors.New("japanese: invalid Shift JIS encoding")
type shiftJISDecoder struct{ transform.NopResetter }
func (shiftJISDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
loop:
for ; nSrc < len(src); nSrc += size {
switch c0 := src[nSrc]; {
case c0 < utf8.RuneSelf:
r, size = rune(c0), 1
case 0xa1 <= c0 && c0 < 0xe0:
r, size = rune(c0)+(0xff61-0xa1), 1
case (0x81 <= c0 && c0 < 0xa0) || (0xe0 <= c0 && c0 < 0xfd):
if c0 <= 0x9f {
c0 -= 0x70
} else {
c0 -= 0xb0
}
c0 = 2*c0 - 0x21
if nSrc+1 >= len(src) {
err = transform.ErrShortSrc
break loop
}
c1 := src[nSrc+1]
switch {
case c1 < 0x40:
err = errInvalidShiftJIS
break loop
case c1 < 0x7f:
c0--
c1 -= 0x40
case c1 == 0x7f:
err = errInvalidShiftJIS
break loop
case c1 < 0x9f:
c0--
c1 -= 0x41
case c1 < 0xfd:
c1 -= 0x9f
default:
err = errInvalidShiftJIS
break loop
}
r, size = '\ufffd', 2
if i := int(c0)*94 + int(c1); i < len(jis0208Decode) {
r = rune(jis0208Decode[i])
if r == 0 {
r = '\ufffd'
}
}
default:
err = errInvalidShiftJIS
break loop
}
if nDst+utf8.RuneLen(r) > len(dst) {
err = transform.ErrShortDst
break loop
}
nDst += utf8.EncodeRune(dst[nDst:], r)
}
if atEOF && err == transform.ErrShortSrc {
err = errInvalidShiftJIS
}
return nDst, nSrc, err
}
type shiftJISEncoder struct{ transform.NopResetter }
func (shiftJISEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
loop:
for ; nSrc < len(src); nSrc += size {
r = rune(src[nSrc])
// Decode a 1-byte rune.
if r < utf8.RuneSelf {
size = 1
} else {
// Decode a multi-byte rune.
r, size = utf8.DecodeRune(src[nSrc:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break loop
}
}
// func init checks that the switch covers all tables.
switch {
case encode0Low <= r && r < encode0High:
if r = rune(encode0[r-encode0Low]); r>>tableShift == jis0208 {
goto write2
}
case encode1Low <= r && r < encode1High:
if r = rune(encode1[r-encode1Low]); r>>tableShift == jis0208 {
goto write2
}
case encode2Low <= r && r < encode2High:
if r = rune(encode2[r-encode2Low]); r>>tableShift == jis0208 {
goto write2
}
case encode3Low <= r && r < encode3High:
if r = rune(encode3[r-encode3Low]); r>>tableShift == jis0208 {
goto write2
}
case encode4Low <= r && r < encode4High:
if r = rune(encode4[r-encode4Low]); r>>tableShift == jis0208 {
goto write2
}
case encode5Low <= r && r < encode5High:
if 0xff61 <= r && r < 0xffa0 {
r -= 0xff61 - 0xa1
goto write1
}
if r = rune(encode5[r-encode5Low]); r>>tableShift == jis0208 {
goto write2
}
}
err = internal.ErrASCIIReplacement
break
}
write1:
if nDst >= len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = uint8(r)
nDst++
continue
write2:
j1 := uint8(r>>codeShift) & codeMask
j2 := uint8(r) & codeMask
if nDst+2 > len(dst) {
err = transform.ErrShortDst
break loop
}
if j1 <= 61 {
dst[nDst+0] = 129 + j1/2
} else {
dst[nDst+0] = 193 + j1/2
}
if j1&1 == 0 {
dst[nDst+1] = j2 + j2/63 + 64
} else {
dst[nDst+1] = j2 + 159
}
nDst += 2
continue
}
return nDst, nSrc, err
}

26971
vendor/golang.org/x/text/encoding/japanese/tables.go generated vendored Normal file

File diff suppressed because it is too large

178
vendor/golang.org/x/text/encoding/korean/euckr.go generated vendored Normal file

@ -0,0 +1,178 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package korean
import (
"errors"
"unicode/utf8"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/transform"
)
// All is a list of all defined encodings in this package.
var All = []encoding.Encoding{EUCKR}
// EUCKR is the EUC-KR encoding, also known as Code Page 949.
var EUCKR encoding.Encoding = &eucKR
var eucKR = internal.Encoding{
&internal.SimpleEncoding{eucKRDecoder{}, eucKREncoder{}},
"EUC-KR",
identifier.EUCKR,
}
var errInvalidEUCKR = errors.New("korean: invalid EUC-KR encoding")
type eucKRDecoder struct{ transform.NopResetter }
func (eucKRDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
loop:
for ; nSrc < len(src); nSrc += size {
switch c0 := src[nSrc]; {
case c0 < utf8.RuneSelf:
r, size = rune(c0), 1
case 0x81 <= c0 && c0 < 0xff:
if nSrc+1 >= len(src) {
err = transform.ErrShortSrc
break loop
}
c1 := src[nSrc+1]
if c0 < 0xc7 {
r = 178 * rune(c0-0x81)
switch {
case 0x41 <= c1 && c1 < 0x5b:
r += rune(c1) - (0x41 - 0*26)
case 0x61 <= c1 && c1 < 0x7b:
r += rune(c1) - (0x61 - 1*26)
case 0x81 <= c1 && c1 < 0xff:
r += rune(c1) - (0x81 - 2*26)
default:
err = errInvalidEUCKR
break loop
}
} else if 0xa1 <= c1 && c1 < 0xff {
r = 178*(0xc7-0x81) + rune(c0-0xc7)*94 + rune(c1-0xa1)
} else {
err = errInvalidEUCKR
break loop
}
if int(r) < len(decode) {
r = rune(decode[r])
if r == 0 {
r = '\ufffd'
}
} else {
r = '\ufffd'
}
size = 2
default:
err = errInvalidEUCKR
break loop
}
if nDst+utf8.RuneLen(r) > len(dst) {
err = transform.ErrShortDst
break loop
}
nDst += utf8.EncodeRune(dst[nDst:], r)
}
if atEOF && err == transform.ErrShortSrc {
err = errInvalidEUCKR
}
return nDst, nSrc, err
}
type eucKREncoder struct{ transform.NopResetter }
func (eucKREncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
for ; nSrc < len(src); nSrc += size {
r = rune(src[nSrc])
// Decode a 1-byte rune.
if r < utf8.RuneSelf {
size = 1
if nDst >= len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = uint8(r)
nDst++
continue
} else {
// Decode a multi-byte rune.
r, size = utf8.DecodeRune(src[nSrc:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
}
// func init checks that the switch covers all tables.
switch {
case encode0Low <= r && r < encode0High:
if r = rune(encode0[r-encode0Low]); r != 0 {
goto write2
}
case encode1Low <= r && r < encode1High:
if r = rune(encode1[r-encode1Low]); r != 0 {
goto write2
}
case encode2Low <= r && r < encode2High:
if r = rune(encode2[r-encode2Low]); r != 0 {
goto write2
}
case encode3Low <= r && r < encode3High:
if r = rune(encode3[r-encode3Low]); r != 0 {
goto write2
}
case encode4Low <= r && r < encode4High:
if r = rune(encode4[r-encode4Low]); r != 0 {
goto write2
}
case encode5Low <= r && r < encode5High:
if r = rune(encode5[r-encode5Low]); r != 0 {
goto write2
}
case encode6Low <= r && r < encode6High:
if r = rune(encode6[r-encode6Low]); r != 0 {
goto write2
}
}
err = internal.ErrASCIIReplacement
break
}
write2:
if nDst+2 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = uint8(r >> 8)
dst[nDst+1] = uint8(r)
nDst += 2
continue
}
return nDst, nSrc, err
}
func init() {
// Check that the hard-coded encode switch covers all tables.
if numEncodeTables != 7 {
panic("bad numEncodeTables")
}
}

143
vendor/golang.org/x/text/encoding/korean/maketables.go generated vendored Normal file

@ -0,0 +1,143 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
// This program generates tables.go:
// go run maketables.go | gofmt > tables.go
import (
"bufio"
"fmt"
"log"
"net/http"
"sort"
"strings"
)
func main() {
fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
fmt.Printf("// Package korean provides Korean encodings such as EUC-KR.\n")
fmt.Printf(`package korean // import "golang.org/x/text/encoding/korean"` + "\n\n")
res, err := http.Get("http://encoding.spec.whatwg.org/index-euc-kr.txt")
if err != nil {
log.Fatalf("Get: %v", err)
}
defer res.Body.Close()
mapping := [65536]uint16{}
reverse := [65536]uint16{}
scanner := bufio.NewScanner(res.Body)
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
if s == "" || s[0] == '#' {
continue
}
x, y := uint16(0), uint16(0)
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
log.Fatalf("could not parse %q", s)
}
if x < 0 || 178*(0xc7-0x81)+(0xfe-0xc7)*94+(0xff-0xa1) <= x {
log.Fatalf("EUC-KR code %d is out of range", x)
}
mapping[x] = y
if reverse[y] == 0 {
c0, c1 := uint16(0), uint16(0)
if x < 178*(0xc7-0x81) {
c0 = uint16(x/178) + 0x81
c1 = uint16(x % 178)
switch {
case c1 < 1*26:
c1 += 0x41
case c1 < 2*26:
c1 += 0x47
default:
c1 += 0x4d
}
} else {
x -= 178 * (0xc7 - 0x81)
c0 = uint16(x/94) + 0xc7
c1 = uint16(x%94) + 0xa1
}
reverse[y] = c0<<8 | c1
}
}
if err := scanner.Err(); err != nil {
log.Fatalf("scanner error: %v", err)
}
fmt.Printf("// decode is the decoding table from EUC-KR code to Unicode.\n")
fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-euc-kr.txt\n")
fmt.Printf("var decode = [...]uint16{\n")
for i, v := range mapping {
if v != 0 {
fmt.Printf("\t%d: 0x%04X,\n", i, v)
}
}
fmt.Printf("}\n\n")
// Any run of at least separation continuous zero entries in the reverse map will
// be a separate encode table.
const separation = 1024
intervals := []interval(nil)
low, high := -1, -1
for i, v := range reverse {
if v == 0 {
continue
}
if low < 0 {
low = i
} else if i-high >= separation {
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
low = i
}
high = i + 1
}
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
sort.Sort(byDecreasingLength(intervals))
fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
fmt.Printf("// encodeX are the encoding tables from Unicode to EUC-KR code,\n")
fmt.Printf("// sorted by decreasing length.\n")
for i, v := range intervals {
fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
}
fmt.Printf("\n")
for i, v := range intervals {
fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
fmt.Printf("var encode%d = [...]uint16{\n", i)
for j := v.low; j < v.high; j++ {
x := reverse[j]
if x == 0 {
continue
}
fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
}
fmt.Printf("}\n\n")
}
}
// interval is a half-open interval [low, high).
type interval struct {
low, high int
}
func (i interval) len() int { return i.high - i.low }
// byDecreasingLength sorts intervals by decreasing length.
type byDecreasingLength []interval
func (b byDecreasingLength) Len() int { return len(b) }
func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }

34152
vendor/golang.org/x/text/encoding/korean/tables.go generated vendored Normal file

File diff suppressed because it is too large


@ -0,0 +1,12 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package simplifiedchinese
import (
"golang.org/x/text/encoding"
)
// All is a list of all defined encodings in this package.
var All = []encoding.Encoding{GB18030, GBK, HZGB2312}


@ -0,0 +1,281 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package simplifiedchinese
import (
"errors"
"unicode/utf8"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/transform"
)
var (
// GB18030 is the GB18030 encoding.
GB18030 encoding.Encoding = &gbk18030
// GBK is the GBK encoding. It encodes an extension of the GB2312 character set
// and is also known as Code Page 936.
GBK encoding.Encoding = &gbk
)
var gbk = internal.Encoding{
&internal.SimpleEncoding{
gbkDecoder{gb18030: false},
gbkEncoder{gb18030: false},
},
"GBK",
identifier.GBK,
}
var gbk18030 = internal.Encoding{
&internal.SimpleEncoding{
gbkDecoder{gb18030: true},
gbkEncoder{gb18030: true},
},
"GB18030",
identifier.GB18030,
}
var (
errInvalidGB18030 = errors.New("simplifiedchinese: invalid GB18030 encoding")
errInvalidGBK = errors.New("simplifiedchinese: invalid GBK encoding")
)
type gbkDecoder struct {
transform.NopResetter
gb18030 bool
}
func (d gbkDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
loop:
for ; nSrc < len(src); nSrc += size {
switch c0 := src[nSrc]; {
case c0 < utf8.RuneSelf:
r, size = rune(c0), 1
// Microsoft's Code Page 936 extends GBK 1.0 to encode the euro sign U+20AC
// as 0x80. The HTML5 specification at http://encoding.spec.whatwg.org/#gbk
// says to treat "gbk" as Code Page 936.
case c0 == 0x80:
r, size = '€', 1
case c0 < 0xff:
if nSrc+1 >= len(src) {
err = transform.ErrShortSrc
break loop
}
c1 := src[nSrc+1]
switch {
case 0x40 <= c1 && c1 < 0x7f:
c1 -= 0x40
case 0x80 <= c1 && c1 < 0xff:
c1 -= 0x41
case d.gb18030 && 0x30 <= c1 && c1 < 0x40:
if nSrc+3 >= len(src) {
err = transform.ErrShortSrc
break loop
}
c2 := src[nSrc+2]
if c2 < 0x81 || 0xff <= c2 {
err = errInvalidGB18030
break loop
}
c3 := src[nSrc+3]
if c3 < 0x30 || 0x3a <= c3 {
err = errInvalidGB18030
break loop
}
size = 4
r = ((rune(c0-0x81)*10+rune(c1-0x30))*126+rune(c2-0x81))*10 + rune(c3-0x30)
if r < 39420 {
i, j := 0, len(gb18030)
for i < j {
h := i + (j-i)/2
if r >= rune(gb18030[h][0]) {
i = h + 1
} else {
j = h
}
}
dec := &gb18030[i-1]
r += rune(dec[1]) - rune(dec[0])
goto write
}
r -= 189000
if 0 <= r && r < 0x100000 {
r += 0x10000
goto write
}
err = errInvalidGB18030
break loop
default:
if d.gb18030 {
err = errInvalidGB18030
} else {
err = errInvalidGBK
}
break loop
}
r, size = '\ufffd', 2
if i := int(c0-0x81)*190 + int(c1); i < len(decode) {
r = rune(decode[i])
if r == 0 {
r = '\ufffd'
}
}
default:
if d.gb18030 {
err = errInvalidGB18030
} else {
err = errInvalidGBK
}
break loop
}
write:
if nDst+utf8.RuneLen(r) > len(dst) {
err = transform.ErrShortDst
break loop
}
nDst += utf8.EncodeRune(dst[nDst:], r)
}
if atEOF && err == transform.ErrShortSrc {
if d.gb18030 {
err = errInvalidGB18030
} else {
err = errInvalidGBK
}
}
return nDst, nSrc, err
}
type gbkEncoder struct {
transform.NopResetter
gb18030 bool
}
func (e gbkEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, r2, size := rune(0), rune(0), 0
for ; nSrc < len(src); nSrc += size {
r = rune(src[nSrc])
// Decode a 1-byte rune.
if r < utf8.RuneSelf {
size = 1
} else {
// Decode a multi-byte rune.
r, size = utf8.DecodeRune(src[nSrc:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
}
// func init checks that the switch covers all tables.
switch {
case encode0Low <= r && r < encode0High:
if r2 = rune(encode0[r-encode0Low]); r2 != 0 {
goto write2
}
case encode1Low <= r && r < encode1High:
// Microsoft's Code Page 936 extends GBK 1.0 to encode the euro sign U+20AC
// as 0x80. The HTML5 specification at http://encoding.spec.whatwg.org/#gbk
// says to treat "gbk" as Code Page 936.
if r == '€' {
r = 0x80
goto write1
}
if r2 = rune(encode1[r-encode1Low]); r2 != 0 {
goto write2
}
case encode2Low <= r && r < encode2High:
if r2 = rune(encode2[r-encode2Low]); r2 != 0 {
goto write2
}
case encode3Low <= r && r < encode3High:
if r2 = rune(encode3[r-encode3Low]); r2 != 0 {
goto write2
}
case encode4Low <= r && r < encode4High:
if r2 = rune(encode4[r-encode4Low]); r2 != 0 {
goto write2
}
}
if e.gb18030 {
if r < 0x10000 {
i, j := 0, len(gb18030)
for i < j {
h := i + (j-i)/2
if r >= rune(gb18030[h][1]) {
i = h + 1
} else {
j = h
}
}
dec := &gb18030[i-1]
r += rune(dec[0]) - rune(dec[1])
goto write4
} else if r < 0x110000 {
r += 189000 - 0x10000
goto write4
}
}
err = internal.ErrASCIIReplacement
break
}
write1:
if nDst >= len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = uint8(r)
nDst++
continue
write2:
if nDst+2 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = uint8(r2 >> 8)
dst[nDst+1] = uint8(r2)
nDst += 2
continue
write4:
if nDst+4 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+3] = uint8(r%10 + 0x30)
r /= 10
dst[nDst+2] = uint8(r%126 + 0x81)
r /= 126
dst[nDst+1] = uint8(r%10 + 0x30)
r /= 10
dst[nDst+0] = uint8(r + 0x81)
nDst += 4
continue
}
return nDst, nSrc, err
}
func init() {
// Check that the hard-coded encode switch covers all tables.
if numEncodeTables != 5 {
panic("bad numEncodeTables")
}
}
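The Code Page 936 special case noted in both the decoder and the encoder above means the euro sign gets a dedicated single-byte form (0x80) in GBK. A quick round-trip sketch:

package main

import (
	"fmt"

	"golang.org/x/text/encoding/simplifiedchinese"
	"golang.org/x/text/transform"
)

func main() {
	// Per the Code Page 936 note, the encoder maps U+20AC ('€') to the single byte 0x80.
	b, _, err := transform.Bytes(simplifiedchinese.GBK.NewEncoder(), []byte("€"))
	fmt.Printf("%#x %v\n", b, err) // 0x80 <nil>

	// The decoder maps 0x80 back to '€'.
	s, _, _ := transform.String(simplifiedchinese.GBK.NewDecoder(), "\x80")
	fmt.Println(s) // €
}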


@ -0,0 +1,240 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package simplifiedchinese
import (
"errors"
"unicode/utf8"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/transform"
)
// HZGB2312 is the HZ-GB2312 encoding.
var HZGB2312 encoding.Encoding = &hzGB2312
var hzGB2312 = internal.Encoding{
internal.FuncEncoding{hzGB2312NewDecoder, hzGB2312NewEncoder},
"HZ-GB2312",
identifier.HZGB2312,
}
func hzGB2312NewDecoder() transform.Transformer {
return new(hzGB2312Decoder)
}
func hzGB2312NewEncoder() transform.Transformer {
return new(hzGB2312Encoder)
}
var errInvalidHZGB2312 = errors.New("simplifiedchinese: invalid HZ-GB2312 encoding")
const (
asciiState = iota
gbState
)
type hzGB2312Decoder int
func (d *hzGB2312Decoder) Reset() {
*d = asciiState
}
func (d *hzGB2312Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
loop:
for ; nSrc < len(src); nSrc += size {
c0 := src[nSrc]
if c0 >= utf8.RuneSelf {
err = errInvalidHZGB2312
break loop
}
if c0 == '~' {
if nSrc+1 >= len(src) {
err = transform.ErrShortSrc
break loop
}
size = 2
switch src[nSrc+1] {
case '{':
*d = gbState
continue
case '}':
*d = asciiState
continue
case '~':
if nDst >= len(dst) {
err = transform.ErrShortDst
break loop
}
dst[nDst] = '~'
nDst++
continue
case '\n':
continue
default:
err = errInvalidHZGB2312
break loop
}
}
if *d == asciiState {
r, size = rune(c0), 1
} else {
if nSrc+1 >= len(src) {
err = transform.ErrShortSrc
break loop
}
c1 := src[nSrc+1]
if c0 < 0x21 || 0x7e <= c0 || c1 < 0x21 || 0x7f <= c1 {
err = errInvalidHZGB2312
break loop
}
r, size = '\ufffd', 2
if i := int(c0-0x01)*190 + int(c1+0x3f); i < len(decode) {
r = rune(decode[i])
if r == 0 {
r = '\ufffd'
}
}
}
if nDst+utf8.RuneLen(r) > len(dst) {
err = transform.ErrShortDst
break loop
}
nDst += utf8.EncodeRune(dst[nDst:], r)
}
if atEOF && err == transform.ErrShortSrc {
err = errInvalidHZGB2312
}
return nDst, nSrc, err
}
type hzGB2312Encoder int
func (d *hzGB2312Encoder) Reset() {
*d = asciiState
}
func (e *hzGB2312Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
for ; nSrc < len(src); nSrc += size {
r = rune(src[nSrc])
// Decode a 1-byte rune.
if r < utf8.RuneSelf {
size = 1
if r == '~' {
if nDst+2 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = '~'
dst[nDst+1] = '~'
nDst += 2
continue
} else if *e != asciiState {
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
*e = asciiState
dst[nDst+0] = '~'
dst[nDst+1] = '}'
nDst += 2
} else if nDst >= len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = uint8(r)
nDst += 1
continue
}
// Decode a multi-byte rune.
r, size = utf8.DecodeRune(src[nSrc:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
}
// func init checks that the switch covers all tables.
switch {
case encode0Low <= r && r < encode0High:
if r = rune(encode0[r-encode0Low]); r != 0 {
goto writeGB
}
case encode1Low <= r && r < encode1High:
if r = rune(encode1[r-encode1Low]); r != 0 {
goto writeGB
}
case encode2Low <= r && r < encode2High:
if r = rune(encode2[r-encode2Low]); r != 0 {
goto writeGB
}
case encode3Low <= r && r < encode3High:
if r = rune(encode3[r-encode3Low]); r != 0 {
goto writeGB
}
case encode4Low <= r && r < encode4High:
if r = rune(encode4[r-encode4Low]); r != 0 {
goto writeGB
}
}
terminateInASCIIState:
// Switch back to ASCII state in case of error so that an ASCII
// replacement character can be written in the correct state.
if *e != asciiState {
if nDst+2 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = '~'
dst[nDst+1] = '}'
nDst += 2
}
err = internal.ErrASCIIReplacement
break
writeGB:
c0 := uint8(r>>8) - 0x80
c1 := uint8(r) - 0x80
if c0 < 0x21 || 0x7e <= c0 || c1 < 0x21 || 0x7f <= c1 {
goto terminateInASCIIState
}
if *e == asciiState {
if nDst+4 > len(dst) {
err = transform.ErrShortDst
break
}
*e = gbState
dst[nDst+0] = '~'
dst[nDst+1] = '{'
nDst += 2
} else if nDst+2 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = c0
dst[nDst+1] = c1
nDst += 2
continue
}
// TODO: should one always terminate in ASCII state to make it safe to
// concatenate two HZ-GB2312-encoded strings?
return nDst, nSrc, err
}
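
HZ-GB2312 is a 7-bit wrapping of GB2312: each GB byte pair has its high bits stripped and is enclosed in ~{ ... ~} escapes, with ~~ standing for a literal tilde. A small decoding sketch using the HZGB2312 value declared above; the pair 'V' 'P' (0x56 0x50) corresponds to GB2312 0xD6 0xD0, i.e. 中.

package main

import (
	"fmt"

	"golang.org/x/text/encoding/simplifiedchinese"
	"golang.org/x/text/transform"
)

func main() {
	// "~{" switches to GB state, "~}" back to ASCII, "~~" is a literal '~'.
	in := "hi ~{VP~} ~~ bye"
	out, _, err := transform.String(simplifiedchinese.HZGB2312.NewDecoder(), in)
	fmt.Printf("%q %v\n", out, err) // "hi 中 ~ bye" <nil>
}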

View file

@ -0,0 +1,161 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
// This program generates tables.go:
// go run maketables.go | gofmt > tables.go
import (
"bufio"
"fmt"
"log"
"net/http"
"sort"
"strings"
)
func main() {
fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
fmt.Printf("// Package simplifiedchinese provides Simplified Chinese encodings such as GBK.\n")
fmt.Printf(`package simplifiedchinese // import "golang.org/x/text/encoding/simplifiedchinese"` + "\n\n")
printGB18030()
printGBK()
}
func printGB18030() {
res, err := http.Get("http://encoding.spec.whatwg.org/index-gb18030.txt")
if err != nil {
log.Fatalf("Get: %v", err)
}
defer res.Body.Close()
fmt.Printf("// gb18030 is the table from http://encoding.spec.whatwg.org/index-gb18030.txt\n")
fmt.Printf("var gb18030 = [...][2]uint16{\n")
scanner := bufio.NewScanner(res.Body)
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
if s == "" || s[0] == '#' {
continue
}
x, y := uint32(0), uint32(0)
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
log.Fatalf("could not parse %q", s)
}
if x < 0x10000 && y < 0x10000 {
fmt.Printf("\t{0x%04x, 0x%04x},\n", x, y)
}
}
fmt.Printf("}\n\n")
}
func printGBK() {
res, err := http.Get("http://encoding.spec.whatwg.org/index-gbk.txt")
if err != nil {
log.Fatalf("Get: %v", err)
}
defer res.Body.Close()
mapping := [65536]uint16{}
reverse := [65536]uint16{}
scanner := bufio.NewScanner(res.Body)
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
if s == "" || s[0] == '#' {
continue
}
x, y := uint16(0), uint16(0)
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
log.Fatalf("could not parse %q", s)
}
if x < 0 || 126*190 <= x {
log.Fatalf("GBK code %d is out of range", x)
}
mapping[x] = y
if reverse[y] == 0 {
c0, c1 := x/190, x%190
if c1 >= 0x3f {
c1++
}
reverse[y] = (0x81+c0)<<8 | (0x40 + c1)
}
}
if err := scanner.Err(); err != nil {
log.Fatalf("scanner error: %v", err)
}
fmt.Printf("// decode is the decoding table from GBK code to Unicode.\n")
fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-gbk.txt\n")
fmt.Printf("var decode = [...]uint16{\n")
for i, v := range mapping {
if v != 0 {
fmt.Printf("\t%d: 0x%04X,\n", i, v)
}
}
fmt.Printf("}\n\n")
	// Any gap of at least `separation` consecutive zero entries in the reverse
	// map starts a separate encode table.
const separation = 1024
intervals := []interval(nil)
low, high := -1, -1
for i, v := range reverse {
if v == 0 {
continue
}
if low < 0 {
low = i
} else if i-high >= separation {
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
low = i
}
high = i + 1
}
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
sort.Sort(byDecreasingLength(intervals))
fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
fmt.Printf("// encodeX are the encoding tables from Unicode to GBK code,\n")
fmt.Printf("// sorted by decreasing length.\n")
for i, v := range intervals {
fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
}
fmt.Printf("\n")
for i, v := range intervals {
fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
fmt.Printf("var encode%d = [...]uint16{\n", i)
for j := v.low; j < v.high; j++ {
x := reverse[j]
if x == 0 {
continue
}
fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
}
fmt.Printf("}\n\n")
}
}
// interval is a half-open interval [low, high).
type interval struct {
low, high int
}
func (i interval) len() int { return i.high - i.low }
// byDecreasingLength sorts intervals by decreasing length.
type byDecreasingLength []interval
func (b byDecreasingLength) Len() int { return len(b) }
func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
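
A toy sketch of the interval-splitting step above: non-zero entries in a sparse reverse map are grouped into half-open intervals, and a new interval starts whenever the gap since the last non-zero entry reaches the separation threshold (the same loop, just with small data and a small separation).

package main

import "fmt"

type interval struct{ low, high int }

func main() {
	const separation = 4
	reverse := make([]uint16, 40)
	for _, i := range []int{3, 5, 6, 20, 21, 30} {
		reverse[i] = 1
	}

	var intervals []interval
	low, high := -1, -1
	for i, v := range reverse {
		if v == 0 {
			continue
		}
		if low < 0 {
			low = i
		} else if i-high >= separation {
			// Gap is large enough: close the current interval.
			intervals = append(intervals, interval{low, high})
			low = i
		}
		high = i + 1
	}
	if high >= 0 {
		intervals = append(intervals, interval{low, high})
	}
	fmt.Println(intervals) // [{3 7} {20 22} {30 31}]
}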

File diff suppressed because it is too large

View file

@ -0,0 +1,198 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package traditionalchinese
import (
"errors"
"unicode/utf8"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/transform"
)
// All is a list of all defined encodings in this package.
var All = []encoding.Encoding{Big5}
// Big5 is the Big5 encoding, also known as Code Page 950.
var Big5 encoding.Encoding = &big5
var big5 = internal.Encoding{
&internal.SimpleEncoding{big5Decoder{}, big5Encoder{}},
"Big5",
identifier.Big5,
}
var errInvalidBig5 = errors.New("traditionalchinese: invalid Big5 encoding")
type big5Decoder struct{ transform.NopResetter }
func (big5Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size, s := rune(0), 0, ""
loop:
for ; nSrc < len(src); nSrc += size {
switch c0 := src[nSrc]; {
case c0 < utf8.RuneSelf:
r, size = rune(c0), 1
case 0x81 <= c0 && c0 < 0xff:
if nSrc+1 >= len(src) {
err = transform.ErrShortSrc
break loop
}
c1 := src[nSrc+1]
switch {
case 0x40 <= c1 && c1 < 0x7f:
c1 -= 0x40
case 0xa1 <= c1 && c1 < 0xff:
c1 -= 0x62
default:
err = errInvalidBig5
break loop
}
r, size = '\ufffd', 2
if i := int(c0-0x81)*157 + int(c1); i < len(decode) {
if 1133 <= i && i < 1167 {
// The two-rune special cases for LATIN CAPITAL / SMALL E WITH CIRCUMFLEX
// AND MACRON / CARON are from http://encoding.spec.whatwg.org/#big5
switch i {
case 1133:
s = "\u00CA\u0304"
goto writeStr
case 1135:
s = "\u00CA\u030C"
goto writeStr
case 1164:
s = "\u00EA\u0304"
goto writeStr
case 1166:
s = "\u00EA\u030C"
goto writeStr
}
}
r = rune(decode[i])
if r == 0 {
r = '\ufffd'
}
}
default:
err = errInvalidBig5
break loop
}
if nDst+utf8.RuneLen(r) > len(dst) {
err = transform.ErrShortDst
break loop
}
nDst += utf8.EncodeRune(dst[nDst:], r)
continue loop
writeStr:
if nDst+len(s) > len(dst) {
err = transform.ErrShortDst
break loop
}
nDst += copy(dst[nDst:], s)
continue loop
}
if atEOF && err == transform.ErrShortSrc {
err = errInvalidBig5
}
return nDst, nSrc, err
}
type big5Encoder struct{ transform.NopResetter }
func (big5Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
for ; nSrc < len(src); nSrc += size {
r = rune(src[nSrc])
// Decode a 1-byte rune.
if r < utf8.RuneSelf {
size = 1
if nDst >= len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = uint8(r)
nDst++
continue
} else {
// Decode a multi-byte rune.
r, size = utf8.DecodeRune(src[nSrc:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
}
}
if r >= utf8.RuneSelf {
// func init checks that the switch covers all tables.
switch {
case encode0Low <= r && r < encode0High:
if r = rune(encode0[r-encode0Low]); r != 0 {
goto write2
}
case encode1Low <= r && r < encode1High:
if r = rune(encode1[r-encode1Low]); r != 0 {
goto write2
}
case encode2Low <= r && r < encode2High:
if r = rune(encode2[r-encode2Low]); r != 0 {
goto write2
}
case encode3Low <= r && r < encode3High:
if r = rune(encode3[r-encode3Low]); r != 0 {
goto write2
}
case encode4Low <= r && r < encode4High:
if r = rune(encode4[r-encode4Low]); r != 0 {
goto write2
}
case encode5Low <= r && r < encode5High:
if r = rune(encode5[r-encode5Low]); r != 0 {
goto write2
}
case encode6Low <= r && r < encode6High:
if r = rune(encode6[r-encode6Low]); r != 0 {
goto write2
}
case encode7Low <= r && r < encode7High:
if r = rune(encode7[r-encode7Low]); r != 0 {
goto write2
}
}
err = internal.ErrASCIIReplacement
break
}
write2:
if nDst+2 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = uint8(r >> 8)
dst[nDst+1] = uint8(r)
nDst += 2
continue
}
return nDst, nSrc, err
}
func init() {
// Check that the hard-coded encode switch covers all tables.
if numEncodeTables != 8 {
panic("bad numEncodeTables")
}
}
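
A minimal Big5 round trip using the Big5 value declared above. The last call exercises one of the two-rune special cases handled by the writeStr path: the byte pair 0x88 0x62 maps to index (0x88-0x81)*157 + (0x62-0x40) = 1133, which decodes to U+00CA U+0304 rather than a single rune.

package main

import (
	"fmt"

	"golang.org/x/text/encoding/traditionalchinese"
	"golang.org/x/text/transform"
)

func main() {
	enc := traditionalchinese.Big5.NewEncoder()
	b, _, err := transform.Bytes(enc, []byte("Big5: 繁體"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded: % x\n", b)

	dec := traditionalchinese.Big5.NewDecoder()
	s, _, err := transform.String(dec, string(b))
	fmt.Printf("decoded: %s %v\n", s, err)

	// Two-rune special case: LATIN CAPITAL LETTER E WITH CIRCUMFLEX + COMBINING MACRON.
	special, _, _ := transform.String(traditionalchinese.Big5.NewDecoder(), "\x88\x62")
	fmt.Printf("special: %q\n", special) // U+00CA U+0304
}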

View file

@ -0,0 +1,140 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
// This program generates tables.go:
// go run maketables.go | gofmt > tables.go
import (
"bufio"
"fmt"
"log"
"net/http"
"sort"
"strings"
)
func main() {
fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
fmt.Printf("// Package traditionalchinese provides Traditional Chinese encodings such as Big5.\n")
fmt.Printf(`package traditionalchinese // import "golang.org/x/text/encoding/traditionalchinese"` + "\n\n")
res, err := http.Get("http://encoding.spec.whatwg.org/index-big5.txt")
if err != nil {
log.Fatalf("Get: %v", err)
}
defer res.Body.Close()
mapping := [65536]uint32{}
reverse := [65536 * 4]uint16{}
scanner := bufio.NewScanner(res.Body)
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
if s == "" || s[0] == '#' {
continue
}
x, y := uint16(0), uint32(0)
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
log.Fatalf("could not parse %q", s)
}
if x < 0 || 126*157 <= x {
log.Fatalf("Big5 code %d is out of range", x)
}
mapping[x] = y
// The WHATWG spec http://encoding.spec.whatwg.org/#indexes says that
// "The index pointer for code point in index is the first pointer
// corresponding to code point in index", which would normally mean
// that the code below should be guarded by "if reverse[y] == 0", but
// last instead of first seems to match the behavior of
// "iconv -f UTF-8 -t BIG5". For example, U+8005 者 occurs twice in
// http://encoding.spec.whatwg.org/index-big5.txt, as index 2148
// (encoded as "\x8e\xcd") and index 6543 (encoded as "\xaa\xcc")
// and "echo 者 | iconv -f UTF-8 -t BIG5 | xxd" gives "\xaa\xcc".
c0, c1 := x/157, x%157
if c1 < 0x3f {
c1 += 0x40
} else {
c1 += 0x62
}
reverse[y] = (0x81+c0)<<8 | c1
}
if err := scanner.Err(); err != nil {
log.Fatalf("scanner error: %v", err)
}
fmt.Printf("// decode is the decoding table from Big5 code to Unicode.\n")
fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-big5.txt\n")
fmt.Printf("var decode = [...]uint32{\n")
for i, v := range mapping {
if v != 0 {
fmt.Printf("\t%d: 0x%08X,\n", i, v)
}
}
fmt.Printf("}\n\n")
	// Any gap of at least `separation` consecutive zero entries in the reverse
	// map starts a separate encode table.
const separation = 1024
intervals := []interval(nil)
low, high := -1, -1
for i, v := range reverse {
if v == 0 {
continue
}
if low < 0 {
low = i
} else if i-high >= separation {
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
low = i
}
high = i + 1
}
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
sort.Sort(byDecreasingLength(intervals))
fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
fmt.Printf("// encodeX are the encoding tables from Unicode to Big5 code,\n")
fmt.Printf("// sorted by decreasing length.\n")
for i, v := range intervals {
fmt.Printf("// encode%d: %5d entries for runes in [%6d, %6d).\n", i, v.len(), v.low, v.high)
}
fmt.Printf("\n")
for i, v := range intervals {
fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
fmt.Printf("var encode%d = [...]uint16{\n", i)
for j := v.low; j < v.high; j++ {
x := reverse[j]
if x == 0 {
continue
}
fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
}
fmt.Printf("}\n\n")
}
}
// interval is a half-open interval [low, high).
type interval struct {
low, high int
}
func (i interval) len() int { return i.high - i.low }
// byDecreasingLength sorts intervals by decreasing length.
type byDecreasingLength []interval
func (b byDecreasingLength) Len() int { return len(b) }
func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }

File diff suppressed because it is too large

82
vendor/golang.org/x/text/encoding/unicode/override.go generated vendored Normal file
View file

@ -0,0 +1,82 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unicode
import (
"golang.org/x/text/transform"
)
// BOMOverride returns a new decoder transformer that is identical to fallback,
// except that the presence of a Byte Order Mark at the start of the input
// causes it to switch to the corresponding Unicode decoding. It will only
// consider BOMs for UTF-8, UTF-16BE, and UTF-16LE.
//
// This differs from using ExpectBOM by allowing a BOM to switch to UTF-8, not
// just UTF-16 variants, and allowing falling back to any encoding scheme.
//
// This technique is recommended by the W3C for use in HTML 5: "For
// compatibility with deployed content, the byte order mark (also known as BOM)
// is considered more authoritative than anything else."
// http://www.w3.org/TR/encoding/#specification-hooks
//
// Using BOMOverride is mostly intended for use cases where the first characters
// of a fallback encoding are known to not be a BOM, for example, for valid HTML
// and most encodings.
func BOMOverride(fallback transform.Transformer) transform.Transformer {
// TODO: possibly allow a variadic argument of unicode encodings to allow
// specifying details of which fallbacks are supported as well as
// specifying the details of the implementations. This would also allow for
// support for UTF-32, which should not be supported by default.
return &bomOverride{fallback: fallback}
}
type bomOverride struct {
fallback transform.Transformer
current transform.Transformer
}
func (d *bomOverride) Reset() {
d.current = nil
d.fallback.Reset()
}
var (
// TODO: we could use decode functions here, instead of allocating a new
// decoder on every NewDecoder as IgnoreBOM decoders can be stateless.
utf16le = UTF16(LittleEndian, IgnoreBOM)
utf16be = UTF16(BigEndian, IgnoreBOM)
)
const utf8BOM = "\ufeff"
func (d *bomOverride) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if d.current != nil {
return d.current.Transform(dst, src, atEOF)
}
if len(src) < 3 && !atEOF {
return 0, 0, transform.ErrShortSrc
}
d.current = d.fallback
bomSize := 0
if len(src) >= 2 {
if src[0] == 0xFF && src[1] == 0xFE {
d.current = utf16le.NewDecoder()
bomSize = 2
} else if src[0] == 0xFE && src[1] == 0xFF {
d.current = utf16be.NewDecoder()
bomSize = 2
} else if len(src) >= 3 &&
src[0] == utf8BOM[0] &&
src[1] == utf8BOM[1] &&
src[2] == utf8BOM[2] {
d.current = transform.Nop
bomSize = 3
}
}
if bomSize < len(src) {
nDst, nSrc, err = d.current.Transform(dst, src[bomSize:], atEOF)
}
return nDst, nSrc + bomSize, err
}
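
A sketch of BOMOverride in use. With transform.Nop as the fallback, input without a BOM is passed through untouched (i.e. assumed to already be UTF-8), while a leading UTF-16 or UTF-8 BOM selects the corresponding decoder and is stripped.

package main

import (
	"fmt"

	"golang.org/x/text/encoding/unicode"
	"golang.org/x/text/transform"
)

func main() {
	// UTF-16LE input with a BOM (FF FE): the override switches to the
	// UTF-16LE decoder and drops the BOM.
	utf16le := string([]byte{0xFF, 0xFE, 'h', 0x00, 'i', 0x00})
	s, _, err := transform.String(unicode.BOMOverride(transform.Nop), utf16le)
	fmt.Printf("%q %v\n", s, err) // "hi" <nil>

	// No BOM: the fallback transformer (here a no-op) handles the input.
	s, _, err = transform.String(unicode.BOMOverride(transform.Nop), "plain UTF-8")
	fmt.Printf("%q %v\n", s, err) // "plain UTF-8" <nil>
}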

434
vendor/golang.org/x/text/encoding/unicode/unicode.go generated vendored Normal file
View file

@ -0,0 +1,434 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package unicode provides Unicode encodings such as UTF-16.
package unicode // import "golang.org/x/text/encoding/unicode"
import (
"errors"
"unicode/utf16"
"unicode/utf8"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/internal/utf8internal"
"golang.org/x/text/runes"
"golang.org/x/text/transform"
)
// TODO: I think the Transformers really should return errors on unmatched
// surrogate pairs and odd numbers of bytes. This is not required by RFC 2781,
// which leaves it open, but is suggested by WhatWG. It will allow for all error
// modes as defined by WhatWG: fatal, HTML and Replacement. This would require
// the introduction of some kind of error type for conveying the erroneous code
// point.
// UTF8 is the UTF-8 encoding.
var UTF8 encoding.Encoding = utf8enc
var utf8enc = &internal.Encoding{
&internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()},
"UTF-8",
identifier.UTF8,
}
type utf8Decoder struct{ transform.NopResetter }
func (utf8Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
var pSrc int // point from which to start copy in src
var accept utf8internal.AcceptRange
// The decoder can only make the input larger, not smaller.
n := len(src)
if len(dst) < n {
err = transform.ErrShortDst
n = len(dst)
atEOF = false
}
for nSrc < n {
c := src[nSrc]
if c < utf8.RuneSelf {
nSrc++
continue
}
first := utf8internal.First[c]
size := int(first & utf8internal.SizeMask)
if first == utf8internal.FirstInvalid {
goto handleInvalid // invalid starter byte
}
accept = utf8internal.AcceptRanges[first>>utf8internal.AcceptShift]
if nSrc+size > n {
if !atEOF {
// We may stop earlier than necessary here if the short sequence
// has invalid bytes. Not checking for this simplifies the code
// and may avoid duplicate computations in certain conditions.
if err == nil {
err = transform.ErrShortSrc
}
break
}
// Determine the maximal subpart of an ill-formed subsequence.
switch {
case nSrc+1 >= n || src[nSrc+1] < accept.Lo || accept.Hi < src[nSrc+1]:
size = 1
case nSrc+2 >= n || src[nSrc+2] < utf8internal.LoCB || utf8internal.HiCB < src[nSrc+2]:
size = 2
default:
size = 3 // As we are short, the maximum is 3.
}
goto handleInvalid
}
if c = src[nSrc+1]; c < accept.Lo || accept.Hi < c {
size = 1
goto handleInvalid // invalid continuation byte
} else if size == 2 {
} else if c = src[nSrc+2]; c < utf8internal.LoCB || utf8internal.HiCB < c {
size = 2
goto handleInvalid // invalid continuation byte
} else if size == 3 {
} else if c = src[nSrc+3]; c < utf8internal.LoCB || utf8internal.HiCB < c {
size = 3
goto handleInvalid // invalid continuation byte
}
nSrc += size
continue
handleInvalid:
// Copy the scanned input so far.
nDst += copy(dst[nDst:], src[pSrc:nSrc])
// Append RuneError to the destination.
const runeError = "\ufffd"
if nDst+len(runeError) > len(dst) {
return nDst, nSrc, transform.ErrShortDst
}
nDst += copy(dst[nDst:], runeError)
// Skip the maximal subpart of an ill-formed subsequence according to
// the W3C standard way instead of the Go way. This Transform is
// probably the only place in the text repo where it is warranted.
nSrc += size
pSrc = nSrc
// Recompute the maximum source length.
if sz := len(dst) - nDst; sz < len(src)-nSrc {
err = transform.ErrShortDst
n = nSrc + sz
atEOF = false
}
}
return nDst + copy(dst[nDst:], src[pSrc:nSrc]), nSrc, err
}
// UTF16 returns a UTF-16 Encoding for the given default endianness and byte
// order mark (BOM) policy.
//
// When decoding from UTF-16 to UTF-8, if the BOMPolicy is IgnoreBOM then
// neither BOMs U+FEFF nor noncharacters U+FFFE in the input stream will affect
// the endianness used for decoding, and will instead be output as their
// standard UTF-8 encodings: "\xef\xbb\xbf" and "\xef\xbf\xbe". If the BOMPolicy
// is UseBOM or ExpectBOM a starting BOM is not written to the UTF-8 output.
// Instead, it overrides the default endianness e for the remainder of the
// transformation. Any subsequent BOMs U+FEFF or noncharacters U+FFFE will not
// affect the endianness used, and will instead be output as their standard
// UTF-8 encodings. For UseBOM, if there is no starting BOM, it will proceed
// with the default Endianness. For ExpectBOM, in that case, the transformation
// will return early with an ErrMissingBOM error.
//
// When encoding from UTF-8 to UTF-16, a BOM will be inserted at the start of
// the output if the BOMPolicy is UseBOM or ExpectBOM. Otherwise, a BOM will not
// be inserted. The UTF-8 input does not need to contain a BOM.
//
// There is no concept of a 'native' endianness. If the UTF-16 data is produced
// and consumed in a greater context that implies a certain endianness, use
// IgnoreBOM. Otherwise, use ExpectBOM and always produce and consume a BOM.
//
// In the language of http://www.unicode.org/faq/utf_bom.html#bom10, IgnoreBOM
// corresponds to "Where the precise type of the data stream is known... the
// BOM should not be used" and ExpectBOM corresponds to "A particular
// protocol... may require use of the BOM".
func UTF16(e Endianness, b BOMPolicy) encoding.Encoding {
return utf16Encoding{config{e, b}, mibValue[e][b&bomMask]}
}
// mibValue maps Endianness and BOMPolicy settings to MIB constants. Note that
// some configurations map to the same MIB identifier. RFC 2781 has requirements
// and recommendations. Some of the "configurations" are merely recommendations,
// so multiple configurations could match.
var mibValue = map[Endianness][numBOMValues]identifier.MIB{
BigEndian: [numBOMValues]identifier.MIB{
IgnoreBOM: identifier.UTF16BE,
		UseBOM: identifier.UTF16, // BigEndian default is preferred by RFC 2781.
// TODO: acceptBOM | strictBOM would map to UTF16BE as well.
},
LittleEndian: [numBOMValues]identifier.MIB{
IgnoreBOM: identifier.UTF16LE,
UseBOM: identifier.UTF16, // LittleEndian default is allowed and preferred on Windows.
// TODO: acceptBOM | strictBOM would map to UTF16LE as well.
},
// ExpectBOM is not widely used and has no valid MIB identifier.
}
// All lists UTF-8 and a configuration for each IANA-defined UTF-16 variant.
var All = []encoding.Encoding{
UTF8,
UTF16(BigEndian, UseBOM),
UTF16(BigEndian, IgnoreBOM),
UTF16(LittleEndian, IgnoreBOM),
}
// BOMPolicy is a UTF-16 encoding's byte order mark policy.
type BOMPolicy uint8
const (
writeBOM BOMPolicy = 0x01
acceptBOM BOMPolicy = 0x02
requireBOM BOMPolicy = 0x04
bomMask BOMPolicy = 0x07
// HACK: numBOMValues == 8 triggers a bug in the 1.4 compiler (cannot have a
// map of an array of length 8 of a type that is also used as a key or value
// in another map). See golang.org/issue/11354.
// TODO: consider changing this value back to 8 if the use of 1.4.* has
// been minimized.
numBOMValues = 8 + 1
// IgnoreBOM means to ignore any byte order marks.
IgnoreBOM BOMPolicy = 0
// Common and RFC 2781-compliant interpretation for UTF-16BE/LE.
// UseBOM means that the UTF-16 form may start with a byte order mark, which
// will be used to override the default encoding.
UseBOM BOMPolicy = writeBOM | acceptBOM
// Common and RFC 2781-compliant interpretation for UTF-16.
// ExpectBOM means that the UTF-16 form must start with a byte order mark,
// which will be used to override the default encoding.
ExpectBOM BOMPolicy = writeBOM | acceptBOM | requireBOM
// Used in Java as Unicode (not to be confused with Java's UTF-16) and
// ICU's UTF-16,version=1. Not compliant with RFC 2781.
// TODO (maybe): strictBOM: BOM must match Endianness. This would allow:
// - UTF-16(B|L)E,version=1: writeBOM | acceptBOM | requireBOM | strictBOM
// (UnicodeBig and UnicodeLittle in Java)
// - RFC 2781-compliant, but less common interpretation for UTF-16(B|L)E:
// acceptBOM | strictBOM (e.g. assigned to CheckBOM).
// This addition would be consistent with supporting ExpectBOM.
)
// Endianness is a UTF-16 encoding's default endianness.
type Endianness bool
const (
// BigEndian is UTF-16BE.
BigEndian Endianness = false
// LittleEndian is UTF-16LE.
LittleEndian Endianness = true
)
// ErrMissingBOM means that decoding UTF-16 input with ExpectBOM did not find a
// starting byte order mark.
var ErrMissingBOM = errors.New("encoding: missing byte order mark")
type utf16Encoding struct {
config
mib identifier.MIB
}
type config struct {
endianness Endianness
bomPolicy BOMPolicy
}
func (u utf16Encoding) NewDecoder() *encoding.Decoder {
return &encoding.Decoder{Transformer: &utf16Decoder{
initial: u.config,
current: u.config,
}}
}
func (u utf16Encoding) NewEncoder() *encoding.Encoder {
return &encoding.Encoder{Transformer: &utf16Encoder{
endianness: u.endianness,
initialBOMPolicy: u.bomPolicy,
currentBOMPolicy: u.bomPolicy,
}}
}
func (u utf16Encoding) ID() (mib identifier.MIB, other string) {
return u.mib, ""
}
func (u utf16Encoding) String() string {
e, b := "B", ""
if u.endianness == LittleEndian {
e = "L"
}
switch u.bomPolicy {
case ExpectBOM:
b = "Expect"
case UseBOM:
b = "Use"
case IgnoreBOM:
b = "Ignore"
}
return "UTF-16" + e + "E (" + b + " BOM)"
}
type utf16Decoder struct {
initial config
current config
}
func (u *utf16Decoder) Reset() {
u.current = u.initial
}
func (u *utf16Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if len(src) == 0 {
if atEOF && u.current.bomPolicy&requireBOM != 0 {
return 0, 0, ErrMissingBOM
}
return 0, 0, nil
}
if u.current.bomPolicy&acceptBOM != 0 {
if len(src) < 2 {
return 0, 0, transform.ErrShortSrc
}
switch {
case src[0] == 0xfe && src[1] == 0xff:
u.current.endianness = BigEndian
nSrc = 2
case src[0] == 0xff && src[1] == 0xfe:
u.current.endianness = LittleEndian
nSrc = 2
default:
if u.current.bomPolicy&requireBOM != 0 {
return 0, 0, ErrMissingBOM
}
}
u.current.bomPolicy = IgnoreBOM
}
var r rune
var dSize, sSize int
for nSrc < len(src) {
if nSrc+1 < len(src) {
x := uint16(src[nSrc+0])<<8 | uint16(src[nSrc+1])
if u.current.endianness == LittleEndian {
x = x>>8 | x<<8
}
r, sSize = rune(x), 2
if utf16.IsSurrogate(r) {
if nSrc+3 < len(src) {
x = uint16(src[nSrc+2])<<8 | uint16(src[nSrc+3])
if u.current.endianness == LittleEndian {
x = x>>8 | x<<8
}
// Save for next iteration if it is not a high surrogate.
if isHighSurrogate(rune(x)) {
r, sSize = utf16.DecodeRune(r, rune(x)), 4
}
} else if !atEOF {
err = transform.ErrShortSrc
break
}
}
if dSize = utf8.RuneLen(r); dSize < 0 {
r, dSize = utf8.RuneError, 3
}
} else if atEOF {
// Single trailing byte.
r, dSize, sSize = utf8.RuneError, 3, 1
} else {
err = transform.ErrShortSrc
break
}
if nDst+dSize > len(dst) {
err = transform.ErrShortDst
break
}
nDst += utf8.EncodeRune(dst[nDst:], r)
nSrc += sSize
}
return nDst, nSrc, err
}
func isHighSurrogate(r rune) bool {
return 0xDC00 <= r && r <= 0xDFFF
}
type utf16Encoder struct {
endianness Endianness
initialBOMPolicy BOMPolicy
currentBOMPolicy BOMPolicy
}
func (u *utf16Encoder) Reset() {
u.currentBOMPolicy = u.initialBOMPolicy
}
func (u *utf16Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if u.currentBOMPolicy&writeBOM != 0 {
if len(dst) < 2 {
return 0, 0, transform.ErrShortDst
}
dst[0], dst[1] = 0xfe, 0xff
u.currentBOMPolicy = IgnoreBOM
nDst = 2
}
r, size := rune(0), 0
for nSrc < len(src) {
r = rune(src[nSrc])
// Decode a 1-byte rune.
if r < utf8.RuneSelf {
size = 1
} else {
// Decode a multi-byte rune.
r, size = utf8.DecodeRune(src[nSrc:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
}
}
if r <= 0xffff {
if nDst+2 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = uint8(r >> 8)
dst[nDst+1] = uint8(r)
nDst += 2
} else {
if nDst+4 > len(dst) {
err = transform.ErrShortDst
break
}
r1, r2 := utf16.EncodeRune(r)
dst[nDst+0] = uint8(r1 >> 8)
dst[nDst+1] = uint8(r1)
dst[nDst+2] = uint8(r2 >> 8)
dst[nDst+3] = uint8(r2)
nDst += 4
}
nSrc += size
}
if u.endianness == LittleEndian {
for i := 0; i < nDst; i += 2 {
dst[i], dst[i+1] = dst[i+1], dst[i]
}
}
return nDst, nSrc, err
}
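
A short round trip through the UTF-16 codec described above. With UseBOM the encoder writes a BOM first (and byte-swaps the whole output for little endian at the end), and the decoder lets a leading BOM override its default endianness without copying it to the UTF-8 output.

package main

import (
	"fmt"

	"golang.org/x/text/encoding/unicode"
	"golang.org/x/text/transform"
)

func main() {
	// Encode to UTF-16LE with a leading BOM.
	enc := unicode.UTF16(unicode.LittleEndian, unicode.UseBOM).NewEncoder()
	b, _, err := transform.Bytes(enc, []byte("hi"))
	fmt.Printf("% x %v\n", b, err) // ff fe 68 00 69 00 <nil>

	// Decode with a big-endian default: the BOM switches it to little endian.
	dec := unicode.UTF16(unicode.BigEndian, unicode.UseBOM).NewDecoder()
	s, _, err := transform.String(dec, string(b))
	fmt.Printf("%q %v\n", s, err) // "hi" <nil>
}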

100
vendor/golang.org/x/text/internal/tag/tag.go generated vendored Normal file
View file

@ -0,0 +1,100 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tag contains functionality handling tags and related data.
package tag // import "golang.org/x/text/internal/tag"
import "sort"
// An Index converts tags to a compact numeric value.
//
// All elements are of size 4. Tags may be up to 4 bytes long. Excess bytes can
// be used to store additional information about the tag.
type Index string
// Elem returns the element data at the given index.
func (s Index) Elem(x int) string {
return string(s[x*4 : x*4+4])
}
// Index reports the index of the given key or -1 if it could not be found.
// Only the first len(key) bytes from the start of the 4-byte entries will be
// considered for the search and the first match in Index will be returned.
func (s Index) Index(key []byte) int {
n := len(key)
// search the index of the first entry with an equal or higher value than
// key in s.
index := sort.Search(len(s)/4, func(i int) bool {
return cmp(s[i*4:i*4+n], key) != -1
})
i := index * 4
if cmp(s[i:i+len(key)], key) != 0 {
return -1
}
return index
}
// Next finds the next occurrence of key after index x, which must have been
// obtained from a call to Index using the same key. It returns x+1 or -1.
func (s Index) Next(key []byte, x int) int {
if x++; x*4 < len(s) && cmp(s[x*4:x*4+len(key)], key) == 0 {
return x
}
return -1
}
// cmp returns an integer comparing a and b lexicographically.
func cmp(a Index, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
for i, c := range b[:n] {
switch {
case a[i] > c:
return 1
case a[i] < c:
return -1
}
}
switch {
case len(a) < len(b):
return -1
case len(a) > len(b):
return 1
}
return 0
}
// Compare returns an integer comparing a and b lexicographically.
func Compare(a string, b []byte) int {
return cmp(Index(a), b)
}
// FixCase reformats b to the same pattern of cases as form.
// It returns false if string b is malformed.
func FixCase(form string, b []byte) bool {
if len(form) != len(b) {
return false
}
for i, c := range b {
if form[i] <= 'Z' {
if c >= 'a' {
c -= 'z' - 'Z'
}
if c < 'A' || 'Z' < c {
return false
}
} else {
if c <= 'Z' {
c += 'z' - 'Z'
}
if c < 'a' || 'z' < c {
return false
}
}
b[i] = c
}
return true
}
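
The tag package is internal and cannot be imported from outside the text repo, but the lookup it describes is easy to sketch on its own: entries are fixed 4-byte strings kept in sorted order, and Index binary-searches on the first len(key) bytes of each entry. A self-contained re-sketch of that idea (illustration only, not the package itself):

package main

import (
	"fmt"
	"sort"
)

// index mimics tag.Index: a sorted string of fixed 4-byte entries, where the
// bytes after the key may carry extra data about the tag.
type index string

func (s index) elem(i int) string { return string(s[i*4 : i*4+4]) }

// find returns the position of the first entry starting with key, or -1.
func (s index) find(key string) int {
	n := len(key)
	i := sort.Search(len(s)/4, func(i int) bool { return string(s[i*4:i*4+n]) >= key })
	if i*4+n <= len(s) && string(s[i*4:i*4+n]) == key {
		return i
	}
	return -1
}

func main() {
	const entries = index("de  en  fr  nl  ")
	fmt.Printf("%d %q\n", entries.find("en"), entries.elem(1)) // 1 "en  "
	fmt.Println(entries.find("zz"))                            // -1
}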

View file

@ -0,0 +1,87 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package utf8internal contains low-level utf8-related constants, tables, etc.
// that are used internally by the text package.
package utf8internal
// The default lowest and highest continuation byte.
const (
LoCB = 0x80 // 1000 0000
HiCB = 0xBF // 1011 1111
)
// Constants related to getting information of first bytes of UTF-8 sequences.
const (
// ASCII identifies a UTF-8 byte as ASCII.
ASCII = as
// FirstInvalid indicates a byte is invalid as a first byte of a UTF-8
// sequence.
FirstInvalid = xx
	// SizeMask is a mask for the size bits. Use x&SizeMask to get the size.
SizeMask = 7
// AcceptShift is the right-shift count for the first byte info byte to get
// the index into the AcceptRanges table. See AcceptRanges.
AcceptShift = 4
// The names of these constants are chosen to give nice alignment in the
// table below. The first nibble is an index into acceptRanges or F for
// special one-byte cases. The second nibble is the Rune length or the
// Status for the special one-byte case.
xx = 0xF1 // invalid: size 1
as = 0xF0 // ASCII: size 1
s1 = 0x02 // accept 0, size 2
s2 = 0x13 // accept 1, size 3
s3 = 0x03 // accept 0, size 3
s4 = 0x23 // accept 2, size 3
s5 = 0x34 // accept 3, size 4
s6 = 0x04 // accept 0, size 4
s7 = 0x44 // accept 4, size 4
)
// First is information about the first byte in a UTF-8 sequence.
var First = [256]uint8{
// 1 2 3 4 5 6 7 8 9 A B C D E F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
// 1 2 3 4 5 6 7 8 9 A B C D E F
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
}
// AcceptRange gives the range of valid values for the second byte in a UTF-8
// sequence for any value for First that is not ASCII or FirstInvalid.
type AcceptRange struct {
Lo uint8 // lowest value for second byte.
Hi uint8 // highest value for second byte.
}
// AcceptRanges is a slice of AcceptRange values. For a given byte sequence b
//
// AcceptRanges[First[b[0]]>>AcceptShift]
//
// will give the value of AcceptRange for the multi-byte UTF-8 sequence starting
// at b[0].
var AcceptRanges = [...]AcceptRange{
0: {LoCB, HiCB},
1: {0xA0, HiCB},
2: {LoCB, 0x9F},
3: {0x90, HiCB},
4: {LoCB, 0x8F},
}
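
A small worked example of how these tables classify a multi-byte sequence, with the relevant values copied from First and AcceptRanges above (the package itself is internal and not importable from outside the text repo):

package main

import "fmt"

func main() {
	// From the tables above: First[0xE0] == s2 == 0x13, so the sequence is
	// 0x13&SizeMask == 3 bytes long and its second byte must lie in
	// AcceptRanges[0x13>>AcceptShift] == AcceptRanges[1] == {0xA0, 0xBF}.
	const (
		sizeMask    = 7
		acceptShift = 4
		firstE0     = 0x13
	)
	lo, hi := byte(0xA0), byte(0xBF)

	seq := []byte("\xe0\xa4\xb9") // U+0939, DEVANAGARI LETTER HA
	size := firstE0 & sizeMask
	idx := firstE0 >> acceptShift
	ok := len(seq) == size &&
		lo <= seq[1] && seq[1] <= hi && // special range for the second byte
		0x80 <= seq[2] && seq[2] <= 0xBF // LoCB..HiCB for the remaining bytes
	fmt.Println(size, idx, ok) // 3 1 true
}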

16
vendor/golang.org/x/text/language/Makefile generated vendored Normal file
View file

@ -0,0 +1,16 @@
# Copyright 2013 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
CLEANFILES+=maketables
maketables: maketables.go
go build $^
tables: maketables
./maketables > tables.go
gofmt -w -s tables.go
# Build (but do not run) maketables during testing,
# just to make sure it still compiles.
testshort: maketables

16
vendor/golang.org/x/text/language/common.go generated vendored Normal file
View file

@ -0,0 +1,16 @@
// This file was generated by go generate; DO NOT EDIT
package language
// This file contains code common to the maketables.go and the package code.
// langAliasType is the type of an alias in langAliasMap.
type langAliasType int8
const (
langDeprecated langAliasType = iota
langMacro
langLegacy
langAliasTypeUnknown langAliasType = -1
)

197
vendor/golang.org/x/text/language/coverage.go generated vendored Normal file
View file

@ -0,0 +1,197 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package language
import (
"fmt"
"sort"
)
// The Coverage interface is used to define the level of coverage of an
// internationalization service. Note that not all types are supported by all
// services. As lists may be generated on the fly, it is recommended that users
// of a Coverage cache the results.
type Coverage interface {
// Tags returns the list of supported tags.
Tags() []Tag
// BaseLanguages returns the list of supported base languages.
BaseLanguages() []Base
// Scripts returns the list of supported scripts.
Scripts() []Script
// Regions returns the list of supported regions.
Regions() []Region
}
var (
// Supported defines a Coverage that lists all supported subtags. Tags
// always returns nil.
Supported Coverage = allSubtags{}
)
// TODO:
// - Support Variants, numbering systems.
// - CLDR coverage levels.
// - Set of common tags defined in this package.
type allSubtags struct{}
// Regions returns the list of supported regions. As all regions are in a
// consecutive range, it simply returns a slice of numbers in increasing order.
// The "undefined" region is not returned.
func (s allSubtags) Regions() []Region {
reg := make([]Region, numRegions)
for i := range reg {
reg[i] = Region{regionID(i + 1)}
}
return reg
}
// Scripts returns the list of supported scripts. As all scripts are in a
// consecutive range, it simply returns a slice of numbers in increasing order.
// The "undefined" script is not returned.
func (s allSubtags) Scripts() []Script {
scr := make([]Script, numScripts)
for i := range scr {
scr[i] = Script{scriptID(i + 1)}
}
return scr
}
// BaseLanguages returns the list of all supported base languages. It generates
// the list by traversing the internal structures.
func (s allSubtags) BaseLanguages() []Base {
base := make([]Base, 0, numLanguages)
for i := 0; i < langNoIndexOffset; i++ {
// We included "und" already for the value 0.
if i != nonCanonicalUnd {
base = append(base, Base{langID(i)})
}
}
i := langNoIndexOffset
for _, v := range langNoIndex {
for k := 0; k < 8; k++ {
if v&1 == 1 {
base = append(base, Base{langID(i)})
}
v >>= 1
i++
}
}
return base
}
// Tags always returns nil.
func (s allSubtags) Tags() []Tag {
return nil
}
// coverage is used by NewCoverage, which provides a convenient way of creating
// Coverage implementations for partially defined data. Very often a package
// will only need to define a subset of slices; coverage makes that easy.
// Moreover, packages using NewCoverage, instead of their own implementation,
// will not break if new slice types are added later.
type coverage struct {
tags func() []Tag
bases func() []Base
scripts func() []Script
regions func() []Region
}
func (s *coverage) Tags() []Tag {
if s.tags == nil {
return nil
}
return s.tags()
}
// bases implements sort.Interface and is used to sort base languages.
type bases []Base
func (b bases) Len() int {
return len(b)
}
func (b bases) Swap(i, j int) {
b[i], b[j] = b[j], b[i]
}
func (b bases) Less(i, j int) bool {
return b[i].langID < b[j].langID
}
// BaseLanguages returns the result from calling s.bases if it is specified or
// otherwise derives the set of supported base languages from tags.
func (s *coverage) BaseLanguages() []Base {
if s.bases == nil {
tags := s.Tags()
if len(tags) == 0 {
return nil
}
a := make([]Base, len(tags))
for i, t := range tags {
a[i] = Base{langID(t.lang)}
}
sort.Sort(bases(a))
k := 0
for i := 1; i < len(a); i++ {
if a[k] != a[i] {
k++
a[k] = a[i]
}
}
return a[:k+1]
}
return s.bases()
}
func (s *coverage) Scripts() []Script {
if s.scripts == nil {
return nil
}
return s.scripts()
}
func (s *coverage) Regions() []Region {
if s.regions == nil {
return nil
}
return s.regions()
}
// NewCoverage returns a Coverage for the given lists. It is typically used by
// packages providing internationalization services to define their level of
// coverage. A list may be of type []T or func() []T, where T is either Tag,
// Base, Script or Region. The returned Coverage derives the value for Bases
// from Tags if no func or slice for []Base is specified. For other unspecified
// types the returned Coverage will return nil for the respective methods.
func NewCoverage(list ...interface{}) Coverage {
s := &coverage{}
for _, x := range list {
switch v := x.(type) {
case func() []Base:
s.bases = v
case func() []Script:
s.scripts = v
case func() []Region:
s.regions = v
case func() []Tag:
s.tags = v
case []Base:
s.bases = func() []Base { return v }
case []Script:
s.scripts = func() []Script { return v }
case []Region:
s.regions = func() []Region { return v }
case []Tag:
s.tags = func() []Tag { return v }
default:
panic(fmt.Sprintf("language: unsupported set type %T", v))
}
}
return s
}
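
A usage sketch of NewCoverage, assuming language.MustParse from the same package: a service that supports a handful of locales passes them as a []Tag, BaseLanguages is then derived from the tags, and Scripts stays nil because no script list was supplied.

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	c := language.NewCoverage([]language.Tag{
		language.MustParse("en-US"),
		language.MustParse("nl"),
		language.MustParse("pt-BR"),
	})
	fmt.Println(c.Tags())           // the three tags above
	fmt.Println(c.BaseLanguages())  // derived from the tags
	fmt.Println(c.Scripts() == nil) // true: no script list supplied
}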

20
vendor/golang.org/x/text/language/gen_common.go generated vendored Normal file
View file

@ -0,0 +1,20 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
// This file contains code common to the maketables.go and the package code.
// langAliasType is the type of an alias in langAliasMap.
type langAliasType int8
const (
langDeprecated langAliasType = iota
langMacro
langLegacy
langAliasTypeUnknown langAliasType = -1
)

162
vendor/golang.org/x/text/language/gen_index.go generated vendored Normal file
View file

@ -0,0 +1,162 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
// This file generates derivative tables based on the language package itself.
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"log"
"reflect"
"sort"
"strings"
"golang.org/x/text/internal/gen"
"golang.org/x/text/language"
"golang.org/x/text/unicode/cldr"
)
var (
test = flag.Bool("test", false,
"test existing tables; can be used to compare web data with package data.")
draft = flag.String("draft",
"contributed",
`Minimal draft requirements (approved, contributed, provisional, unconfirmed).`)
)
func main() {
gen.Init()
// Read the CLDR zip file.
r := gen.OpenCLDRCoreZip()
defer r.Close()
d := &cldr.Decoder{}
data, err := d.DecodeZip(r)
if err != nil {
log.Fatalf("DecodeZip: %v", err)
}
w := gen.NewCodeWriter()
defer func() {
buf := &bytes.Buffer{}
if _, err = w.WriteGo(buf, "language"); err != nil {
log.Fatalf("Error formatting file index.go: %v", err)
}
		// Since we're generating a table for our own package we need to rewrite
		// the output, doing the equivalent of go fmt -r 'language.b -> b'. Using
		// bytes.Replace will do.
out := bytes.Replace(buf.Bytes(), []byte("language."), nil, -1)
if err := ioutil.WriteFile("index.go", out, 0600); err != nil {
log.Fatalf("Could not create file index.go: %v", err)
}
}()
m := map[language.Tag]bool{}
for _, lang := range data.Locales() {
// We include all locales unconditionally to be consistent with en_US.
// We want en_US, even though it has no data associated with it.
// TODO: put any of the languages for which no data exists at the end
// of the index. This allows all components based on ICU to use that
// as the cutoff point.
// if x := data.RawLDML(lang); false ||
// x.LocaleDisplayNames != nil ||
// x.Characters != nil ||
// x.Delimiters != nil ||
// x.Measurement != nil ||
// x.Dates != nil ||
// x.Numbers != nil ||
// x.Units != nil ||
// x.ListPatterns != nil ||
// x.Collations != nil ||
// x.Segmentations != nil ||
// x.Rbnf != nil ||
// x.Annotations != nil ||
// x.Metadata != nil {
// TODO: support POSIX natively, albeit non-standard.
tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1))
m[tag] = true
// }
}
// Include locales for plural rules, which uses a different structure.
for _, plurals := range data.Supplemental().Plurals {
for _, rules := range plurals.PluralRules {
for _, lang := range strings.Split(rules.Locales, " ") {
m[language.Make(lang)] = true
}
}
}
var core, special []language.Tag
for t := range m {
if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" {
log.Fatalf("Unexpected extension %v in %v", x, t)
}
if len(t.Variants()) == 0 && len(t.Extensions()) == 0 {
core = append(core, t)
} else {
special = append(special, t)
}
}
w.WriteComment(`
NumCompactTags is the number of common tags. The maximum tag is
NumCompactTags-1.`)
w.WriteConst("NumCompactTags", len(core)+len(special))
sort.Sort(byAlpha(special))
w.WriteVar("specialTags", special)
// TODO: order by frequency?
sort.Sort(byAlpha(core))
// Size computations are just an estimate.
w.Size += int(reflect.TypeOf(map[uint32]uint16{}).Size())
w.Size += len(core) * 6 // size of uint32 and uint16
fmt.Fprintln(w)
fmt.Fprintln(w, "var coreTags = map[uint32]uint16{")
fmt.Fprintln(w, "0x0: 0, // und")
i := len(special) + 1 // Und and special tags already written.
for _, t := range core {
if t == language.Und {
continue
}
fmt.Fprint(w.Hash, t, i)
b, s, r := t.Raw()
fmt.Fprintf(w, "0x%s%s%s: %d, // %s\n",
getIndex(b, 3), // 3 is enough as it is guaranteed to be a compact number
getIndex(s, 2),
getIndex(r, 3),
i, t)
i++
}
fmt.Fprintln(w, "}")
}
// getIndex extracts the subtag's numeric index and formats it as n hex nibbles.
// If the index is less than n nibbles, the result is prefixed with 0s.
func getIndex(x interface{}, n int) string {
s := fmt.Sprintf("%#v", x) // s is of form Type{typeID: 0x00}
s = s[strings.Index(s, "0x")+2 : len(s)-1]
return strings.Repeat("0", n-len(s)) + s
}
type byAlpha []language.Tag
func (a byAlpha) Len() int { return len(a) }
func (a byAlpha) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byAlpha) Less(i, j int) bool { return a[i].String() < a[j].String() }
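
The coreTags keys emitted above pack the three compact subtag indices into one uint32: three hex nibbles of language index, two of script, three of region (getIndex pads each field to its nibble width). Unpacking a key is plain shifting and masking; for example 0x13400134 is en-US in the generated map (language 0x134, script 0x00, region 0x134).

package main

import "fmt"

func main() {
	// Key layout produced by the Fprintf above: LLL SS RRR (hex nibbles).
	key := uint32(0x13400134) // en-US in the generated coreTags map

	lang := key >> 20            // 0x134
	script := (key >> 12) & 0xff // 0x00
	region := key & 0xfff        // 0x134

	fmt.Printf("lang=%#x script=%#x region=%#x\n", lang, script, region)
}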

38
vendor/golang.org/x/text/language/go1_1.go generated vendored Normal file
View file

@ -0,0 +1,38 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.2
package language
import "sort"
func sortStable(s sort.Interface) {
ss := stableSort{
s: s,
pos: make([]int, s.Len()),
}
for i := range ss.pos {
ss.pos[i] = i
}
sort.Sort(&ss)
}
type stableSort struct {
s sort.Interface
pos []int
}
func (s *stableSort) Len() int {
return len(s.pos)
}
func (s *stableSort) Less(i, j int) bool {
return s.s.Less(i, j) || !s.s.Less(j, i) && s.pos[i] < s.pos[j]
}
func (s *stableSort) Swap(i, j int) {
s.s.Swap(i, j)
s.pos[i], s.pos[j] = s.pos[j], s.pos[i]
}

11
vendor/golang.org/x/text/language/go1_2.go generated vendored Normal file
View file

@ -0,0 +1,11 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.2
package language
import "sort"
var sortStable = sort.Stable

767
vendor/golang.org/x/text/language/index.go generated vendored Normal file
View file

@ -0,0 +1,767 @@
// This file was generated by go generate; DO NOT EDIT
package language
// NumCompactTags is the number of common tags. The maximum tag is
// NumCompactTags-1.
const NumCompactTags = 752
var specialTags = []Tag{ // 2 elements
0: {lang: 0xd5, region: 0x6d, script: 0x0, pVariant: 0x5, pExt: 0xe, str: "ca-ES-valencia"},
1: {lang: 0x134, region: 0x134, script: 0x0, pVariant: 0x5, pExt: 0x5, str: "en-US-u-va-posix"},
} // Size: 72 bytes
var coreTags = map[uint32]uint16{
0x0: 0, // und
0x01500000: 3, // af
0x015000d1: 4, // af-NA
0x01500160: 5, // af-ZA
0x01b00000: 6, // agq
0x01b00051: 7, // agq-CM
0x02000000: 8, // ak
0x0200007f: 9, // ak-GH
0x02600000: 10, // am
0x0260006e: 11, // am-ET
0x03900000: 12, // ar
0x03900001: 13, // ar-001
0x03900022: 14, // ar-AE
0x03900038: 15, // ar-BH
0x03900061: 16, // ar-DJ
0x03900066: 17, // ar-DZ
0x0390006a: 18, // ar-EG
0x0390006b: 19, // ar-EH
0x0390006c: 20, // ar-ER
0x03900096: 21, // ar-IL
0x0390009a: 22, // ar-IQ
0x039000a0: 23, // ar-JO
0x039000a7: 24, // ar-KM
0x039000ab: 25, // ar-KW
0x039000af: 26, // ar-LB
0x039000b8: 27, // ar-LY
0x039000b9: 28, // ar-MA
0x039000c8: 29, // ar-MR
0x039000e0: 30, // ar-OM
0x039000ec: 31, // ar-PS
0x039000f2: 32, // ar-QA
0x03900107: 33, // ar-SA
0x0390010a: 34, // ar-SD
0x03900114: 35, // ar-SO
0x03900116: 36, // ar-SS
0x0390011b: 37, // ar-SY
0x0390011f: 38, // ar-TD
0x03900127: 39, // ar-TN
0x0390015d: 40, // ar-YE
0x03f00000: 41, // ars
0x04200000: 42, // as
0x04200098: 43, // as-IN
0x04300000: 44, // asa
0x0430012e: 45, // asa-TZ
0x04700000: 46, // ast
0x0470006d: 47, // ast-ES
0x05700000: 48, // az
0x0571e000: 49, // az-Cyrl
0x0571e031: 50, // az-Cyrl-AZ
0x05752000: 51, // az-Latn
0x05752031: 52, // az-Latn-AZ
0x05d00000: 53, // bas
0x05d00051: 54, // bas-CM
0x07000000: 55, // be
0x07000046: 56, // be-BY
0x07400000: 57, // bem
0x07400161: 58, // bem-ZM
0x07800000: 59, // bez
0x0780012e: 60, // bez-TZ
0x07d00000: 61, // bg
0x07d00037: 62, // bg-BG
0x08100000: 63, // bh
0x09e00000: 64, // bm
0x09e000c2: 65, // bm-ML
0x0a300000: 66, // bn
0x0a300034: 67, // bn-BD
0x0a300098: 68, // bn-IN
0x0a700000: 69, // bo
0x0a700052: 70, // bo-CN
0x0a700098: 71, // bo-IN
0x0b000000: 72, // br
0x0b000077: 73, // br-FR
0x0b300000: 74, // brx
0x0b300098: 75, // brx-IN
0x0b500000: 76, // bs
0x0b51e000: 77, // bs-Cyrl
0x0b51e032: 78, // bs-Cyrl-BA
0x0b552000: 79, // bs-Latn
0x0b552032: 80, // bs-Latn-BA
0x0d500000: 81, // ca
0x0d500021: 82, // ca-AD
0x0d50006d: 83, // ca-ES
0x0d500077: 84, // ca-FR
0x0d50009d: 85, // ca-IT
0x0da00000: 86, // ce
0x0da00105: 87, // ce-RU
0x0dd00000: 88, // cgg
0x0dd00130: 89, // cgg-UG
0x0e300000: 90, // chr
0x0e300134: 91, // chr-US
0x0e700000: 92, // ckb
0x0e70009a: 93, // ckb-IQ
0x0e70009b: 94, // ckb-IR
0x0f600000: 95, // cs
0x0f60005d: 96, // cs-CZ
0x0fa00000: 97, // cu
0x0fa00105: 98, // cu-RU
0x0fc00000: 99, // cy
0x0fc0007a: 100, // cy-GB
0x0fd00000: 101, // da
0x0fd00062: 102, // da-DK
0x0fd00081: 103, // da-GL
0x10400000: 104, // dav
0x104000a3: 105, // dav-KE
0x10900000: 106, // de
0x1090002d: 107, // de-AT
0x10900035: 108, // de-BE
0x1090004d: 109, // de-CH
0x1090005f: 110, // de-DE
0x1090009d: 111, // de-IT
0x109000b1: 112, // de-LI
0x109000b6: 113, // de-LU
0x11300000: 114, // dje
0x113000d3: 115, // dje-NE
0x11b00000: 116, // dsb
0x11b0005f: 117, // dsb-DE
0x12000000: 118, // dua
0x12000051: 119, // dua-CM
0x12400000: 120, // dv
0x12700000: 121, // dyo
0x12700113: 122, // dyo-SN
0x12900000: 123, // dz
0x12900042: 124, // dz-BT
0x12b00000: 125, // ebu
0x12b000a3: 126, // ebu-KE
0x12c00000: 127, // ee
0x12c0007f: 128, // ee-GH
0x12c00121: 129, // ee-TG
0x13100000: 130, // el
0x1310005c: 131, // el-CY
0x13100086: 132, // el-GR
0x13400000: 133, // en
0x13400001: 134, // en-001
0x1340001a: 135, // en-150
0x13400024: 136, // en-AG
0x13400025: 137, // en-AI
0x1340002c: 138, // en-AS
0x1340002d: 139, // en-AT
0x1340002e: 140, // en-AU
0x13400033: 141, // en-BB
0x13400035: 142, // en-BE
0x13400039: 143, // en-BI
0x1340003c: 144, // en-BM
0x13400041: 145, // en-BS
0x13400045: 146, // en-BW
0x13400047: 147, // en-BZ
0x13400048: 148, // en-CA
0x13400049: 149, // en-CC
0x1340004d: 150, // en-CH
0x1340004f: 151, // en-CK
0x13400051: 152, // en-CM
0x1340005b: 153, // en-CX
0x1340005c: 154, // en-CY
0x1340005f: 155, // en-DE
0x13400060: 156, // en-DG
0x13400062: 157, // en-DK
0x13400063: 158, // en-DM
0x1340006c: 159, // en-ER
0x13400071: 160, // en-FI
0x13400072: 161, // en-FJ
0x13400073: 162, // en-FK
0x13400074: 163, // en-FM
0x1340007a: 164, // en-GB
0x1340007b: 165, // en-GD
0x1340007e: 166, // en-GG
0x1340007f: 167, // en-GH
0x13400080: 168, // en-GI
0x13400082: 169, // en-GM
0x13400089: 170, // en-GU
0x1340008b: 171, // en-GY
0x1340008c: 172, // en-HK
0x13400095: 173, // en-IE
0x13400096: 174, // en-IL
0x13400097: 175, // en-IM
0x13400098: 176, // en-IN
0x13400099: 177, // en-IO
0x1340009e: 178, // en-JE
0x1340009f: 179, // en-JM
0x134000a3: 180, // en-KE
0x134000a6: 181, // en-KI
0x134000a8: 182, // en-KN
0x134000ac: 183, // en-KY
0x134000b0: 184, // en-LC
0x134000b3: 185, // en-LR
0x134000b4: 186, // en-LS
0x134000be: 187, // en-MG
0x134000bf: 188, // en-MH
0x134000c5: 189, // en-MO
0x134000c6: 190, // en-MP
0x134000c9: 191, // en-MS
0x134000ca: 192, // en-MT
0x134000cb: 193, // en-MU
0x134000cd: 194, // en-MW
0x134000cf: 195, // en-MY
0x134000d1: 196, // en-NA
0x134000d4: 197, // en-NF
0x134000d5: 198, // en-NG
0x134000d8: 199, // en-NL
0x134000dc: 200, // en-NR
0x134000de: 201, // en-NU
0x134000df: 202, // en-NZ
0x134000e5: 203, // en-PG
0x134000e6: 204, // en-PH
0x134000e7: 205, // en-PK
0x134000ea: 206, // en-PN
0x134000eb: 207, // en-PR
0x134000ef: 208, // en-PW
0x13400106: 209, // en-RW
0x13400108: 210, // en-SB
0x13400109: 211, // en-SC
0x1340010a: 212, // en-SD
0x1340010b: 213, // en-SE
0x1340010c: 214, // en-SG
0x1340010d: 215, // en-SH
0x1340010e: 216, // en-SI
0x13400111: 217, // en-SL
0x13400116: 218, // en-SS
0x1340011a: 219, // en-SX
0x1340011c: 220, // en-SZ
0x1340011e: 221, // en-TC
0x13400124: 222, // en-TK
0x13400128: 223, // en-TO
0x1340012b: 224, // en-TT
0x1340012c: 225, // en-TV
0x1340012e: 226, // en-TZ
0x13400130: 227, // en-UG
0x13400132: 228, // en-UM
0x13400134: 229, // en-US
0x13400138: 230, // en-VC
0x1340013b: 231, // en-VG
0x1340013c: 232, // en-VI
0x1340013e: 233, // en-VU
0x13400141: 234, // en-WS
0x13400160: 235, // en-ZA
0x13400161: 236, // en-ZM
0x13400163: 237, // en-ZW
0x13700000: 238, // eo
0x13700001: 239, // eo-001
0x13900000: 240, // es
0x1390001e: 241, // es-419
0x1390002b: 242, // es-AR
0x1390003e: 243, // es-BO
0x13900040: 244, // es-BR
0x13900050: 245, // es-CL
0x13900053: 246, // es-CO
0x13900055: 247, // es-CR
0x13900058: 248, // es-CU
0x13900064: 249, // es-DO
0x13900067: 250, // es-EA
0x13900068: 251, // es-EC
0x1390006d: 252, // es-ES
0x13900085: 253, // es-GQ
0x13900088: 254, // es-GT
0x1390008e: 255, // es-HN
0x13900093: 256, // es-IC
0x139000ce: 257, // es-MX
0x139000d7: 258, // es-NI
0x139000e1: 259, // es-PA
0x139000e3: 260, // es-PE
0x139000e6: 261, // es-PH
0x139000eb: 262, // es-PR
0x139000f0: 263, // es-PY
0x13900119: 264, // es-SV
0x13900134: 265, // es-US
0x13900135: 266, // es-UY
0x1390013a: 267, // es-VE
0x13b00000: 268, // et
0x13b00069: 269, // et-EE
0x14000000: 270, // eu
0x1400006d: 271, // eu-ES
0x14100000: 272, // ewo
0x14100051: 273, // ewo-CM
0x14300000: 274, // fa
0x14300023: 275, // fa-AF
0x1430009b: 276, // fa-IR
0x14900000: 277, // ff
0x14900051: 278, // ff-CM
0x14900083: 279, // ff-GN
0x149000c8: 280, // ff-MR
0x14900113: 281, // ff-SN
0x14c00000: 282, // fi
0x14c00071: 283, // fi-FI
0x14e00000: 284, // fil
0x14e000e6: 285, // fil-PH
0x15300000: 286, // fo
0x15300062: 287, // fo-DK
0x15300075: 288, // fo-FO
0x15900000: 289, // fr
0x15900035: 290, // fr-BE
0x15900036: 291, // fr-BF
0x15900039: 292, // fr-BI
0x1590003a: 293, // fr-BJ
0x1590003b: 294, // fr-BL
0x15900048: 295, // fr-CA
0x1590004a: 296, // fr-CD
0x1590004b: 297, // fr-CF
0x1590004c: 298, // fr-CG
0x1590004d: 299, // fr-CH
0x1590004e: 300, // fr-CI
0x15900051: 301, // fr-CM
0x15900061: 302, // fr-DJ
0x15900066: 303, // fr-DZ
0x15900077: 304, // fr-FR
0x15900079: 305, // fr-GA
0x1590007d: 306, // fr-GF
0x15900083: 307, // fr-GN
0x15900084: 308, // fr-GP
0x15900085: 309, // fr-GQ
0x15900090: 310, // fr-HT
0x159000a7: 311, // fr-KM
0x159000b6: 312, // fr-LU
0x159000b9: 313, // fr-MA
0x159000ba: 314, // fr-MC
0x159000bd: 315, // fr-MF
0x159000be: 316, // fr-MG
0x159000c2: 317, // fr-ML
0x159000c7: 318, // fr-MQ
0x159000c8: 319, // fr-MR
0x159000cb: 320, // fr-MU
0x159000d2: 321, // fr-NC
0x159000d3: 322, // fr-NE
0x159000e4: 323, // fr-PF
0x159000e9: 324, // fr-PM
0x15900101: 325, // fr-RE
0x15900106: 326, // fr-RW
0x15900109: 327, // fr-SC
0x15900113: 328, // fr-SN
0x1590011b: 329, // fr-SY
0x1590011f: 330, // fr-TD
0x15900121: 331, // fr-TG
0x15900127: 332, // fr-TN
0x1590013e: 333, // fr-VU
0x1590013f: 334, // fr-WF
0x1590015e: 335, // fr-YT
0x16400000: 336, // fur
0x1640009d: 337, // fur-IT
0x16800000: 338, // fy
0x168000d8: 339, // fy-NL
0x16900000: 340, // ga
0x16900095: 341, // ga-IE
0x17800000: 342, // gd
0x1780007a: 343, // gd-GB
0x18a00000: 344, // gl
0x18a0006d: 345, // gl-ES
0x19c00000: 346, // gsw
0x19c0004d: 347, // gsw-CH
0x19c00077: 348, // gsw-FR
0x19c000b1: 349, // gsw-LI
0x19d00000: 350, // gu
0x19d00098: 351, // gu-IN
0x1a200000: 352, // guw
0x1a400000: 353, // guz
0x1a4000a3: 354, // guz-KE
0x1a500000: 355, // gv
0x1a500097: 356, // gv-IM
0x1ad00000: 357, // ha
0x1ad0007f: 358, // ha-GH
0x1ad000d3: 359, // ha-NE
0x1ad000d5: 360, // ha-NG
0x1b100000: 361, // haw
0x1b100134: 362, // haw-US
0x1b500000: 363, // he
0x1b500096: 364, // he-IL
0x1b700000: 365, // hi
0x1b700098: 366, // hi-IN
0x1ca00000: 367, // hr
0x1ca00032: 368, // hr-BA
0x1ca0008f: 369, // hr-HR
0x1cb00000: 370, // hsb
0x1cb0005f: 371, // hsb-DE
0x1ce00000: 372, // hu
0x1ce00091: 373, // hu-HU
0x1d000000: 374, // hy
0x1d000027: 375, // hy-AM
0x1da00000: 376, // id
0x1da00094: 377, // id-ID
0x1df00000: 378, // ig
0x1df000d5: 379, // ig-NG
0x1e200000: 380, // ii
0x1e200052: 381, // ii-CN
0x1f000000: 382, // is
0x1f00009c: 383, // is-IS
0x1f100000: 384, // it
0x1f10004d: 385, // it-CH
0x1f10009d: 386, // it-IT
0x1f100112: 387, // it-SM
0x1f200000: 388, // iu
0x1f800000: 389, // ja
0x1f8000a1: 390, // ja-JP
0x1fb00000: 391, // jbo
0x1ff00000: 392, // jgo
0x1ff00051: 393, // jgo-CM
0x20200000: 394, // jmc
0x2020012e: 395, // jmc-TZ
0x20600000: 396, // jv
0x20800000: 397, // ka
0x2080007c: 398, // ka-GE
0x20a00000: 399, // kab
0x20a00066: 400, // kab-DZ
0x20e00000: 401, // kaj
0x20f00000: 402, // kam
0x20f000a3: 403, // kam-KE
0x21700000: 404, // kcg
0x21b00000: 405, // kde
0x21b0012e: 406, // kde-TZ
0x21f00000: 407, // kea
0x21f00059: 408, // kea-CV
0x22c00000: 409, // khq
0x22c000c2: 410, // khq-ML
0x23100000: 411, // ki
0x231000a3: 412, // ki-KE
0x23a00000: 413, // kk
0x23a000ad: 414, // kk-KZ
0x23c00000: 415, // kkj
0x23c00051: 416, // kkj-CM
0x23d00000: 417, // kl
0x23d00081: 418, // kl-GL
0x23e00000: 419, // kln
0x23e000a3: 420, // kln-KE
0x24200000: 421, // km
0x242000a5: 422, // km-KH
0x24900000: 423, // kn
0x24900098: 424, // kn-IN
0x24b00000: 425, // ko
0x24b000a9: 426, // ko-KP
0x24b000aa: 427, // ko-KR
0x24d00000: 428, // kok
0x24d00098: 429, // kok-IN
0x26100000: 430, // ks
0x26100098: 431, // ks-IN
0x26200000: 432, // ksb
0x2620012e: 433, // ksb-TZ
0x26400000: 434, // ksf
0x26400051: 435, // ksf-CM
0x26500000: 436, // ksh
0x2650005f: 437, // ksh-DE
0x26b00000: 438, // ku
0x27800000: 439, // kw
0x2780007a: 440, // kw-GB
0x28100000: 441, // ky
0x281000a4: 442, // ky-KG
0x28800000: 443, // lag
0x2880012e: 444, // lag-TZ
0x28c00000: 445, // lb
0x28c000b6: 446, // lb-LU
0x29a00000: 447, // lg
0x29a00130: 448, // lg-UG
0x2a600000: 449, // lkt
0x2a600134: 450, // lkt-US
0x2ac00000: 451, // ln
0x2ac00029: 452, // ln-AO
0x2ac0004a: 453, // ln-CD
0x2ac0004b: 454, // ln-CF
0x2ac0004c: 455, // ln-CG
0x2af00000: 456, // lo
0x2af000ae: 457, // lo-LA
0x2b600000: 458, // lrc
0x2b60009a: 459, // lrc-IQ
0x2b60009b: 460, // lrc-IR
0x2b700000: 461, // lt
0x2b7000b5: 462, // lt-LT
0x2b900000: 463, // lu
0x2b90004a: 464, // lu-CD
0x2bb00000: 465, // luo
0x2bb000a3: 466, // luo-KE
0x2bc00000: 467, // luy
0x2bc000a3: 468, // luy-KE
0x2be00000: 469, // lv
0x2be000b7: 470, // lv-LV
0x2c800000: 471, // mas
0x2c8000a3: 472, // mas-KE
0x2c80012e: 473, // mas-TZ
0x2e000000: 474, // mer
0x2e0000a3: 475, // mer-KE
0x2e400000: 476, // mfe
0x2e4000cb: 477, // mfe-MU
0x2e800000: 478, // mg
0x2e8000be: 479, // mg-MG
0x2e900000: 480, // mgh
0x2e9000d0: 481, // mgh-MZ
0x2eb00000: 482, // mgo
0x2eb00051: 483, // mgo-CM
0x2f600000: 484, // mk
0x2f6000c1: 485, // mk-MK
0x2fb00000: 486, // ml
0x2fb00098: 487, // ml-IN
0x30200000: 488, // mn
0x302000c4: 489, // mn-MN
0x31200000: 490, // mr
0x31200098: 491, // mr-IN
0x31600000: 492, // ms
0x3160003d: 493, // ms-BN
0x316000cf: 494, // ms-MY
0x3160010c: 495, // ms-SG
0x31700000: 496, // mt
0x317000ca: 497, // mt-MT
0x31c00000: 498, // mua
0x31c00051: 499, // mua-CM
0x32800000: 500, // my
0x328000c3: 501, // my-MM
0x33100000: 502, // mzn
0x3310009b: 503, // mzn-IR
0x33800000: 504, // nah
0x33c00000: 505, // naq
0x33c000d1: 506, // naq-NA
0x33e00000: 507, // nb
0x33e000d9: 508, // nb-NO
0x33e0010f: 509, // nb-SJ
0x34500000: 510, // nd
0x34500163: 511, // nd-ZW
0x34700000: 512, // nds
0x3470005f: 513, // nds-DE
0x347000d8: 514, // nds-NL
0x34800000: 515, // ne
0x34800098: 516, // ne-IN
0x348000da: 517, // ne-NP
0x35e00000: 518, // nl
0x35e0002f: 519, // nl-AW
0x35e00035: 520, // nl-BE
0x35e0003f: 521, // nl-BQ
0x35e0005a: 522, // nl-CW
0x35e000d8: 523, // nl-NL
0x35e00115: 524, // nl-SR
0x35e0011a: 525, // nl-SX
0x35f00000: 526, // nmg
0x35f00051: 527, // nmg-CM
0x36100000: 528, // nn
0x361000d9: 529, // nn-NO
0x36300000: 530, // nnh
0x36300051: 531, // nnh-CM
0x36600000: 532, // no
0x36c00000: 533, // nqo
0x36d00000: 534, // nr
0x37100000: 535, // nso
0x37700000: 536, // nus
0x37700116: 537, // nus-SS
0x37e00000: 538, // ny
0x38000000: 539, // nyn
0x38000130: 540, // nyn-UG
0x38700000: 541, // om
0x3870006e: 542, // om-ET
0x387000a3: 543, // om-KE
0x38c00000: 544, // or
0x38c00098: 545, // or-IN
0x38f00000: 546, // os
0x38f0007c: 547, // os-GE
0x38f00105: 548, // os-RU
0x39400000: 549, // pa
0x39405000: 550, // pa-Arab
0x394050e7: 551, // pa-Arab-PK
0x3942f000: 552, // pa-Guru
0x3942f098: 553, // pa-Guru-IN
0x39800000: 554, // pap
0x3aa00000: 555, // pl
0x3aa000e8: 556, // pl-PL
0x3b400000: 557, // prg
0x3b400001: 558, // prg-001
0x3b500000: 559, // ps
0x3b500023: 560, // ps-AF
0x3b700000: 561, // pt
0x3b700029: 562, // pt-AO
0x3b700040: 563, // pt-BR
0x3b70004d: 564, // pt-CH
0x3b700059: 565, // pt-CV
0x3b700085: 566, // pt-GQ
0x3b70008a: 567, // pt-GW
0x3b7000b6: 568, // pt-LU
0x3b7000c5: 569, // pt-MO
0x3b7000d0: 570, // pt-MZ
0x3b7000ed: 571, // pt-PT
0x3b700117: 572, // pt-ST
0x3b700125: 573, // pt-TL
0x3bb00000: 574, // qu
0x3bb0003e: 575, // qu-BO
0x3bb00068: 576, // qu-EC
0x3bb000e3: 577, // qu-PE
0x3cb00000: 578, // rm
0x3cb0004d: 579, // rm-CH
0x3d000000: 580, // rn
0x3d000039: 581, // rn-BI
0x3d300000: 582, // ro
0x3d3000bb: 583, // ro-MD
0x3d300103: 584, // ro-RO
0x3d500000: 585, // rof
0x3d50012e: 586, // rof-TZ
0x3d900000: 587, // ru
0x3d900046: 588, // ru-BY
0x3d9000a4: 589, // ru-KG
0x3d9000ad: 590, // ru-KZ
0x3d9000bb: 591, // ru-MD
0x3d900105: 592, // ru-RU
0x3d90012f: 593, // ru-UA
0x3dc00000: 594, // rw
0x3dc00106: 595, // rw-RW
0x3dd00000: 596, // rwk
0x3dd0012e: 597, // rwk-TZ
0x3e200000: 598, // sah
0x3e200105: 599, // sah-RU
0x3e300000: 600, // saq
0x3e3000a3: 601, // saq-KE
0x3e900000: 602, // sbp
0x3e90012e: 603, // sbp-TZ
0x3f200000: 604, // sdh
0x3f300000: 605, // se
0x3f300071: 606, // se-FI
0x3f3000d9: 607, // se-NO
0x3f30010b: 608, // se-SE
0x3f500000: 609, // seh
0x3f5000d0: 610, // seh-MZ
0x3f700000: 611, // ses
0x3f7000c2: 612, // ses-ML
0x3f800000: 613, // sg
0x3f80004b: 614, // sg-CF
0x3fe00000: 615, // shi
0x3fe52000: 616, // shi-Latn
0x3fe520b9: 617, // shi-Latn-MA
0x3fed2000: 618, // shi-Tfng
0x3fed20b9: 619, // shi-Tfng-MA
0x40200000: 620, // si
0x402000b2: 621, // si-LK
0x40800000: 622, // sk
0x40800110: 623, // sk-SK
0x40c00000: 624, // sl
0x40c0010e: 625, // sl-SI
0x41200000: 626, // sma
0x41300000: 627, // smi
0x41400000: 628, // smj
0x41500000: 629, // smn
0x41500071: 630, // smn-FI
0x41800000: 631, // sms
0x41900000: 632, // sn
0x41900163: 633, // sn-ZW
0x41f00000: 634, // so
0x41f00061: 635, // so-DJ
0x41f0006e: 636, // so-ET
0x41f000a3: 637, // so-KE
0x41f00114: 638, // so-SO
0x42700000: 639, // sq
0x42700026: 640, // sq-AL
0x427000c1: 641, // sq-MK
0x4270014c: 642, // sq-XK
0x42800000: 643, // sr
0x4281e000: 644, // sr-Cyrl
0x4281e032: 645, // sr-Cyrl-BA
0x4281e0bc: 646, // sr-Cyrl-ME
0x4281e104: 647, // sr-Cyrl-RS
0x4281e14c: 648, // sr-Cyrl-XK
0x42852000: 649, // sr-Latn
0x42852032: 650, // sr-Latn-BA
0x428520bc: 651, // sr-Latn-ME
0x42852104: 652, // sr-Latn-RS
0x4285214c: 653, // sr-Latn-XK
0x42d00000: 654, // ss
0x43000000: 655, // ssy
0x43100000: 656, // st
0x43a00000: 657, // sv
0x43a00030: 658, // sv-AX
0x43a00071: 659, // sv-FI
0x43a0010b: 660, // sv-SE
0x43b00000: 661, // sw
0x43b0004a: 662, // sw-CD
0x43b000a3: 663, // sw-KE
0x43b0012e: 664, // sw-TZ
0x43b00130: 665, // sw-UG
0x44400000: 666, // syr
0x44600000: 667, // ta
0x44600098: 668, // ta-IN
0x446000b2: 669, // ta-LK
0x446000cf: 670, // ta-MY
0x4460010c: 671, // ta-SG
0x45700000: 672, // te
0x45700098: 673, // te-IN
0x45a00000: 674, // teo
0x45a000a3: 675, // teo-KE
0x45a00130: 676, // teo-UG
0x46100000: 677, // th
0x46100122: 678, // th-TH
0x46500000: 679, // ti
0x4650006c: 680, // ti-ER
0x4650006e: 681, // ti-ET
0x46700000: 682, // tig
0x46c00000: 683, // tk
0x46c00126: 684, // tk-TM
0x47600000: 685, // tn
0x47800000: 686, // to
0x47800128: 687, // to-TO
0x48000000: 688, // tr
0x4800005c: 689, // tr-CY
0x4800012a: 690, // tr-TR
0x48400000: 691, // ts
0x49a00000: 692, // twq
0x49a000d3: 693, // twq-NE
0x49f00000: 694, // tzm
0x49f000b9: 695, // tzm-MA
0x4a200000: 696, // ug
0x4a200052: 697, // ug-CN
0x4a400000: 698, // uk
0x4a40012f: 699, // uk-UA
0x4aa00000: 700, // ur
0x4aa00098: 701, // ur-IN
0x4aa000e7: 702, // ur-PK
0x4b200000: 703, // uz
0x4b205000: 704, // uz-Arab
0x4b205023: 705, // uz-Arab-AF
0x4b21e000: 706, // uz-Cyrl
0x4b21e136: 707, // uz-Cyrl-UZ
0x4b252000: 708, // uz-Latn
0x4b252136: 709, // uz-Latn-UZ
0x4b400000: 710, // vai
0x4b452000: 711, // vai-Latn
0x4b4520b3: 712, // vai-Latn-LR
0x4b4d9000: 713, // vai-Vaii
0x4b4d90b3: 714, // vai-Vaii-LR
0x4b600000: 715, // ve
0x4b900000: 716, // vi
0x4b90013d: 717, // vi-VN
0x4bf00000: 718, // vo
0x4bf00001: 719, // vo-001
0x4c200000: 720, // vun
0x4c20012e: 721, // vun-TZ
0x4c400000: 722, // wa
0x4c500000: 723, // wae
0x4c50004d: 724, // wae-CH
0x4db00000: 725, // wo
0x4e800000: 726, // xh
0x4f100000: 727, // xog
0x4f100130: 728, // xog-UG
0x4ff00000: 729, // yav
0x4ff00051: 730, // yav-CM
0x50800000: 731, // yi
0x50800001: 732, // yi-001
0x50e00000: 733, // yo
0x50e0003a: 734, // yo-BJ
0x50e000d5: 735, // yo-NG
0x51500000: 736, // yue
0x5150008c: 737, // yue-HK
0x51e00000: 738, // zgh
0x51e000b9: 739, // zgh-MA
0x51f00000: 740, // zh
0x51f34000: 741, // zh-Hans
0x51f34052: 742, // zh-Hans-CN
0x51f3408c: 743, // zh-Hans-HK
0x51f340c5: 744, // zh-Hans-MO
0x51f3410c: 745, // zh-Hans-SG
0x51f35000: 746, // zh-Hant
0x51f3508c: 747, // zh-Hant-HK
0x51f350c5: 748, // zh-Hant-MO
0x51f3512d: 749, // zh-Hant-TW
0x52400000: 750, // zu
0x52400160: 751, // zu-ZA
}
// Total table size 4580 bytes (4KiB); checksum: A7F72A2A

975
vendor/golang.org/x/text/language/language.go generated vendored Normal file

@ -0,0 +1,975 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run maketables.go gen_common.go -output tables.go
//go:generate go run gen_index.go
// Package language implements BCP 47 language tags and related functionality.
//
// The Tag type, which is used to represent languages, is agnostic to the
// meaning of its subtags. Tags are not fully canonicalized to preserve
// information that may be valuable in certain contexts. As a consequence, two
// different tags may represent identical languages.
//
// Initializing language- or locale-specific components usually consists of
// two steps. The first step is to select a display language based on the
// preferred languages of the user and the languages supported by an application.
// The second step is to create the language-specific services based on
// this selection. Each is discussed in more details below.
//
// Matching preferred against supported languages
//
// An application may support various languages. This list is typically limited
// by the languages for which there exists translations of the user interface.
// Similarly, a user may provide a list of preferred languages which is limited
// by the languages understood by this user.
// An application should use a Matcher to find the best supported language based
// on the user's preferred list.
// Matchers are aware of the intricacies of equivalence between languages.
// The default Matcher implementation takes into account things such as
// deprecated subtags, legacy tags, and mutual intelligibility between scripts
// and languages.
//
// A Matcher for English, Australian English, Danish, and standard Mandarin can
// be defined as follows:
//
// var matcher = language.NewMatcher([]language.Tag{
// language.English, // The first language is used as fallback.
// language.MustParse("en-AU"),
// language.Danish,
// language.Chinese,
// })
//
// The following code selects the best match for someone speaking Spanish and
// Norwegian:
//
// preferred := []language.Tag{ language.Spanish, language.Norwegian }
// tag, _, _ := matcher.Match(preferred...)
//
// In this case, the best match is Danish, as Danish is sufficiently close to
// Norwegian that there is no need to fall back to the default.
// See ParseAcceptLanguage on how to handle the Accept-Language HTTP header.
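//
// A minimal sketch of handling that header with the matcher defined above
// (the header value is only an example):
//
//    desired, _, err := language.ParseAcceptLanguage("nn;q=0.3, en-US;q=0.8, en")
//    if err == nil {
//        tag, _, _ := matcher.Match(desired...)
//        _ = tag
//    }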
//
// Selecting language-specific services
//
// One should always use the Tag returned by the Matcher to create an instance
// of any of the language-specific services provided by the text repository.
// This prevents the mixing of languages, such as having a different language for
// messages and display names, as well as improper casing or sorting order for
// the selected language.
// Using the returned Tag also allows user-defined settings, such as collation
// order or numbering system to be transparently passed as options.
//
// If you have language-specific data in your application, however, it will in
// most cases suffice to use the index returned by the matcher to identify
// the user language.
// The following loop provides an alternative in case this is not sufficient:
//
// supported := map[language.Tag]data{
// language.English: enData,
// language.MustParse("en-AU"): enAUData,
// language.Danish: daData,
// language.Chinese: zhData,
// }
// tag, _, _ := matcher.Match(preferred...)
// for ; tag != language.Und; tag = tag.Parent() {
// if v, ok := supported[tag]; ok {
// return v
// }
// }
// return enData // should not reach here
//
// Repeatedly taking the Parent of the tag returned by Match will eventually
// match one of the tags used to initialize the Matcher.
//
// Canonicalization
//
// By default, only legacy and deprecated tags are converted into their
// canonical equivalent. All other information is preserved. This approach makes
// the confidence scores more accurate and allows matchers to distinguish
// between variants that are otherwise lost.
//
// As a consequence, two tags that should be treated as identical according to
// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The
// Matchers will handle such distinctions, though, and are aware of the
// equivalence relations. The CanonType type can be used to alter the
// canonicalization form.
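//
// For example (a sketch): Make, which uses the Default canonicalization,
// preserves a redundant script, while All removes it:
//
//    t := language.Make("en-Latn")       // stays "en-Latn"
//    t, _ = language.All.Canonicalize(t) // becomes "en"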
//
// References
//
// BCP 47 - Tags for Identifying Languages
// http://tools.ietf.org/html/bcp47
package language // import "golang.org/x/text/language"
// TODO: Remove above NOTE after:
// - verifying that tables are dropped correctly (most notably matcher tables).
import (
"errors"
"fmt"
"strings"
)
const (
// maxCoreSize is the maximum size of a BCP 47 tag without variants and
// extensions. Equals max lang (3) + script (4) + max reg (3) + 2 dashes.
maxCoreSize = 12
// max99thPercentileSize is a somewhat arbitrary buffer size that presumably
// is large enough to hold at least 99% of the BCP 47 tags.
max99thPercentileSize = 32
// maxSimpleUExtensionSize is the maximum size of a -u extension with one
// key-type pair. Equals len("-u-") + key (2) + dash + max value (8).
maxSimpleUExtensionSize = 14
)
// Tag represents a BCP 47 language tag. It is used to specify an instance of a
// specific language or locale. All language tag values are guaranteed to be
// well-formed.
type Tag struct {
lang langID
region regionID
script scriptID
pVariant byte // offset in str, includes preceding '-'
pExt uint16 // offset of first extension, includes preceding '-'
// str is the string representation of the Tag. It will only be used if the
// tag has variants or extensions.
str string
}
// Make is a convenience wrapper for Parse that omits the error.
// In case of an error, a sensible default is returned.
func Make(s string) Tag {
return Default.Make(s)
}
// Make is a convenience wrapper for c.Parse that omits the error.
// In case of an error, a sensible default is returned.
func (c CanonType) Make(s string) Tag {
t, _ := c.Parse(s)
return t
}
// Raw returns the raw base language, script and region, without making an
// attempt to infer their values.
func (t Tag) Raw() (b Base, s Script, r Region) {
return Base{t.lang}, Script{t.script}, Region{t.region}
}
// equalTags compares language, script and region subtags only.
func (t Tag) equalTags(a Tag) bool {
return t.lang == a.lang && t.script == a.script && t.region == a.region
}
// IsRoot returns true if t is equal to language "und".
func (t Tag) IsRoot() bool {
if int(t.pVariant) < len(t.str) {
return false
}
return t.equalTags(und)
}
// private reports whether the Tag consists solely of a private use tag.
func (t Tag) private() bool {
return t.str != "" && t.pVariant == 0
}
// CanonType can be used to enable or disable various types of canonicalization.
type CanonType int
const (
// Replace deprecated base languages with their preferred replacements.
DeprecatedBase CanonType = 1 << iota
// Replace deprecated scripts with their preferred replacements.
DeprecatedScript
// Replace deprecated regions with their preferred replacements.
DeprecatedRegion
// Remove redundant scripts.
SuppressScript
// Normalize legacy encodings. This includes legacy languages defined in
// CLDR as well as bibliographic codes defined in ISO-639.
Legacy
// Map the dominant language of a macro language group to the macro language
// subtag. For example cmn -> zh.
Macro
// The CLDR flag should be used if full compatibility with CLDR is required.
// There are a few cases where language.Tag may differ from CLDR. To follow all
// of CLDR's suggestions, use All|CLDR.
CLDR
// Raw can be used to Compose or Parse without Canonicalization.
Raw CanonType = 0
// Replace all deprecated tags with their preferred replacements.
Deprecated = DeprecatedBase | DeprecatedScript | DeprecatedRegion
// All canonicalizations recommended by BCP 47.
BCP47 = Deprecated | SuppressScript
// All canonicalizations.
All = BCP47 | Legacy | Macro
// Default is the canonicalization used by Parse, Make and Compose. To
// preserve as much information as possible, canonicalizations that remove
// potentially valuable information are not included. The Matcher is
// designed to recognize similar tags that would be the same if
// they were canonicalized using All.
Default = Deprecated | Legacy
canonLang = DeprecatedBase | Legacy | Macro
// TODO: LikelyScript, LikelyRegion: suppress similar to ICU.
)
// canonicalize returns the canonicalized equivalent of the tag and
// whether there was any change.
func (t Tag) canonicalize(c CanonType) (Tag, bool) {
if c == Raw {
return t, false
}
changed := false
if c&SuppressScript != 0 {
if t.lang < langNoIndexOffset && uint8(t.script) == suppressScript[t.lang] {
t.script = 0
changed = true
}
}
if c&canonLang != 0 {
for {
if l, aliasType := normLang(t.lang); l != t.lang {
switch aliasType {
case langLegacy:
if c&Legacy != 0 {
if t.lang == _sh && t.script == 0 {
t.script = _Latn
}
t.lang = l
changed = true
}
case langMacro:
if c&Macro != 0 {
// We deviate here from CLDR. The mapping "nb" -> "no"
// qualifies as a typical Macro language mapping. However,
// for legacy reasons, CLDR maps "no", the macro language
// code for Norwegian, to the dominant variant "nb". This
// change is currently under consideration for CLDR as well.
// See http://unicode.org/cldr/trac/ticket/2698 and also
// http://unicode.org/cldr/trac/ticket/1790 for some of the
// practical implications. TODO: this check could be removed
// if CLDR adopts this change.
if c&CLDR == 0 || t.lang != _nb {
changed = true
t.lang = l
}
}
case langDeprecated:
if c&DeprecatedBase != 0 {
if t.lang == _mo && t.region == 0 {
t.region = _MD
}
t.lang = l
changed = true
// Other canonicalization types may still apply.
continue
}
}
} else if c&Legacy != 0 && t.lang == _no && c&CLDR != 0 {
t.lang = _nb
changed = true
}
break
}
}
if c&DeprecatedScript != 0 {
if t.script == _Qaai {
changed = true
t.script = _Zinh
}
}
if c&DeprecatedRegion != 0 {
if r := normRegion(t.region); r != 0 {
changed = true
t.region = r
}
}
return t, changed
}
// Canonicalize returns the canonicalized equivalent of the tag.
func (c CanonType) Canonicalize(t Tag) (Tag, error) {
t, changed := t.canonicalize(c)
if changed {
t.remakeString()
}
return t, nil
}
// Confidence indicates the level of certainty for a given return value.
// For example, Serbian may be written in Cyrillic or Latin script.
// The confidence level indicates whether a value was explicitly specified,
// whether it is typically the only possible value, or whether there is
// an ambiguity.
type Confidence int
const (
No Confidence = iota // full confidence that there was no match
Low // most likely value picked out of a set of alternatives
High // value is generally assumed to be the correct match
Exact // exact match or explicitly specified value
)
var confName = []string{"No", "Low", "High", "Exact"}
func (c Confidence) String() string {
return confName[c]
}
// remakeString is used to update t.str in case lang, script or region changed.
// It is assumed that pExt and pVariant still point to the start of the
// respective parts.
func (t *Tag) remakeString() {
if t.str == "" {
return
}
extra := t.str[t.pVariant:]
if t.pVariant > 0 {
extra = extra[1:]
}
if t.equalTags(und) && strings.HasPrefix(extra, "x-") {
t.str = extra
t.pVariant = 0
t.pExt = 0
return
}
var buf [max99thPercentileSize]byte // avoid extra memory allocation in most cases.
b := buf[:t.genCoreBytes(buf[:])]
if extra != "" {
diff := len(b) - int(t.pVariant)
b = append(b, '-')
b = append(b, extra...)
t.pVariant = uint8(int(t.pVariant) + diff)
t.pExt = uint16(int(t.pExt) + diff)
} else {
t.pVariant = uint8(len(b))
t.pExt = uint16(len(b))
}
t.str = string(b)
}
// genCoreBytes writes a string for the base language, script and region tags
// to the given buffer and returns the number of bytes written. It will never
// write more than maxCoreSize bytes.
func (t *Tag) genCoreBytes(buf []byte) int {
n := t.lang.stringToBuf(buf[:])
if t.script != 0 {
n += copy(buf[n:], "-")
n += copy(buf[n:], t.script.String())
}
if t.region != 0 {
n += copy(buf[n:], "-")
n += copy(buf[n:], t.region.String())
}
return n
}
// String returns the canonical string representation of the language tag.
func (t Tag) String() string {
if t.str != "" {
return t.str
}
if t.script == 0 && t.region == 0 {
return t.lang.String()
}
buf := [maxCoreSize]byte{}
return string(buf[:t.genCoreBytes(buf[:])])
}
// Base returns the base language of the language tag. If the base language is
// unspecified, an attempt will be made to infer it from the context.
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
func (t Tag) Base() (Base, Confidence) {
if t.lang != 0 {
return Base{t.lang}, Exact
}
c := High
if t.script == 0 && !(Region{t.region}).IsCountry() {
c = Low
}
if tag, err := addTags(t); err == nil && tag.lang != 0 {
return Base{tag.lang}, c
}
return Base{0}, No
}
// Script infers the script for the language tag. If it was not explicitly given, it will infer
// a most likely candidate.
// If more than one script is commonly used for a language, the most likely one
// is returned with a low confidence indication. For example, it returns (Cyrl, Low)
// for Serbian.
// If a script cannot be inferred, (Zzzz, No) is returned. We do not use Zyyy (undetermined)
// as one would suspect from the IANA registry for BCP 47. In a Unicode context Zyyy marks
// common characters (like 1, 2, 3, '.', etc.) and is therefore more like multiple scripts.
// See http://www.unicode.org/reports/tr24/#Values for more details. Zzzz is also used for
// unknown value in CLDR. (Zzzz, Exact) is returned if Zzzz was explicitly specified.
// Note that an inferred script is never guaranteed to be the correct one. Latin is
// almost exclusively used for Afrikaans, but Arabic has been used for some texts
// in the past. Also, the script that is commonly used may change over time.
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
func (t Tag) Script() (Script, Confidence) {
if t.script != 0 {
return Script{t.script}, Exact
}
sc, c := scriptID(_Zzzz), No
if t.lang < langNoIndexOffset {
if scr := scriptID(suppressScript[t.lang]); scr != 0 {
// Note: it is not always the case that a language with a suppress
// script value is only written in one script (e.g. kk, ms, pa).
if t.region == 0 {
return Script{scriptID(scr)}, High
}
sc, c = scr, High
}
}
if tag, err := addTags(t); err == nil {
if tag.script != sc {
sc, c = tag.script, Low
}
} else {
t, _ = (Deprecated | Macro).Canonicalize(t)
if tag, err := addTags(t); err == nil && tag.script != sc {
sc, c = tag.script, Low
}
}
return Script{sc}, c
}
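// For example (an illustrative sketch of the behavior documented above):
//
//	sc, conf := language.Make("sr").Script() // typically Cyrl with Low confidence
//	_, _ = sc, conf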
// Region returns the region for the language tag. If it was not explicitly given, it will
// infer a most likely candidate from the context.
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
func (t Tag) Region() (Region, Confidence) {
if t.region != 0 {
return Region{t.region}, Exact
}
if t, err := addTags(t); err == nil {
return Region{t.region}, Low // TODO: differentiate between high and low.
}
t, _ = (Deprecated | Macro).Canonicalize(t)
if tag, err := addTags(t); err == nil {
return Region{tag.region}, Low
}
return Region{_ZZ}, No // TODO: return world instead of undetermined?
}
// Variants returns the variants specified explicitly for this language tag,
// or nil if no variant was specified.
func (t Tag) Variants() []Variant {
v := []Variant{}
if int(t.pVariant) < int(t.pExt) {
for x, str := "", t.str[t.pVariant:t.pExt]; str != ""; {
x, str = nextToken(str)
v = append(v, Variant{x})
}
}
return v
}
// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a
// specific language are substituted with fields from the parent language.
// The parent for a language may change for newer versions of CLDR.
func (t Tag) Parent() Tag {
if t.str != "" {
// Strip the variants and extensions.
t, _ = Raw.Compose(t.Raw())
if t.region == 0 && t.script != 0 && t.lang != 0 {
base, _ := addTags(Tag{lang: t.lang})
if base.script == t.script {
return Tag{lang: t.lang}
}
}
return t
}
if t.lang != 0 {
if t.region != 0 {
maxScript := t.script
if maxScript == 0 {
max, _ := addTags(t)
maxScript = max.script
}
for i := range parents {
if langID(parents[i].lang) == t.lang && scriptID(parents[i].maxScript) == maxScript {
for _, r := range parents[i].fromRegion {
if regionID(r) == t.region {
return Tag{
lang: t.lang,
script: scriptID(parents[i].script),
region: regionID(parents[i].toRegion),
}
}
}
}
}
// Strip the script if it is the default one.
base, _ := addTags(Tag{lang: t.lang})
if base.script != maxScript {
return Tag{lang: t.lang, script: maxScript}
}
return Tag{lang: t.lang}
} else if t.script != 0 {
// The parent for a base-script pair with a non-default script is
// "und" instead of the base language.
base, _ := addTags(Tag{lang: t.lang})
if base.script != t.script {
return und
}
return Tag{lang: t.lang}
}
}
return und
}
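// For example (a sketch; concrete parents depend on the CLDR data in the tables):
//
//	language.MustParse("en-US").Parent() // typically "en"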
// nextToken returns token t and the rest of the string.
func nextToken(s string) (t, tail string) {
p := strings.Index(s[1:], "-")
if p == -1 {
return s[1:], ""
}
p++
return s[1:p], s[p:]
}
// Extension is a single BCP 47 extension.
type Extension struct {
s string
}
// String returns the string representation of the extension, including the
// type tag.
func (e Extension) String() string {
return e.s
}
// ParseExtension parses s as an extension and returns it on success.
func ParseExtension(s string) (e Extension, err error) {
scan := makeScannerString(s)
var end int
if n := len(scan.token); n != 1 {
return Extension{}, errSyntax
}
scan.toLower(0, len(scan.b))
end = parseExtension(&scan)
if end != len(s) {
return Extension{}, errSyntax
}
return Extension{string(scan.b)}, nil
}
// Type returns the one-byte extension type of e. It returns 0 for the zero
// extension.
func (e Extension) Type() byte {
if e.s == "" {
return 0
}
return e.s[0]
}
// Tokens returns the list of tokens of e.
func (e Extension) Tokens() []string {
return strings.Split(e.s, "-")
}
// Extension returns the extension of type x for tag t. It will return
// false for ok if t does not have the requested extension. The returned
// extension will be invalid in this case.
func (t Tag) Extension(x byte) (ext Extension, ok bool) {
for i := int(t.pExt); i < len(t.str)-1; {
var ext string
i, ext = getExtension(t.str, i)
if ext[0] == x {
return Extension{ext}, true
}
}
return Extension{}, false
}
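// For example (a sketch):
//
//	t := language.Make("de-u-co-phonebk")
//	if ext, ok := t.Extension('u'); ok {
//		_ = ext.String() // "u-co-phonebk"
//	}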
// Extensions returns all extensions of t.
func (t Tag) Extensions() []Extension {
e := []Extension{}
for i := int(t.pExt); i < len(t.str)-1; {
var ext string
i, ext = getExtension(t.str, i)
e = append(e, Extension{ext})
}
return e
}
// TypeForKey returns the type associated with the given key, where key and type
// are of the allowed values defined for the Unicode locale extension ('u') in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// TypeForKey will traverse the inheritance chain to get the correct value.
func (t Tag) TypeForKey(key string) string {
if start, end, _ := t.findTypeForKey(key); end != start {
return t.str[start:end]
}
return ""
}
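// For example (a sketch):
//
//	language.Make("th-u-nu-thai").TypeForKey("nu") // "thai"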
var (
errPrivateUse = errors.New("cannot set a key on a private use tag")
errInvalidArguments = errors.New("invalid key or type")
)
// SetTypeForKey returns a new Tag with the key set to type, where key and type
// are of the allowed values defined for the Unicode locale extension ('u') in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// An empty value removes an existing pair with the same key.
func (t Tag) SetTypeForKey(key, value string) (Tag, error) {
if t.private() {
return t, errPrivateUse
}
if len(key) != 2 {
return t, errInvalidArguments
}
// Remove the setting if value is "".
if value == "" {
start, end, _ := t.findTypeForKey(key)
if start != end {
// Remove key tag and leading '-'.
start -= 4
// Remove a possible empty extension.
if (end == len(t.str) || t.str[end+2] == '-') && t.str[start-2] == '-' {
start -= 2
}
if start == int(t.pVariant) && end == len(t.str) {
t.str = ""
t.pVariant, t.pExt = 0, 0
} else {
t.str = fmt.Sprintf("%s%s", t.str[:start], t.str[end:])
}
}
return t, nil
}
if len(value) < 3 || len(value) > 8 {
return t, errInvalidArguments
}
var (
buf [maxCoreSize + maxSimpleUExtensionSize]byte
uStart int // start of the -u extension.
)
// Generate the tag string if needed.
if t.str == "" {
uStart = t.genCoreBytes(buf[:])
buf[uStart] = '-'
uStart++
}
// Create new key-type pair and parse it to verify.
b := buf[uStart:]
copy(b, "u-")
copy(b[2:], key)
b[4] = '-'
b = b[:5+copy(b[5:], value)]
scan := makeScanner(b)
if parseExtensions(&scan); scan.err != nil {
return t, scan.err
}
// Assemble the replacement string.
if t.str == "" {
t.pVariant, t.pExt = byte(uStart-1), uint16(uStart-1)
t.str = string(buf[:uStart+len(b)])
} else {
s := t.str
start, end, hasExt := t.findTypeForKey(key)
if start == end {
if hasExt {
b = b[2:]
}
t.str = fmt.Sprintf("%s-%s%s", s[:start], b, s[end:])
} else {
t.str = fmt.Sprintf("%s%s%s", s[:start], value, s[end:])
}
}
return t, nil
}
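// For example (a sketch):
//
//	t, _ := language.Make("en").SetTypeForKey("co", "phonebk")
//	_ = t.String() // "en-u-co-phonebk"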
// findTypeForKey returns the start and end position for the type corresponding
// to key or the point at which to insert the key-value pair if the type
// wasn't found. The hasExt return value reports whether an -u extension was present.
// Note: the extensions are typically very small and are likely to contain
// only one key-type pair.
func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) {
p := int(t.pExt)
if len(key) != 2 || p == len(t.str) || p == 0 {
return p, p, false
}
s := t.str
// Find the correct extension.
for p++; s[p] != 'u'; p++ {
if s[p] > 'u' {
p--
return p, p, false
}
if p = nextExtension(s, p); p == len(s) {
return len(s), len(s), false
}
}
// Proceed to the hyphen following the extension name.
p++
// curKey is the key currently being processed.
curKey := ""
// Iterate over keys until we get the end of a section.
for {
// p points to the hyphen preceding the current token.
if p3 := p + 3; s[p3] == '-' {
// Found a key.
// Check whether we just processed the key that was requested.
if curKey == key {
return start, p, true
}
// Set to the next key and continue scanning type tokens.
curKey = s[p+1 : p3]
if curKey > key {
return p, p, true
}
// Start of the type token sequence.
start = p + 4
// A type is at least 3 characters long.
p += 7 // 4 + 3
} else {
// Attribute or type, which is at least 3 characters long.
p += 4
}
// p points past the third character of a type or attribute.
max := p + 5 // maximum length of token plus hyphen.
if len(s) < max {
max = len(s)
}
for ; p < max && s[p] != '-'; p++ {
}
// Bail if we have exhausted all tokens or if the next token starts
// a new extension.
if p == len(s) || s[p+2] == '-' {
if curKey == key {
return start, p, true
}
return p, p, true
}
}
}
// CompactIndex returns an index, where 0 <= index < NumCompactTags, for tags
// for which data exists in the text repository. The index will change over time
// and should not be stored in persistent storage. Extensions, except for the
// 'va' type of the 'u' extension, are ignored. It will return 0, false if no
// compact tag exists, where 0 is the index for the root language (Und).
func CompactIndex(t Tag) (index int, ok bool) {
// TODO: perhaps give more frequent tags a lower index.
// TODO: we could make the indexes stable. This would exclude some
// possibilities for optimization, so don't do this quite yet.
b, s, r := t.Raw()
if len(t.str) > 0 {
if strings.HasPrefix(t.str, "x-") {
// We have no entries for user-defined tags.
return 0, false
}
if uint16(t.pVariant) != t.pExt {
// There are no tags with variants and a u-va type.
if t.TypeForKey("va") != "" {
return 0, false
}
t, _ = Raw.Compose(b, s, r, t.Variants())
} else if _, ok := t.Extension('u'); ok {
// Strip all but the 'va' entry.
variant := t.TypeForKey("va")
t, _ = Raw.Compose(b, s, r)
t, _ = t.SetTypeForKey("va", variant)
}
if len(t.str) > 0 {
// We have some variants.
for i, s := range specialTags {
if s == t {
return i + 1, true
}
}
return 0, false
}
}
// No variants specified: just compare core components.
// The key has the form lllssrrr, where l, s, and r are nibbles for
// respectively the langID, scriptID, and regionID.
key := uint32(b.langID) << (8 + 12)
key |= uint32(s.scriptID) << 12
key |= uint32(r.regionID)
x, ok := coreTags[key]
return int(x), ok
}
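// Usage sketch (the concrete index is table-dependent and may change between
// releases, so it must not be persisted):
//
//	if idx, ok := language.CompactIndex(language.MustParse("en-US")); ok {
//		_ = idx // index into per-tag data shipped with the text repository
//	}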
// Base is an ISO 639 language code, used for encoding the base language
// of a language tag.
type Base struct {
langID
}
// ParseBase parses a 2- or 3-letter ISO 639 code.
// It returns a ValueError if s is a well-formed but unknown language identifier
// or another error if another error occurred.
func ParseBase(s string) (Base, error) {
if n := len(s); n < 2 || 3 < n {
return Base{}, errSyntax
}
var buf [3]byte
l, err := getLangID(buf[:copy(buf[:], s)])
return Base{l}, err
}
// Script is a 4-letter ISO 15924 code for representing scripts.
// It is idiomatically represented in title case.
type Script struct {
scriptID
}
// ParseScript parses a 4-letter ISO 15924 code.
// It returns a ValueError if s is a well-formed but unknown script identifier
// or another error if another error occurred.
func ParseScript(s string) (Script, error) {
if len(s) != 4 {
return Script{}, errSyntax
}
var buf [4]byte
sc, err := getScriptID(script, buf[:copy(buf[:], s)])
return Script{sc}, err
}
// Region is an ISO 3166-1 or UN M.49 code for representing countries and regions.
type Region struct {
regionID
}
// EncodeM49 returns the Region for the given UN M.49 code.
// It returns an error if r is not a valid code.
func EncodeM49(r int) (Region, error) {
rid, err := getRegionM49(r)
return Region{rid}, err
}
// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code.
// It returns a ValueError if s is a well-formed but unknown region identifier
// or another error if another error occurred.
func ParseRegion(s string) (Region, error) {
if n := len(s); n < 2 || 3 < n {
return Region{}, errSyntax
}
var buf [3]byte
r, err := getRegionID(buf[:copy(buf[:], s)])
return Region{r}, err
}
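// For example (a sketch):
//
//	b, _ := language.ParseBase("de")     // German
//	s, _ := language.ParseScript("Latn") // Latin script
//	r, _ := language.ParseRegion("419")  // Latin America (UN M.49)
//	m, _ := language.EncodeM49(419)      // the same region via its numeric code
//	_, _, _, _ = b, s, r, m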
// IsCountry returns whether this region is a country or autonomous area. This
// includes non-standard definitions from CLDR.
func (r Region) IsCountry() bool {
if r.regionID == 0 || r.IsGroup() || r.IsPrivateUse() && r.regionID != _XK {
return false
}
return true
}
// IsGroup returns whether this region defines a collection of regions. This
// includes non-standard definitions from CLDR.
func (r Region) IsGroup() bool {
if r.regionID == 0 {
return false
}
return int(regionInclusion[r.regionID]) < len(regionContainment)
}
// Contains returns whether Region c is contained by Region r. It returns true
// if c == r.
func (r Region) Contains(c Region) bool {
return r.regionID.contains(c.regionID)
}
func (r regionID) contains(c regionID) bool {
if r == c {
return true
}
g := regionInclusion[r]
if g >= nRegionGroups {
return false
}
m := regionContainment[g]
d := regionInclusion[c]
b := regionInclusionBits[d]
// A contained country may belong to multiple disjoint groups. Matching any
// of these indicates containment. If the contained region is a group, it
// must strictly be a subset.
if d >= nRegionGroups {
return b&m != 0
}
return b&^m == 0
}
var errNoTLD = errors.New("language: region is not a valid ccTLD")
// TLD returns the country code top-level domain (ccTLD). UK is returned for GB.
// In all other cases it returns either the region itself or an error.
//
// This method may return an error for a region for which there exists a
// canonical form with a ccTLD. To get that ccTLD, canonicalize r first. The
// region will already be canonicalized if it was obtained from a Tag that was
// obtained using any of the default methods.
func (r Region) TLD() (Region, error) {
// See http://en.wikipedia.org/wiki/Country_code_top-level_domain for the
// difference between ISO 3166-1 and IANA ccTLD.
if r.regionID == _GB {
r = Region{_UK}
}
if (r.typ() & ccTLD) == 0 {
return Region{}, errNoTLD
}
return r, nil
}
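// For example (a sketch of the GB special case documented above):
//
//	gb, _ := language.ParseRegion("GB")
//	tld, _ := gb.TLD()
//	_ = tld.String() // "UK"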
// Canonicalize returns the region or a possible replacement if the region is
// deprecated. It will not return a replacement for deprecated regions that
// are split into multiple regions.
func (r Region) Canonicalize() Region {
if cr := normRegion(r.regionID); cr != 0 {
return Region{cr}
}
return r
}
// Variant represents a registered variant of a language as defined by BCP 47.
type Variant struct {
variant string
}
// ParseVariant parses and returns a Variant. An error is returned if s is not
// a valid variant.
func ParseVariant(s string) (Variant, error) {
s = strings.ToLower(s)
if _, ok := variantIndex[s]; ok {
return Variant{s}, nil
}
return Variant{}, mkErrInvalid([]byte(s))
}
// String returns the string representation of the variant.
func (v Variant) String() string {
return v.variant
}

396
vendor/golang.org/x/text/language/lookup.go generated vendored Normal file

@ -0,0 +1,396 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package language
import (
"bytes"
"fmt"
"sort"
"strconv"
"golang.org/x/text/internal/tag"
)
// findIndex tries to find the given tag in idx and returns a standardized error
// if it could not be found.
func findIndex(idx tag.Index, key []byte, form string) (index int, err error) {
if !tag.FixCase(form, key) {
return 0, errSyntax
}
i := idx.Index(key)
if i == -1 {
return 0, mkErrInvalid(key)
}
return i, nil
}
func searchUint(imap []uint16, key uint16) int {
return sort.Search(len(imap), func(i int) bool {
return imap[i] >= key
})
}
type langID uint16
// getLangID returns the langID of s if s is a canonical subtag
// or langUnknown if s is not a canonical subtag.
func getLangID(s []byte) (langID, error) {
if len(s) == 2 {
return getLangISO2(s)
}
return getLangISO3(s)
}
// normLang returns the langID that id maps to under the alias table, along
// with the type of the alias, or id itself if no mapping exists.
func normLang(id langID) (langID, langAliasType) {
k := sort.Search(len(langAliasMap), func(i int) bool {
return langAliasMap[i].from >= uint16(id)
})
if k < len(langAliasMap) && langAliasMap[k].from == uint16(id) {
return langID(langAliasMap[k].to), langAliasTypes[k]
}
return id, langAliasTypeUnknown
}
// getLangISO2 returns the langID for the given 2-letter ISO language code
// or unknownLang if this does not exist.
func getLangISO2(s []byte) (langID, error) {
if !tag.FixCase("zz", s) {
return 0, errSyntax
}
if i := lang.Index(s); i != -1 && lang.Elem(i)[3] != 0 {
return langID(i), nil
}
return 0, mkErrInvalid(s)
}
const base = 'z' - 'a' + 1
func strToInt(s []byte) uint {
v := uint(0)
for i := 0; i < len(s); i++ {
v *= base
v += uint(s[i] - 'a')
}
return v
}
// intToStr converts the given integer back to the original ASCII string
// passed to strToInt. len(s) must match the number of characters obtained.
func intToStr(v uint, s []byte) {
for i := len(s) - 1; i >= 0; i-- {
s[i] = byte(v%base) + 'a'
v /= base
}
}
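// For example, with base = 26 the bytes "ab" encode as 0*26 + 1 = 1 and "ba"
// as 1*26 + 0 = 26; intToStr(1, s[:2]) writes "ab" back into s.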
// getLangISO3 returns the langID for the given 3-letter ISO language code
// or unknownLang if this does not exist.
func getLangISO3(s []byte) (langID, error) {
if tag.FixCase("und", s) {
// first try to match canonical 3-letter entries
for i := lang.Index(s[:2]); i != -1; i = lang.Next(s[:2], i) {
if e := lang.Elem(i); e[3] == 0 && e[2] == s[2] {
// We treat "und" as special and always translate it to "unspecified".
// Note that ZZ and Zzzz are private use and are not treated as
// unspecified by default.
id := langID(i)
if id == nonCanonicalUnd {
return 0, nil
}
return id, nil
}
}
if i := altLangISO3.Index(s); i != -1 {
return langID(altLangIndex[altLangISO3.Elem(i)[3]]), nil
}
n := strToInt(s)
if langNoIndex[n/8]&(1<<(n%8)) != 0 {
return langID(n) + langNoIndexOffset, nil
}
// Check for non-canonical uses of ISO3.
for i := lang.Index(s[:1]); i != -1; i = lang.Next(s[:1], i) {
if e := lang.Elem(i); e[2] == s[1] && e[3] == s[2] {
return langID(i), nil
}
}
return 0, mkErrInvalid(s)
}
return 0, errSyntax
}
// stringToBuf writes the string to b and returns the number of bytes
// written. cap(b) must be >= 3.
func (id langID) stringToBuf(b []byte) int {
if id >= langNoIndexOffset {
intToStr(uint(id)-langNoIndexOffset, b[:3])
return 3
} else if id == 0 {
return copy(b, "und")
}
l := lang[id<<2:]
if l[3] == 0 {
return copy(b, l[:3])
}
return copy(b, l[:2])
}
// String returns the BCP 47 representation of the langID.
// Use b as variable name, instead of id, to ensure the variable
// used is consistent with that of Base in which this type is embedded.
func (b langID) String() string {
if b == 0 {
return "und"
} else if b >= langNoIndexOffset {
b -= langNoIndexOffset
buf := [3]byte{}
intToStr(uint(b), buf[:])
return string(buf[:])
}
l := lang.Elem(int(b))
if l[3] == 0 {
return l[:3]
}
return l[:2]
}
// ISO3 returns the ISO 639-3 language code.
func (b langID) ISO3() string {
if b == 0 || b >= langNoIndexOffset {
return b.String()
}
l := lang.Elem(int(b))
if l[3] == 0 {
return l[:3]
} else if l[2] == 0 {
return altLangISO3.Elem(int(l[3]))[:3]
}
// This allocation will only happen for 3-letter ISO codes
// that are non-canonical BCP 47 language identifiers.
return l[0:1] + l[2:4]
}
// IsPrivateUse reports whether this language code is reserved for private use.
func (b langID) IsPrivateUse() bool {
return langPrivateStart <= b && b <= langPrivateEnd
}
type regionID uint16
// getRegionID returns the region id for s if s is a valid 2- or 3-letter ISO
// 3166-1 code or a UN M.49 code, or unknownRegion otherwise.
func getRegionID(s []byte) (regionID, error) {
if len(s) == 3 {
if isAlpha(s[0]) {
return getRegionISO3(s)
}
if i, err := strconv.ParseUint(string(s), 10, 10); err == nil {
return getRegionM49(int(i))
}
}
return getRegionISO2(s)
}
// getRegionISO2 returns the regionID for the given 2-letter ISO country code
// or unknownRegion if this does not exist.
func getRegionISO2(s []byte) (regionID, error) {
i, err := findIndex(regionISO, s, "ZZ")
if err != nil {
return 0, err
}
return regionID(i) + isoRegionOffset, nil
}
// getRegionISO3 returns the regionID for the given 3-letter ISO country code
// or unknownRegion if this does not exist.
func getRegionISO3(s []byte) (regionID, error) {
if tag.FixCase("ZZZ", s) {
for i := regionISO.Index(s[:1]); i != -1; i = regionISO.Next(s[:1], i) {
if e := regionISO.Elem(i); e[2] == s[1] && e[3] == s[2] {
return regionID(i) + isoRegionOffset, nil
}
}
for i := 0; i < len(altRegionISO3); i += 3 {
if tag.Compare(altRegionISO3[i:i+3], s) == 0 {
return regionID(altRegionIDs[i/3]), nil
}
}
return 0, mkErrInvalid(s)
}
return 0, errSyntax
}
func getRegionM49(n int) (regionID, error) {
if 0 < n && n <= 999 {
const (
searchBits = 7
regionBits = 9
regionMask = 1<<regionBits - 1
)
idx := n >> searchBits
buf := fromM49[m49Index[idx]:m49Index[idx+1]]
val := uint16(n) << regionBits // we rely on bits shifting out
i := sort.Search(len(buf), func(i int) bool {
return buf[i] >= val
})
if r := fromM49[int(m49Index[idx])+i]; r&^regionMask == val {
return regionID(r & regionMask), nil
}
}
var e ValueError
fmt.Fprint(bytes.NewBuffer([]byte(e.v[:])), n)
return 0, e
}
// normRegion returns a region if r is deprecated or 0 otherwise.
// TODO: consider supporting BYS (-> BLR), CSK (-> 200 or CZ), PHI (-> PHL) and AFI (-> DJ).
// TODO: consider mapping split up regions to new most populous one (like CLDR).
func normRegion(r regionID) regionID {
m := regionOldMap
k := sort.Search(len(m), func(i int) bool {
return m[i].from >= uint16(r)
})
if k < len(m) && m[k].from == uint16(r) {
return regionID(m[k].to)
}
return 0
}
const (
iso3166UserAssigned = 1 << iota
ccTLD
bcp47Region
)
func (r regionID) typ() byte {
return regionTypes[r]
}
// String returns the BCP 47 representation for the region.
// It returns "ZZ" for an unspecified region.
func (r regionID) String() string {
if r < isoRegionOffset {
if r == 0 {
return "ZZ"
}
return fmt.Sprintf("%03d", r.M49())
}
r -= isoRegionOffset
return regionISO.Elem(int(r))[:2]
}
// ISO3 returns the 3-letter ISO code of r.
// Note that not all regions have a 3-letter ISO code.
// In such cases this method returns "ZZZ".
func (r regionID) ISO3() string {
if r < isoRegionOffset {
return "ZZZ"
}
r -= isoRegionOffset
reg := regionISO.Elem(int(r))
switch reg[2] {
case 0:
return altRegionISO3[reg[3]:][:3]
case ' ':
return "ZZZ"
}
return reg[0:1] + reg[2:4]
}
// M49 returns the UN M.49 encoding of r, or 0 if this encoding
// is not defined for r.
func (r regionID) M49() int {
return int(m49[r])
}
// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This
// may include private-use tags that are assigned by CLDR and used in this
// implementation. So IsPrivateUse and IsCountry can be simultaneously true.
func (r regionID) IsPrivateUse() bool {
return r.typ()&iso3166UserAssigned != 0
}
type scriptID uint8
// getScriptID returns the script id for string s. It assumes that s
// is of the format [A-Z][a-z]{3}.
func getScriptID(idx tag.Index, s []byte) (scriptID, error) {
i, err := findIndex(idx, s, "Zzzz")
return scriptID(i), err
}
// String returns the script code in title case.
// It returns "Zzzz" for an unspecified script.
func (s scriptID) String() string {
if s == 0 {
return "Zzzz"
}
return script.Elem(int(s))
}
// IsPrivateUse reports whether this script code is reserved for private use.
func (s scriptID) IsPrivateUse() bool {
return _Qaaa <= s && s <= _Qabx
}
const (
maxAltTaglen = len("en-US-POSIX")
maxLen = maxAltTaglen
)
var (
// grandfatheredMap holds a mapping from legacy and grandfathered tags to
// their base language or an index to a more elaborate tag.
grandfatheredMap = map[[maxLen]byte]int16{
[maxLen]byte{'a', 'r', 't', '-', 'l', 'o', 'j', 'b', 'a', 'n'}: _jbo, // art-lojban
[maxLen]byte{'i', '-', 'a', 'm', 'i'}: _ami, // i-ami
[maxLen]byte{'i', '-', 'b', 'n', 'n'}: _bnn, // i-bnn
[maxLen]byte{'i', '-', 'h', 'a', 'k'}: _hak, // i-hak
[maxLen]byte{'i', '-', 'k', 'l', 'i', 'n', 'g', 'o', 'n'}: _tlh, // i-klingon
[maxLen]byte{'i', '-', 'l', 'u', 'x'}: _lb, // i-lux
[maxLen]byte{'i', '-', 'n', 'a', 'v', 'a', 'j', 'o'}: _nv, // i-navajo
[maxLen]byte{'i', '-', 'p', 'w', 'n'}: _pwn, // i-pwn
[maxLen]byte{'i', '-', 't', 'a', 'o'}: _tao, // i-tao
[maxLen]byte{'i', '-', 't', 'a', 'y'}: _tay, // i-tay
[maxLen]byte{'i', '-', 't', 's', 'u'}: _tsu, // i-tsu
[maxLen]byte{'n', 'o', '-', 'b', 'o', 'k'}: _nb, // no-bok
[maxLen]byte{'n', 'o', '-', 'n', 'y', 'n'}: _nn, // no-nyn
[maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'f', 'r'}: _sfb, // sgn-BE-FR
[maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'n', 'l'}: _vgt, // sgn-BE-NL
[maxLen]byte{'s', 'g', 'n', '-', 'c', 'h', '-', 'd', 'e'}: _sgg, // sgn-CH-DE
[maxLen]byte{'z', 'h', '-', 'g', 'u', 'o', 'y', 'u'}: _cmn, // zh-guoyu
[maxLen]byte{'z', 'h', '-', 'h', 'a', 'k', 'k', 'a'}: _hak, // zh-hakka
[maxLen]byte{'z', 'h', '-', 'm', 'i', 'n', '-', 'n', 'a', 'n'}: _nan, // zh-min-nan
[maxLen]byte{'z', 'h', '-', 'x', 'i', 'a', 'n', 'g'}: _hsn, // zh-xiang
// Grandfathered tags with no modern replacement will be converted as
// follows:
[maxLen]byte{'c', 'e', 'l', '-', 'g', 'a', 'u', 'l', 'i', 's', 'h'}: -1, // cel-gaulish
[maxLen]byte{'e', 'n', '-', 'g', 'b', '-', 'o', 'e', 'd'}: -2, // en-GB-oed
[maxLen]byte{'i', '-', 'd', 'e', 'f', 'a', 'u', 'l', 't'}: -3, // i-default
[maxLen]byte{'i', '-', 'e', 'n', 'o', 'c', 'h', 'i', 'a', 'n'}: -4, // i-enochian
[maxLen]byte{'i', '-', 'm', 'i', 'n', 'g', 'o'}: -5, // i-mingo
[maxLen]byte{'z', 'h', '-', 'm', 'i', 'n'}: -6, // zh-min
// CLDR-specific tag.
[maxLen]byte{'r', 'o', 'o', 't'}: 0, // root
[maxLen]byte{'e', 'n', '-', 'u', 's', '-', 'p', 'o', 's', 'i', 'x'}: -7, // en_US_POSIX
}
altTagIndex = [...]uint8{0, 17, 31, 45, 61, 74, 86, 102}
altTags = "xtg-x-cel-gaulishen-GB-oxendicten-x-i-defaultund-x-i-enochiansee-x-i-mingonan-x-zh-minen-US-u-va-posix"
)
func grandfathered(s [maxAltTaglen]byte) (t Tag, ok bool) {
if v, ok := grandfatheredMap[s]; ok {
if v < 0 {
return Make(altTags[altTagIndex[-v-1]:altTagIndex[-v]]), true
}
t.lang = langID(v)
return t, true
}
return t, false
}
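// For example (a sketch): parsing the grandfathered tag "i-klingon" yields its
// modern replacement:
//
//	language.Make("i-klingon") // tlh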

1648
vendor/golang.org/x/text/language/maketables.go generated vendored Normal file

File diff suppressed because it is too large

841
vendor/golang.org/x/text/language/match.go generated vendored Normal file

@ -0,0 +1,841 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package language
import "errors"
// Matcher is the interface that wraps the Match method.
//
// Match returns the best match for any of the given tags, along with
// a unique index associated with the returned tag and a confidence
// score.
type Matcher interface {
Match(t ...Tag) (tag Tag, index int, c Confidence)
}
// Comprehends reports the confidence score for how well a speaker of a given
// language can comprehend the written form of an alternative language.
func Comprehends(speaker, alternative Tag) Confidence {
_, _, c := NewMatcher([]Tag{alternative}).Match(speaker)
return c
}
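// For example (a sketch; the resulting level depends on the matching tables):
//
//	c := language.Comprehends(language.Norwegian, language.Danish)
//	// c is typically better than No, as the two languages are closely related
//	// (see the package documentation example).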
// NewMatcher returns a Matcher that matches an ordered list of preferred tags
// against a list of supported tags based on written intelligibility, closeness
// of dialect, equivalence of subtags and various other rules. It is initialized
// with the list of supported tags. The first element is used as the default
// value in case no match is found.
//
// Its Match method matches the first of the given Tags to reach a certain
// confidence threshold. The tags passed to Match should therefore be specified
// in order of preference. Extensions are ignored for matching.
//
// The index returned by the Match method corresponds to the index of the
// matched tag in t, but is augmented with the Unicode extension ('u') of the
// corresponding preferred tag. This allows user locale options to be passed
// transparently.
func NewMatcher(t []Tag) Matcher {
return newMatcher(t)
}
func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) {
match, w, c := m.getBest(want...)
if match == nil {
t = m.default_.tag
} else {
t, index = match.tag, match.index
}
// Copy options from the user-provided tag into the result tag. This is hard
// to do after the fact, so we do it here.
// TODO: consider also adding in variants that are compatible with the
// matched language.
// TODO: Add back region if it is non-ambiguous? Or create another tag to
// preserve the region?
if u, ok := w.Extension('u'); ok {
t, _ = Raw.Compose(t, u)
}
return t, index, c
}
type scriptRegionFlags uint8
const (
isList = 1 << iota
scriptInFrom
regionInFrom
)
func (t *Tag) setUndefinedLang(id langID) {
if t.lang == 0 {
t.lang = id
}
}
func (t *Tag) setUndefinedScript(id scriptID) {
if t.script == 0 {
t.script = id
}
}
func (t *Tag) setUndefinedRegion(id regionID) {
if t.region == 0 || t.region.contains(id) {
t.region = id
}
}
// ErrMissingLikelyTagsData indicates no information was available
// to compute likely values of missing tags.
var ErrMissingLikelyTagsData = errors.New("missing likely tags data")
// addLikelySubtags sets subtags to their most likely value, given the locale.
// In most cases this means setting fields for unknown values, but in some
// cases it may alter a value. It returns an ErrMissingLikelyTagsData error
// if the given locale cannot be expanded.
func (t Tag) addLikelySubtags() (Tag, error) {
id, err := addTags(t)
if err != nil {
return t, err
} else if id.equalTags(t) {
return t, nil
}
id.remakeString()
return id, nil
}
// specializeRegion attempts to specialize a group region.
func specializeRegion(t *Tag) bool {
if i := regionInclusion[t.region]; i < nRegionGroups {
x := likelyRegionGroup[i]
if langID(x.lang) == t.lang && scriptID(x.script) == t.script {
t.region = regionID(x.region)
}
return true
}
return false
}
func addTags(t Tag) (Tag, error) {
// We leave private use identifiers alone.
if t.private() {
return t, nil
}
if t.script != 0 && t.region != 0 {
if t.lang != 0 {
// already fully specified
specializeRegion(&t)
return t, nil
}
// Search matches for und-script-region. Note that for these cases
// region will never be a group so there is no need to check for this.
list := likelyRegion[t.region : t.region+1]
if x := list[0]; x.flags&isList != 0 {
list = likelyRegionList[x.lang : x.lang+uint16(x.script)]
}
for _, x := range list {
// Deviating from the spec. See match_test.go for details.
if scriptID(x.script) == t.script {
t.setUndefinedLang(langID(x.lang))
return t, nil
}
}
}
if t.lang != 0 {
// Search matches for lang-script and lang-region, where lang != und.
if t.lang < langNoIndexOffset {
x := likelyLang[t.lang]
if x.flags&isList != 0 {
list := likelyLangList[x.region : x.region+uint16(x.script)]
if t.script != 0 {
for _, x := range list {
if scriptID(x.script) == t.script && x.flags&scriptInFrom != 0 {
t.setUndefinedRegion(regionID(x.region))
return t, nil
}
}
} else if t.region != 0 {
count := 0
goodScript := true
tt := t
for _, x := range list {
// We visit all entries for which the script was not
// defined, including the ones where the region was not
// defined. This allows for proper disambiguation within
// regions.
if x.flags&scriptInFrom == 0 && t.region.contains(regionID(x.region)) {
tt.region = regionID(x.region)
tt.setUndefinedScript(scriptID(x.script))
goodScript = goodScript && tt.script == scriptID(x.script)
count++
}
}
if count == 1 {
return tt, nil
}
// Even if we fail to find a unique Region, we might have
// an unambiguous script.
if goodScript {
t.script = tt.script
}
}
}
}
} else {
// Search matches for und-script.
if t.script != 0 {
x := likelyScript[t.script]
if x.region != 0 {
t.setUndefinedRegion(regionID(x.region))
t.setUndefinedLang(langID(x.lang))
return t, nil
}
}
// Search matches for und-region. If und-script-region exists, it would
// have been found earlier.
if t.region != 0 {
if i := regionInclusion[t.region]; i < nRegionGroups {
x := likelyRegionGroup[i]
if x.region != 0 {
t.setUndefinedLang(langID(x.lang))
t.setUndefinedScript(scriptID(x.script))
t.region = regionID(x.region)
}
} else {
x := likelyRegion[t.region]
if x.flags&isList != 0 {
x = likelyRegionList[x.lang]
}
if x.script != 0 && x.flags != scriptInFrom {
t.setUndefinedLang(langID(x.lang))
t.setUndefinedScript(scriptID(x.script))
return t, nil
}
}
}
}
// Search matches for lang.
if t.lang < langNoIndexOffset {
x := likelyLang[t.lang]
if x.flags&isList != 0 {
x = likelyLangList[x.region]
}
if x.region != 0 {
t.setUndefinedScript(scriptID(x.script))
t.setUndefinedRegion(regionID(x.region))
}
specializeRegion(&t)
if t.lang == 0 {
t.lang = _en // default language
}
return t, nil
}
return t, ErrMissingLikelyTagsData
}
func (t *Tag) setTagsFrom(id Tag) {
t.lang = id.lang
t.script = id.script
t.region = id.region
}
// minimize removes the region or script subtags from t such that
// t.addLikelySubtags() == t.minimize().addLikelySubtags().
func (t Tag) minimize() (Tag, error) {
t, err := minimizeTags(t)
if err != nil {
return t, err
}
t.remakeString()
return t, nil
}
// minimizeTags mimics the behavior of the ICU 51 C implementation.
func minimizeTags(t Tag) (Tag, error) {
if t.equalTags(und) {
return t, nil
}
max, err := addTags(t)
if err != nil {
return t, err
}
for _, id := range [...]Tag{
{lang: t.lang},
{lang: t.lang, region: t.region},
{lang: t.lang, script: t.script},
} {
if x, err := addTags(id); err == nil && max.equalTags(x) {
t.setTagsFrom(id)
break
}
}
return t, nil
}
// Tag Matching
// CLDR defines an algorithm for finding the best match between two sets of language
// tags. The basic algorithm defines how to score a possible match and then find
// the match with the best score
// (see http://www.unicode.org/reports/tr35/#LanguageMatching).
// Using scoring has several disadvantages. The scoring obfuscates the importance of
// the various factors considered, making the algorithm harder to understand. Using
// scoring also requires the full score to be computed for each pair of tags.
//
// We will use a different algorithm which aims to have the following properties:
// - clarity on the precedence of the various selection factors, and
// - improved performance by allowing early termination of a comparison.
//
// Matching algorithm (overview)
// Input:
// - supported: a set of supported tags
// - default: the default tag to return in case there is no match
// - desired: list of desired tags, ordered by preference, starting with
// the most-preferred.
//
// Algorithm:
// 1) Set the best match to the lowest confidence level
// 2) For each tag in "desired":
// a) For each tag in "supported":
// 1) compute the match between the two tags.
// 2) if the match is better than the previous best match, replace it
// with the new match. (see next section)
// b) if the current best match is above a certain threshold, return this
// match without proceeding to the next tag in "desired". [See Note 1]
// 3) If the best match so far is below a certain threshold, return "default".
//
// Ranking:
// We use two phases to determine whether one pair of tags is a better match
// than another pair of tags. First, we determine a rough confidence level. If the
// levels are different, the one with the higher confidence wins.
// Second, if the rough confidence levels are identical, we use a set of tie-breaker
// rules.
//
// The confidence level of matching a pair of tags is determined by finding the
// lowest confidence level of any matches of the corresponding subtags (the
// result is deemed as good as its weakest link).
// We define the following levels:
// Exact - An exact match of a subtag, before adding likely subtags.
// MaxExact - An exact match of a subtag, after adding likely subtags.
// [See Note 2].
// High - High level of mutual intelligibility between different subtag
// variants.
// Low - Low level of mutual intelligibility between different subtag
// variants.
// No - No mutual intelligibility.
//
// The following levels can occur for each type of subtag:
// Base: Exact, MaxExact, High, Low, No
// Script: Exact, MaxExact [see Note 3], Low, No
// Region: Exact, MaxExact, High
// Variant: Exact, High
// Private: Exact, No
//
// Any result with a confidence level of Low or higher is deemed a possible match.
// Once a desired tag matches any of the supported tags with a level of MaxExact
// or higher, the next desired tag is not considered (see Step 2.b).
// Note that CLDR provides languageMatching data that defines close equivalence
// classes for base languages, scripts and regions.
//
// Tie-breaking
// If we get the same confidence level for two matches, we apply a sequence of
// tie-breaking rules. The first that succeeds defines the result. The rules are
// applied in the following order.
// 1) Original language was defined and was identical.
// 2) Original region was defined and was identical.
// 3) Distance between two maximized regions was the smallest.
// 4) Original script was defined and was identical.
// 5) Distance from want tag to have tag using the parent relation [see Note 5.]
// If there is still no winner after these rules are applied, the first match
// found wins.
//
// Notes:
// [1] Even if we do not have a perfect match, a match above a certain
// threshold is considered a better match than any other match to a tag
// later in the list of preferred language tags.
// [2] In practice, as matching of Exact is done in a separate phase from
// matching the other levels, we reuse the Exact level to mean MaxExact in
// the second phase. As a consequence, we only need the levels defined by
// the Confidence type. The MaxExact confidence level is mapped to High in
// the public API.
// [3] We do not differentiate between maximized script values that were derived
// from suppressScript versus most likely tag data. We determined that in
// ranking the two, one ranks just after the other. Moreover, the two cannot
// occur concurrently. As a consequence, they are identical for practical
// purposes.
// [4] In the case of deprecated codes, macro-equivalents and legacy mappings, we assign
// the MaxExact level to allow iw vs he to still be a closer match than
// en-AU vs en-US, for example.
// [5] In CLDR a locale inherits fields that are unspecified for this locale
// from its parent. Therefore, if a locale is a parent of another locale,
// it is a strong measure for closeness, especially when no other tie
// breaker rule applies. One could also argue it is inconsistent, for
// example, when pt-AO matches pt (which CLDR equates with pt-BR), even
// though its parent is pt-PT according to the inheritance rules.
//
// Implementation Details:
// There are several performance considerations worth pointing out. Most notably,
// we preprocess as much as possible (within reason) at the time of creation of a
// matcher. This includes:
// - creating a per-language map, which includes data for the raw base language
// and its canonicalized variant (if applicable),
// - expanding entries for the equivalence classes defined in CLDR's
// languageMatch data.
// The per-language map ensures that typically only a very small number of tags
// need to be considered. The pre-expansion of canonicalized subtags and
// equivalence classes reduces the amount of map lookups that need to be done at
// runtime.
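// A schematic sketch of the selection loop described above. The names
// selectBest and matchOnce are hypothetical; matchOnce stands in for the
// per-pair comparison implemented by bestMatch.update further below.
//
//	func selectBest(desired, supported []Tag, threshold Confidence,
//		matchOnce func(d, s Tag) Confidence) (bestIndex int, bestConf Confidence) {
//		bestConf = No // Step 1: start at the lowest confidence level.
//		for _, d := range desired {
//			for i, s := range supported {
//				// Step 2.a: keep the better of the current best and this pair.
//				if c := matchOnce(d, s); c > bestConf {
//					bestIndex, bestConf = i, c
//				}
//			}
//			// Step 2.b: once a sufficiently good match is found, do not
//			// consider the remaining, less-preferred desired tags.
//			if bestConf >= threshold {
//				return bestIndex, bestConf
//			}
//		}
//		// Step 3: the caller falls back to the default tag when bestConf
//		// remains below the threshold.
//		return bestIndex, bestConf
//	}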
// matcher keeps a set of supported language tags, indexed by language.
type matcher struct {
default_ *haveTag
index map[langID]*matchHeader
passSettings bool
}
// matchHeader has the lists of tags for exact matches and matches based on
// maximized and canonicalized tags for a given language.
type matchHeader struct {
exact []*haveTag
max []*haveTag
}
// haveTag holds a supported Tag and its maximized script and region. The maximized
// or canonicalized language is not stored as it is not needed during matching.
type haveTag struct {
tag Tag
// index of this tag in the original list of supported tags.
index int
// conf is the maximum confidence that can result from matching this haveTag.
// When conf < Exact this means it was inserted after applying a CLDR equivalence rule.
conf Confidence
// Maximized region and script.
maxRegion regionID
maxScript scriptID
// altScript may be checked as an alternative match to maxScript. If altScript
// matches, the confidence level for this match is Low. Theoretically there
// could be multiple alternative scripts. This does not occur in practice.
altScript scriptID
// nextMax is the index of the next haveTag with the same maximized tags.
nextMax uint16
}
func makeHaveTag(tag Tag, index int) (haveTag, langID) {
max := tag
if tag.lang != 0 {
max, _ = max.canonicalize(All)
max, _ = addTags(max)
max.remakeString()
}
return haveTag{tag, index, Exact, max.region, max.script, altScript(max.lang, max.script), 0}, max.lang
}
// altScript returns an alternative script that may match the given script with
// a low confidence. At the moment, the langMatch data allows for at most one
// script to map to another and we rely on this to keep the code simple.
func altScript(l langID, s scriptID) scriptID {
for _, alt := range matchScript {
if (alt.lang == 0 || langID(alt.lang) == l) && scriptID(alt.have) == s {
return scriptID(alt.want)
}
}
return 0
}
// addIfNew adds a haveTag to the list of tags only if it is a unique tag.
// Tags that have the same maximized values are linked by index.
func (h *matchHeader) addIfNew(n haveTag, exact bool) {
// Don't add new exact matches.
for _, v := range h.exact {
if v.tag.equalsRest(n.tag) {
return
}
}
if exact {
h.exact = append(h.exact, &n)
}
// Allow duplicate maximized tags, but create a linked list to allow quickly
// comparing the equivalents and bailing out early.
for i, v := range h.max {
if v.maxScript == n.maxScript &&
v.maxRegion == n.maxRegion &&
v.tag.variantOrPrivateTagStr() == n.tag.variantOrPrivateTagStr() {
for h.max[i].nextMax != 0 {
i = int(h.max[i].nextMax)
}
h.max[i].nextMax = uint16(len(h.max))
break
}
}
h.max = append(h.max, &n)
}
// header returns the matchHeader for the given language. It creates one if
// it doesn't already exist.
func (m *matcher) header(l langID) *matchHeader {
if h := m.index[l]; h != nil {
return h
}
h := &matchHeader{}
m.index[l] = h
return h
}
// newMatcher builds an index for the given supported tags and returns it as
// a matcher. It also expands the index by considering various equivalence classes
// for a given tag.
func newMatcher(supported []Tag) *matcher {
m := &matcher{
index: make(map[langID]*matchHeader),
}
if len(supported) == 0 {
m.default_ = &haveTag{}
return m
}
// Add supported languages to the index. Add exact matches first to give
// them precedence.
for i, tag := range supported {
pair, _ := makeHaveTag(tag, i)
m.header(tag.lang).addIfNew(pair, true)
}
m.default_ = m.header(supported[0].lang).exact[0]
for i, tag := range supported {
pair, max := makeHaveTag(tag, i)
if max != tag.lang {
m.header(max).addIfNew(pair, false)
}
}
// update is used to add indexes in the map for equivalent languages.
// If force is true, the update will also apply to derived entries. To
// avoid applying a "transitive closure", use false.
update := func(want, have uint16, conf Confidence, force bool) {
if hh := m.index[langID(have)]; hh != nil {
if !force && len(hh.exact) == 0 {
return
}
hw := m.header(langID(want))
for _, ht := range hh.max {
v := *ht
if conf < v.conf {
v.conf = conf
}
v.nextMax = 0 // this value needs to be recomputed
if v.altScript != 0 {
v.altScript = altScript(langID(want), v.maxScript)
}
hw.addIfNew(v, conf == Exact && len(hh.exact) > 0)
}
}
}
// Add entries for languages with mutual intelligibility as defined by CLDR's
// languageMatch data.
for _, ml := range matchLang {
update(ml.want, ml.have, Confidence(ml.conf), false)
if !ml.oneway {
update(ml.have, ml.want, Confidence(ml.conf), false)
}
}
// Add entries for possible canonicalizations. This is an optimization to
// ensure that only one map lookup needs to be done at runtime per desired tag.
// First we match deprecated equivalents. If they are perfect equivalents
// (their canonicalization simply substitutes a different language code, but
// nothing else), the match confidence is Exact, otherwise it is High.
for i, lm := range langAliasMap {
if lm.from == _sh {
continue
}
// If deprecated codes match and there is no fiddling with the script
// or region, we consider it an exact match.
conf := Exact
if langAliasTypes[i] != langMacro {
if !isExactEquivalent(langID(lm.from)) {
conf = High
}
update(lm.to, lm.from, conf, true)
}
update(lm.from, lm.to, conf, true)
}
return m
}
// getBest gets the best matching tag in m for any of the given tags, taking into
// account the order of preference of the given tags.
func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
best := bestMatch{}
for _, w := range want {
var max Tag
// Check for exact match first.
h := m.index[w.lang]
if w.lang != 0 {
// Base language is defined.
if h == nil {
continue
}
for i := range h.exact {
have := h.exact[i]
if have.tag.equalsRest(w) {
return have, w, Exact
}
}
max, _ = w.canonicalize(Legacy | Deprecated)
max, _ = addTags(max)
} else {
// Base language is not defined.
if h != nil {
for i := range h.exact {
have := h.exact[i]
if have.tag.equalsRest(w) {
return have, w, Exact
}
}
}
if w.script == 0 && w.region == 0 {
// We skip all tags matching und for approximate matching, including
// private tags.
continue
}
max, _ = addTags(w)
if h = m.index[max.lang]; h == nil {
continue
}
}
// Check for match based on maximized tag.
for i := range h.max {
have := h.max[i]
best.update(have, w, max.script, max.region)
if best.conf == Exact {
for have.nextMax != 0 {
have = h.max[have.nextMax]
best.update(have, w, max.script, max.region)
}
return best.have, best.want, High
}
}
}
if best.conf <= No {
if len(want) != 0 {
return nil, want[0], No
}
return nil, Tag{}, No
}
return best.have, best.want, best.conf
}
// bestMatch accumulates the best match so far.
type bestMatch struct {
have *haveTag
want Tag
conf Confidence
// Cached results from applying tie-breaking rules.
origLang bool
origReg bool
regDist uint8
origScript bool
parentDist uint8 // 255 if have is not an ancestor of want tag.
}
// update updates the existing best match if the new pair is considered to be a
// better match.
// To determine if the given pair is a better match, it first computes the rough
// confidence level. If this surpasses the current match, it will replace it and
// update the tie-breaker rule cache. If there is a tie, it proceeds with applying
// a series of tie-breaker rules. If there is no conclusive winner after applying
// the tie-breaker rules, it leaves the current match as the preferred match.
func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion regionID) {
// Bail if the maximum attainable confidence is below that of the current best match.
c := have.conf
if c < m.conf {
return
}
if have.maxScript != maxScript {
// There is usually very little comprehension between different scripts.
// In a few cases there may still be Low comprehension. This possibility is
// pre-computed and stored in have.altScript.
if Low < m.conf || have.altScript != maxScript {
return
}
c = Low
} else if have.maxRegion != maxRegion {
// There is usually a small difference between languages across regions.
// We use the region distance (below) to disambiguate between equal matches.
if High < c {
c = High
}
}
// We store the results of the tie-breaker computations along with the best
// match. Once we have determined a winner, the remaining checks no longer
// decide the outcome, but we still compute the tie-breaker values to cache them.
// We use "beaten" to keep track of whether the remaining checks are still needed to decide.
beaten := false // true if the new pair defeats the current one.
if c != m.conf {
if c < m.conf {
return
}
beaten = true
}
// Tie-breaker rules:
// We prefer if the pre-maximized language was specified and identical.
origLang := have.tag.lang == tag.lang && tag.lang != 0
if !beaten && m.origLang != origLang {
if m.origLang {
return
}
beaten = true
}
// We prefer if the pre-maximized region was specified and identical.
origReg := have.tag.region == tag.region && tag.region != 0
if !beaten && m.origReg != origReg {
if m.origReg {
return
}
beaten = true
}
// Next we prefer smaller distances between regions, as defined by regionDist.
regDist := regionDist(have.maxRegion, maxRegion, tag.lang)
if !beaten && m.regDist != regDist {
if regDist > m.regDist {
return
}
beaten = true
}
// Next we prefer if the pre-maximized script was specified and identical.
origScript := have.tag.script == tag.script && tag.script != 0
if !beaten && m.origScript != origScript {
if m.origScript {
return
}
beaten = true
}
// Finally we prefer tags which have a closer parent relationship.
parentDist := parentDistance(have.tag.region, tag)
if !beaten && m.parentDist != parentDist {
if parentDist > m.parentDist {
return
}
beaten = true
}
// Update m to the newly found best match.
if beaten {
m.have = have
m.want = tag
m.conf = c
m.origLang = origLang
m.origReg = origReg
m.origScript = origScript
m.regDist = regDist
m.parentDist = parentDist
}
}
// parentDistance returns the number of times Parent must be called before the
// regions match. It is assumed that it has already been checked that lang and
// script are identical. If haveRegion does not occur in the ancestor chain of
// tag, it returns 255.
func parentDistance(haveRegion regionID, tag Tag) uint8 {
p := tag.Parent()
d := uint8(1)
for haveRegion != p.region {
if p.region == 0 {
return 255
}
p = p.Parent()
d++
}
return d
}
// regionDist wraps regionDistance with some exceptions to the algorithmic distance.
func regionDist(a, b regionID, lang langID) uint8 {
if lang == _en {
// Two variants of non-US English are close to each other, regardless of distance.
if a != _US && b != _US {
return 2
}
}
return uint8(regionDistance(a, b))
}
// regionDistance computes the distance between two regions based on the
// distance in the graph of region containments as defined in CLDR. It iterates
// over increasingly inclusive sets of groups, represented as bit vectors, until
// the source bit vector has bits in common with the destination vector.
func regionDistance(a, b regionID) int {
if a == b {
return 0
}
p, q := regionInclusion[a], regionInclusion[b]
if p < nRegionGroups {
p, q = q, p
}
set := regionInclusionBits
if q < nRegionGroups && set[p]&(1<<q) != 0 {
return 1
}
d := 2
for goal := set[q]; set[p]&goal == 0; p = regionInclusionNext[p] {
d++
}
return d
}
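// A self-contained toy illustration of the containment-distance idea above,
// using hypothetical containment chains instead of the CLDR-derived tables
// (regionInclusion, regionInclusionBits, regionInclusionNext) used here.
//
//	// toyRegionDistance walks up increasingly inclusive groups of region a
//	// until it reaches a group that also contains region b. A chain lists a
//	// region followed by its groups from most to least specific, e.g.
//	// {"US", "NorthernAmerica", "Americas", "World"}.
//	func toyRegionDistance(chainA, chainB []string) int {
//		if chainA[0] == chainB[0] {
//			return 0
//		}
//		inB := make(map[string]bool, len(chainB))
//		for _, g := range chainB {
//			inB[g] = true
//		}
//		for d, g := range chainA {
//			if inB[g] {
//				return d
//			}
//		}
//		return len(chainA) // no shared group at all
//	}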
func (t Tag) variants() string {
if t.pVariant == 0 {
return ""
}
return t.str[t.pVariant:t.pExt]
}
// variantOrPrivateTagStr returns variants or private use tags.
func (t Tag) variantOrPrivateTagStr() string {
if t.pExt > 0 {
return t.str[t.pVariant:t.pExt]
}
return t.str[t.pVariant:]
}
// equalsRest compares everything except the language.
func (a Tag) equalsRest(b Tag) bool {
// TODO: don't include extensions in this comparison. To do this efficiently,
// though, we should handle private tags separately.
return a.script == b.script && a.region == b.region && a.variantOrPrivateTagStr() == b.variantOrPrivateTagStr()
}
// isExactEquivalent returns true if canonicalizing the language will not alter
// the script or region of a tag.
func isExactEquivalent(l langID) bool {
for _, o := range notEquivalent {
if o == l {
return false
}
}
return true
}
var notEquivalent []langID
func init() {
// Create a list of all languages for which canonicalization may alter the
// script or region.
for _, lm := range langAliasMap {
tag := Tag{lang: langID(lm.from)}
if tag, _ = tag.canonicalize(All); tag.script != 0 || tag.region != 0 {
notEquivalent = append(notEquivalent, langID(lm.from))
}
}
}

859
vendor/golang.org/x/text/language/parse.go generated vendored Normal file

@@ -0,0 +1,859 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package language
import (
"bytes"
"errors"
"fmt"
"sort"
"strconv"
"strings"
"golang.org/x/text/internal/tag"
)
// isAlpha returns true if the byte is not a digit.
// b must be an ASCII letter or digit.
func isAlpha(b byte) bool {
return b > '9'
}
// isAlphaNum returns true if the string contains only ASCII letters or digits.
func isAlphaNum(s []byte) bool {
for _, c := range s {
if !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9') {
return false
}
}
return true
}
// errSyntax is returned by any of the parsing functions when the
// input is not well-formed, according to BCP 47.
// TODO: return the position at which the syntax error occurred?
var errSyntax = errors.New("language: tag is not well-formed")
// ValueError is returned by any of the parsing functions when the
// input is well-formed but the respective subtag is not recognized
// as a valid value.
type ValueError struct {
v [8]byte
}
func mkErrInvalid(s []byte) error {
var e ValueError
copy(e.v[:], s)
return e
}
func (e ValueError) tag() []byte {
n := bytes.IndexByte(e.v[:], 0)
if n == -1 {
n = 8
}
return e.v[:n]
}
// Error implements the error interface.
func (e ValueError) Error() string {
return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag())
}
// Subtag returns the subtag for which the error occurred.
func (e ValueError) Subtag() string {
return string(e.tag())
}
// scanner is used to scan BCP 47 tokens, which are separated by _ or -.
type scanner struct {
b []byte
bytes [max99thPercentileSize]byte
token []byte
start int // start position of the current token
end int // end position of the current token
next int // next point for scan
err error
done bool
}
func makeScannerString(s string) scanner {
scan := scanner{}
if len(s) <= len(scan.bytes) {
scan.b = scan.bytes[:copy(scan.bytes[:], s)]
} else {
scan.b = []byte(s)
}
scan.init()
return scan
}
// makeScanner returns a scanner using b as the input buffer.
// b is not copied and may be modified by the scanner routines.
func makeScanner(b []byte) scanner {
scan := scanner{b: b}
scan.init()
return scan
}
func (s *scanner) init() {
for i, c := range s.b {
if c == '_' {
s.b[i] = '-'
}
}
s.scan()
}
// toLower converts the string between start and end to lower case.
func (s *scanner) toLower(start, end int) {
for i := start; i < end; i++ {
c := s.b[i]
if 'A' <= c && c <= 'Z' {
s.b[i] += 'a' - 'A'
}
}
}
func (s *scanner) setError(e error) {
if s.err == nil || (e == errSyntax && s.err != errSyntax) {
s.err = e
}
}
// resizeRange shrinks or grows the array at position oldStart such that
// a new string of size newSize can fit between oldStart and oldEnd.
// Sets the scan point to after the resized range.
func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) {
s.start = oldStart
if end := oldStart + newSize; end != oldEnd {
diff := end - oldEnd
if end < cap(s.b) {
b := make([]byte, len(s.b)+diff)
copy(b, s.b[:oldStart])
copy(b[end:], s.b[oldEnd:])
s.b = b
} else {
s.b = append(s.b[end:], s.b[oldEnd:]...)
}
s.next = end + (s.next - s.end)
s.end = end
}
}
// replace replaces the current token with repl.
func (s *scanner) replace(repl string) {
s.resizeRange(s.start, s.end, len(repl))
copy(s.b[s.start:], repl)
}
// gobble removes the current token from the input.
// Caller must call scan after calling gobble.
func (s *scanner) gobble(e error) {
s.setError(e)
if s.start == 0 {
s.b = s.b[:+copy(s.b, s.b[s.next:])]
s.end = 0
} else {
s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])]
s.end = s.start - 1
}
s.next = s.start
}
// deleteRange removes the given range from s.b before the current token.
func (s *scanner) deleteRange(start, end int) {
s.setError(errSyntax)
s.b = s.b[:start+copy(s.b[start:], s.b[end:])]
diff := end - start
s.next -= diff
s.start -= diff
s.end -= diff
}
// scan parses the next token of a BCP 47 string. Tokens that are larger
// than 8 characters or include non-alphanumeric characters result in an error
// and are gobbled and removed from the output.
// It returns the end position of the last token consumed.
func (s *scanner) scan() (end int) {
end = s.end
s.token = nil
for s.start = s.next; s.next < len(s.b); {
i := bytes.IndexByte(s.b[s.next:], '-')
if i == -1 {
s.end = len(s.b)
s.next = len(s.b)
i = s.end - s.start
} else {
s.end = s.next + i
s.next = s.end + 1
}
token := s.b[s.start:s.end]
if i < 1 || i > 8 || !isAlphaNum(token) {
s.gobble(errSyntax)
continue
}
s.token = token
return end
}
if n := len(s.b); n > 0 && s.b[n-1] == '-' {
s.setError(errSyntax)
s.b = s.b[:len(s.b)-1]
}
s.done = true
return end
}
// acceptMinSize parses multiple tokens of the given size or greater.
// It returns the end position of the last token consumed.
func (s *scanner) acceptMinSize(min int) (end int) {
end = s.end
s.scan()
for ; len(s.token) >= min; s.scan() {
end = s.end
}
return end
}
// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
// failed it returns an error and any part of the tag that could be parsed.
// If parsing succeeded but an unknown value was found, it returns
// ValueError. The Tag returned in this case is just stripped of the unknown
// value. All other values are preserved. It accepts tags in the BCP 47 format
// and extensions to this standard defined in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// The resulting tag is canonicalized using the default canonicalization type.
func Parse(s string) (t Tag, err error) {
return Default.Parse(s)
}
// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
// failed it returns an error and any part of the tag that could be parsed.
// If parsing succeeded but an unknown value was found, it returns
// ValueError. The Tag returned in this case is just stripped of the unknown
// value. All other values are preserved. It accepts tags in the BCP 47 format
// and extensions to this standard defined in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// The resulting tag is canonicalized using the canonicalization type c.
func (c CanonType) Parse(s string) (t Tag, err error) {
// TODO: consider supporting old-style locale key-value pairs.
if s == "" {
return und, errSyntax
}
if len(s) <= maxAltTaglen {
b := [maxAltTaglen]byte{}
for i, c := range s {
// Generating invalid UTF-8 is okay as it won't match.
if 'A' <= c && c <= 'Z' {
c += 'a' - 'A'
} else if c == '_' {
c = '-'
}
b[i] = byte(c)
}
if t, ok := grandfathered(b); ok {
return t, nil
}
}
scan := makeScannerString(s)
t, err = parse(&scan, s)
t, changed := t.canonicalize(c)
if changed {
t.remakeString()
}
return t, err
}
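// A short usage sketch of Parse, assuming the package is imported as
// golang.org/x/text/language:
//
//	t, err := language.Parse("en_US") // '_' is accepted as a separator
//	fmt.Println(t, err)               // en-US <nil>
//
//	_, err = language.Parse("")
//	fmt.Println(err) // language: tag is not well-formed
//
//	// (c CanonType).Parse works the same way but applies the given
//	// canonicalization, e.g. language.All.Parse("en_US").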
func parse(scan *scanner, s string) (t Tag, err error) {
t = und
var end int
if n := len(scan.token); n <= 1 {
scan.toLower(0, len(scan.b))
if n == 0 || scan.token[0] != 'x' {
return t, errSyntax
}
end = parseExtensions(scan)
} else if n >= 4 {
return und, errSyntax
} else { // the usual case
t, end = parseTag(scan)
if n := len(scan.token); n == 1 {
t.pExt = uint16(end)
end = parseExtensions(scan)
} else if end < len(scan.b) {
scan.setError(errSyntax)
scan.b = scan.b[:end]
}
}
if int(t.pVariant) < len(scan.b) {
if end < len(s) {
s = s[:end]
}
if len(s) > 0 && tag.Compare(s, scan.b) == 0 {
t.str = s
} else {
t.str = string(scan.b)
}
} else {
t.pVariant, t.pExt = 0, 0
}
return t, scan.err
}
// parseTag parses language, script, region and variants.
// It returns a Tag and the end position in the input that was parsed.
func parseTag(scan *scanner) (t Tag, end int) {
var e error
// TODO: set an error if an unknown lang, script or region is encountered.
t.lang, e = getLangID(scan.token)
scan.setError(e)
scan.replace(t.lang.String())
langStart := scan.start
end = scan.scan()
for len(scan.token) == 3 && isAlpha(scan.token[0]) {
// From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent
// to a tag of the form <extlang>.
lang, e := getLangID(scan.token)
if lang != 0 {
t.lang = lang
copy(scan.b[langStart:], lang.String())
scan.b[langStart+3] = '-'
scan.start = langStart + 4
}
scan.gobble(e)
end = scan.scan()
}
if len(scan.token) == 4 && isAlpha(scan.token[0]) {
t.script, e = getScriptID(script, scan.token)
if t.script == 0 {
scan.gobble(e)
}
end = scan.scan()
}
if n := len(scan.token); n >= 2 && n <= 3 {
t.region, e = getRegionID(scan.token)
if t.region == 0 {
scan.gobble(e)
} else {
scan.replace(t.region.String())
}
end = scan.scan()
}
scan.toLower(scan.start, len(scan.b))
t.pVariant = byte(end)
end = parseVariants(scan, end, t)
t.pExt = uint16(end)
return t, end
}
var separator = []byte{'-'}
// parseVariants scans tokens as long as each token is a valid variant string.
// Duplicate variants are removed.
func parseVariants(scan *scanner, end int, t Tag) int {
start := scan.start
varIDBuf := [4]uint8{}
variantBuf := [4][]byte{}
varID := varIDBuf[:0]
variant := variantBuf[:0]
last := -1
needSort := false
for ; len(scan.token) >= 4; scan.scan() {
// TODO: measure the impact of needing this conversion and redesign
// the data structure if there is an issue.
v, ok := variantIndex[string(scan.token)]
if !ok {
// unknown variant
// TODO: allow user-defined variants?
scan.gobble(mkErrInvalid(scan.token))
continue
}
varID = append(varID, v)
variant = append(variant, scan.token)
if !needSort {
if last < int(v) {
last = int(v)
} else {
needSort = true
// There are no legal combinations of more than 7 variants
// (and this is by no means a useful sequence).
const maxVariants = 8
if len(varID) > maxVariants {
break
}
}
}
end = scan.end
}
if needSort {
sort.Sort(variantsSort{varID, variant})
k, l := 0, -1
for i, v := range varID {
w := int(v)
if l == w {
// Remove duplicates.
continue
}
varID[k] = varID[i]
variant[k] = variant[i]
k++
l = w
}
if str := bytes.Join(variant[:k], separator); len(str) == 0 {
end = start - 1
} else {
scan.resizeRange(start, end, len(str))
copy(scan.b[scan.start:], str)
end = scan.end
}
}
return end
}
type variantsSort struct {
i []uint8
v [][]byte
}
func (s variantsSort) Len() int {
return len(s.i)
}
func (s variantsSort) Swap(i, j int) {
s.i[i], s.i[j] = s.i[j], s.i[i]
s.v[i], s.v[j] = s.v[j], s.v[i]
}
func (s variantsSort) Less(i, j int) bool {
return s.i[i] < s.i[j]
}
type bytesSort [][]byte
func (b bytesSort) Len() int {
return len(b)
}
func (b bytesSort) Swap(i, j int) {
b[i], b[j] = b[j], b[i]
}
func (b bytesSort) Less(i, j int) bool {
return bytes.Compare(b[i], b[j]) == -1
}
// parseExtensions parses and normalizes the extensions in the buffer.
// It returns the last position of scan.b that is part of any extension.
// It also trims scan.b to remove excess parts accordingly.
func parseExtensions(scan *scanner) int {
start := scan.start
exts := [][]byte{}
private := []byte{}
end := scan.end
for len(scan.token) == 1 {
extStart := scan.start
ext := scan.token[0]
end = parseExtension(scan)
extension := scan.b[extStart:end]
if len(extension) < 3 || (ext != 'x' && len(extension) < 4) {
scan.setError(errSyntax)
end = extStart
continue
} else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) {
scan.b = scan.b[:end]
return end
} else if ext == 'x' {
private = extension
break
}
exts = append(exts, extension)
}
sort.Sort(bytesSort(exts))
if len(private) > 0 {
exts = append(exts, private)
}
scan.b = scan.b[:start]
if len(exts) > 0 {
scan.b = append(scan.b, bytes.Join(exts, separator)...)
} else if start > 0 {
// Strip trailing '-'.
scan.b = scan.b[:start-1]
}
return end
}
// parseExtension parses a single extension and returns the position of
// the extension end.
func parseExtension(scan *scanner) int {
start, end := scan.start, scan.end
switch scan.token[0] {
case 'u':
attrStart := end
scan.scan()
for last := []byte{}; len(scan.token) > 2; scan.scan() {
if bytes.Compare(scan.token, last) != -1 {
// Attributes are unsorted. Start over from scratch.
p := attrStart + 1
scan.next = p
attrs := [][]byte{}
for scan.scan(); len(scan.token) > 2; scan.scan() {
attrs = append(attrs, scan.token)
end = scan.end
}
sort.Sort(bytesSort(attrs))
copy(scan.b[p:], bytes.Join(attrs, separator))
break
}
last = scan.token
end = scan.end
}
var last, key []byte
for attrEnd := end; len(scan.token) == 2; last = key {
key = scan.token
keyEnd := scan.end
end = scan.acceptMinSize(3)
// TODO: check key value validity
if keyEnd == end || bytes.Compare(key, last) != 1 {
// We have an invalid key or the keys are not sorted.
// Start scanning keys from scratch and reorder.
p := attrEnd + 1
scan.next = p
keys := [][]byte{}
for scan.scan(); len(scan.token) == 2; {
keyStart, keyEnd := scan.start, scan.end
end = scan.acceptMinSize(3)
if keyEnd != end {
keys = append(keys, scan.b[keyStart:end])
} else {
scan.setError(errSyntax)
end = keyStart
}
}
sort.Sort(bytesSort(keys))
reordered := bytes.Join(keys, separator)
if e := p + len(reordered); e < end {
scan.deleteRange(e, end)
end = e
}
copy(scan.b[p:], bytes.Join(keys, separator))
break
}
}
case 't':
scan.scan()
if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) {
_, end = parseTag(scan)
scan.toLower(start, end)
}
for len(scan.token) == 2 && !isAlpha(scan.token[1]) {
end = scan.acceptMinSize(3)
}
case 'x':
end = scan.acceptMinSize(1)
default:
end = scan.acceptMinSize(2)
}
return end
}
// Compose creates a Tag from individual parts, which may be of type Tag, Base,
// Script, Region, Variant, []Variant, Extension, []Extension or error. If a
// Base, Script or Region or slice of type Variant or Extension is passed more
// than once, the latter will overwrite the former. Variants and Extensions are
// accumulated, but if two extensions of the same type are passed, the latter
// will replace the former. A Tag overwrites all former values and typically
// only makes sense as the first argument. The resulting tag is returned after
// canonicalizing using the Default CanonType. If one or more errors are
// encountered, one of the errors is returned.
func Compose(part ...interface{}) (t Tag, err error) {
return Default.Compose(part...)
}
// Compose creates a Tag from individual parts, which may be of type Tag, Base,
// Script, Region, Variant, []Variant, Extension, []Extension or error. If a
// Base, Script or Region or slice of type Variant or Extension is passed more
// than once, the latter will overwrite the former. Variants and Extensions are
// accumulated, but if two extensions of the same type are passed, the latter
// will replace the former. A Tag overwrites all former values and typically
// only makes sense as the first argument. The resulting tag is returned after
// canonicalizing using CanonType c. If one or more errors are encountered,
// one of the errors is returned.
func (c CanonType) Compose(part ...interface{}) (t Tag, err error) {
var b builder
if err = b.update(part...); err != nil {
return und, err
}
t, _ = b.tag.canonicalize(c)
if len(b.ext) > 0 || len(b.variant) > 0 {
sort.Sort(sortVariant(b.variant))
sort.Strings(b.ext)
if b.private != "" {
b.ext = append(b.ext, b.private)
}
n := maxCoreSize + tokenLen(b.variant...) + tokenLen(b.ext...)
buf := make([]byte, n)
p := t.genCoreBytes(buf)
t.pVariant = byte(p)
p += appendTokens(buf[p:], b.variant...)
t.pExt = uint16(p)
p += appendTokens(buf[p:], b.ext...)
t.str = string(buf[:p])
} else if b.private != "" {
t.str = b.private
t.remakeString()
}
return
}
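// A brief sketch of composing a Tag from parts, assuming the package is
// imported as golang.org/x/text/language:
//
//	t, err := language.Compose(language.MustParseBase("en"), language.MustParseRegion("GB"))
//	fmt.Println(t, err) // en-GB <nil>
//
//	// A part of the same kind passed later overwrites the earlier one.
//	t, _ = language.Compose(t, language.MustParseRegion("US"))
//	fmt.Println(t) // en-US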
type builder struct {
tag Tag
private string // the x extension
ext []string
variant []string
err error
}
func (b *builder) addExt(e string) {
if e == "" {
} else if e[0] == 'x' {
b.private = e
} else {
b.ext = append(b.ext, e)
}
}
var errInvalidArgument = errors.New("invalid Extension or Variant")
func (b *builder) update(part ...interface{}) (err error) {
replace := func(l *[]string, s string, eq func(a, b string) bool) bool {
if s == "" {
b.err = errInvalidArgument
return true
}
for i, v := range *l {
if eq(v, s) {
(*l)[i] = s
return true
}
}
return false
}
for _, x := range part {
switch v := x.(type) {
case Tag:
b.tag.lang = v.lang
b.tag.region = v.region
b.tag.script = v.script
if v.str != "" {
b.variant = nil
for x, s := "", v.str[v.pVariant:v.pExt]; s != ""; {
x, s = nextToken(s)
b.variant = append(b.variant, x)
}
b.ext, b.private = nil, ""
for i, e := int(v.pExt), ""; i < len(v.str); {
i, e = getExtension(v.str, i)
b.addExt(e)
}
}
case Base:
b.tag.lang = v.langID
case Script:
b.tag.script = v.scriptID
case Region:
b.tag.region = v.regionID
case Variant:
if !replace(&b.variant, v.variant, func(a, b string) bool { return a == b }) {
b.variant = append(b.variant, v.variant)
}
case Extension:
if !replace(&b.ext, v.s, func(a, b string) bool { return a[0] == b[0] }) {
b.addExt(v.s)
}
case []Variant:
b.variant = nil
for _, x := range v {
b.update(x)
}
case []Extension:
b.ext, b.private = nil, ""
for _, e := range v {
b.update(e)
}
// TODO: support parsing of raw strings based on morphology or just extensions?
case error:
err = v
}
}
return
}
func tokenLen(token ...string) (n int) {
for _, t := range token {
n += len(t) + 1
}
return
}
func appendTokens(b []byte, token ...string) int {
p := 0
for _, t := range token {
b[p] = '-'
copy(b[p+1:], t)
p += 1 + len(t)
}
return p
}
type sortVariant []string
func (s sortVariant) Len() int {
return len(s)
}
func (s sortVariant) Swap(i, j int) {
s[j], s[i] = s[i], s[j]
}
func (s sortVariant) Less(i, j int) bool {
return variantIndex[s[i]] < variantIndex[s[j]]
}
func findExt(list []string, x byte) int {
for i, e := range list {
if e[0] == x {
return i
}
}
return -1
}
// getExtension returns the name, body and end position of the extension.
func getExtension(s string, p int) (end int, ext string) {
if s[p] == '-' {
p++
}
if s[p] == 'x' {
return len(s), s[p:]
}
end = nextExtension(s, p)
return end, s[p:end]
}
// nextExtension finds the next extension within the string, searching
// for the -<char>- pattern from position p.
// In the vast majority of cases, language tags will have at most
// one extension and extensions tend to be small.
func nextExtension(s string, p int) int {
for n := len(s) - 3; p < n; {
if s[p] == '-' {
if s[p+2] == '-' {
return p
}
p += 3
} else {
p++
}
}
return len(s)
}
var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight")
// ParseAcceptLanguage parses the contents of an Accept-Language header as
// defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and
// a list of corresponding quality weights. It is more permissive than RFC 2616
// and may return non-nil slices even if the input is not valid.
// The Tags will be sorted by highest weight first and then by first occurrence.
// Tags with a weight of zero will be dropped. An error will be returned if the
// input could not be parsed.
func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) {
var entry string
for s != "" {
if entry, s = split(s, ','); entry == "" {
continue
}
entry, weight := split(entry, ';')
// Scan the language.
t, err := Parse(entry)
if err != nil {
id, ok := acceptFallback[entry]
if !ok {
return nil, nil, err
}
t = Tag{lang: id}
}
// Scan the optional weight.
w := 1.0
if weight != "" {
weight = consume(weight, 'q')
weight = consume(weight, '=')
// consume returns the empty string when a token could not be
// consumed, resulting in an error for ParseFloat.
if w, err = strconv.ParseFloat(weight, 32); err != nil {
return nil, nil, errInvalidWeight
}
// Drop tags with a quality weight of 0.
if w <= 0 {
continue
}
}
tag = append(tag, t)
q = append(q, float32(w))
}
sortStable(&tagSort{tag, q})
return tag, q, nil
}
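// A short sketch of ParseAcceptLanguage feeding a Matcher, assuming the
// package is imported as golang.org/x/text/language:
//
//	tags, weights, err := language.ParseAcceptLanguage("da, en-GB;q=0.8, en;q=0.7")
//	fmt.Println(tags, weights, err) // [da en-GB en] [1 0.8 0.7] <nil>
//
//	// The returned tags are ordered by preference and can be passed
//	// directly to a Matcher.
//	m := language.NewMatcher([]language.Tag{language.English, language.Danish})
//	tag, _, _ := m.Match(tags...)
//	fmt.Println(tag) // da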
// consume removes a leading token c from s and returns the result or the empty
// string if there is no such token.
func consume(s string, c byte) string {
if s == "" || s[0] != c {
return ""
}
return strings.TrimSpace(s[1:])
}
func split(s string, c byte) (head, tail string) {
if i := strings.IndexByte(s, c); i >= 0 {
return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+1:])
}
return strings.TrimSpace(s), ""
}
// Add hack mapping to deal with a small number of cases that occur
// in Accept-Language (with reasonable frequency).
var acceptFallback = map[string]langID{
"english": _en,
"deutsch": _de,
"italian": _it,
"french": _fr,
"*": _mul, // defined in the spec to match all languages.
}
type tagSort struct {
tag []Tag
q []float32
}
func (s *tagSort) Len() int {
return len(s.q)
}
func (s *tagSort) Less(i, j int) bool {
return s.q[i] > s.q[j]
}
func (s *tagSort) Swap(i, j int) {
s.tag[i], s.tag[j] = s.tag[j], s.tag[i]
s.q[i], s.q[j] = s.q[j], s.q[i]
}

3547
vendor/golang.org/x/text/language/tables.go generated vendored Normal file

File diff suppressed because it is too large

143
vendor/golang.org/x/text/language/tags.go generated vendored Normal file

@@ -0,0 +1,143 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package language
// TODO: Various sets of commonly used tags and regions.
// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
// It simplifies safe initialization of Tag values.
func MustParse(s string) Tag {
t, err := Parse(s)
if err != nil {
panic(err)
}
return t
}
// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
// It simplifies safe initialization of Tag values.
func (c CanonType) MustParse(s string) Tag {
t, err := c.Parse(s)
if err != nil {
panic(err)
}
return t
}
// MustParseBase is like ParseBase, but panics if the given base cannot be parsed.
// It simplifies safe initialization of Base values.
func MustParseBase(s string) Base {
b, err := ParseBase(s)
if err != nil {
panic(err)
}
return b
}
// MustParseScript is like ParseScript, but panics if the given script cannot be
// parsed. It simplifies safe initialization of Script values.
func MustParseScript(s string) Script {
scr, err := ParseScript(s)
if err != nil {
panic(err)
}
return scr
}
// MustParseRegion is like ParseRegion, but panics if the given region cannot be
// parsed. It simplifies safe initialization of Region values.
func MustParseRegion(s string) Region {
r, err := ParseRegion(s)
if err != nil {
panic(err)
}
return r
}
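// The Must* helpers are convenient for package-level initialization with
// literals known to be valid; unlike Parse, a malformed value panics.
// Assuming the package is imported as golang.org/x/text/language:
//
//	var (
//		defaultTag    = language.MustParse("en-GB")
//		germanyRegion = language.MustParseRegion("DE")
//	)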
var (
und = Tag{}
Und Tag = Tag{}
Afrikaans Tag = Tag{lang: _af} // af
Amharic Tag = Tag{lang: _am} // am
Arabic Tag = Tag{lang: _ar} // ar
ModernStandardArabic Tag = Tag{lang: _ar, region: _001} // ar-001
Azerbaijani Tag = Tag{lang: _az} // az
Bulgarian Tag = Tag{lang: _bg} // bg
Bengali Tag = Tag{lang: _bn} // bn
Catalan Tag = Tag{lang: _ca} // ca
Czech Tag = Tag{lang: _cs} // cs
Danish Tag = Tag{lang: _da} // da
German Tag = Tag{lang: _de} // de
Greek Tag = Tag{lang: _el} // el
English Tag = Tag{lang: _en} // en
AmericanEnglish Tag = Tag{lang: _en, region: _US} // en-US
BritishEnglish Tag = Tag{lang: _en, region: _GB} // en-GB
Spanish Tag = Tag{lang: _es} // es
EuropeanSpanish Tag = Tag{lang: _es, region: _ES} // es-ES
LatinAmericanSpanish Tag = Tag{lang: _es, region: _419} // es-419
Estonian Tag = Tag{lang: _et} // et
Persian Tag = Tag{lang: _fa} // fa
Finnish Tag = Tag{lang: _fi} // fi
Filipino Tag = Tag{lang: _fil} // fil
French Tag = Tag{lang: _fr} // fr
CanadianFrench Tag = Tag{lang: _fr, region: _CA} // fr-CA
Gujarati Tag = Tag{lang: _gu} // gu
Hebrew Tag = Tag{lang: _he} // he
Hindi Tag = Tag{lang: _hi} // hi
Croatian Tag = Tag{lang: _hr} // hr
Hungarian Tag = Tag{lang: _hu} // hu
Armenian Tag = Tag{lang: _hy} // hy
Indonesian Tag = Tag{lang: _id} // id
Icelandic Tag = Tag{lang: _is} // is
Italian Tag = Tag{lang: _it} // it
Japanese Tag = Tag{lang: _ja} // ja
Georgian Tag = Tag{lang: _ka} // ka
Kazakh Tag = Tag{lang: _kk} // kk
Khmer Tag = Tag{lang: _km} // km
Kannada Tag = Tag{lang: _kn} // kn
Korean Tag = Tag{lang: _ko} // ko
Kirghiz Tag = Tag{lang: _ky} // ky
Lao Tag = Tag{lang: _lo} // lo
Lithuanian Tag = Tag{lang: _lt} // lt
Latvian Tag = Tag{lang: _lv} // lv
Macedonian Tag = Tag{lang: _mk} // mk
Malayalam Tag = Tag{lang: _ml} // ml
Mongolian Tag = Tag{lang: _mn} // mn
Marathi Tag = Tag{lang: _mr} // mr
Malay Tag = Tag{lang: _ms} // ms
Burmese Tag = Tag{lang: _my} // my
Nepali Tag = Tag{lang: _ne} // ne
Dutch Tag = Tag{lang: _nl} // nl
Norwegian Tag = Tag{lang: _no} // no
Punjabi Tag = Tag{lang: _pa} // pa
Polish Tag = Tag{lang: _pl} // pl
Portuguese Tag = Tag{lang: _pt} // pt
BrazilianPortuguese Tag = Tag{lang: _pt, region: _BR} // pt-BR
EuropeanPortuguese Tag = Tag{lang: _pt, region: _PT} // pt-PT
Romanian Tag = Tag{lang: _ro} // ro
Russian Tag = Tag{lang: _ru} // ru
Sinhala Tag = Tag{lang: _si} // si
Slovak Tag = Tag{lang: _sk} // sk
Slovenian Tag = Tag{lang: _sl} // sl
Albanian Tag = Tag{lang: _sq} // sq
Serbian Tag = Tag{lang: _sr} // sr
SerbianLatin Tag = Tag{lang: _sr, script: _Latn} // sr-Latn
Swedish Tag = Tag{lang: _sv} // sv
Swahili Tag = Tag{lang: _sw} // sw
Tamil Tag = Tag{lang: _ta} // ta
Telugu Tag = Tag{lang: _te} // te
Thai Tag = Tag{lang: _th} // th
Turkish Tag = Tag{lang: _tr} // tr
Ukrainian Tag = Tag{lang: _uk} // uk
Urdu Tag = Tag{lang: _ur} // ur
Uzbek Tag = Tag{lang: _uz} // uz
Vietnamese Tag = Tag{lang: _vi} // vi
Chinese Tag = Tag{lang: _zh} // zh
SimplifiedChinese Tag = Tag{lang: _zh, script: _Hans} // zh-Hans
TraditionalChinese Tag = Tag{lang: _zh, script: _Hant} // zh-Hant
Zulu Tag = Tag{lang: _zu} // zu
)
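// The predefined tags above are ordinary Tag values. For simple
// language-region tags like these, they compare equal to the result of
// parsing the code in the trailing comment (assuming the package is
// imported as golang.org/x/text/language):
//
//	fmt.Println(language.AmericanEnglish == language.MustParse("en-US"))     // true
//	fmt.Println(language.BrazilianPortuguese == language.MustParse("pt-BR")) // true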

187
vendor/golang.org/x/text/runes/cond.go generated vendored Normal file

@@ -0,0 +1,187 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runes
import (
"unicode/utf8"
"golang.org/x/text/transform"
)
// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is.
// This is done for various reasons:
// - To retain the semantics of the Nop transformer: if input is passed to a Nop
// one would expect it to be unchanged.
// - It would be very expensive to pass a converted RuneError to a transformer:
// a transformer might need more source bytes after RuneError, meaning that
// the only way to pass it safely is to create a new buffer and manage the
// intermingling of RuneErrors and normal input.
// - Many transformers leave ill-formed UTF-8 as is, so this is not
// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a
// logical consequence of the operation (as for Map) or if it otherwise would
// pose security concerns (as for Remove).
// - An alternative would be to return an error on ill-formed UTF-8, but this
// would be inconsistent with other operations.
// If returns a transformer that applies tIn to consecutive runes for which
// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset
// is called on tIn and tNotIn at the start of each run. A nil value passed to
// tIn or tNotIn is substituted with a Nop transformer. Invalid UTF-8 is translated
// to RuneError to determine which transformer to apply, but is passed as is to
// the respective transformer.
func If(s Set, tIn, tNotIn transform.Transformer) Transformer {
if tIn == nil && tNotIn == nil {
return Transformer{transform.Nop}
}
if tIn == nil {
tIn = transform.Nop
}
if tNotIn == nil {
tNotIn = transform.Nop
}
sIn, ok := tIn.(transform.SpanningTransformer)
if !ok {
sIn = dummySpan{tIn}
}
sNotIn, ok := tNotIn.(transform.SpanningTransformer)
if !ok {
sNotIn = dummySpan{tNotIn}
}
a := &cond{
tIn: sIn,
tNotIn: sNotIn,
f: s.Contains,
}
a.Reset()
return Transformer{a}
}
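// A short usage sketch of If. It assumes runes.In and runes.Map from this
// package and transform.String from golang.org/x/text/transform, none of
// which appear in this diff hunk:
//
//	package main
//
//	import (
//		"fmt"
//		"unicode"
//
//		"golang.org/x/text/runes"
//		"golang.org/x/text/transform"
//	)
//
//	func main() {
//		// Uppercase only Latin runes; passing nil for tNotIn leaves the
//		// remaining runes untouched (nil is substituted with a Nop).
//		t := runes.If(runes.In(unicode.Latin), runes.Map(unicode.ToUpper), nil)
//		s, _, err := transform.String(t, "abc абв 123")
//		fmt.Println(s, err) // ABC абв 123 <nil>
//	}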
type dummySpan struct{ transform.Transformer }
func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) {
return 0, transform.ErrEndOfSpan
}
type cond struct {
tIn, tNotIn transform.SpanningTransformer
f func(rune) bool
check func(rune) bool // current check to perform
t transform.SpanningTransformer // current transformer to use
}
// Reset implements transform.Transformer.
func (t *cond) Reset() {
t.check = t.is
t.t = t.tIn
t.t.Reset() // notIn will be reset on first usage.
}
func (t *cond) is(r rune) bool {
if t.f(r) {
return true
}
t.check = t.isNot
t.t = t.tNotIn
t.tNotIn.Reset()
return false
}
func (t *cond) isNot(r rune) bool {
if !t.f(r) {
return true
}
t.check = t.is
t.t = t.tIn
t.tIn.Reset()
return false
}
// This implementation of Span doesn't help all that much, but it needs to be
// there to satisfy this package's Transformer interface.
// TODO: there is certainly room for improvement, though. For example, if
// t.t == transform.Nop (which will be a common occurrence) it will save a bundle
// to special-case that loop.
func (t *cond) Span(src []byte, atEOF bool) (n int, err error) {
p := 0
for n < len(src) && err == nil {
// Don't process too much at a time as the Spanner that will be
// called on this block may terminate early.
const maxChunk = 4096
max := len(src)
if v := n + maxChunk; v < max {
max = v
}
atEnd := false
size := 0
current := t.t
for ; p < max; p += size {
r := rune(src[p])
if r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
if !atEOF && !utf8.FullRune(src[p:]) {
err = transform.ErrShortSrc
break
}
}
if !t.check(r) {
// The next rune will be the start of a new run.
atEnd = true
break
}
}
n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src)))
n += n2
if err2 != nil {
return n, err2
}
// At this point either err != nil or t.check will pass for the rune at p.
p = n + size
}
return n, err
}
func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
p := 0
for nSrc < len(src) && err == nil {
// Don't process too much at a time, as the work might be wasted if the
// destination buffer isn't large enough to hold the result or a
// transform returns an error early.
const maxChunk = 4096
max := len(src)
if n := nSrc + maxChunk; n < len(src) {
max = n
}
atEnd := false
size := 0
current := t.t
for ; p < max; p += size {
r := rune(src[p])
if r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
if !atEOF && !utf8.FullRune(src[p:]) {
err = transform.ErrShortSrc
break
}
}
if !t.check(r) {
// The next rune will be the start of a new run.
atEnd = true
break
}
}
nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src)))
nDst += nDst2
nSrc += nSrc2
if err2 != nil {
return nDst, nSrc, err2
}
// At this point either err != nil or t.check will pass for the rune at p.
p = nSrc + size
}
return nDst, nSrc, err
}

Some files were not shown because too many files have changed in this diff