
Use Go1.11 module (#5743)

* Migrate to go modules

* make vendor

* Update mvdan.cc/xurls

* make vendor

* Update code.gitea.io/git

* make fmt-check

* Update github.com/go-sql-driver/mysql

* make vendor
Mura Li 2019-03-27 19:15:23 +08:00 committed by Lunny Xiao
parent d578b71d61
commit d77176912b
575 changed files with 63239 additions and 13963 deletions


@ -14,7 +14,6 @@
package acme
import (
"bytes"
"context"
"crypto"
"crypto/ecdsa"
@ -23,6 +22,8 @@ import (
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/base64"
"encoding/hex"
"encoding/json"
@ -33,14 +34,26 @@ import (
"io/ioutil"
"math/big"
"net/http"
"strconv"
"strings"
"sync"
"time"
)
// LetsEncryptURL is the Directory endpoint of Let's Encrypt CA.
const LetsEncryptURL = "https://acme-v01.api.letsencrypt.org/directory"
const (
// LetsEncryptURL is the Directory endpoint of Let's Encrypt CA.
LetsEncryptURL = "https://acme-v01.api.letsencrypt.org/directory"
// ALPNProto is the ALPN protocol name used by a CA server when validating
// tls-alpn-01 challenges.
//
// Package users must ensure their servers can negotiate the ACME ALPN in
// order for tls-alpn-01 challenge verifications to succeed.
// See the crypto/tls package's Config.NextProtos field.
ALPNProto = "acme-tls/1"
)
// idPeACMEIdentifierV1 is the OID for the ACME extension for the TLS-ALPN challenge.
var idPeACMEIdentifierV1 = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 30, 1}
const (
maxChainLen = 5 // max depth and breadth of a certificate chain
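
For reference, the ALPNProto comment above means a server terminating TLS itself has to advertise "acme-tls/1" for tls-alpn-01 validation to work. A minimal sketch (autocert.Manager, changed further down in this diff, builds exactly this configuration via its new TLSConfig helper):

package main

import (
    "crypto/tls"
    "log"
    "net/http"

    "golang.org/x/crypto/acme"
)

func main() {
    cfg := &tls.Config{
        // Advertise the ACME ALPN protocol next to the usual ones so the
        // CA can negotiate "acme-tls/1" during tls-alpn-01 validation.
        NextProtos: []string{"h2", "http/1.1", acme.ALPNProto},
    }
    srv := &http.Server{Addr: ":443", TLSConfig: cfg}
    // cert.pem and key.pem are placeholder file names; the challenge
    // certificate itself would still be served by a GetCertificate callback
    // such as autocert.Manager.GetCertificate (see the autocert changes below).
    log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
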
@ -76,6 +89,22 @@ type Client struct {
// will have no effect.
DirectoryURL string
// RetryBackoff computes the duration after which the nth retry of a failed request
// should occur. The value of n for the first call on failure is 1.
// The values of r and resp are the request and response of the last failed attempt.
// If the returned value is negative or zero, no more retries are done and an error
// is returned to the caller of the original method.
//
// Requests which result in a 4xx client error are not retried,
// except for 400 Bad Request due to "bad nonce" errors and 429 Too Many Requests.
//
// If RetryBackoff is nil, a truncated exponential backoff algorithm
// with the ceiling of 10 seconds is used, where each subsequent retry n
// is done after either ("Retry-After" + jitter) or (2^n seconds + jitter),
// preferring the former if "Retry-After" header is found in the resp.
// The jitter is a random value up to 1 second.
RetryBackoff func(n int, r *http.Request, resp *http.Response) time.Duration
dirMu sync.Mutex // guards writes to dir
dir *Directory // cached result of Client's Discover method
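
The RetryBackoff hook added here replaces the sleeper/backoff helpers removed later in this file. As a signature illustration only, a client that retries at a fixed two-second interval and gives up after five attempts could look like this (when the field is nil, the default truncated exponential backoff described above is used):

package main

import (
    "fmt"
    "net/http"
    "time"

    "golang.org/x/crypto/acme"
)

func main() {
    client := &acme.Client{
        DirectoryURL: acme.LetsEncryptURL,
        // Retry failed requests every two seconds, at most five times.
        // Returning a value <= 0 stops retrying, as documented above.
        RetryBackoff: func(n int, r *http.Request, resp *http.Response) time.Duration {
            if n > 5 {
                return 0
            }
            return 2 * time.Second
        },
    }
    fmt.Println(client.DirectoryURL)
}
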
@ -99,15 +128,12 @@ func (c *Client) Discover(ctx context.Context) (Directory, error) {
if dirURL == "" {
dirURL = LetsEncryptURL
}
res, err := c.get(ctx, dirURL)
res, err := c.get(ctx, dirURL, wantStatus(http.StatusOK))
if err != nil {
return Directory{}, err
}
defer res.Body.Close()
c.addNonce(res.Header)
if res.StatusCode != http.StatusOK {
return Directory{}, responseError(res)
}
var v struct {
Reg string `json:"new-reg"`
@ -166,14 +192,11 @@ func (c *Client) CreateCert(ctx context.Context, csr []byte, exp time.Duration,
req.NotAfter = now.Add(exp).Format(time.RFC3339)
}
res, err := c.retryPostJWS(ctx, c.Key, c.dir.CertURL, req)
res, err := c.post(ctx, c.Key, c.dir.CertURL, req, wantStatus(http.StatusCreated))
if err != nil {
return nil, "", err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
return nil, "", responseError(res)
}
curl := res.Header.Get("Location") // cert permanent URL
if res.ContentLength == 0 {
@ -196,26 +219,11 @@ func (c *Client) CreateCert(ctx context.Context, csr []byte, exp time.Duration,
// Callers are encouraged to parse the returned value to ensure the certificate is valid
// and has expected features.
func (c *Client) FetchCert(ctx context.Context, url string, bundle bool) ([][]byte, error) {
for {
res, err := c.get(ctx, url)
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode == http.StatusOK {
return c.responseCert(ctx, res, bundle)
}
if res.StatusCode > 299 {
return nil, responseError(res)
}
d := retryAfter(res.Header.Get("Retry-After"), 3*time.Second)
select {
case <-time.After(d):
// retry
case <-ctx.Done():
return nil, ctx.Err()
}
res, err := c.get(ctx, url, wantStatus(http.StatusOK))
if err != nil {
return nil, err
}
return c.responseCert(ctx, res, bundle)
}
// RevokeCert revokes a previously issued certificate cert, provided in DER format.
@ -241,14 +249,11 @@ func (c *Client) RevokeCert(ctx context.Context, key crypto.Signer, cert []byte,
if key == nil {
key = c.Key
}
res, err := c.retryPostJWS(ctx, key, c.dir.RevokeURL, body)
res, err := c.post(ctx, key, c.dir.RevokeURL, body, wantStatus(http.StatusOK))
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return responseError(res)
}
return nil
}
@ -329,14 +334,11 @@ func (c *Client) Authorize(ctx context.Context, domain string) (*Authorization,
Resource: "new-authz",
Identifier: authzID{Type: "dns", Value: domain},
}
res, err := c.retryPostJWS(ctx, c.Key, c.dir.AuthzURL, req)
res, err := c.post(ctx, c.Key, c.dir.AuthzURL, req, wantStatus(http.StatusCreated))
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
return nil, responseError(res)
}
var v wireAuthz
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
@ -353,14 +355,11 @@ func (c *Client) Authorize(ctx context.Context, domain string) (*Authorization,
// If a caller needs to poll an authorization until its status is final,
// see the WaitAuthorization method.
func (c *Client) GetAuthorization(ctx context.Context, url string) (*Authorization, error) {
res, err := c.get(ctx, url)
res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted))
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted {
return nil, responseError(res)
}
var v wireAuthz
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
return nil, fmt.Errorf("acme: invalid response: %v", err)
@ -387,14 +386,11 @@ func (c *Client) RevokeAuthorization(ctx context.Context, url string) error {
Status: "deactivated",
Delete: true,
}
res, err := c.retryPostJWS(ctx, c.Key, url, req)
res, err := c.post(ctx, c.Key, url, req, wantStatus(http.StatusOK))
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return responseError(res)
}
return nil
}
@ -406,44 +402,42 @@ func (c *Client) RevokeAuthorization(ctx context.Context, url string) error {
// In all other cases WaitAuthorization returns an error.
// If the Status is StatusInvalid, the returned error is of type *AuthorizationError.
func (c *Client) WaitAuthorization(ctx context.Context, url string) (*Authorization, error) {
sleep := sleeper(ctx)
for {
res, err := c.get(ctx, url)
res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted))
if err != nil {
return nil, err
}
if res.StatusCode >= 400 && res.StatusCode <= 499 {
// Non-retriable error. For instance, Let's Encrypt may return 404 Not Found
// when requesting an expired authorization.
defer res.Body.Close()
return nil, responseError(res)
}
retry := res.Header.Get("Retry-After")
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted {
res.Body.Close()
if err := sleep(retry, 1); err != nil {
return nil, err
}
continue
}
var raw wireAuthz
err = json.NewDecoder(res.Body).Decode(&raw)
res.Body.Close()
if err != nil {
if err := sleep(retry, 0); err != nil {
return nil, err
}
continue
}
if raw.Status == StatusValid {
switch {
case err != nil:
// Skip and retry.
case raw.Status == StatusValid:
return raw.authorization(url), nil
}
if raw.Status == StatusInvalid {
case raw.Status == StatusInvalid:
return nil, raw.error(url)
}
if err := sleep(retry, 0); err != nil {
return nil, err
// Exponential backoff is implemented in c.get above.
// This is just to prevent continuously hitting the CA
// while waiting for a final authorization status.
d := retryAfter(res.Header.Get("Retry-After"))
if d == 0 {
// Given that the fastest challenges TLS-SNI and HTTP-01
// require a CA to make at least 1 network round trip
// and most likely persist a challenge state,
// this default delay seems reasonable.
d = time.Second
}
t := time.NewTimer(d)
select {
case <-ctx.Done():
t.Stop()
return nil, ctx.Err()
case <-t.C:
// Retry.
}
}
}
@ -452,14 +446,11 @@ func (c *Client) WaitAuthorization(ctx context.Context, url string) (*Authorizat
//
// A client typically polls a challenge status using this method.
func (c *Client) GetChallenge(ctx context.Context, url string) (*Challenge, error) {
res, err := c.get(ctx, url)
res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted))
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted {
return nil, responseError(res)
}
v := wireChallenge{URI: url}
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
return nil, fmt.Errorf("acme: invalid response: %v", err)
@ -486,16 +477,14 @@ func (c *Client) Accept(ctx context.Context, chal *Challenge) (*Challenge, error
Type: chal.Type,
Auth: auth,
}
res, err := c.retryPostJWS(ctx, c.Key, chal.URI, req)
res, err := c.post(ctx, c.Key, chal.URI, req, wantStatus(
http.StatusOK, // according to the spec
http.StatusAccepted, // Let's Encrypt: see https://goo.gl/WsJ7VT (acme-divergences.md)
))
if err != nil {
return nil, err
}
defer res.Body.Close()
// Note: the protocol specifies 200 as the expected response code, but
// letsencrypt seems to be returning 202.
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted {
return nil, responseError(res)
}
var v wireChallenge
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
@ -552,7 +541,7 @@ func (c *Client) HTTP01ChallengePath(token string) string {
// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve.
//
// The returned certificate is valid for the next 24 hours and must be presented only when
// the server name of the client hello matches exactly the returned name value.
// the server name of the TLS ClientHello matches exactly the returned name value.
func (c *Client) TLSSNI01ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) {
ka, err := keyAuth(c.Key.Public(), token)
if err != nil {
@ -579,7 +568,7 @@ func (c *Client) TLSSNI01ChallengeCert(token string, opt ...CertOption) (cert tl
// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve.
//
// The returned certificate is valid for the next 24 hours and must be presented only when
// the server name in the client hello matches exactly the returned name value.
// the server name in the TLS ClientHello matches exactly the returned name value.
func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) {
b := sha256.Sum256([]byte(token))
h := hex.EncodeToString(b[:])
@ -600,6 +589,52 @@ func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (cert tl
return cert, sanA, nil
}
// TLSALPN01ChallengeCert creates a certificate for TLS-ALPN-01 challenge response.
// Servers can present the certificate to validate the challenge and prove control
// over a domain name. For more details on TLS-ALPN-01 see
// https://tools.ietf.org/html/draft-shoemaker-acme-tls-alpn-00#section-3
//
// The token argument is a Challenge.Token value.
// If a WithKey option is provided, its private part signs the returned cert,
// and the public part is used to specify the signee.
// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve.
//
// The returned certificate is valid for the next 24 hours and must be presented only when
// the server name in the TLS ClientHello matches the domain, and the special acme-tls/1 ALPN protocol
// has been specified.
func (c *Client) TLSALPN01ChallengeCert(token, domain string, opt ...CertOption) (cert tls.Certificate, err error) {
ka, err := keyAuth(c.Key.Public(), token)
if err != nil {
return tls.Certificate{}, err
}
shasum := sha256.Sum256([]byte(ka))
extValue, err := asn1.Marshal(shasum[:])
if err != nil {
return tls.Certificate{}, err
}
acmeExtension := pkix.Extension{
Id: idPeACMEIdentifierV1,
Critical: true,
Value: extValue,
}
tmpl := defaultTLSChallengeCertTemplate()
var newOpt []CertOption
for _, o := range opt {
switch o := o.(type) {
case *certOptTemplate:
t := *(*x509.Certificate)(o) // shallow copy is ok
tmpl = &t
default:
newOpt = append(newOpt, o)
}
}
tmpl.ExtraExtensions = append(tmpl.ExtraExtensions, acmeExtension)
newOpt = append(newOpt, WithTemplate(tmpl))
return tlsChallengeCert([]string{domain}, newOpt)
}
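
To see how the new TLSALPN01ChallengeCert combines with ALPNProto outside of autocert (which automates all of this), a hand-rolled responder might look roughly like the sketch below; client, chal and regularCert are assumed to come from an existing Authorize flow and are not defined in this diff:

package sketch

import (
    "crypto/tls"

    "golang.org/x/crypto/acme"
)

// challengeTLSConfig serves the tls-alpn-01 token certificate only when the
// CA probes with the "acme-tls/1" protocol and falls back to regularCert for
// ordinary clients. This is an illustration, not part of the package.
func challengeTLSConfig(client *acme.Client, chal *acme.Challenge, domain string, regularCert *tls.Certificate) (*tls.Config, error) {
    tokenCert, err := client.TLSALPN01ChallengeCert(chal.Token, domain)
    if err != nil {
        return nil, err
    }
    return &tls.Config{
        NextProtos: []string{"h2", "http/1.1", acme.ALPNProto},
        GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
            for _, p := range hello.SupportedProtos {
                if p == acme.ALPNProto {
                    return &tokenCert, nil
                }
            }
            return regularCert, nil
        },
    }, nil
}
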
// doReg sends all types of registration requests.
// The type of request is identified by typ argument, which is a "resource"
// in the ACME spec terms.
@ -619,14 +654,15 @@ func (c *Client) doReg(ctx context.Context, url string, typ string, acct *Accoun
req.Contact = acct.Contact
req.Agreement = acct.AgreedTerms
}
res, err := c.retryPostJWS(ctx, c.Key, url, req)
res, err := c.post(ctx, c.Key, url, req, wantStatus(
http.StatusOK, // updates and deletes
http.StatusCreated, // new account creation
http.StatusAccepted, // Let's Encrypt divergent implementation
))
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode < 200 || res.StatusCode > 299 {
return nil, responseError(res)
}
var v struct {
Contact []string
@ -656,59 +692,6 @@ func (c *Client) doReg(ctx context.Context, url string, typ string, acct *Accoun
}, nil
}
// retryPostJWS will retry calls to postJWS if there is a badNonce error,
// clearing the stored nonces after each error.
// If the response was 4XX-5XX, then responseError is called on the body,
// the body is closed, and the error returned.
func (c *Client) retryPostJWS(ctx context.Context, key crypto.Signer, url string, body interface{}) (*http.Response, error) {
sleep := sleeper(ctx)
for {
res, err := c.postJWS(ctx, key, url, body)
if err != nil {
return nil, err
}
// handle errors 4XX-5XX with responseError
if res.StatusCode >= 400 && res.StatusCode <= 599 {
err := responseError(res)
res.Body.Close()
// according to spec badNonce is urn:ietf:params:acme:error:badNonce
// however, acme servers in the wild return their version of the error
// https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.4
if ae, ok := err.(*Error); ok && strings.HasSuffix(strings.ToLower(ae.ProblemType), ":badnonce") {
// clear any nonces that we might've stored that might now be
// considered bad
c.clearNonces()
retry := res.Header.Get("Retry-After")
if err := sleep(retry, 1); err != nil {
return nil, err
}
continue
}
return nil, err
}
return res, nil
}
}
// postJWS signs the body with the given key and POSTs it to the provided url.
// The body argument must be JSON-serializable.
func (c *Client) postJWS(ctx context.Context, key crypto.Signer, url string, body interface{}) (*http.Response, error) {
nonce, err := c.popNonce(ctx, url)
if err != nil {
return nil, err
}
b, err := jwsEncodeJSON(body, key, nonce)
if err != nil {
return nil, err
}
res, err := c.post(ctx, url, "application/jose+json", bytes.NewReader(b))
if err != nil {
return nil, err
}
c.addNonce(res.Header)
return res, nil
}
// popNonce returns a nonce value previously stored with c.addNonce
// or fetches a fresh one from the given URL.
func (c *Client) popNonce(ctx context.Context, url string) (string, error) {
@ -749,58 +732,12 @@ func (c *Client) addNonce(h http.Header) {
c.nonces[v] = struct{}{}
}
func (c *Client) httpClient() *http.Client {
if c.HTTPClient != nil {
return c.HTTPClient
}
return http.DefaultClient
}
func (c *Client) get(ctx context.Context, urlStr string) (*http.Response, error) {
req, err := http.NewRequest("GET", urlStr, nil)
if err != nil {
return nil, err
}
return c.do(ctx, req)
}
func (c *Client) head(ctx context.Context, urlStr string) (*http.Response, error) {
req, err := http.NewRequest("HEAD", urlStr, nil)
if err != nil {
return nil, err
}
return c.do(ctx, req)
}
func (c *Client) post(ctx context.Context, urlStr, contentType string, body io.Reader) (*http.Response, error) {
req, err := http.NewRequest("POST", urlStr, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", contentType)
return c.do(ctx, req)
}
func (c *Client) do(ctx context.Context, req *http.Request) (*http.Response, error) {
res, err := c.httpClient().Do(req.WithContext(ctx))
if err != nil {
select {
case <-ctx.Done():
// Prefer the unadorned context error.
// (The acme package had tests assuming this, previously from ctxhttp's
// behavior, predating net/http supporting contexts natively)
// TODO(bradfitz): reconsider this in the future. But for now this
// requires no test updates.
return nil, ctx.Err()
default:
return nil, err
}
}
return res, nil
}
func (c *Client) fetchNonce(ctx context.Context, url string) (string, error) {
resp, err := c.head(ctx, url)
r, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return "", err
}
resp, err := c.doNoRetry(ctx, r)
if err != nil {
return "", err
}
@ -852,24 +789,6 @@ func (c *Client) responseCert(ctx context.Context, res *http.Response, bundle bo
return cert, nil
}
// responseError creates an error of Error type from resp.
func responseError(resp *http.Response) error {
// don't care if ReadAll returns an error:
// json.Unmarshal will fail in that case anyway
b, _ := ioutil.ReadAll(resp.Body)
e := &wireError{Status: resp.StatusCode}
if err := json.Unmarshal(b, e); err != nil {
// this is not a regular error response:
// populate detail with anything we received,
// e.Status will already contain HTTP response code value
e.Detail = string(b)
if e.Detail == "" {
e.Detail = resp.Status
}
}
return e.error(resp.Header)
}
// chainCert fetches CA certificate chain recursively by following "up" links.
// Each recursive call increments the depth by 1, resulting in an error
// if the recursion level reaches maxChainLen.
@ -880,14 +799,11 @@ func (c *Client) chainCert(ctx context.Context, url string, depth int) ([][]byte
return nil, errors.New("acme: certificate chain is too deep")
}
res, err := c.get(ctx, url)
res, err := c.get(ctx, url, wantStatus(http.StatusOK))
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, responseError(res)
}
b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1))
if err != nil {
return nil, err
@ -932,65 +848,6 @@ func linkHeader(h http.Header, rel string) []string {
return links
}
// sleeper returns a function that accepts the Retry-After HTTP header value
// and an increment that's used with backoff to increasingly sleep on
// consecutive calls until the context is done. If the Retry-After header
// cannot be parsed, then backoff is used with a maximum sleep time of 10
// seconds.
func sleeper(ctx context.Context) func(ra string, inc int) error {
var count int
return func(ra string, inc int) error {
count += inc
d := backoff(count, 10*time.Second)
d = retryAfter(ra, d)
wakeup := time.NewTimer(d)
defer wakeup.Stop()
select {
case <-ctx.Done():
return ctx.Err()
case <-wakeup.C:
return nil
}
}
}
// retryAfter parses a Retry-After HTTP header value,
// trying to convert v into an int (seconds) or use http.ParseTime otherwise.
// It returns d if v cannot be parsed.
func retryAfter(v string, d time.Duration) time.Duration {
if i, err := strconv.Atoi(v); err == nil {
return time.Duration(i) * time.Second
}
t, err := http.ParseTime(v)
if err != nil {
return d
}
return t.Sub(timeNow())
}
// backoff computes a duration after which an n+1 retry iteration should occur
// using truncated exponential backoff algorithm.
//
// The n argument is always bounded between 0 and 30.
// The max argument defines upper bound for the returned value.
func backoff(n int, max time.Duration) time.Duration {
if n < 0 {
n = 0
}
if n > 30 {
n = 30
}
var d time.Duration
if x, err := rand.Int(rand.Reader, big.NewInt(1000)); err == nil {
d = time.Duration(x.Int64()) * time.Millisecond
}
d += time.Duration(1<<uint(n)) * time.Second
if d > max {
return max
}
return d
}
// keyAuth generates a key authorization string for a given token.
func keyAuth(pub crypto.PublicKey, token string) (string, error) {
th, err := JWKThumbprint(pub)
@ -1000,15 +857,25 @@ func keyAuth(pub crypto.PublicKey, token string) (string, error) {
return fmt.Sprintf("%s.%s", token, th), nil
}
// defaultTLSChallengeCertTemplate is a template used to create challenge certs for TLS challenges.
func defaultTLSChallengeCertTemplate() *x509.Certificate {
return &x509.Certificate{
SerialNumber: big.NewInt(1),
NotBefore: time.Now(),
NotAfter: time.Now().Add(24 * time.Hour),
BasicConstraintsValid: true,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
}
}
// tlsChallengeCert creates a temporary certificate for TLS-SNI challenges
// with the given SANs and auto-generated public/private key pair.
// The Subject Common Name is set to the first SAN to aid debugging.
// To create a cert with a custom key pair, specify WithKey option.
func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) {
var (
key crypto.Signer
tmpl *x509.Certificate
)
var key crypto.Signer
tmpl := defaultTLSChallengeCertTemplate()
for _, o := range opt {
switch o := o.(type) {
case *certOptKey:
@ -1017,7 +884,7 @@ func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) {
}
key = o.key
case *certOptTemplate:
var t = *(*x509.Certificate)(o) // shallow copy is ok
t := *(*x509.Certificate)(o) // shallow copy is ok
tmpl = &t
default:
// package's fault, if we let this happen:
@ -1030,16 +897,6 @@ func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) {
return tls.Certificate{}, err
}
}
if tmpl == nil {
tmpl = &x509.Certificate{
SerialNumber: big.NewInt(1),
NotBefore: time.Now(),
NotAfter: time.Now().Add(24 * time.Hour),
BasicConstraintsValid: true,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
}
}
tmpl.DNSNames = san
if len(san) > 0 {
tmpl.Subject.CommonName = san[0]


@ -44,7 +44,7 @@ var createCertRetryAfter = time.Minute
var pseudoRand *lockedMathRand
func init() {
src := mathrand.NewSource(timeNow().UnixNano())
src := mathrand.NewSource(time.Now().UnixNano())
pseudoRand = &lockedMathRand{rnd: mathrand.New(src)}
}
@ -81,9 +81,9 @@ func defaultHostPolicy(context.Context, string) error {
}
// Manager is a stateful certificate manager built on top of acme.Client.
// It obtains and refreshes certificates automatically using "tls-sni-01",
// "tls-sni-02" and "http-01" challenge types, as well as providing them
// to a TLS server via tls.Config.
// It obtains and refreshes certificates automatically using "tls-alpn-01",
// "tls-sni-01", "tls-sni-02" and "http-01" challenge types,
// as well as providing them to a TLS server via tls.Config.
//
// You must specify a cache implementation, such as DirCache,
// to reuse obtained certificates across program restarts.
@ -98,11 +98,11 @@ type Manager struct {
// To always accept the terms, the callers can use AcceptTOS.
Prompt func(tosURL string) bool
// Cache optionally stores and retrieves previously-obtained certificates.
// If nil, certs will only be cached for the lifetime of the Manager.
// Cache optionally stores and retrieves previously-obtained certificates
// and other state. If nil, certs will only be cached for the lifetime of
// the Manager. Multiple Managers can share the same Cache.
//
// Manager passes the Cache certificates data encoded in PEM, with private/public
// parts combined in a single Cache.Put call, private key first.
// Using a persistent Cache, such as DirCache, is strongly recommended.
Cache Cache
// HostPolicy controls which domains the Manager will attempt
@ -127,8 +127,10 @@ type Manager struct {
// Client is used to perform low-level operations, such as account registration
// and requesting new certificates.
//
// If Client is nil, a zero-value acme.Client is used with acme.LetsEncryptURL
// directory endpoint and a newly-generated ECDSA P-256 key.
// as directory endpoint. If the Client.Key is nil, a new ECDSA P-256 key is
// generated and, if Cache is not nil, stored in cache.
//
// Mutating the field after the first call of GetCertificate method will have no effect.
Client *acme.Client
@ -140,22 +142,30 @@ type Manager struct {
// If the Client's account key is already registered, Email is not used.
Email string
// ForceRSA makes the Manager generate certificates with 2048-bit RSA keys.
// ForceRSA used to make the Manager generate RSA certificates. It is now ignored.
//
// If false, a default is used. Currently the default
// is EC-based keys using the P-256 curve.
// Deprecated: the Manager will request the correct type of certificate based
// on what each client supports.
ForceRSA bool
// ExtraExtensions are used when generating a new CSR (Certificate Request),
// thus allowing customization of the resulting certificate.
// For instance, TLS Feature Extension (RFC 7633) can be used
// to prevent an OCSP downgrade attack.
//
// The field value is passed to crypto/x509.CreateCertificateRequest
// in the template's ExtraExtensions field as is.
ExtraExtensions []pkix.Extension
clientMu sync.Mutex
client *acme.Client // initialized by acmeClient method
stateMu sync.Mutex
state map[string]*certState // keyed by domain name
state map[certKey]*certState
// renewal tracks the set of domains currently running renewal timers.
// It is keyed by domain name.
renewalMu sync.Mutex
renewal map[string]*domainRenewal
renewal map[certKey]*domainRenewal
// tokensMu guards the rest of the fields: tryHTTP01, certTokens and httpTokens.
tokensMu sync.RWMutex
@ -167,21 +177,60 @@ type Manager struct {
// to be provisioned.
// The entries are stored for the duration of the authorization flow.
httpTokens map[string][]byte
// certTokens contains temporary certificates for tls-sni challenges
// certTokens contains temporary certificates for tls-sni and tls-alpn challenges
// and is keyed by token domain name, which matches server name of ClientHello.
// Keys always have ".acme.invalid" suffix.
// Keys always have ".acme.invalid" suffix for tls-sni. Otherwise, they are domain names
// for tls-alpn.
// The entries are stored for the duration of the authorization flow.
certTokens map[string]*tls.Certificate
// nowFunc, if not nil, returns the current time. This may be set for
// testing purposes.
nowFunc func() time.Time
}
// certKey is the key by which certificates are tracked in state, renewal and cache.
type certKey struct {
domain string // without trailing dot
isRSA bool // RSA cert for legacy clients (as opposed to default ECDSA)
isToken bool // tls-based challenge token cert; key type is undefined regardless of isRSA
}
func (c certKey) String() string {
if c.isToken {
return c.domain + "+token"
}
if c.isRSA {
return c.domain + "+rsa"
}
return c.domain
}
// TLSConfig creates a new TLS config suitable for net/http.Server servers,
// supporting HTTP/2 and the tls-alpn-01 ACME challenge type.
func (m *Manager) TLSConfig() *tls.Config {
return &tls.Config{
GetCertificate: m.GetCertificate,
NextProtos: []string{
"h2", "http/1.1", // enable HTTP/2
acme.ALPNProto, // enable tls-alpn ACME challenges
},
}
}
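
With the new TLSConfig helper, a complete HTTPS server with automatic certificates reduces to roughly the following sketch; the cache directory, host name and HostWhitelist policy (part of the package but not touched by this diff) are placeholders:

package main

import (
    "log"
    "net/http"

    "golang.org/x/crypto/acme/autocert"
)

func main() {
    m := &autocert.Manager{
        Prompt:     autocert.AcceptTOS,
        Cache:      autocert.DirCache("/var/cache/autocert"), // placeholder path
        HostPolicy: autocert.HostWhitelist("example.com"),    // placeholder host
    }
    srv := &http.Server{
        Addr:      ":443",
        TLSConfig: m.TLSConfig(), // h2, http/1.1 and acme-tls/1
        Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("hello, TLS"))
        }),
    }
    // Certificates are obtained and renewed on demand via GetCertificate.
    log.Fatal(srv.ListenAndServeTLS("", ""))
}
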
// GetCertificate implements the tls.Config.GetCertificate hook.
// It provides a TLS certificate for hello.ServerName host, including answering
// *.acme.invalid (TLS-SNI) challenges. All other fields of hello are ignored.
// tls-alpn-01 and *.acme.invalid (tls-sni-01 and tls-sni-02) challenges.
// All other fields of hello are ignored.
//
// If m.HostPolicy is non-nil, GetCertificate calls the policy before requesting
// a new cert. A non-nil error returned from m.HostPolicy halts TLS negotiation.
// The error is propagated back to the caller of GetCertificate and is user-visible.
// This does not affect cached certs. See HostPolicy field description for more details.
//
// If GetCertificate is used directly, instead of via Manager.TLSConfig, package users will
// also have to add acme.ALPNProto to NextProtos for tls-alpn-01, or use HTTPHandler
// for http-01. (The tls-sni-* challenges have been deprecated by popular ACME providers
// due to security issues in the ecosystem.)
func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
if m.Prompt == nil {
return nil, errors.New("acme/autocert: Manager.Prompt not set")
@ -194,7 +243,7 @@ func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate,
if !strings.Contains(strings.Trim(name, "."), ".") {
return nil, errors.New("acme/autocert: server name component count invalid")
}
if strings.ContainsAny(name, `/\`) {
if strings.ContainsAny(name, `+/\`) {
return nil, errors.New("acme/autocert: server name contains invalid character")
}
@ -203,14 +252,17 @@ func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate,
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
// check whether this is a token cert requested for TLS-SNI challenge
if strings.HasSuffix(name, ".acme.invalid") {
// Check whether this is a token cert requested for TLS-SNI or TLS-ALPN challenge.
if wantsTokenCert(hello) {
m.tokensMu.RLock()
defer m.tokensMu.RUnlock()
// It's ok to use the same token cert key for both tls-sni and tls-alpn
// because there's always at most 1 token cert per on-going domain authorization.
// See m.verify for details.
if cert := m.certTokens[name]; cert != nil {
return cert, nil
}
if cert, err := m.cacheGet(ctx, name); err == nil {
if cert, err := m.cacheGet(ctx, certKey{domain: name, isToken: true}); err == nil {
return cert, nil
}
// TODO: cache error results?
@ -218,8 +270,11 @@ func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate,
}
// regular domain
name = strings.TrimSuffix(name, ".") // golang.org/issue/18114
cert, err := m.cert(ctx, name)
ck := certKey{
domain: strings.TrimSuffix(name, "."), // golang.org/issue/18114
isRSA: !supportsECDSA(hello),
}
cert, err := m.cert(ctx, ck)
if err == nil {
return cert, nil
}
@ -231,14 +286,71 @@ func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate,
if err := m.hostPolicy()(ctx, name); err != nil {
return nil, err
}
cert, err = m.createCert(ctx, name)
cert, err = m.createCert(ctx, ck)
if err != nil {
return nil, err
}
m.cachePut(ctx, name, cert)
m.cachePut(ctx, ck, cert)
return cert, nil
}
// wantsTokenCert reports whether a TLS request with SNI is made by a CA server
// for a challenge verification.
func wantsTokenCert(hello *tls.ClientHelloInfo) bool {
// tls-alpn-01
if len(hello.SupportedProtos) == 1 && hello.SupportedProtos[0] == acme.ALPNProto {
return true
}
// tls-sni-xx
return strings.HasSuffix(hello.ServerName, ".acme.invalid")
}
func supportsECDSA(hello *tls.ClientHelloInfo) bool {
// The "signature_algorithms" extension, if present, limits the key exchange
// algorithms allowed by the cipher suites. See RFC 5246, section 7.4.1.4.1.
if hello.SignatureSchemes != nil {
ecdsaOK := false
schemeLoop:
for _, scheme := range hello.SignatureSchemes {
const tlsECDSAWithSHA1 tls.SignatureScheme = 0x0203 // constant added in Go 1.10
switch scheme {
case tlsECDSAWithSHA1, tls.ECDSAWithP256AndSHA256,
tls.ECDSAWithP384AndSHA384, tls.ECDSAWithP521AndSHA512:
ecdsaOK = true
break schemeLoop
}
}
if !ecdsaOK {
return false
}
}
if hello.SupportedCurves != nil {
ecdsaOK := false
for _, curve := range hello.SupportedCurves {
if curve == tls.CurveP256 {
ecdsaOK = true
break
}
}
if !ecdsaOK {
return false
}
}
for _, suite := range hello.CipherSuites {
switch suite {
case tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305:
return true
}
}
return false
}
// HTTPHandler configures the Manager to provision ACME "http-01" challenge responses.
// It returns an http.Handler that responds to the challenges and must be
// running on port 80. If it receives a request that is not an ACME challenge,
@ -252,8 +364,8 @@ func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate,
// Because the fallback handler is run with unencrypted port 80 requests,
// the fallback should not serve TLS-only requests.
//
// If HTTPHandler is never called, the Manager will only use TLS SNI
// challenges for domain verification.
// If HTTPHandler is never called, the Manager will only use the "tls-alpn-01"
// challenge for domain verification.
func (m *Manager) HTTPHandler(fallback http.Handler) http.Handler {
m.tokensMu.Lock()
defer m.tokensMu.Unlock()
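
A small usage sketch for HTTPHandler: the returned handler is meant to run on port 80 next to the TLS server, and, per the comment above, the fallback must not serve TLS-only content, so a plain redirect to HTTPS is a natural choice (the redirect handler here is my own illustration):

package main

import (
    "log"
    "net/http"

    "golang.org/x/crypto/acme/autocert"
)

func main() {
    m := &autocert.Manager{Prompt: autocert.AcceptTOS}

    // Everything that is not an ACME challenge is redirected to HTTPS;
    // the fallback runs over plain HTTP, so it must not serve TLS-only content.
    fallback := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        http.Redirect(w, r, "https://"+r.Host+r.URL.RequestURI(), http.StatusFound)
    })

    // The challenge responder has to listen on port 80.
    log.Fatal(http.ListenAndServe(":80", m.HTTPHandler(fallback)))
}
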
@ -304,16 +416,16 @@ func stripPort(hostport string) string {
// cert returns an existing certificate either from m.state or cache.
// If a certificate is found in cache but not in m.state, the latter will be filled
// with the cached value.
func (m *Manager) cert(ctx context.Context, name string) (*tls.Certificate, error) {
func (m *Manager) cert(ctx context.Context, ck certKey) (*tls.Certificate, error) {
m.stateMu.Lock()
if s, ok := m.state[name]; ok {
if s, ok := m.state[ck]; ok {
m.stateMu.Unlock()
s.RLock()
defer s.RUnlock()
return s.tlscert()
}
defer m.stateMu.Unlock()
cert, err := m.cacheGet(ctx, name)
cert, err := m.cacheGet(ctx, ck)
if err != nil {
return nil, err
}
@ -322,25 +434,25 @@ func (m *Manager) cert(ctx context.Context, name string) (*tls.Certificate, erro
return nil, errors.New("acme/autocert: private key cannot sign")
}
if m.state == nil {
m.state = make(map[string]*certState)
m.state = make(map[certKey]*certState)
}
s := &certState{
key: signer,
cert: cert.Certificate,
leaf: cert.Leaf,
}
m.state[name] = s
go m.renew(name, s.key, s.leaf.NotAfter)
m.state[ck] = s
go m.renew(ck, s.key, s.leaf.NotAfter)
return cert, nil
}
// cacheGet always returns a valid certificate, or an error otherwise.
// If a cached certficate exists but is not valid, ErrCacheMiss is returned.
func (m *Manager) cacheGet(ctx context.Context, domain string) (*tls.Certificate, error) {
// If a cached certificate exists but is not valid, ErrCacheMiss is returned.
func (m *Manager) cacheGet(ctx context.Context, ck certKey) (*tls.Certificate, error) {
if m.Cache == nil {
return nil, ErrCacheMiss
}
data, err := m.Cache.Get(ctx, domain)
data, err := m.Cache.Get(ctx, ck.String())
if err != nil {
return nil, err
}
@ -371,7 +483,7 @@ func (m *Manager) cacheGet(ctx context.Context, domain string) (*tls.Certificate
}
// verify and create TLS cert
leaf, err := validCert(domain, pubDER, privKey)
leaf, err := validCert(ck, pubDER, privKey, m.now())
if err != nil {
return nil, ErrCacheMiss
}
@ -383,7 +495,7 @@ func (m *Manager) cacheGet(ctx context.Context, domain string) (*tls.Certificate
return tlscert, nil
}
func (m *Manager) cachePut(ctx context.Context, domain string, tlscert *tls.Certificate) error {
func (m *Manager) cachePut(ctx context.Context, ck certKey, tlscert *tls.Certificate) error {
if m.Cache == nil {
return nil
}
@ -415,7 +527,7 @@ func (m *Manager) cachePut(ctx context.Context, domain string, tlscert *tls.Cert
}
}
return m.Cache.Put(ctx, domain, buf.Bytes())
return m.Cache.Put(ctx, ck.String(), buf.Bytes())
}
func encodeECDSAKey(w io.Writer, key *ecdsa.PrivateKey) error {
@ -432,9 +544,9 @@ func encodeECDSAKey(w io.Writer, key *ecdsa.PrivateKey) error {
//
// If the domain is already being verified, it waits for the existing verification to complete.
// Either way, createCert blocks for the duration of the whole process.
func (m *Manager) createCert(ctx context.Context, domain string) (*tls.Certificate, error) {
func (m *Manager) createCert(ctx context.Context, ck certKey) (*tls.Certificate, error) {
// TODO: maybe rewrite this whole piece using sync.Once
state, err := m.certState(domain)
state, err := m.certState(ck)
if err != nil {
return nil, err
}
@ -452,44 +564,44 @@ func (m *Manager) createCert(ctx context.Context, domain string) (*tls.Certifica
defer state.Unlock()
state.locked = false
der, leaf, err := m.authorizedCert(ctx, state.key, domain)
der, leaf, err := m.authorizedCert(ctx, state.key, ck)
if err != nil {
// Remove the failed state after some time,
// making the manager call createCert again on the following TLS hello.
time.AfterFunc(createCertRetryAfter, func() {
defer testDidRemoveState(domain)
defer testDidRemoveState(ck)
m.stateMu.Lock()
defer m.stateMu.Unlock()
// Verify the state hasn't changed and it's still invalid
// before deleting.
s, ok := m.state[domain]
s, ok := m.state[ck]
if !ok {
return
}
if _, err := validCert(domain, s.cert, s.key); err == nil {
if _, err := validCert(ck, s.cert, s.key, m.now()); err == nil {
return
}
delete(m.state, domain)
delete(m.state, ck)
})
return nil, err
}
state.cert = der
state.leaf = leaf
go m.renew(domain, state.key, state.leaf.NotAfter)
go m.renew(ck, state.key, state.leaf.NotAfter)
return state.tlscert()
}
// certState returns a new or existing certState.
// If a new certState is returned, state.exist is false and the state is locked.
// The returned error is non-nil only in the case where a new state could not be created.
func (m *Manager) certState(domain string) (*certState, error) {
func (m *Manager) certState(ck certKey) (*certState, error) {
m.stateMu.Lock()
defer m.stateMu.Unlock()
if m.state == nil {
m.state = make(map[string]*certState)
m.state = make(map[certKey]*certState)
}
// existing state
if state, ok := m.state[domain]; ok {
if state, ok := m.state[ck]; ok {
return state, nil
}
@ -498,7 +610,7 @@ func (m *Manager) certState(domain string) (*certState, error) {
err error
key crypto.Signer
)
if m.ForceRSA {
if ck.isRSA {
key, err = rsa.GenerateKey(rand.Reader, 2048)
} else {
key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
@ -512,22 +624,22 @@ func (m *Manager) certState(domain string) (*certState, error) {
locked: true,
}
state.Lock() // will be unlocked by m.certState caller
m.state[domain] = state
m.state[ck] = state
return state, nil
}
// authorizedCert starts the domain ownership verification process and requests a new cert upon success.
// The key argument is the certificate private key.
func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, domain string) (der [][]byte, leaf *x509.Certificate, err error) {
func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, ck certKey) (der [][]byte, leaf *x509.Certificate, err error) {
client, err := m.acmeClient(ctx)
if err != nil {
return nil, nil, err
}
if err := m.verify(ctx, client, domain); err != nil {
if err := m.verify(ctx, client, ck.domain); err != nil {
return nil, nil, err
}
csr, err := certRequest(key, domain)
csr, err := certRequest(key, ck.domain, m.ExtraExtensions)
if err != nil {
return nil, nil, err
}
@ -535,25 +647,55 @@ func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, domain
if err != nil {
return nil, nil, err
}
leaf, err = validCert(domain, der, key)
leaf, err = validCert(ck, der, key, m.now())
if err != nil {
return nil, nil, err
}
return der, leaf, nil
}
// revokePendingAuthz revokes all authorizations idenfied by the elements of uri slice.
// It ignores revocation errors.
func (m *Manager) revokePendingAuthz(ctx context.Context, uri []string) {
client, err := m.acmeClient(ctx)
if err != nil {
return
}
for _, u := range uri {
client.RevokeAuthorization(ctx, u)
}
}
// verify runs the identifier (domain) authorization flow
// using each applicable ACME challenge type.
func (m *Manager) verify(ctx context.Context, client *acme.Client, domain string) error {
// The list of challenge types we'll try to fulfill
// in this specific order.
challengeTypes := []string{"tls-sni-02", "tls-sni-01"}
challengeTypes := []string{"tls-alpn-01", "tls-sni-02", "tls-sni-01"}
m.tokensMu.RLock()
if m.tryHTTP01 {
challengeTypes = append(challengeTypes, "http-01")
}
m.tokensMu.RUnlock()
// Keep track of pending authzs and revoke the ones that did not validate.
pendingAuthzs := make(map[string]bool)
defer func() {
var uri []string
for k, pending := range pendingAuthzs {
if pending {
uri = append(uri, k)
}
}
if len(uri) > 0 {
// Use "detached" background context.
// The revocations need not happen in the current verification flow.
go m.revokePendingAuthz(context.Background(), uri)
}
}()
// errs accumulates challenge failure errors, printed if all fail
errs := make(map[*acme.Challenge]error)
var nextTyp int // challengeType index of the next challenge type to try
for {
// Start domain authorization and get the challenge.
@ -570,6 +712,8 @@ func (m *Manager) verify(ctx context.Context, client *acme.Client, domain string
return fmt.Errorf("acme/autocert: invalid authorization %q", authz.URI)
}
pendingAuthzs[authz.URI] = true
// Pick the next preferred challenge.
var chal *acme.Challenge
for chal == nil && nextTyp < len(challengeTypes) {
@ -577,28 +721,44 @@ func (m *Manager) verify(ctx context.Context, client *acme.Client, domain string
nextTyp++
}
if chal == nil {
return fmt.Errorf("acme/autocert: unable to authorize %q; tried %q", domain, challengeTypes)
errorMsg := fmt.Sprintf("acme/autocert: unable to authorize %q", domain)
for chal, err := range errs {
errorMsg += fmt.Sprintf("; challenge %q failed with error: %v", chal.Type, err)
}
return errors.New(errorMsg)
}
cleanup, err := m.fulfill(ctx, client, chal)
cleanup, err := m.fulfill(ctx, client, chal, domain)
if err != nil {
errs[chal] = err
continue
}
defer cleanup()
if _, err := client.Accept(ctx, chal); err != nil {
errs[chal] = err
continue
}
// A challenge is fulfilled and accepted: wait for the CA to validate.
if _, err := client.WaitAuthorization(ctx, authz.URI); err == nil {
return nil
if _, err := client.WaitAuthorization(ctx, authz.URI); err != nil {
errs[chal] = err
continue
}
delete(pendingAuthzs, authz.URI)
return nil
}
}
// fulfill provisions a response to the challenge chal.
// The cleanup is non-nil only if provisioning succeeded.
func (m *Manager) fulfill(ctx context.Context, client *acme.Client, chal *acme.Challenge) (cleanup func(), err error) {
func (m *Manager) fulfill(ctx context.Context, client *acme.Client, chal *acme.Challenge, domain string) (cleanup func(), err error) {
switch chal.Type {
case "tls-alpn-01":
cert, err := client.TLSALPN01ChallengeCert(chal.Token, domain)
if err != nil {
return nil, err
}
m.putCertToken(ctx, domain, &cert)
return func() { go m.deleteCertToken(domain) }, nil
case "tls-sni-01":
cert, name, err := client.TLSSNI01ChallengeCert(chal.Token)
if err != nil {
@ -634,8 +794,8 @@ func pickChallenge(typ string, chal []*acme.Challenge) *acme.Challenge {
return nil
}
// putCertToken stores the cert under the named key in both m.certTokens map
// and m.Cache.
// putCertToken stores the token certificate with the specified name
// in both m.certTokens map and m.Cache.
func (m *Manager) putCertToken(ctx context.Context, name string, cert *tls.Certificate) {
m.tokensMu.Lock()
defer m.tokensMu.Unlock()
@ -643,17 +803,18 @@ func (m *Manager) putCertToken(ctx context.Context, name string, cert *tls.Certi
m.certTokens = make(map[string]*tls.Certificate)
}
m.certTokens[name] = cert
m.cachePut(ctx, name, cert)
m.cachePut(ctx, certKey{domain: name, isToken: true}, cert)
}
// deleteCertToken removes the token certificate for the specified domain name
// deleteCertToken removes the token certificate with the specified name
// from both m.certTokens map and m.Cache.
func (m *Manager) deleteCertToken(name string) {
m.tokensMu.Lock()
defer m.tokensMu.Unlock()
delete(m.certTokens, name)
if m.Cache != nil {
m.Cache.Delete(context.Background(), name)
ck := certKey{domain: name, isToken: true}
m.Cache.Delete(context.Background(), ck.String())
}
}
@ -704,7 +865,7 @@ func (m *Manager) deleteHTTPToken(tokenPath string) {
// httpTokenCacheKey returns a key at which an http-01 token value may be stored
// in the Manager's optional Cache.
func httpTokenCacheKey(tokenPath string) string {
return "http-01-" + path.Base(tokenPath)
return path.Base(tokenPath) + "+http-01"
}
// renew starts a cert renewal timer loop, one per domain.
@ -715,18 +876,18 @@ func httpTokenCacheKey(tokenPath string) string {
//
// The key argument is a certificate private key.
// The exp argument is the cert expiration time (NotAfter).
func (m *Manager) renew(domain string, key crypto.Signer, exp time.Time) {
func (m *Manager) renew(ck certKey, key crypto.Signer, exp time.Time) {
m.renewalMu.Lock()
defer m.renewalMu.Unlock()
if m.renewal[domain] != nil {
if m.renewal[ck] != nil {
// another goroutine is already on it
return
}
if m.renewal == nil {
m.renewal = make(map[string]*domainRenewal)
m.renewal = make(map[certKey]*domainRenewal)
}
dr := &domainRenewal{m: m, domain: domain, key: key}
m.renewal[domain] = dr
dr := &domainRenewal{m: m, ck: ck, key: key}
m.renewal[ck] = dr
dr.start(exp)
}
@ -742,7 +903,10 @@ func (m *Manager) stopRenew() {
}
func (m *Manager) accountKey(ctx context.Context) (crypto.Signer, error) {
const keyName = "acme_account.key"
const keyName = "acme_account+key"
// Previous versions of autocert stored the value under a different key.
const legacyKeyName = "acme_account.key"
genKey := func() (*ecdsa.PrivateKey, error) {
return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
@ -753,6 +917,9 @@ func (m *Manager) accountKey(ctx context.Context) (crypto.Signer, error) {
}
data, err := m.Cache.Get(ctx, keyName)
if err == ErrCacheMiss {
data, err = m.Cache.Get(ctx, legacyKeyName)
}
if err == ErrCacheMiss {
key, err := genKey()
if err != nil {
@ -824,6 +991,13 @@ func (m *Manager) renewBefore() time.Duration {
return 720 * time.Hour // 30 days
}
func (m *Manager) now() time.Time {
if m.nowFunc != nil {
return m.nowFunc()
}
return time.Now()
}
// certState is ready when its mutex is unlocked for reading.
type certState struct {
sync.RWMutex
@ -849,12 +1023,12 @@ func (s *certState) tlscert() (*tls.Certificate, error) {
}, nil
}
// certRequest creates a certificate request for the given common name cn
// and optional SANs.
func certRequest(key crypto.Signer, cn string, san ...string) ([]byte, error) {
// certRequest generates a CSR for the given common name cn and optional SANs.
func certRequest(key crypto.Signer, cn string, ext []pkix.Extension, san ...string) ([]byte, error) {
req := &x509.CertificateRequest{
Subject: pkix.Name{CommonName: cn},
DNSNames: san,
Subject: pkix.Name{CommonName: cn},
DNSNames: san,
ExtraExtensions: ext,
}
return x509.CreateCertificateRequest(rand.Reader, req, key)
}
@ -885,12 +1059,12 @@ func parsePrivateKey(der []byte) (crypto.Signer, error) {
return nil, errors.New("acme/autocert: failed to parse private key")
}
// validCert parses a cert chain provided as der argument and verifies the leaf, der[0],
// corresponds to the private key, as well as the domain match and expiration dates.
// It doesn't do any revocation checking.
// validCert parses a cert chain provided as der argument and verifies the leaf and der[0]
// correspond to the private key, the domain and key type match, and expiration dates
// are valid. It doesn't do any revocation checking.
//
// The returned value is the verified leaf cert.
func validCert(domain string, der [][]byte, key crypto.Signer) (leaf *x509.Certificate, err error) {
func validCert(ck certKey, der [][]byte, key crypto.Signer, now time.Time) (leaf *x509.Certificate, err error) {
// parse public part(s)
var n int
for _, b := range der {
@ -902,22 +1076,21 @@ func validCert(domain string, der [][]byte, key crypto.Signer) (leaf *x509.Certi
n += copy(pub[n:], b)
}
x509Cert, err := x509.ParseCertificates(pub)
if len(x509Cert) == 0 {
if err != nil || len(x509Cert) == 0 {
return nil, errors.New("acme/autocert: no public key found")
}
// verify the leaf is not expired and matches the domain name
leaf = x509Cert[0]
now := timeNow()
if now.Before(leaf.NotBefore) {
return nil, errors.New("acme/autocert: certificate is not valid yet")
}
if now.After(leaf.NotAfter) {
return nil, errors.New("acme/autocert: expired certificate")
}
if err := leaf.VerifyHostname(domain); err != nil {
if err := leaf.VerifyHostname(ck.domain); err != nil {
return nil, err
}
// ensure the leaf corresponds to the private key
// ensure the leaf corresponds to the private key and matches the certKey type
switch pub := leaf.PublicKey.(type) {
case *rsa.PublicKey:
prv, ok := key.(*rsa.PrivateKey)
@ -927,6 +1100,9 @@ func validCert(domain string, der [][]byte, key crypto.Signer) (leaf *x509.Certi
if pub.N.Cmp(prv.N) != 0 {
return nil, errors.New("acme/autocert: private key does not match public key")
}
if !ck.isRSA && !ck.isToken {
return nil, errors.New("acme/autocert: key type does not match expected value")
}
case *ecdsa.PublicKey:
prv, ok := key.(*ecdsa.PrivateKey)
if !ok {
@ -935,6 +1111,9 @@ func validCert(domain string, der [][]byte, key crypto.Signer) (leaf *x509.Certi
if pub.X.Cmp(prv.X) != 0 || pub.Y.Cmp(prv.Y) != 0 {
return nil, errors.New("acme/autocert: private key does not match public key")
}
if ck.isRSA && !ck.isToken {
return nil, errors.New("acme/autocert: key type does not match expected value")
}
default:
return nil, errors.New("acme/autocert: unknown public key algorithm")
}
@ -955,8 +1134,6 @@ func (r *lockedMathRand) int63n(max int64) int64 {
// For easier testing.
var (
timeNow = time.Now
// Called when a state is removed.
testDidRemoveState = func(domain string) {}
testDidRemoveState = func(certKey) {}
)


@ -16,10 +16,10 @@ import (
var ErrCacheMiss = errors.New("acme/autocert: certificate cache miss")
// Cache is used by Manager to store and retrieve previously obtained certificates
// as opaque data.
// and other account data as opaque blobs.
//
// The key argument of the methods refers to a domain name but need not be an FQDN.
// Cache implementations should not rely on the key naming pattern.
// Cache implementations should not rely on the key naming pattern. Keys can
// include any printable ASCII characters, except the following: \/:*?"<>|
type Cache interface {
// Get returns a certificate data for the specified key.
// If there's no such key, Get returns ErrCacheMiss.


@ -72,11 +72,8 @@ func NewListener(domains ...string) net.Listener {
// the Manager m's Prompt, Cache, HostPolicy, and other desired options.
func (m *Manager) Listener() net.Listener {
ln := &listener{
m: m,
conf: &tls.Config{
GetCertificate: m.GetCertificate, // bonus: panic on nil m
NextProtos: []string{"h2", "http/1.1"}, // Enable HTTP/2
},
m: m,
conf: m.TLSConfig(),
}
ln.tcpListener, ln.tcpListenErr = net.Listen("tcp", ":443")
return ln


@ -17,9 +17,9 @@ const renewJitter = time.Hour
// domainRenewal tracks the state used by the periodic timers
// renewing a single domain's cert.
type domainRenewal struct {
m *Manager
domain string
key crypto.Signer
m *Manager
ck certKey
key crypto.Signer
timerMu sync.Mutex
timer *time.Timer
@ -77,7 +77,7 @@ func (dr *domainRenewal) updateState(state *certState) {
dr.m.stateMu.Lock()
defer dr.m.stateMu.Unlock()
dr.key = state.key
dr.m.state[dr.domain] = state
dr.m.state[dr.ck] = state
}
// do is similar to Manager.createCert but it doesn't lock a Manager.state item.
@ -91,7 +91,7 @@ func (dr *domainRenewal) updateState(state *certState) {
func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) {
// a race is likely unavoidable in a distributed environment
// but we try nonetheless
if tlscert, err := dr.m.cacheGet(ctx, dr.domain); err == nil {
if tlscert, err := dr.m.cacheGet(ctx, dr.ck); err == nil {
next := dr.next(tlscert.Leaf.NotAfter)
if next > dr.m.renewBefore()+renewJitter {
signer, ok := tlscert.PrivateKey.(crypto.Signer)
@ -107,7 +107,7 @@ func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) {
}
}
der, leaf, err := dr.m.authorizedCert(ctx, dr.key, dr.domain)
der, leaf, err := dr.m.authorizedCert(ctx, dr.key, dr.ck)
if err != nil {
return 0, err
}
@ -120,7 +120,7 @@ func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) {
if err != nil {
return 0, err
}
if err := dr.m.cachePut(ctx, dr.domain, tlscert); err != nil {
if err := dr.m.cachePut(ctx, dr.ck, tlscert); err != nil {
return 0, err
}
dr.updateState(state)
@ -128,7 +128,7 @@ func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) {
}
func (dr *domainRenewal) next(expiry time.Time) time.Duration {
d := expiry.Sub(timeNow()) - dr.m.renewBefore()
d := expiry.Sub(dr.m.now()) - dr.m.renewBefore()
// add a bit of randomness to renew deadline
n := pseudoRand.int63n(int64(renewJitter))
d -= time.Duration(n)

vendor/golang.org/x/crypto/acme/http.go (generated, vendored, new file, 281 additions)

@ -0,0 +1,281 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package acme
import (
"bytes"
"context"
"crypto"
"crypto/rand"
"encoding/json"
"fmt"
"io/ioutil"
"math/big"
"net/http"
"strconv"
"strings"
"time"
)
// retryTimer encapsulates common logic for retrying unsuccessful requests.
// It is not safe for concurrent use.
type retryTimer struct {
// backoffFn provides backoff delay sequence for retries.
// See Client.RetryBackoff doc comment.
backoffFn func(n int, r *http.Request, res *http.Response) time.Duration
// n is the current retry attempt.
n int
}
func (t *retryTimer) inc() {
t.n++
}
// backoff pauses the current goroutine as described in Client.RetryBackoff.
func (t *retryTimer) backoff(ctx context.Context, r *http.Request, res *http.Response) error {
d := t.backoffFn(t.n, r, res)
if d <= 0 {
return fmt.Errorf("acme: no more retries for %s; tried %d time(s)", r.URL, t.n)
}
wakeup := time.NewTimer(d)
defer wakeup.Stop()
select {
case <-ctx.Done():
return ctx.Err()
case <-wakeup.C:
return nil
}
}
func (c *Client) retryTimer() *retryTimer {
f := c.RetryBackoff
if f == nil {
f = defaultBackoff
}
return &retryTimer{backoffFn: f}
}
// defaultBackoff provides default Client.RetryBackoff implementation
// using a truncated exponential backoff algorithm,
// as described in Client.RetryBackoff.
//
// The n argument is always bounded between 1 and 30.
// The returned value is always greater than 0.
func defaultBackoff(n int, r *http.Request, res *http.Response) time.Duration {
const max = 10 * time.Second
var jitter time.Duration
if x, err := rand.Int(rand.Reader, big.NewInt(1000)); err == nil {
// Set the minimum to 1ms to avoid a case where
// an invalid Retry-After value is parsed into 0 below,
// resulting in the 0 returned value which would unintentionally
// stop the retries.
jitter = (1 + time.Duration(x.Int64())) * time.Millisecond
}
if v, ok := res.Header["Retry-After"]; ok {
return retryAfter(v[0]) + jitter
}
if n < 1 {
n = 1
}
if n > 30 {
n = 30
}
d := time.Duration(1<<uint(n-1))*time.Second + jitter
if d > max {
return max
}
return d
}
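
To make the defaultBackoff schedule concrete, the loop below reproduces the delay sequence without jitter and without a Retry-After header: 1s, 2s, 4s, 8s, then capped at 10s:

package main

import (
    "fmt"
    "time"
)

func main() {
    // The shape of defaultBackoff without jitter and without Retry-After:
    // 1s, 2s, 4s, 8s, 10s, 10s, ... (capped at 10 seconds).
    const max = 10 * time.Second
    for n := 1; n <= 6; n++ {
        d := time.Duration(1<<uint(n-1)) * time.Second
        if d > max {
            d = max
        }
        fmt.Printf("attempt %d: %v\n", n, d)
    }
}
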
// retryAfter parses a Retry-After HTTP header value,
// trying to convert v into an int (seconds) or use http.ParseTime otherwise.
// It returns zero value if v cannot be parsed.
func retryAfter(v string) time.Duration {
if i, err := strconv.Atoi(v); err == nil {
return time.Duration(i) * time.Second
}
t, err := http.ParseTime(v)
if err != nil {
return 0
}
return t.Sub(timeNow())
}
// resOkay is a function that reports whether the provided response is okay.
// It is expected to keep the response body unread.
type resOkay func(*http.Response) bool
// wantStatus returns a function which reports whether the code
// matches the status code of a response.
func wantStatus(codes ...int) resOkay {
return func(res *http.Response) bool {
for _, code := range codes {
if code == res.StatusCode {
return true
}
}
return false
}
}
// get issues an unsigned GET request to the specified URL.
// It returns a non-error value only when ok reports true.
//
// get retries unsuccessful attempts according to c.RetryBackoff
// until the context is done or a non-retriable error is received.
func (c *Client) get(ctx context.Context, url string, ok resOkay) (*http.Response, error) {
retry := c.retryTimer()
for {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
res, err := c.doNoRetry(ctx, req)
switch {
case err != nil:
return nil, err
case ok(res):
return res, nil
case isRetriable(res.StatusCode):
retry.inc()
resErr := responseError(res)
res.Body.Close()
// Ignore the error value from retry.backoff
// and return the one from last retry, as received from the CA.
if retry.backoff(ctx, req, res) != nil {
return nil, resErr
}
default:
defer res.Body.Close()
return nil, responseError(res)
}
}
}
// post issues a signed POST request in JWS format using the provided key
// to the specified URL.
// It returns a non-error value only when ok reports true.
//
// post retries unsuccessful attempts according to c.RetryBackoff
// until the context is done or a non-retriable error is received.
// It uses postNoRetry to make individual requests.
func (c *Client) post(ctx context.Context, key crypto.Signer, url string, body interface{}, ok resOkay) (*http.Response, error) {
retry := c.retryTimer()
for {
res, req, err := c.postNoRetry(ctx, key, url, body)
if err != nil {
return nil, err
}
if ok(res) {
return res, nil
}
resErr := responseError(res)
res.Body.Close()
switch {
// Check for bad nonce before isRetriable because it may have been returned
// with an unretriable response code such as 400 Bad Request.
case isBadNonce(resErr):
// Consider any previously stored nonce values to be invalid.
c.clearNonces()
case !isRetriable(res.StatusCode):
return nil, resErr
}
retry.inc()
// Ignore the error value from retry.backoff
// and return the error from the last attempt, as received from the CA.
if err := retry.backoff(ctx, req, res); err != nil {
return nil, resErr
}
}
}
// postNoRetry signs the body with the given key and POSTs it to the provided url.
// The body argument must be JSON-serializable.
// It is used by c.post, which retries unsuccessful attempts.
func (c *Client) postNoRetry(ctx context.Context, key crypto.Signer, url string, body interface{}) (*http.Response, *http.Request, error) {
nonce, err := c.popNonce(ctx, url)
if err != nil {
return nil, nil, err
}
b, err := jwsEncodeJSON(body, key, nonce)
if err != nil {
return nil, nil, err
}
req, err := http.NewRequest("POST", url, bytes.NewReader(b))
if err != nil {
return nil, nil, err
}
req.Header.Set("Content-Type", "application/jose+json")
res, err := c.doNoRetry(ctx, req)
if err != nil {
return nil, nil, err
}
c.addNonce(res.Header)
return res, req, nil
}
// doNoRetry issues a request req, replacing its context (if any) with ctx.
func (c *Client) doNoRetry(ctx context.Context, req *http.Request) (*http.Response, error) {
res, err := c.httpClient().Do(req.WithContext(ctx))
if err != nil {
select {
case <-ctx.Done():
// Prefer the unadorned context error.
// (The acme package had tests assuming this, previously from ctxhttp's
// behavior, predating net/http supporting contexts natively)
// TODO(bradfitz): reconsider this in the future. But for now this
// requires no test updates.
return nil, ctx.Err()
default:
return nil, err
}
}
return res, nil
}
func (c *Client) httpClient() *http.Client {
if c.HTTPClient != nil {
return c.HTTPClient
}
return http.DefaultClient
}
// isBadNonce reports whether err is an ACME "badnonce" error.
func isBadNonce(err error) bool {
// According to the spec badNonce is urn:ietf:params:acme:error:badNonce.
// However, ACME servers in the wild return their own versions of the error.
// See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.4
// and https://github.com/letsencrypt/boulder/blob/0e07eacb/docs/acme-divergences.md#section-66.
ae, ok := err.(*Error)
return ok && strings.HasSuffix(strings.ToLower(ae.ProblemType), ":badnonce")
}
// isRetriable reports whether a request can be retried
// based on the response status code.
//
// Note that a "bad nonce" error is returned with a non-retriable 400 Bad Request code.
// Callers should parse the response and check with isBadNonce.
func isRetriable(code int) bool {
return code <= 399 || code >= 500 || code == http.StatusTooManyRequests
}
// responseError creates an error of Error type from resp.
func responseError(resp *http.Response) error {
// don't care if ReadAll returns an error:
// json.Unmarshal will fail in that case anyway
b, _ := ioutil.ReadAll(resp.Body)
e := &wireError{Status: resp.StatusCode}
if err := json.Unmarshal(b, e); err != nil {
// this is not a regular error response:
// populate detail with anything we received;
// e.Status already contains the HTTP response code value
e.Detail = string(b)
if e.Detail == "" {
e.Detail = resp.Status
}
}
return e.error(resp.Header)
}

View file

@ -104,7 +104,7 @@ func RateLimit(err error) (time.Duration, bool) {
if e.Header == nil {
return 0, true
}
return retryAfter(e.Header.Get("Retry-After"), 0), true
return retryAfter(e.Header.Get("Retry-After")), true
}
// Account is a user account. It is associated with a private key.
@ -296,8 +296,8 @@ func (e *wireError) error(h http.Header) *Error {
}
}
// CertOption is an optional argument type for the TLSSNIxChallengeCert methods for
// customizing a temporary certificate for TLS-SNI challenges.
// CertOption is an optional argument type for the TLS ChallengeCert methods for
// customizing a temporary certificate for TLS-based challenges.
type CertOption interface {
privateCertOpt()
}
@ -317,7 +317,7 @@ func (*certOptKey) privateCertOpt() {}
// WithTemplate creates an option for specifying a certificate template.
// See x509.CreateCertificate for template usage details.
//
// In TLSSNIxChallengeCert methods, the template is also used as parent,
// In TLS ChallengeCert methods, the template is also used as parent,
// resulting in a self-signed certificate.
// The DNSNames field of t is always overwritten for tls-sni challenge certs.
func WithTemplate(t *x509.Certificate) CertOption {

View file

@ -6,7 +6,10 @@
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// RFC 8032.
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
package ed25519
// This code is a port of the public domain, “ref10” implementation of ed25519
@ -31,6 +34,8 @@ const (
PrivateKeySize = 64
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
SignatureSize = 64
// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
SeedSize = 32
)
// PublicKey is the type of Ed25519 public keys.
@ -46,6 +51,15 @@ func (priv PrivateKey) Public() crypto.PublicKey {
return PublicKey(publicKey)
}
// Seed returns the private key seed corresponding to priv. It is provided for
// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
// in this package.
func (priv PrivateKey) Seed() []byte {
seed := make([]byte, SeedSize)
copy(seed, priv[:32])
return seed
}
// Sign signs the given message with priv.
// Ed25519 performs two passes over messages to be signed and therefore cannot
// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
@ -61,19 +75,33 @@ func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOp
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) {
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
if rand == nil {
rand = cryptorand.Reader
}
privateKey = make([]byte, PrivateKeySize)
publicKey = make([]byte, PublicKeySize)
_, err = io.ReadFull(rand, privateKey[:32])
if err != nil {
seed := make([]byte, SeedSize)
if _, err := io.ReadFull(rand, seed); err != nil {
return nil, nil, err
}
digest := sha512.Sum512(privateKey[:32])
privateKey := NewKeyFromSeed(seed)
publicKey := make([]byte, PublicKeySize)
copy(publicKey, privateKey[32:])
return publicKey, privateKey, nil
}
// NewKeyFromSeed calculates a private key from a seed. It will panic if
// len(seed) is not SeedSize. This function is provided for interoperability
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
if l := len(seed); l != SeedSize {
panic("ed25519: bad seed length: " + strconv.Itoa(l))
}
digest := sha512.Sum512(seed)
digest[0] &= 248
digest[31] &= 127
digest[31] |= 64
@ -85,10 +113,11 @@ func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, er
var publicKeyBytes [32]byte
A.ToBytes(&publicKeyBytes)
privateKey := make([]byte, PrivateKeySize)
copy(privateKey, seed)
copy(privateKey[32:], publicKeyBytes[:])
copy(publicKey, publicKeyBytes[:])
return publicKey, privateKey, nil
return privateKey
}
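A short sketch, using only the exported ed25519 API shown in this change, round-trips a key through Seed and NewKeyFromSeed and signs a throwaway message; the message text is arbitrary.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	// Passing nil uses crypto/rand.Reader, per the GenerateKey documentation.
	pub, priv, err := ed25519.GenerateKey(nil)
	if err != nil {
		panic(err)
	}
	seed := priv.Seed()                     // the 32-byte RFC 8032 private key
	rebuilt := ed25519.NewKeyFromSeed(seed) // deterministically expands back to the same key
	fmt.Println(bytes.Equal(priv, rebuilt)) // true

	msg := []byte("example message")
	sig := ed25519.Sign(rebuilt, msg)
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}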
// Sign signs the message with privateKey and returns a signature. It will

View file

@ -9,6 +9,8 @@ package chacha20
import (
"crypto/cipher"
"encoding/binary"
"golang.org/x/crypto/internal/subtle"
)
// assert that *Cipher implements cipher.Stream
@ -18,10 +20,10 @@ var _ cipher.Stream = (*Cipher)(nil)
// and nonce. A *Cipher implements the cipher.Stream interface.
type Cipher struct {
key [8]uint32
counter uint32 // incremented after each block
nonce [3]uint32
counter uint32 // incremented after each block
buf [64]byte // buffer for unused keystream bytes
len int // number of unused keystream bytes at end of buf
buf [bufSize]byte // buffer for unused keystream bytes
len int // number of unused keystream bytes at end of buf
}
// New creates a new ChaCha20 stream cipher with the given key and nonce.
@ -30,6 +32,30 @@ func New(key [8]uint32, nonce [3]uint32) *Cipher {
return &Cipher{key: key, nonce: nonce}
}
// ChaCha20 constants spelling "expand 32-byte k"
const (
j0 uint32 = 0x61707865
j1 uint32 = 0x3320646e
j2 uint32 = 0x79622d32
j3 uint32 = 0x6b206574
)
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
a += b
d ^= a
d = (d << 16) | (d >> 16)
c += d
b ^= c
b = (b << 12) | (b >> 20)
a += b
d ^= a
d = (d << 8) | (d >> 24)
c += d
b ^= c
b = (b << 7) | (b >> 25)
return a, b, c, d
}
// XORKeyStream XORs each byte in the given slice with a byte from the
// cipher's key stream. Dst and src must overlap entirely or not at all.
//
@ -41,6 +67,13 @@ func New(key [8]uint32, nonce [3]uint32) *Cipher {
// the src buffers was passed in a single run. That is, Cipher
// maintains state and does not reset at each XORKeyStream call.
func (s *Cipher) XORKeyStream(dst, src []byte) {
if len(dst) < len(src) {
panic("chacha20: output smaller than input")
}
if subtle.InexactOverlap(dst[:len(src)], src) {
panic("chacha20: invalid buffer overlap")
}
// xor src with buffered keystream first
if s.len != 0 {
buf := s.buf[len(s.buf)-s.len:]
@ -63,6 +96,13 @@ func (s *Cipher) XORKeyStream(dst, src []byte) {
if len(src) == 0 {
return
}
if haveAsm {
if uint64(len(src))+uint64(s.counter)*64 > (1<<38)-64 {
panic("chacha20: counter overflow")
}
s.xorKeyStreamAsm(dst, src)
return
}
// set up a 64-byte buffer to pad out the final block if needed
// (hoisted out of the main loop to avoid spills)
@ -72,59 +112,34 @@ func (s *Cipher) XORKeyStream(dst, src []byte) {
copy(s.buf[len(s.buf)-64:], src[fin:])
}
// qr calculates a quarter round
qr := func(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
a += b
d ^= a
d = (d << 16) | (d >> 16)
c += d
b ^= c
b = (b << 12) | (b >> 20)
a += b
d ^= a
d = (d << 8) | (d >> 24)
c += d
b ^= c
b = (b << 7) | (b >> 25)
return a, b, c, d
}
// ChaCha20 constants
const (
j0 = 0x61707865
j1 = 0x3320646e
j2 = 0x79622d32
j3 = 0x6b206574
)
// pre-calculate most of the first round
s1, s5, s9, s13 := qr(j1, s.key[1], s.key[5], s.nonce[0])
s2, s6, s10, s14 := qr(j2, s.key[2], s.key[6], s.nonce[1])
s3, s7, s11, s15 := qr(j3, s.key[3], s.key[7], s.nonce[2])
s1, s5, s9, s13 := quarterRound(j1, s.key[1], s.key[5], s.nonce[0])
s2, s6, s10, s14 := quarterRound(j2, s.key[2], s.key[6], s.nonce[1])
s3, s7, s11, s15 := quarterRound(j3, s.key[3], s.key[7], s.nonce[2])
n := len(src)
src, dst = src[:n:n], dst[:n:n] // BCE hint
for i := 0; i < n; i += 64 {
// calculate the remainder of the first round
s0, s4, s8, s12 := qr(j0, s.key[0], s.key[4], s.counter)
s0, s4, s8, s12 := quarterRound(j0, s.key[0], s.key[4], s.counter)
// execute the second round
x0, x5, x10, x15 := qr(s0, s5, s10, s15)
x1, x6, x11, x12 := qr(s1, s6, s11, s12)
x2, x7, x8, x13 := qr(s2, s7, s8, s13)
x3, x4, x9, x14 := qr(s3, s4, s9, s14)
x0, x5, x10, x15 := quarterRound(s0, s5, s10, s15)
x1, x6, x11, x12 := quarterRound(s1, s6, s11, s12)
x2, x7, x8, x13 := quarterRound(s2, s7, s8, s13)
x3, x4, x9, x14 := quarterRound(s3, s4, s9, s14)
// execute the remaining 18 rounds
for i := 0; i < 9; i++ {
x0, x4, x8, x12 = qr(x0, x4, x8, x12)
x1, x5, x9, x13 = qr(x1, x5, x9, x13)
x2, x6, x10, x14 = qr(x2, x6, x10, x14)
x3, x7, x11, x15 = qr(x3, x7, x11, x15)
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
x0, x5, x10, x15 = qr(x0, x5, x10, x15)
x1, x6, x11, x12 = qr(x1, x6, x11, x12)
x2, x7, x8, x13 = qr(x2, x7, x8, x13)
x3, x4, x9, x14 = qr(x3, x4, x9, x14)
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
}
x0 += j0
@ -221,3 +236,29 @@ func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
}
s.XORKeyStream(out, in)
}
// HChaCha20 uses the ChaCha20 core to generate a derived key from a key and a
// nonce. It should only be used as part of the XChaCha20 construction.
func HChaCha20(key *[8]uint32, nonce *[4]uint32) [8]uint32 {
x0, x1, x2, x3 := j0, j1, j2, j3
x4, x5, x6, x7 := key[0], key[1], key[2], key[3]
x8, x9, x10, x11 := key[4], key[5], key[6], key[7]
x12, x13, x14, x15 := nonce[0], nonce[1], nonce[2], nonce[3]
for i := 0; i < 10; i++ {
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
}
var out [8]uint32
out[0], out[1], out[2], out[3] = x0, x1, x2, x3
out[4], out[5], out[6], out[7] = x12, x13, x14, x15
return out
}
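Because this chacha20 package sits under internal/ and cannot be imported by code outside x/crypto, the sketch below copies quarterRound verbatim to make it runnable on its own. The inputs are the ones listed in RFC 7539, section 2.1.1, so the printed values can be compared against that test vector.

package main

import "fmt"

// quarterRound is copied unchanged from the package above for illustration.
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d ^= a
	d = (d << 16) | (d >> 16)
	c += d
	b ^= c
	b = (b << 12) | (b >> 20)
	a += b
	d ^= a
	d = (d << 8) | (d >> 24)
	c += d
	b ^= c
	b = (b << 7) | (b >> 25)
	return a, b, c, d
}

func main() {
	// Inputs taken from RFC 7539, section 2.1.1.
	a, b, c, d := quarterRound(0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567)
	fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d)
}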

View file

@ -0,0 +1,16 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !s390x gccgo appengine
package chacha20
const (
bufSize = 64
haveAsm = false
)
func (*Cipher) xorKeyStreamAsm(dst, src []byte) {
panic("not implemented")
}

View file

@ -0,0 +1,30 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,!gccgo,!appengine
package chacha20
var haveAsm = hasVectorFacility()
const bufSize = 256
// hasVectorFacility reports whether the machine supports the vector
// facility (vx).
// Implementation in asm_s390x.s.
func hasVectorFacility() bool
// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only
// be called when the vector facility is available.
// Implementation in asm_s390x.s.
//go:noescape
func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int)
func (c *Cipher) xorKeyStreamAsm(dst, src []byte) {
xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter, &c.buf, &c.len)
}
// EXRL targets, DO NOT CALL!
func mvcSrcToBuf()
func mvcBufToDst()

View file

@ -0,0 +1,283 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,!gccgo,!appengine
#include "go_asm.h"
#include "textflag.h"
// This is an implementation of the ChaCha20 encryption algorithm as
// specified in RFC 7539. It uses vector instructions to compute
// 4 keystream blocks in parallel (256 bytes) which are then XORed
// with the bytes in the input slice.
GLOBL ·constants<>(SB), RODATA|NOPTR, $32
// BSWAP: swap bytes in each 4-byte element
DATA ·constants<>+0x00(SB)/4, $0x03020100
DATA ·constants<>+0x04(SB)/4, $0x07060504
DATA ·constants<>+0x08(SB)/4, $0x0b0a0908
DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c
// J0: [j0, j1, j2, j3]
DATA ·constants<>+0x10(SB)/4, $0x61707865
DATA ·constants<>+0x14(SB)/4, $0x3320646e
DATA ·constants<>+0x18(SB)/4, $0x79622d32
DATA ·constants<>+0x1c(SB)/4, $0x6b206574
// EXRL targets:
TEXT ·mvcSrcToBuf(SB), NOFRAME|NOSPLIT, $0
MVC $1, (R1), (R8)
RET
TEXT ·mvcBufToDst(SB), NOFRAME|NOSPLIT, $0
MVC $1, (R8), (R9)
RET
#define BSWAP V5
#define J0 V6
#define KEY0 V7
#define KEY1 V8
#define NONCE V9
#define CTR V10
#define M0 V11
#define M1 V12
#define M2 V13
#define M3 V14
#define INC V15
#define X0 V16
#define X1 V17
#define X2 V18
#define X3 V19
#define X4 V20
#define X5 V21
#define X6 V22
#define X7 V23
#define X8 V24
#define X9 V25
#define X10 V26
#define X11 V27
#define X12 V28
#define X13 V29
#define X14 V30
#define X15 V31
#define NUM_ROUNDS 20
#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \
VAF a1, a0, a0 \
VAF b1, b0, b0 \
VAF c1, c0, c0 \
VAF d1, d0, d0 \
VX a0, a2, a2 \
VX b0, b2, b2 \
VX c0, c2, c2 \
VX d0, d2, d2 \
VERLLF $16, a2, a2 \
VERLLF $16, b2, b2 \
VERLLF $16, c2, c2 \
VERLLF $16, d2, d2 \
VAF a2, a3, a3 \
VAF b2, b3, b3 \
VAF c2, c3, c3 \
VAF d2, d3, d3 \
VX a3, a1, a1 \
VX b3, b1, b1 \
VX c3, c1, c1 \
VX d3, d1, d1 \
VERLLF $12, a1, a1 \
VERLLF $12, b1, b1 \
VERLLF $12, c1, c1 \
VERLLF $12, d1, d1 \
VAF a1, a0, a0 \
VAF b1, b0, b0 \
VAF c1, c0, c0 \
VAF d1, d0, d0 \
VX a0, a2, a2 \
VX b0, b2, b2 \
VX c0, c2, c2 \
VX d0, d2, d2 \
VERLLF $8, a2, a2 \
VERLLF $8, b2, b2 \
VERLLF $8, c2, c2 \
VERLLF $8, d2, d2 \
VAF a2, a3, a3 \
VAF b2, b3, b3 \
VAF c2, c3, c3 \
VAF d2, d3, d3 \
VX a3, a1, a1 \
VX b3, b1, b1 \
VX c3, c1, c1 \
VX d3, d1, d1 \
VERLLF $7, a1, a1 \
VERLLF $7, b1, b1 \
VERLLF $7, c1, c1 \
VERLLF $7, d1, d1
#define PERMUTE(mask, v0, v1, v2, v3) \
VPERM v0, v0, mask, v0 \
VPERM v1, v1, mask, v1 \
VPERM v2, v2, mask, v2 \
VPERM v3, v3, mask, v3
#define ADDV(x, v0, v1, v2, v3) \
VAF x, v0, v0 \
VAF x, v1, v1 \
VAF x, v2, v2 \
VAF x, v3, v3
#define XORV(off, dst, src, v0, v1, v2, v3) \
VLM off(src), M0, M3 \
PERMUTE(BSWAP, v0, v1, v2, v3) \
VX v0, M0, M0 \
VX v1, M1, M1 \
VX v2, M2, M2 \
VX v3, M3, M3 \
VSTM M0, M3, off(dst)
#define SHUFFLE(a, b, c, d, t, u, v, w) \
VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]}
VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]}
VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]}
VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]}
VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]}
VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]}
VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]}
VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]}
// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int)
TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
MOVD $·constants<>(SB), R1
MOVD dst+0(FP), R2 // R2=&dst[0]
LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src)
MOVD key+48(FP), R5 // R5=key
MOVD nonce+56(FP), R6 // R6=nonce
MOVD counter+64(FP), R7 // R7=counter
MOVD buf+72(FP), R8 // R8=buf
MOVD len+80(FP), R9 // R9=len
// load BSWAP and J0
VLM (R1), BSWAP, J0
// set up tail buffer
ADD $-1, R4, R12
MOVBZ R12, R12
CMPUBEQ R12, $255, aligned
MOVD R4, R1
AND $~255, R1
MOVD $(R3)(R1*1), R1
EXRL $·mvcSrcToBuf(SB), R12
MOVD $255, R0
SUB R12, R0
MOVD R0, (R9) // update len
aligned:
// setup
MOVD $95, R0
VLM (R5), KEY0, KEY1
VLL R0, (R6), NONCE
VZERO M0
VLEIB $7, $32, M0
VSRLB M0, NONCE, NONCE
// initialize counter values
VLREPF (R7), CTR
VZERO INC
VLEIF $1, $1, INC
VLEIF $2, $2, INC
VLEIF $3, $3, INC
VAF INC, CTR, CTR
VREPIF $4, INC
chacha:
VREPF $0, J0, X0
VREPF $1, J0, X1
VREPF $2, J0, X2
VREPF $3, J0, X3
VREPF $0, KEY0, X4
VREPF $1, KEY0, X5
VREPF $2, KEY0, X6
VREPF $3, KEY0, X7
VREPF $0, KEY1, X8
VREPF $1, KEY1, X9
VREPF $2, KEY1, X10
VREPF $3, KEY1, X11
VLR CTR, X12
VREPF $1, NONCE, X13
VREPF $2, NONCE, X14
VREPF $3, NONCE, X15
MOVD $(NUM_ROUNDS/2), R1
loop:
ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11)
ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9)
ADD $-1, R1
BNE loop
// decrement length
ADD $-256, R4
BLT tail
continue:
// rearrange vectors
SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3)
ADDV(J0, X0, X1, X2, X3)
SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3)
ADDV(KEY0, X4, X5, X6, X7)
SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3)
ADDV(KEY1, X8, X9, X10, X11)
VAF CTR, X12, X12
SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3)
ADDV(NONCE, X12, X13, X14, X15)
// increment counters
VAF INC, CTR, CTR
// xor keystream with plaintext
XORV(0*64, R2, R3, X0, X4, X8, X12)
XORV(1*64, R2, R3, X1, X5, X9, X13)
XORV(2*64, R2, R3, X2, X6, X10, X14)
XORV(3*64, R2, R3, X3, X7, X11, X15)
// increment pointers
MOVD $256(R2), R2
MOVD $256(R3), R3
CMPBNE R4, $0, chacha
CMPUBEQ R12, $255, return
EXRL $·mvcBufToDst(SB), R12 // len was updated during setup
return:
VSTEF $0, CTR, (R7)
RET
tail:
MOVD R2, R9
MOVD R8, R2
MOVD R8, R3
MOVD $0, R4
JMP continue
// func hasVectorFacility() bool
TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1
MOVD $x-24(SP), R1
XC $24, 0(R1), 0(R1) // clear the storage
MOVD $2, R0 // R0 is the number of double words stored -1
WORD $0xB2B01000 // STFLE 0(R1)
XOR R0, R0 // reset the value of R0
MOVBZ z-8(SP), R1
AND $0x40, R1
BEQ novector
vectorinstalled:
// check if the vector instruction has been enabled
VLEIB $0, $0xF, V16
VLGVB $0, V16, R1
CMPBNE R1, $0xF, novector
MOVB $1, ret+0(FP) // have vx
RET
novector:
MOVB $0, ret+0(FP) // no vx
RET

32
vendor/golang.org/x/crypto/internal/subtle/aliasing.go generated vendored Normal file
View file

@ -0,0 +1,32 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// Package subtle implements functions that are often useful in cryptographic
// code but require careful thought to use correctly.
package subtle // import "golang.org/x/crypto/internal/subtle"
import "unsafe"
// AnyOverlap reports whether x and y share memory at any (not necessarily
// corresponding) index. The memory beyond the slice length is ignored.
func AnyOverlap(x, y []byte) bool {
return len(x) > 0 && len(y) > 0 &&
uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
}
// InexactOverlap reports whether x and y share memory at any non-corresponding
// index. The memory beyond the slice length is ignored. Note that x and y can
// have different lengths and still not have any inexact overlap.
//
// InexactOverlap can be used to implement the requirements of the crypto/cipher
// AEAD, Block, BlockMode and Stream interfaces.
func InexactOverlap(x, y []byte) bool {
if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
return false
}
return AnyOverlap(x, y)
}
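internal/subtle is likewise unimportable from outside x/crypto, so the following sketch copies AnyOverlap and InexactOverlap verbatim to show which slice pairs pass the check; the 8-byte buffer and the particular slices are arbitrary examples.

package main

import (
	"fmt"
	"unsafe"
)

// AnyOverlap and InexactOverlap are copied unchanged from the package above.
func AnyOverlap(x, y []byte) bool {
	return len(x) > 0 && len(y) > 0 &&
		uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
		uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
}

func InexactOverlap(x, y []byte) bool {
	if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
		return false
	}
	return AnyOverlap(x, y)
}

func main() {
	buf := make([]byte, 8)
	fmt.Println(InexactOverlap(buf[:4], buf[:4]))  // false: exact (in-place) overlap is allowed
	fmt.Println(InexactOverlap(buf[:4], buf[2:6])) // true: shifted overlap is rejected
	fmt.Println(InexactOverlap(buf[:4], buf[4:]))  // false: disjoint slices do not overlap
}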

View file

@ -0,0 +1,35 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appengine
// Package subtle implements functions that are often useful in cryptographic
// code but require careful thought to use correctly.
package subtle // import "golang.org/x/crypto/internal/subtle"
// This is the Google App Engine standard variant based on reflect
// because the unsafe package and cgo are disallowed.
import "reflect"
// AnyOverlap reports whether x and y share memory at any (not necessarily
// corresponding) index. The memory beyond the slice length is ignored.
func AnyOverlap(x, y []byte) bool {
return len(x) > 0 && len(y) > 0 &&
reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() &&
reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer()
}
// InexactOverlap reports whether x and y share memory at any non-corresponding
// index. The memory beyond the slice length is ignored. Note that x and y can
// have different lengths and still not have any inexact overlap.
//
// InexactOverlap can be used to implement the requirements of the crypto/cipher
// AEAD, Block, BlockMode and Stream interfaces.
func InexactOverlap(x, y []byte) bool {
if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
return false
}
return AnyOverlap(x, y)
}

View file

@ -346,22 +346,25 @@ EachPacket:
switch pkt := p.(type) {
case *packet.UserId:
// Make a new Identity object that we might wind up throwing away.
// We'll only add it if we get a valid self-signature over this
// userID.
current = new(Identity)
current.Name = pkt.Id
current.UserId = pkt
e.Identities[pkt.Id] = current
for {
p, err = packets.Next()
if err == io.EOF {
return nil, io.ErrUnexpectedEOF
break EachPacket
} else if err != nil {
return nil, err
}
sig, ok := p.(*packet.Signature)
if !ok {
return nil, errors.StructuralError("user ID packet not followed by self-signature")
packets.Unread(p)
continue EachPacket
}
if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
@ -369,9 +372,10 @@ EachPacket:
return nil, errors.StructuralError("user ID self-signature invalid: " + err.Error())
}
current.SelfSignature = sig
break
e.Identities[pkt.Id] = current
} else {
current.Signatures = append(current.Signatures, sig)
}
current.Signatures = append(current.Signatures, sig)
}
case *packet.Signature:
if pkt.SigType == packet.SigTypeKeyRevocation {
@ -500,6 +504,10 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
IssuerKeyId: &e.PrimaryKey.KeyId,
},
}
err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config)
if err != nil {
return nil, err
}
// If the user passes in a DefaultHash via packet.Config,
// set the PreferredHash for the SelfSignature.
@ -529,13 +537,16 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
}
e.Subkeys[0].PublicKey.IsSubkey = true
e.Subkeys[0].PrivateKey.IsSubkey = true
err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config)
if err != nil {
return nil, err
}
return e, nil
}
// SerializePrivate serializes an Entity, including private key material, to
// the given Writer. For now, it must only be used on an Entity returned from
// NewEntity.
// SerializePrivate serializes an Entity, including private key material, but
// excluding signatures from other entities, to the given Writer.
// Identities and subkeys are re-signed in case they changed since NewEntity.
// If config is nil, sensible defaults will be used.
func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
err = e.PrivateKey.Serialize(w)
@ -573,8 +584,8 @@ func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error
return nil
}
// Serialize writes the public part of the given Entity to w. (No private
// key material will be output).
// Serialize writes the public part of the given Entity to w, including
// signatures from other entities. No private key material will be output.
func (e *Entity) Serialize(w io.Writer) error {
err := e.PrimaryKey.Serialize(w)
if err != nil {

View file

@ -164,12 +164,12 @@ func hashToHashId(h crypto.Hash) uint8 {
return v
}
// Encrypt encrypts a message to a number of recipients and, optionally, signs
// it. hints contains optional information, that is also encrypted, that aids
// the recipients in processing the message. The resulting WriteCloser must
// be closed after the contents of the file have been written.
// If config is nil, sensible defaults will be used.
func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
// writeAndSign writes the data as a payload package and, optionally, signs
// it. hints contains optional information, that is also encrypted,
// that aids the recipients in processing the message. The resulting
// WriteCloser must be closed after the contents of the file have been
// written. If config is nil, sensible defaults will be used.
func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
var signer *packet.PrivateKey
if signed != nil {
signKey, ok := signed.signingKey(config.Now())
@ -185,6 +185,83 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
}
}
var hash crypto.Hash
for _, hashId := range candidateHashes {
if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
hash = h
break
}
}
// If the hash specified by config is a candidate, we'll use that.
if configuredHash := config.Hash(); configuredHash.Available() {
for _, hashId := range candidateHashes {
if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
hash = h
break
}
}
}
if hash == 0 {
hashId := candidateHashes[0]
name, ok := s2k.HashIdToString(hashId)
if !ok {
name = "#" + strconv.Itoa(int(hashId))
}
return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
}
if signer != nil {
ops := &packet.OnePassSignature{
SigType: packet.SigTypeBinary,
Hash: hash,
PubKeyAlgo: signer.PubKeyAlgo,
KeyId: signer.KeyId,
IsLast: true,
}
if err := ops.Serialize(payload); err != nil {
return nil, err
}
}
if hints == nil {
hints = &FileHints{}
}
w := payload
if signer != nil {
// If we need to write a signature packet after the literal
// data then we need to stop literalData from closing
// the payload writer.
w = noOpCloser{w}
}
var epochSeconds uint32
if !hints.ModTime.IsZero() {
epochSeconds = uint32(hints.ModTime.Unix())
}
literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
if err != nil {
return nil, err
}
if signer != nil {
return signatureWriter{payload, literalData, hash, hash.New(), signer, config}, nil
}
return literalData, nil
}
// Encrypt encrypts a message to a number of recipients and, optionally, signs
// it. hints contains optional information, that is also encrypted, that aids
// the recipients in processing the message. The resulting WriteCloser must
// be closed after the contents of the file have been written.
// If config is nil, sensible defaults will be used.
func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
if len(to) == 0 {
return nil, errors.InvalidArgumentError("no encryption recipient provided")
}
// These are the possible ciphers that we'll use for the message.
candidateCiphers := []uint8{
uint8(packet.CipherAES128),
@ -241,33 +318,6 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
}
}
var hash crypto.Hash
for _, hashId := range candidateHashes {
if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
hash = h
break
}
}
// If the hash specified by config is a candidate, we'll use that.
if configuredHash := config.Hash(); configuredHash.Available() {
for _, hashId := range candidateHashes {
if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
hash = h
break
}
}
}
if hash == 0 {
hashId := candidateHashes[0]
name, ok := s2k.HashIdToString(hashId)
if !ok {
name = "#" + strconv.Itoa(int(hashId))
}
return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
}
symKey := make([]byte, cipher.KeySize())
if _, err := io.ReadFull(config.Random(), symKey); err != nil {
return nil, err
@ -279,49 +329,37 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
}
}
encryptedData, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)
payload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)
if err != nil {
return
}
if signer != nil {
ops := &packet.OnePassSignature{
SigType: packet.SigTypeBinary,
Hash: hash,
PubKeyAlgo: signer.PubKeyAlgo,
KeyId: signer.KeyId,
IsLast: true,
}
if err := ops.Serialize(encryptedData); err != nil {
return nil, err
}
return writeAndSign(payload, candidateHashes, signed, hints, config)
}
// Sign signs a message. The resulting WriteCloser must be closed after the
// contents of the file have been written. hints contains optional information
// that aids the recipients in processing the message.
// If config is nil, sensible defaults will be used.
func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) {
if signed == nil {
return nil, errors.InvalidArgumentError("no signer provided")
}
if hints == nil {
hints = &FileHints{}
// These are the possible hash functions that we'll use for the signature.
candidateHashes := []uint8{
hashToHashId(crypto.SHA256),
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA1),
hashToHashId(crypto.RIPEMD160),
}
w := encryptedData
if signer != nil {
// If we need to write a signature packet after the literal
// data then we need to stop literalData from closing
// encryptedData.
w = noOpCloser{encryptedData}
defaultHashes := candidateHashes[len(candidateHashes)-1:]
preferredHashes := signed.primaryIdentity().SelfSignature.PreferredHash
if len(preferredHashes) == 0 {
preferredHashes = defaultHashes
}
var epochSeconds uint32
if !hints.ModTime.IsZero() {
epochSeconds = uint32(hints.ModTime.Unix())
}
literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
if err != nil {
return nil, err
}
if signer != nil {
return signatureWriter{encryptedData, literalData, hash, hash.New(), signer, config}, nil
}
return literalData, nil
candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, config)
}
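A caller-side sketch of the refactored entry points may help here: it encrypts and signs a short message for one recipient and reads it back with ReadMessage. The entity names, addresses, and message text are placeholders, and error handling is reduced to panics for brevity.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"golang.org/x/crypto/openpgp"
)

func main() {
	// Generate throwaway keys for the example; a nil config uses package defaults.
	recipient, err := openpgp.NewEntity("Alice", "", "alice@example.com", nil)
	if err != nil {
		panic(err)
	}
	signer, err := openpgp.NewEntity("Bob", "", "bob@example.com", nil)
	if err != nil {
		panic(err)
	}

	// Encrypt to the recipient and sign with the signer; nil hints and config are allowed.
	var buf bytes.Buffer
	w, err := openpgp.Encrypt(&buf, []*openpgp.Entity{recipient}, signer, nil, nil)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Decrypt with the recipient's key; the signer's key is present for signature checking.
	md, err := openpgp.ReadMessage(&buf, openpgp.EntityList{recipient, signer}, nil, nil)
	if err != nil {
		panic(err)
	}
	plaintext, err := ioutil.ReadAll(md.UnverifiedBody)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plaintext))
}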
// signatureWriter hashes the contents of a message while passing it along to

14
vendor/golang.org/x/crypto/poly1305/sum_noasm.go generated vendored Normal file
View file

@ -0,0 +1,14 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,!go1.11 !arm,!amd64,!s390x gccgo appengine nacl
package poly1305
// Sum generates an authenticator for msg using a one-time key and puts the
// 16-byte result into out. Authenticating two different messages with the same
// key allows an attacker to forge messages at will.
func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) {
sumGeneric(out, msg, key)
}
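A minimal usage sketch of the exported Sum and Verify follows; the fixed 32-byte key is a placeholder for the example, whereas a real caller must use a fresh one-time key for every message.

package main

import (
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	// Placeholder one-time key; never reuse a Poly1305 key across messages.
	var key [32]byte
	copy(key[:], "this is 32-byte key for Poly1305")

	msg := []byte("hello world")
	var tag [poly1305.TagSize]byte
	poly1305.Sum(&tag, msg, &key)
	fmt.Printf("%x\n", tag)
	fmt.Println(poly1305.Verify(&tag, msg, &key)) // true
}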

View file

@ -2,16 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64,!arm gccgo appengine nacl
package poly1305
import "encoding/binary"
// Sum generates an authenticator for msg using a one-time key and puts the
// 16-byte result into out. Authenticating two different messages with the same
// key allows an attacker to forge messages at will.
func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) {
// sumGeneric generates an authenticator for msg using a one-time key and
// puts the 16-byte result into out. This is the generic implementation of
// Sum and should be called if no assembly implementation is available.
func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
var (
h0, h1, h2, h3, h4 uint32 // the hash accumulators
r0, r1, r2, r3, r4 uint64 // the r part of the key

49
vendor/golang.org/x/crypto/poly1305/sum_s390x.go generated vendored Normal file
View file

@ -0,0 +1,49 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,go1.11,!gccgo,!appengine
package poly1305
// hasVectorFacility reports whether the machine supports
// the vector facility (vx).
func hasVectorFacility() bool
// hasVMSLFacility reports whether the machine supports
// Vector Multiply Sum Logical (VMSL).
func hasVMSLFacility() bool
var hasVX = hasVectorFacility()
var hasVMSL = hasVMSLFacility()
// poly1305vx is an assembly implementation of Poly1305 that uses vector
// instructions. It must only be called if the vector facility (vx) is
// available.
//go:noescape
func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
// poly1305vmsl is an assembly implementation of Poly1305 that uses vector
// instructions, including VMSL. It must only be called if the vector facility (vx) is
// available and if VMSL is supported.
//go:noescape
func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
// Sum generates an authenticator for m using a one-time key and puts the
// 16-byte result into out. Authenticating two different messages with the same
// key allows an attacker to forge messages at will.
func Sum(out *[16]byte, m []byte, key *[32]byte) {
if hasVX {
var mPtr *byte
if len(m) > 0 {
mPtr = &m[0]
}
if hasVMSL && len(m) > 256 {
poly1305vmsl(out, mPtr, uint64(len(m)), key)
} else {
poly1305vx(out, mPtr, uint64(len(m)), key)
}
} else {
sumGeneric(out, m, key)
}
}

400
vendor/golang.org/x/crypto/poly1305/sum_s390x.s generated vendored Normal file
View file

@ -0,0 +1,400 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,go1.11,!gccgo,!appengine
#include "textflag.h"
// Implementation of Poly1305 using the vector facility (vx).
// constants
#define MOD26 V0
#define EX0 V1
#define EX1 V2
#define EX2 V3
// temporaries
#define T_0 V4
#define T_1 V5
#define T_2 V6
#define T_3 V7
#define T_4 V8
// key (r)
#define R_0 V9
#define R_1 V10
#define R_2 V11
#define R_3 V12
#define R_4 V13
#define R5_1 V14
#define R5_2 V15
#define R5_3 V16
#define R5_4 V17
#define RSAVE_0 R5
#define RSAVE_1 R6
#define RSAVE_2 R7
#define RSAVE_3 R8
#define RSAVE_4 R9
#define R5SAVE_1 V28
#define R5SAVE_2 V29
#define R5SAVE_3 V30
#define R5SAVE_4 V31
// message block
#define F_0 V18
#define F_1 V19
#define F_2 V20
#define F_3 V21
#define F_4 V22
// accumulator
#define H_0 V23
#define H_1 V24
#define H_2 V25
#define H_3 V26
#define H_4 V27
GLOBL ·keyMask<>(SB), RODATA, $16
DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f
DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f
GLOBL ·bswapMask<>(SB), RODATA, $16
DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908
DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100
GLOBL ·constants<>(SB), RODATA, $64
// MOD26
DATA ·constants<>+0(SB)/8, $0x3ffffff
DATA ·constants<>+8(SB)/8, $0x3ffffff
// EX0
DATA ·constants<>+16(SB)/8, $0x0006050403020100
DATA ·constants<>+24(SB)/8, $0x1016151413121110
// EX1
DATA ·constants<>+32(SB)/8, $0x060c0b0a09080706
DATA ·constants<>+40(SB)/8, $0x161c1b1a19181716
// EX2
DATA ·constants<>+48(SB)/8, $0x0d0d0d0d0d0f0e0d
DATA ·constants<>+56(SB)/8, $0x1d1d1d1d1d1f1e1d
// h = (f*g) % (2**130-5) [partial reduction]
#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \
VMLOF f0, g0, h0 \
VMLOF f0, g1, h1 \
VMLOF f0, g2, h2 \
VMLOF f0, g3, h3 \
VMLOF f0, g4, h4 \
VMLOF f1, g54, T_0 \
VMLOF f1, g0, T_1 \
VMLOF f1, g1, T_2 \
VMLOF f1, g2, T_3 \
VMLOF f1, g3, T_4 \
VMALOF f2, g53, h0, h0 \
VMALOF f2, g54, h1, h1 \
VMALOF f2, g0, h2, h2 \
VMALOF f2, g1, h3, h3 \
VMALOF f2, g2, h4, h4 \
VMALOF f3, g52, T_0, T_0 \
VMALOF f3, g53, T_1, T_1 \
VMALOF f3, g54, T_2, T_2 \
VMALOF f3, g0, T_3, T_3 \
VMALOF f3, g1, T_4, T_4 \
VMALOF f4, g51, h0, h0 \
VMALOF f4, g52, h1, h1 \
VMALOF f4, g53, h2, h2 \
VMALOF f4, g54, h3, h3 \
VMALOF f4, g0, h4, h4 \
VAG T_0, h0, h0 \
VAG T_1, h1, h1 \
VAG T_2, h2, h2 \
VAG T_3, h3, h3 \
VAG T_4, h4, h4
// carry h0->h1 h3->h4, h1->h2 h4->h0, h0->h1 h2->h3, h3->h4
#define REDUCE(h0, h1, h2, h3, h4) \
VESRLG $26, h0, T_0 \
VESRLG $26, h3, T_1 \
VN MOD26, h0, h0 \
VN MOD26, h3, h3 \
VAG T_0, h1, h1 \
VAG T_1, h4, h4 \
VESRLG $26, h1, T_2 \
VESRLG $26, h4, T_3 \
VN MOD26, h1, h1 \
VN MOD26, h4, h4 \
VESLG $2, T_3, T_4 \
VAG T_3, T_4, T_4 \
VAG T_2, h2, h2 \
VAG T_4, h0, h0 \
VESRLG $26, h2, T_0 \
VESRLG $26, h0, T_1 \
VN MOD26, h2, h2 \
VN MOD26, h0, h0 \
VAG T_0, h3, h3 \
VAG T_1, h1, h1 \
VESRLG $26, h3, T_2 \
VN MOD26, h3, h3 \
VAG T_2, h4, h4
// expand in0 into d[0] and in1 into d[1]
#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \
VGBM $0x0707, d1 \ // d1=tmp
VPERM in0, in1, EX2, d4 \
VPERM in0, in1, EX0, d0 \
VPERM in0, in1, EX1, d2 \
VN d1, d4, d4 \
VESRLG $26, d0, d1 \
VESRLG $30, d2, d3 \
VESRLG $4, d2, d2 \
VN MOD26, d0, d0 \
VN MOD26, d1, d1 \
VN MOD26, d2, d2 \
VN MOD26, d3, d3
// pack h4:h0 into h1:h0 (no carry)
#define PACK(h0, h1, h2, h3, h4) \
VESLG $26, h1, h1 \
VESLG $26, h3, h3 \
VO h0, h1, h0 \
VO h2, h3, h2 \
VESLG $4, h2, h2 \
VLEIB $7, $48, h1 \
VSLB h1, h2, h2 \
VO h0, h2, h0 \
VLEIB $7, $104, h1 \
VSLB h1, h4, h3 \
VO h3, h0, h0 \
VLEIB $7, $24, h1 \
VSRLB h1, h4, h1
// if h > 2**130-5 then h -= 2**130-5
#define MOD(h0, h1, t0, t1, t2) \
VZERO t0 \
VLEIG $1, $5, t0 \
VACCQ h0, t0, t1 \
VAQ h0, t0, t0 \
VONE t2 \
VLEIG $1, $-4, t2 \
VAQ t2, t1, t1 \
VACCQ h1, t1, t1 \
VONE t2 \
VAQ t2, t1, t1 \
VN h0, t1, t2 \
VNC t0, t1, t1 \
VO t1, t2, h0
// func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
TEXT ·poly1305vx(SB), $0-32
// This code processes up to 2 blocks (32 bytes) per iteration
// using the algorithm described in:
// NEON crypto, Daniel J. Bernstein & Peter Schwabe
// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key
// load MOD26, EX0, EX1 and EX2
MOVD $·constants<>(SB), R5
VLM (R5), MOD26, EX2
// setup r
VL (R4), T_0
MOVD $·keyMask<>(SB), R6
VL (R6), T_1
VN T_0, T_1, T_0
EXPAND(T_0, T_0, R_0, R_1, R_2, R_3, R_4)
// setup r*5
VLEIG $0, $5, T_0
VLEIG $1, $5, T_0
// store r (for final block)
VMLOF T_0, R_1, R5SAVE_1
VMLOF T_0, R_2, R5SAVE_2
VMLOF T_0, R_3, R5SAVE_3
VMLOF T_0, R_4, R5SAVE_4
VLGVG $0, R_0, RSAVE_0
VLGVG $0, R_1, RSAVE_1
VLGVG $0, R_2, RSAVE_2
VLGVG $0, R_3, RSAVE_3
VLGVG $0, R_4, RSAVE_4
// skip r**2 calculation
CMPBLE R3, $16, skip
// calculate r**2
MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5SAVE_1, R5SAVE_2, R5SAVE_3, R5SAVE_4, H_0, H_1, H_2, H_3, H_4)
REDUCE(H_0, H_1, H_2, H_3, H_4)
VLEIG $0, $5, T_0
VLEIG $1, $5, T_0
VMLOF T_0, H_1, R5_1
VMLOF T_0, H_2, R5_2
VMLOF T_0, H_3, R5_3
VMLOF T_0, H_4, R5_4
VLR H_0, R_0
VLR H_1, R_1
VLR H_2, R_2
VLR H_3, R_3
VLR H_4, R_4
// initialize h
VZERO H_0
VZERO H_1
VZERO H_2
VZERO H_3
VZERO H_4
loop:
CMPBLE R3, $32, b2
VLM (R2), T_0, T_1
SUB $32, R3
MOVD $32(R2), R2
EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4)
VLEIB $4, $1, F_4
VLEIB $12, $1, F_4
multiply:
VAG H_0, F_0, F_0
VAG H_1, F_1, F_1
VAG H_2, F_2, F_2
VAG H_3, F_3, F_3
VAG H_4, F_4, F_4
MULTIPLY(F_0, F_1, F_2, F_3, F_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)
REDUCE(H_0, H_1, H_2, H_3, H_4)
CMPBNE R3, $0, loop
finish:
// sum vectors
VZERO T_0
VSUMQG H_0, T_0, H_0
VSUMQG H_1, T_0, H_1
VSUMQG H_2, T_0, H_2
VSUMQG H_3, T_0, H_3
VSUMQG H_4, T_0, H_4
// h may be >= 2*(2**130-5) so we need to reduce it again
REDUCE(H_0, H_1, H_2, H_3, H_4)
// carry h1->h4
VESRLG $26, H_1, T_1
VN MOD26, H_1, H_1
VAQ T_1, H_2, H_2
VESRLG $26, H_2, T_2
VN MOD26, H_2, H_2
VAQ T_2, H_3, H_3
VESRLG $26, H_3, T_3
VN MOD26, H_3, H_3
VAQ T_3, H_4, H_4
// h is now < 2*(2**130-5)
// pack h into h1 (hi) and h0 (lo)
PACK(H_0, H_1, H_2, H_3, H_4)
// if h > 2**130-5 then h -= 2**130-5
MOD(H_0, H_1, T_0, T_1, T_2)
// h += s
MOVD $·bswapMask<>(SB), R5
VL (R5), T_1
VL 16(R4), T_0
VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big)
VAQ T_0, H_0, H_0
VPERM H_0, H_0, T_1, H_0 // reverse bytes (to little)
VST H_0, (R1)
RET
b2:
CMPBLE R3, $16, b1
// 2 blocks remaining
SUB $17, R3
VL (R2), T_0
VLL R3, 16(R2), T_1
ADD $1, R3
MOVBZ $1, R0
CMPBEQ R3, $16, 2(PC)
VLVGB R3, R0, T_1
EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4)
CMPBNE R3, $16, 2(PC)
VLEIB $12, $1, F_4
VLEIB $4, $1, F_4
// setup [r²,r]
VLVGG $1, RSAVE_0, R_0
VLVGG $1, RSAVE_1, R_1
VLVGG $1, RSAVE_2, R_2
VLVGG $1, RSAVE_3, R_3
VLVGG $1, RSAVE_4, R_4
VPDI $0, R5_1, R5SAVE_1, R5_1
VPDI $0, R5_2, R5SAVE_2, R5_2
VPDI $0, R5_3, R5SAVE_3, R5_3
VPDI $0, R5_4, R5SAVE_4, R5_4
MOVD $0, R3
BR multiply
skip:
VZERO H_0
VZERO H_1
VZERO H_2
VZERO H_3
VZERO H_4
CMPBEQ R3, $0, finish
b1:
// 1 block remaining
SUB $1, R3
VLL R3, (R2), T_0
ADD $1, R3
MOVBZ $1, R0
CMPBEQ R3, $16, 2(PC)
VLVGB R3, R0, T_0
VZERO T_1
EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4)
CMPBNE R3, $16, 2(PC)
VLEIB $4, $1, F_4
VLEIG $1, $1, R_0
VZERO R_1
VZERO R_2
VZERO R_3
VZERO R_4
VZERO R5_1
VZERO R5_2
VZERO R5_3
VZERO R5_4
// setup [r, 1]
VLVGG $0, RSAVE_0, R_0
VLVGG $0, RSAVE_1, R_1
VLVGG $0, RSAVE_2, R_2
VLVGG $0, RSAVE_3, R_3
VLVGG $0, RSAVE_4, R_4
VPDI $0, R5SAVE_1, R5_1, R5_1
VPDI $0, R5SAVE_2, R5_2, R5_2
VPDI $0, R5SAVE_3, R5_3, R5_3
VPDI $0, R5SAVE_4, R5_4, R5_4
MOVD $0, R3
BR multiply
TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1
MOVD $x-24(SP), R1
XC $24, 0(R1), 0(R1) // clear the storage
MOVD $2, R0 // R0 is the number of double words stored -1
WORD $0xB2B01000 // STFLE 0(R1)
XOR R0, R0 // reset the value of R0
MOVBZ z-8(SP), R1
AND $0x40, R1
BEQ novector
vectorinstalled:
// check if the vector instruction has been enabled
VLEIB $0, $0xF, V16
VLGVB $0, V16, R1
CMPBNE R1, $0xF, novector
MOVB $1, ret+0(FP) // have vx
RET
novector:
MOVB $0, ret+0(FP) // no vx
RET

931
vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s generated vendored Normal file
View file

@ -0,0 +1,931 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,go1.11,!gccgo,!appengine
#include "textflag.h"
// Implementation of Poly1305 using the vector facility (vx) and the VMSL instruction.
// constants
#define EX0 V1
#define EX1 V2
#define EX2 V3
// temporaries
#define T_0 V4
#define T_1 V5
#define T_2 V6
#define T_3 V7
#define T_4 V8
#define T_5 V9
#define T_6 V10
#define T_7 V11
#define T_8 V12
#define T_9 V13
#define T_10 V14
// r**2 & r**4
#define R_0 V15
#define R_1 V16
#define R_2 V17
#define R5_1 V18
#define R5_2 V19
// key (r)
#define RSAVE_0 R7
#define RSAVE_1 R8
#define RSAVE_2 R9
#define R5SAVE_1 R10
#define R5SAVE_2 R11
// message block
#define M0 V20
#define M1 V21
#define M2 V22
#define M3 V23
#define M4 V24
#define M5 V25
// accumulator
#define H0_0 V26
#define H1_0 V27
#define H2_0 V28
#define H0_1 V29
#define H1_1 V30
#define H2_1 V31
GLOBL ·keyMask<>(SB), RODATA, $16
DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f
DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f
GLOBL ·bswapMask<>(SB), RODATA, $16
DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908
DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100
GLOBL ·constants<>(SB), RODATA, $48
// EX0
DATA ·constants<>+0(SB)/8, $0x18191a1b1c1d1e1f
DATA ·constants<>+8(SB)/8, $0x0000050403020100
// EX1
DATA ·constants<>+16(SB)/8, $0x18191a1b1c1d1e1f
DATA ·constants<>+24(SB)/8, $0x00000a0908070605
// EX2
DATA ·constants<>+32(SB)/8, $0x18191a1b1c1d1e1f
DATA ·constants<>+40(SB)/8, $0x0000000f0e0d0c0b
GLOBL ·c<>(SB), RODATA, $48
// EX0
DATA ·c<>+0(SB)/8, $0x0000050403020100
DATA ·c<>+8(SB)/8, $0x0000151413121110
// EX1
DATA ·c<>+16(SB)/8, $0x00000a0908070605
DATA ·c<>+24(SB)/8, $0x00001a1918171615
// EX2
DATA ·c<>+32(SB)/8, $0x0000000f0e0d0c0b
DATA ·c<>+40(SB)/8, $0x0000001f1e1d1c1b
GLOBL ·reduce<>(SB), RODATA, $32
// 44 bit
DATA ·reduce<>+0(SB)/8, $0x0
DATA ·reduce<>+8(SB)/8, $0xfffffffffff
// 42 bit
DATA ·reduce<>+16(SB)/8, $0x0
DATA ·reduce<>+24(SB)/8, $0x3ffffffffff
// h = (f*g) % (2**130-5) [partial reduction]
// uses T_0...T_9 temporary registers
// input: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2
// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
// output: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2
#define MULTIPLY(m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9) \
\ // Eliminate the dependency for the last 2 VMSLs
VMSLG m02_0, r_2, m4_2, m4_2 \
VMSLG m13_0, r_2, m5_2, m5_2 \ // 8 VMSLs pipelined
VMSLG m02_0, r_0, m4_0, m4_0 \
VMSLG m02_1, r5_2, V0, T_0 \
VMSLG m02_0, r_1, m4_1, m4_1 \
VMSLG m02_1, r_0, V0, T_1 \
VMSLG m02_1, r_1, V0, T_2 \
VMSLG m02_2, r5_1, V0, T_3 \
VMSLG m02_2, r5_2, V0, T_4 \
VMSLG m13_0, r_0, m5_0, m5_0 \
VMSLG m13_1, r5_2, V0, T_5 \
VMSLG m13_0, r_1, m5_1, m5_1 \
VMSLG m13_1, r_0, V0, T_6 \
VMSLG m13_1, r_1, V0, T_7 \
VMSLG m13_2, r5_1, V0, T_8 \
VMSLG m13_2, r5_2, V0, T_9 \
VMSLG m02_2, r_0, m4_2, m4_2 \
VMSLG m13_2, r_0, m5_2, m5_2 \
VAQ m4_0, T_0, m02_0 \
VAQ m4_1, T_1, m02_1 \
VAQ m5_0, T_5, m13_0 \
VAQ m5_1, T_6, m13_1 \
VAQ m02_0, T_3, m02_0 \
VAQ m02_1, T_4, m02_1 \
VAQ m13_0, T_8, m13_0 \
VAQ m13_1, T_9, m13_1 \
VAQ m4_2, T_2, m02_2 \
VAQ m5_2, T_7, m13_2 \
// SQUARE uses three limbs of r and r_2*5 to output square of r
// uses T_1, T_5 and T_7 temporary registers
// input: r_0, r_1, r_2, r5_2
// temp: TEMP0, TEMP1, TEMP2
// output: p0, p1, p2
#define SQUARE(r_0, r_1, r_2, r5_2, p0, p1, p2, TEMP0, TEMP1, TEMP2) \
VMSLG r_0, r_0, p0, p0 \
VMSLG r_1, r5_2, V0, TEMP0 \
VMSLG r_2, r5_2, p1, p1 \
VMSLG r_0, r_1, V0, TEMP1 \
VMSLG r_1, r_1, p2, p2 \
VMSLG r_0, r_2, V0, TEMP2 \
VAQ TEMP0, p0, p0 \
VAQ TEMP1, p1, p1 \
VAQ TEMP2, p2, p2 \
VAQ TEMP0, p0, p0 \
VAQ TEMP1, p1, p1 \
VAQ TEMP2, p2, p2 \
// carry h0->h1->h2->h0 || h3->h4->h5->h3
// uses T_2, T_4, T_5, T_7, T_8, T_9
// t6, t7, t8, t9, t10, t11
// input: h0, h1, h2, h3, h4, h5
// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11
// output: h0, h1, h2, h3, h4, h5
#define REDUCE(h0, h1, h2, h3, h4, h5, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) \
VLM (R12), t6, t7 \ // 44 and 42 bit clear mask
VLEIB $7, $0x28, t10 \ // 5 byte shift mask
VREPIB $4, t8 \ // 4 bit shift mask
VREPIB $2, t11 \ // 2 bit shift mask
VSRLB t10, h0, t0 \ // h0 byte shift
VSRLB t10, h1, t1 \ // h1 byte shift
VSRLB t10, h2, t2 \ // h2 byte shift
VSRLB t10, h3, t3 \ // h3 byte shift
VSRLB t10, h4, t4 \ // h4 byte shift
VSRLB t10, h5, t5 \ // h5 byte shift
VSRL t8, t0, t0 \ // h0 bit shift
VSRL t8, t1, t1 \ // h2 bit shift
VSRL t11, t2, t2 \ // h2 bit shift
VSRL t8, t3, t3 \ // h3 bit shift
VSRL t8, t4, t4 \ // h4 bit shift
VESLG $2, t2, t9 \ // h2 carry x5
VSRL t11, t5, t5 \ // h5 bit shift
VN t6, h0, h0 \ // h0 clear carry
VAQ t2, t9, t2 \ // h2 carry x5
VESLG $2, t5, t9 \ // h5 carry x5
VN t6, h1, h1 \ // h1 clear carry
VN t7, h2, h2 \ // h2 clear carry
VAQ t5, t9, t5 \ // h5 carry x5
VN t6, h3, h3 \ // h3 clear carry
VN t6, h4, h4 \ // h4 clear carry
VN t7, h5, h5 \ // h5 clear carry
VAQ t0, h1, h1 \ // h0->h1
VAQ t3, h4, h4 \ // h3->h4
VAQ t1, h2, h2 \ // h1->h2
VAQ t4, h5, h5 \ // h4->h5
VAQ t2, h0, h0 \ // h2->h0
VAQ t5, h3, h3 \ // h5->h3
VREPG $1, t6, t6 \ // 44 and 42 bit masks across both halves
VREPG $1, t7, t7 \
VSLDB $8, h0, h0, h0 \ // set up [h0/1/2, h3/4/5]
VSLDB $8, h1, h1, h1 \
VSLDB $8, h2, h2, h2 \
VO h0, h3, h3 \
VO h1, h4, h4 \
VO h2, h5, h5 \
VESRLG $44, h3, t0 \ // 44 bit shift right
VESRLG $44, h4, t1 \
VESRLG $42, h5, t2 \
VN t6, h3, h3 \ // clear carry bits
VN t6, h4, h4 \
VN t7, h5, h5 \
VESLG $2, t2, t9 \ // multiply carry by 5
VAQ t9, t2, t2 \
VAQ t0, h4, h4 \
VAQ t1, h5, h5 \
VAQ t2, h3, h3 \
// carry h0->h1->h2->h0
// input: h0, h1, h2
// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8
// output: h0, h1, h2
#define REDUCE2(h0, h1, h2, t0, t1, t2, t3, t4, t5, t6, t7, t8) \
VLEIB $7, $0x28, t3 \ // 5 byte shift mask
VREPIB $4, t4 \ // 4 bit shift mask
VREPIB $2, t7 \ // 2 bit shift mask
VGBM $0x003F, t5 \ // mask to clear carry bits
VSRLB t3, h0, t0 \
VSRLB t3, h1, t1 \
VSRLB t3, h2, t2 \
VESRLG $4, t5, t5 \ // 44 bit clear mask
VSRL t4, t0, t0 \
VSRL t4, t1, t1 \
VSRL t7, t2, t2 \
VESRLG $2, t5, t6 \ // 42 bit clear mask
VESLG $2, t2, t8 \
VAQ t8, t2, t2 \
VN t5, h0, h0 \
VN t5, h1, h1 \
VN t6, h2, h2 \
VAQ t0, h1, h1 \
VAQ t1, h2, h2 \
VAQ t2, h0, h0 \
VSRLB t3, h0, t0 \
VSRLB t3, h1, t1 \
VSRLB t3, h2, t2 \
VSRL t4, t0, t0 \
VSRL t4, t1, t1 \
VSRL t7, t2, t2 \
VN t5, h0, h0 \
VN t5, h1, h1 \
VESLG $2, t2, t8 \
VN t6, h2, h2 \
VAQ t0, h1, h1 \
VAQ t8, t2, t2 \
VAQ t1, h2, h2 \
VAQ t2, h0, h0 \
// expands two message blocks into the lower halves of the d registers
// moves the contents of the d registers into the upper halves
// input: in1, in2, d0, d1, d2, d3, d4, d5
// temp: TEMP0, TEMP1, TEMP2, TEMP3
// output: d0, d1, d2, d3, d4, d5
#define EXPACC(in1, in2, d0, d1, d2, d3, d4, d5, TEMP0, TEMP1, TEMP2, TEMP3) \
VGBM $0xff3f, TEMP0 \
VGBM $0xff1f, TEMP1 \
VESLG $4, d1, TEMP2 \
VESLG $4, d4, TEMP3 \
VESRLG $4, TEMP0, TEMP0 \
VPERM in1, d0, EX0, d0 \
VPERM in2, d3, EX0, d3 \
VPERM in1, d2, EX2, d2 \
VPERM in2, d5, EX2, d5 \
VPERM in1, TEMP2, EX1, d1 \
VPERM in2, TEMP3, EX1, d4 \
VN TEMP0, d0, d0 \
VN TEMP0, d3, d3 \
VESRLG $4, d1, d1 \
VESRLG $4, d4, d4 \
VN TEMP1, d2, d2 \
VN TEMP1, d5, d5 \
VN TEMP0, d1, d1 \
VN TEMP0, d4, d4 \
// expands one message block into the lower halves of the d registers
// moves the contents of the d registers into the upper halves
// input: in, d0, d1, d2
// temp: TEMP0, TEMP1, TEMP2
// output: d0, d1, d2
#define EXPACC2(in, d0, d1, d2, TEMP0, TEMP1, TEMP2) \
VGBM $0xff3f, TEMP0 \
VESLG $4, d1, TEMP2 \
VGBM $0xff1f, TEMP1 \
VPERM in, d0, EX0, d0 \
VESRLG $4, TEMP0, TEMP0 \
VPERM in, d2, EX2, d2 \
VPERM in, TEMP2, EX1, d1 \
VN TEMP0, d0, d0 \
VN TEMP1, d2, d2 \
VESRLG $4, d1, d1 \
VN TEMP0, d1, d1 \
// pack h2:h0 into h1:h0 (no carry)
// input: h0, h1, h2
// output: h0, h1, h2
#define PACK(h0, h1, h2) \
VMRLG h1, h2, h2 \ // copy h1 to upper half h2
VESLG $44, h1, h1 \ // shift limb 1 44 bits, leaving 20
VO h0, h1, h0 \ // combine h0 with 20 bits from limb 1
VESRLG $20, h2, h1 \ // put top 24 bits of limb 1 into h1
VLEIG $1, $0, h1 \ // clear h2 stuff from lower half of h1
VO h0, h1, h0 \ // h0 now has 88 bits (limb 0 and 1)
VLEIG $0, $0, h2 \ // clear upper half of h2
VESRLG $40, h2, h1 \ // h1 now has upper two bits of result
VLEIB $7, $88, h1 \ // for byte shift (11 bytes)
VSLB h1, h2, h2 \ // shift h2 11 bytes to the left
VO h0, h2, h0 \ // combine h0 with 20 bits from limb 1
VLEIG $0, $0, h1 \ // clear upper half of h1
// if h > 2**130-5 then h -= 2**130-5
// input: h0, h1
// temp: t0, t1, t2
// output: h0
#define MOD(h0, h1, t0, t1, t2) \
VZERO t0 \
VLEIG $1, $5, t0 \
VACCQ h0, t0, t1 \
VAQ h0, t0, t0 \
VONE t2 \
VLEIG $1, $-4, t2 \
VAQ t2, t1, t1 \
VACCQ h1, t1, t1 \
VONE t2 \
VAQ t2, t1, t1 \
VN h0, t1, t2 \
VNC t0, t1, t1 \
VO t1, t2, h0 \
// func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
TEXT ·poly1305vmsl(SB), $0-32
// This code processes 6 + up to 4 blocks (32 bytes) per iteration
// using the algorithm described in:
// NEON crypto, Daniel J. Bernstein & Peter Schwabe
// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
// and as modified for VMSL as described in
// Accelerating Poly1305 Cryptographic Message Authentication on the z14
// O'Farrell et al, CASCON 2017, p48-55
// https://ibm.ent.box.com/s/jf9gedj0e9d2vjctfyh186shaztavnht
LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key
VZERO V0 // c
// load EX0, EX1 and EX2
MOVD $·constants<>(SB), R5
VLM (R5), EX0, EX2 // c
// setup r
VL (R4), T_0
MOVD $·keyMask<>(SB), R6
VL (R6), T_1
VN T_0, T_1, T_0
VZERO T_2 // limbs for r
VZERO T_3
VZERO T_4
EXPACC2(T_0, T_2, T_3, T_4, T_1, T_5, T_7)
// T_2, T_3, T_4: [0, r]
// setup r*20
VLEIG $0, $0, T_0
VLEIG $1, $20, T_0 // T_0: [0, 20]
VZERO T_5
VZERO T_6
VMSLG T_0, T_3, T_5, T_5
VMSLG T_0, T_4, T_6, T_6
// store r for final block in GR
VLGVG $1, T_2, RSAVE_0 // c
VLGVG $1, T_3, RSAVE_1 // c
VLGVG $1, T_4, RSAVE_2 // c
VLGVG $1, T_5, R5SAVE_1 // c
VLGVG $1, T_6, R5SAVE_2 // c
// initialize h
VZERO H0_0
VZERO H1_0
VZERO H2_0
VZERO H0_1
VZERO H1_1
VZERO H2_1
// initialize pointer for reduce constants
MOVD $·reduce<>(SB), R12
// calculate r**2 and 20*(r**2)
VZERO R_0
VZERO R_1
VZERO R_2
SQUARE(T_2, T_3, T_4, T_6, R_0, R_1, R_2, T_1, T_5, T_7)
REDUCE2(R_0, R_1, R_2, M0, M1, M2, M3, M4, R5_1, R5_2, M5, T_1)
VZERO R5_1
VZERO R5_2
VMSLG T_0, R_1, R5_1, R5_1
VMSLG T_0, R_2, R5_2, R5_2
// skip r**4 calculation if 3 blocks or less
CMPBLE R3, $48, b4
// calculate r**4 and 20*(r**4)
VZERO T_8
VZERO T_9
VZERO T_10
SQUARE(R_0, R_1, R_2, R5_2, T_8, T_9, T_10, T_1, T_5, T_7)
REDUCE2(T_8, T_9, T_10, M0, M1, M2, M3, M4, T_2, T_3, M5, T_1)
VZERO T_2
VZERO T_3
VMSLG T_0, T_9, T_2, T_2
VMSLG T_0, T_10, T_3, T_3
// put r**2 to the right and r**4 to the left of R_0, R_1, R_2
VSLDB $8, T_8, T_8, T_8
VSLDB $8, T_9, T_9, T_9
VSLDB $8, T_10, T_10, T_10
VSLDB $8, T_2, T_2, T_2
VSLDB $8, T_3, T_3, T_3
VO T_8, R_0, R_0
VO T_9, R_1, R_1
VO T_10, R_2, R_2
VO T_2, R5_1, R5_1
VO T_3, R5_2, R5_2
CMPBLE R3, $80, load // less than or equal to 5 blocks in message
// 6(or 5+1) blocks
SUB $81, R3
VLM (R2), M0, M4
VLL R3, 80(R2), M5
ADD $1, R3
MOVBZ $1, R0
CMPBGE R3, $16, 2(PC)
VLVGB R3, R0, M5
MOVD $96(R2), R2
EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
EXPACC(M2, M3, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
VLEIB $2, $1, H2_0
VLEIB $2, $1, H2_1
VLEIB $10, $1, H2_0
VLEIB $10, $1, H2_1
VZERO M0
VZERO M1
VZERO M2
VZERO M3
VZERO T_4
VZERO T_10
EXPACC(M4, M5, M0, M1, M2, M3, T_4, T_10, T_0, T_1, T_2, T_3)
VLR T_4, M4
VLEIB $10, $1, M2
CMPBLT R3, $16, 2(PC)
VLEIB $10, $1, T_10
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9)
VMRHG V0, H0_1, H0_0
VMRHG V0, H1_1, H1_0
VMRHG V0, H2_1, H2_0
VMRLG V0, H0_1, H0_1
VMRLG V0, H1_1, H1_1
VMRLG V0, H2_1, H2_1
SUB $16, R3
CMPBLE R3, $0, square
load:
// load EX0, EX1 and EX2
MOVD $·c<>(SB), R5
VLM (R5), EX0, EX2
loop:
CMPBLE R3, $64, add // b4 // last 4 or less blocks left
// next 4 full blocks
VLM (R2), M2, M5
SUB $64, R3
MOVD $64(R2), R2
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, T_0, T_1, T_3, T_4, T_5, T_2, T_7, T_8, T_9)
// expacc in-lined to create [m2, m3] limbs
VGBM $0x3f3f, T_0 // 44 bit clear mask
VGBM $0x1f1f, T_1 // 40 bit clear mask
VPERM M2, M3, EX0, T_3
VESRLG $4, T_0, T_0 // 44 bit clear mask ready
VPERM M2, M3, EX1, T_4
VPERM M2, M3, EX2, T_5
VN T_0, T_3, T_3
VESRLG $4, T_4, T_4
VN T_1, T_5, T_5
VN T_0, T_4, T_4
VMRHG H0_1, T_3, H0_0
VMRHG H1_1, T_4, H1_0
VMRHG H2_1, T_5, H2_0
VMRLG H0_1, T_3, H0_1
VMRLG H1_1, T_4, H1_1
VMRLG H2_1, T_5, H2_1
VLEIB $10, $1, H2_0
VLEIB $10, $1, H2_1
VPERM M4, M5, EX0, T_3
VPERM M4, M5, EX1, T_4
VPERM M4, M5, EX2, T_5
VN T_0, T_3, T_3
VESRLG $4, T_4, T_4
VN T_1, T_5, T_5
VN T_0, T_4, T_4
VMRHG V0, T_3, M0
VMRHG V0, T_4, M1
VMRHG V0, T_5, M2
VMRLG V0, T_3, M3
VMRLG V0, T_4, M4
VMRLG V0, T_5, M5
VLEIB $10, $1, M2
VLEIB $10, $1, M5
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
CMPBNE R3, $0, loop
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
VMRHG V0, H0_1, H0_0
VMRHG V0, H1_1, H1_0
VMRHG V0, H2_1, H2_0
VMRLG V0, H0_1, H0_1
VMRLG V0, H1_1, H1_1
VMRLG V0, H2_1, H2_1
// load EX0, EX1, EX2
MOVD $·constants<>(SB), R5
VLM (R5), EX0, EX2
// sum vectors
VAQ H0_0, H0_1, H0_0
VAQ H1_0, H1_1, H1_0
VAQ H2_0, H2_1, H2_0
// h may be >= 2*(2**130-5) so we need to reduce it again
// M0...M4 are used as temps here
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
next: // carry h1->h2
VLEIB $7, $0x28, T_1
VREPIB $4, T_2
VGBM $0x003F, T_3
VESRLG $4, T_3
// byte shift
VSRLB T_1, H1_0, T_4
// bit shift
VSRL T_2, T_4, T_4
// clear h1 carry bits
VN T_3, H1_0, H1_0
// add carry
VAQ T_4, H2_0, H2_0
// h is now < 2*(2**130-5)
// pack h into h1 (hi) and h0 (lo)
PACK(H0_0, H1_0, H2_0)
// if h > 2**130-5 then h -= 2**130-5
MOD(H0_0, H1_0, T_0, T_1, T_2)
// h += s
MOVD $·bswapMask<>(SB), R5
VL (R5), T_1
VL 16(R4), T_0
VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big)
VAQ T_0, H0_0, H0_0
VPERM H0_0, H0_0, T_1, H0_0 // reverse bytes (to little)
VST H0_0, (R1)
RET
add:
// load EX0, EX1, EX2
MOVD $·constants<>(SB), R5
VLM (R5), EX0, EX2
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
VMRHG V0, H0_1, H0_0
VMRHG V0, H1_1, H1_0
VMRHG V0, H2_1, H2_0
VMRLG V0, H0_1, H0_1
VMRLG V0, H1_1, H1_1
VMRLG V0, H2_1, H2_1
CMPBLE R3, $64, b4
b4:
CMPBLE R3, $48, b3 // 3 blocks or less
// 4(3+1) blocks remaining
SUB $49, R3
VLM (R2), M0, M2
VLL R3, 48(R2), M3
ADD $1, R3
MOVBZ $1, R0
CMPBEQ R3, $16, 2(PC)
VLVGB R3, R0, M3
MOVD $64(R2), R2
EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
VLEIB $10, $1, H2_0
VLEIB $10, $1, H2_1
VZERO M0
VZERO M1
VZERO M4
VZERO M5
VZERO T_4
VZERO T_10
EXPACC(M2, M3, M0, M1, M4, M5, T_4, T_10, T_0, T_1, T_2, T_3)
VLR T_4, M2
VLEIB $10, $1, M4
CMPBNE R3, $16, 2(PC)
VLEIB $10, $1, T_10
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M4, M5, M2, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
VMRHG V0, H0_1, H0_0
VMRHG V0, H1_1, H1_0
VMRHG V0, H2_1, H2_0
VMRLG V0, H0_1, H0_1
VMRLG V0, H1_1, H1_1
VMRLG V0, H2_1, H2_1
SUB $16, R3
CMPBLE R3, $0, square // this condition must always hold true!
b3:
CMPBLE R3, $32, b2
// 3 blocks remaining
// setup [r²,r]
VSLDB $8, R_0, R_0, R_0
VSLDB $8, R_1, R_1, R_1
VSLDB $8, R_2, R_2, R_2
VSLDB $8, R5_1, R5_1, R5_1
VSLDB $8, R5_2, R5_2, R5_2
VLVGG $1, RSAVE_0, R_0
VLVGG $1, RSAVE_1, R_1
VLVGG $1, RSAVE_2, R_2
VLVGG $1, R5SAVE_1, R5_1
VLVGG $1, R5SAVE_2, R5_2
// setup [h0, h1]
VSLDB $8, H0_0, H0_0, H0_0
VSLDB $8, H1_0, H1_0, H1_0
VSLDB $8, H2_0, H2_0, H2_0
VO H0_1, H0_0, H0_0
VO H1_1, H1_0, H1_0
VO H2_1, H2_0, H2_0
VZERO H0_1
VZERO H1_1
VZERO H2_1
VZERO M0
VZERO M1
VZERO M2
VZERO M3
VZERO M4
VZERO M5
// H*[r**2, r]
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, T_10, M5)
SUB $33, R3
VLM (R2), M0, M1
VLL R3, 32(R2), M2
ADD $1, R3
MOVBZ $1, R0
CMPBEQ R3, $16, 2(PC)
VLVGB R3, R0, M2
// H += m0
VZERO T_1
VZERO T_2
VZERO T_3
EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)
VLEIB $10, $1, T_3
VAG H0_0, T_1, H0_0
VAG H1_0, T_2, H1_0
VAG H2_0, T_3, H2_0
VZERO M0
VZERO M3
VZERO M4
VZERO M5
VZERO T_10
// (H+m0)*r
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M3, M4, M5, V0, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_10, H0_1, H1_1, H2_1, T_9)
// H += m1
VZERO V0
VZERO T_1
VZERO T_2
VZERO T_3
EXPACC2(M1, T_1, T_2, T_3, T_4, T_5, T_6)
VLEIB $10, $1, T_3
VAQ H0_0, T_1, H0_0
VAQ H1_0, T_2, H1_0
VAQ H2_0, T_3, H2_0
REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10)
// [H, m2] * [r**2, r]
EXPACC2(M2, H0_0, H1_0, H2_0, T_1, T_2, T_3)
CMPBNE R3, $16, 2(PC)
VLEIB $10, $1, H2_0
VZERO M0
VZERO M1
VZERO M2
VZERO M3
VZERO M4
VZERO M5
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, M5, T_10)
SUB $16, R3
CMPBLE R3, $0, next // this condition must always hold true!
b2:
CMPBLE R3, $16, b1
// 2 blocks remaining
// setup [r²,r]
VSLDB $8, R_0, R_0, R_0
VSLDB $8, R_1, R_1, R_1
VSLDB $8, R_2, R_2, R_2
VSLDB $8, R5_1, R5_1, R5_1
VSLDB $8, R5_2, R5_2, R5_2
VLVGG $1, RSAVE_0, R_0
VLVGG $1, RSAVE_1, R_1
VLVGG $1, RSAVE_2, R_2
VLVGG $1, R5SAVE_1, R5_1
VLVGG $1, R5SAVE_2, R5_2
// setup [h0, h1]
VSLDB $8, H0_0, H0_0, H0_0
VSLDB $8, H1_0, H1_0, H1_0
VSLDB $8, H2_0, H2_0, H2_0
VO H0_1, H0_0, H0_0
VO H1_1, H1_0, H1_0
VO H2_1, H2_0, H2_0
VZERO H0_1
VZERO H1_1
VZERO H2_1
VZERO M0
VZERO M1
VZERO M2
VZERO M3
VZERO M4
VZERO M5
// H*[r**2, r]
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9)
VMRHG V0, H0_1, H0_0
VMRHG V0, H1_1, H1_0
VMRHG V0, H2_1, H2_0
VMRLG V0, H0_1, H0_1
VMRLG V0, H1_1, H1_1
VMRLG V0, H2_1, H2_1
// move h to the left and 0s at the right
VSLDB $8, H0_0, H0_0, H0_0
VSLDB $8, H1_0, H1_0, H1_0
VSLDB $8, H2_0, H2_0, H2_0
// get message blocks and append 1 to start
SUB $17, R3
VL (R2), M0
VLL R3, 16(R2), M1
ADD $1, R3
MOVBZ $1, R0
CMPBEQ R3, $16, 2(PC)
VLVGB R3, R0, M1
VZERO T_6
VZERO T_7
VZERO T_8
EXPACC2(M0, T_6, T_7, T_8, T_1, T_2, T_3)
EXPACC2(M1, T_6, T_7, T_8, T_1, T_2, T_3)
VLEIB $2, $1, T_8
CMPBNE R3, $16, 2(PC)
VLEIB $10, $1, T_8
// add [m0, m1] to h
VAG H0_0, T_6, H0_0
VAG H1_0, T_7, H1_0
VAG H2_0, T_8, H2_0
VZERO M2
VZERO M3
VZERO M4
VZERO M5
VZERO T_10
VZERO M0
// at this point R_0 .. R5_2 look like [r**2, r]
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M2, M3, M4, M5, T_10, M0, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE2(H0_0, H1_0, H2_0, M2, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10)
SUB $16, R3, R3
CMPBLE R3, $0, next
b1:
CMPBLE R3, $0, next
// 1 block remaining
// setup [r²,r]
VSLDB $8, R_0, R_0, R_0
VSLDB $8, R_1, R_1, R_1
VSLDB $8, R_2, R_2, R_2
VSLDB $8, R5_1, R5_1, R5_1
VSLDB $8, R5_2, R5_2, R5_2
VLVGG $1, RSAVE_0, R_0
VLVGG $1, RSAVE_1, R_1
VLVGG $1, RSAVE_2, R_2
VLVGG $1, R5SAVE_1, R5_1
VLVGG $1, R5SAVE_2, R5_2
// setup [h0, h1]
VSLDB $8, H0_0, H0_0, H0_0
VSLDB $8, H1_0, H1_0, H1_0
VSLDB $8, H2_0, H2_0, H2_0
VO H0_1, H0_0, H0_0
VO H1_1, H1_0, H1_0
VO H2_1, H2_0, H2_0
VZERO H0_1
VZERO H1_1
VZERO H2_1
VZERO M0
VZERO M1
VZERO M2
VZERO M3
VZERO M4
VZERO M5
// H*[r**2, r]
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
// set up [0, m0] limbs
SUB $1, R3
VLL R3, (R2), M0
ADD $1, R3
MOVBZ $1, R0
CMPBEQ R3, $16, 2(PC)
VLVGB R3, R0, M0
VZERO T_1
VZERO T_2
VZERO T_3
EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)// limbs: [0, m]
CMPBNE R3, $16, 2(PC)
VLEIB $10, $1, T_3
// h+m0
VAQ H0_0, T_1, H0_0
VAQ H1_0, T_2, H1_0
VAQ H2_0, T_3, H2_0
VZERO M0
VZERO M1
VZERO M2
VZERO M3
VZERO M4
VZERO M5
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
BR next
square:
// setup [r²,r]
VSLDB $8, R_0, R_0, R_0
VSLDB $8, R_1, R_1, R_1
VSLDB $8, R_2, R_2, R_2
VSLDB $8, R5_1, R5_1, R5_1
VSLDB $8, R5_2, R5_2, R5_2
VLVGG $1, RSAVE_0, R_0
VLVGG $1, RSAVE_1, R_1
VLVGG $1, RSAVE_2, R_2
VLVGG $1, R5SAVE_1, R5_1
VLVGG $1, R5SAVE_2, R5_2
// setup [h0, h1]
VSLDB $8, H0_0, H0_0, H0_0
VSLDB $8, H1_0, H1_0, H1_0
VSLDB $8, H2_0, H2_0, H2_0
VO H0_1, H0_0, H0_0
VO H1_1, H1_0, H1_0
VO H2_1, H2_0, H2_0
VZERO H0_1
VZERO H1_1
VZERO H2_1
VZERO M0
VZERO M1
VZERO M2
VZERO M3
VZERO M4
VZERO M5
// (h0*r**2) + (h1*r)
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
BR next
TEXT ·hasVMSLFacility(SB), NOSPLIT, $24-1
MOVD $x-24(SP), R1
XC $24, 0(R1), 0(R1) // clear the storage
MOVD $2, R0 // R0 is the number of double words stored -1
WORD $0xB2B01000 // STFLE 0(R1)
XOR R0, R0 // reset the value of R0
MOVBZ z-8(SP), R1
AND $0x01, R1
BEQ novmsl
vectorinstalled:
// check if the vector instruction has been enabled
VLEIB $0, $0xF, V16
VLGVB $0, V16, R1
CMPBNE R1, $0xF, novmsl
MOVB $1, ret+0(FP) // have vx
RET
novmsl:
MOVB $0, ret+0(FP) // no vx
RET

View file

@ -102,7 +102,7 @@ func (r *keyring) Unlock(passphrase []byte) error {
if !r.locked {
return errors.New("agent: not locked")
}
if len(passphrase) != len(r.passphrase) || 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) {
if 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) {
return fmt.Errorf("agent: incorrect passphrase")
}
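The removed length check is redundant: `subtle.ConstantTimeCompare` is documented to return 0 immediately when the slice lengths differ, so the comparison alone is sufficient. A small, self-contained sketch (illustrative only, not part of this change) of that behaviour:

```go
package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	// Equal length and equal contents: reports 1.
	fmt.Println(subtle.ConstantTimeCompare([]byte("secret"), []byte("secret"))) // 1
	// Different lengths: returns 0 immediately, which is why the explicit
	// length check above could be dropped.
	fmt.Println(subtle.ConstantTimeCompare([]byte("secret"), []byte("secrets"))) // 0
	// Same length, different contents: 0, in time independent of the contents.
	fmt.Println(subtle.ConstantTimeCompare([]byte("secret"), []byte("Secret"))) // 0
}
```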

View file

@ -16,6 +16,7 @@ import (
"hash"
"io"
"io/ioutil"
"math/bits"
"golang.org/x/crypto/internal/chacha20"
"golang.org/x/crypto/poly1305"
@ -641,8 +642,8 @@ const chacha20Poly1305ID = "chacha20-poly1305@openssh.com"
// the methods here also implement padding, which RFC4253 Section 6
// also requires of stream ciphers.
type chacha20Poly1305Cipher struct {
lengthKey [32]byte
contentKey [32]byte
lengthKey [8]uint32
contentKey [8]uint32
buf []byte
}
@ -655,20 +656,21 @@ func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionA
buf: make([]byte, 256),
}
copy(c.contentKey[:], key[:32])
copy(c.lengthKey[:], key[32:])
for i := range c.contentKey {
c.contentKey[i] = binary.LittleEndian.Uint32(key[i*4 : (i+1)*4])
}
for i := range c.lengthKey {
c.lengthKey[i] = binary.LittleEndian.Uint32(key[(i+8)*4 : (i+9)*4])
}
return c, nil
}
// The Poly1305 key is obtained by encrypting 32 0-bytes.
var chacha20PolyKeyInput [32]byte
func (c *chacha20Poly1305Cipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
var counter [16]byte
binary.BigEndian.PutUint64(counter[8:], uint64(seqNum))
nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)}
s := chacha20.New(c.contentKey, nonce)
var polyKey [32]byte
chacha20.XORKeyStream(polyKey[:], chacha20PolyKeyInput[:], &counter, &c.contentKey)
s.XORKeyStream(polyKey[:], polyKey[:])
s.Advance() // skip next 32 bytes
encryptedLength := c.buf[:4]
if _, err := io.ReadFull(r, encryptedLength); err != nil {
@ -676,7 +678,7 @@ func (c *chacha20Poly1305Cipher) readPacket(seqNum uint32, r io.Reader) ([]byte,
}
var lenBytes [4]byte
chacha20.XORKeyStream(lenBytes[:], encryptedLength, &counter, &c.lengthKey)
chacha20.New(c.lengthKey, nonce).XORKeyStream(lenBytes[:], encryptedLength)
length := binary.BigEndian.Uint32(lenBytes[:])
if length > maxPacket {
@ -702,10 +704,8 @@ func (c *chacha20Poly1305Cipher) readPacket(seqNum uint32, r io.Reader) ([]byte,
return nil, errors.New("ssh: MAC failure")
}
counter[0] = 1
plain := c.buf[4:contentEnd]
chacha20.XORKeyStream(plain, plain, &counter, &c.contentKey)
s.XORKeyStream(plain, plain)
padding := plain[0]
if padding < 4 {
@ -724,11 +724,11 @@ func (c *chacha20Poly1305Cipher) readPacket(seqNum uint32, r io.Reader) ([]byte,
}
func (c *chacha20Poly1305Cipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error {
var counter [16]byte
binary.BigEndian.PutUint64(counter[8:], uint64(seqNum))
nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)}
s := chacha20.New(c.contentKey, nonce)
var polyKey [32]byte
chacha20.XORKeyStream(polyKey[:], chacha20PolyKeyInput[:], &counter, &c.contentKey)
s.XORKeyStream(polyKey[:], polyKey[:])
s.Advance() // skip next 32 bytes
// There is no blocksize, so fall back to multiple of 8 byte
// padding, as described in RFC 4253, Sec 6.
@ -748,7 +748,7 @@ func (c *chacha20Poly1305Cipher) writePacket(seqNum uint32, w io.Writer, rand io
}
binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding))
chacha20.XORKeyStream(c.buf, c.buf[:4], &counter, &c.lengthKey)
chacha20.New(c.lengthKey, nonce).XORKeyStream(c.buf, c.buf[:4])
c.buf[4] = byte(padding)
copy(c.buf[5:], payload)
packetEnd := 5 + len(payload) + padding
@ -756,8 +756,7 @@ func (c *chacha20Poly1305Cipher) writePacket(seqNum uint32, w io.Writer, rand io
return err
}
counter[0] = 1
chacha20.XORKeyStream(c.buf[4:], c.buf[4:packetEnd], &counter, &c.contentKey)
s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd])
var mac [poly1305.TagSize]byte
poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey)
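The rewrite swaps the hand-built 16-byte big-endian counter for the internal chacha20 package's `[8]uint32` key and `[3]uint32` nonce API. The two encodings produce the same bytes because `bits.ReverseBytes32(seqNum)`, serialized little-endian, equals `seqNum` written big-endian. A standard-library-only sketch (the sequence number 0x01020304 is an arbitrary example) demonstrating the equivalence:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math/bits"
)

func main() {
	seqNum := uint32(0x01020304)

	// Old scheme: seqNum written big-endian into the tail of a 16-byte counter.
	var counter [16]byte
	binary.BigEndian.PutUint64(counter[8:], uint64(seqNum))

	// New scheme: the last nonce word is the byte-reversed sequence number;
	// ChaCha20 serializes nonce words little-endian, so the wire bytes match.
	var word [4]byte
	binary.LittleEndian.PutUint32(word[:], bits.ReverseBytes32(seqNum))

	fmt.Println(bytes.Equal(counter[12:], word[:])) // true
}
```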

View file

@ -19,6 +19,8 @@ import (
type Client struct {
Conn
handleForwardsOnce sync.Once // guards calling (*Client).handleForwards
forwards forwardList // forwarded tcpip connections from the remote side
mu sync.Mutex
channelHandlers map[string]chan NewChannel
@ -60,8 +62,6 @@ func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client {
conn.Wait()
conn.forwards.closeAll()
}()
go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip"))
go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-streamlocal@openssh.com"))
return conn
}

View file

@ -803,7 +803,7 @@ func encryptedBlock(block *pem.Block) bool {
}
// ParseRawPrivateKey returns a private key from a PEM encoded private key. It
// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys.
// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys.
func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
block, _ := pem.Decode(pemBytes)
if block == nil {
@ -817,6 +817,9 @@ func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
switch block.Type {
case "RSA PRIVATE KEY":
return x509.ParsePKCS1PrivateKey(block.Bytes)
// RFC5208 - https://tools.ietf.org/html/rfc5208
case "PRIVATE KEY":
return x509.ParsePKCS8PrivateKey(block.Bytes)
case "EC PRIVATE KEY":
return x509.ParseECPrivateKey(block.Bytes)
case "DSA PRIVATE KEY":
@ -900,8 +903,8 @@ func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
// Implemented based on the documentation at
// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) {
magic := append([]byte("openssh-key-v1"), 0)
if !bytes.Equal(magic, key[0:len(magic)]) {
const magic = "openssh-key-v1\x00"
if len(key) < len(magic) || string(key[:len(magic)]) != magic {
return nil, errors.New("ssh: invalid openssh private key format")
}
remaining := key[len(magic):]
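With the added `"PRIVATE KEY"` case, PKCS#8-encoded keys parse through the same entry point as the other PEM types. A minimal usage sketch (the `key.pem` path is a placeholder):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// key.pem is a placeholder; any of the PEM block types handled above
	// (RSA, PKCS#8 "PRIVATE KEY", EC, DSA, OpenSSH) is accepted.
	pemBytes, err := ioutil.ReadFile("key.pem")
	if err != nil {
		log.Fatal(err)
	}
	key, err := ssh.ParseRawPrivateKey(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed private key of type %T\n", key)
}
```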

View file

@ -32,6 +32,7 @@ type streamLocalChannelForwardMsg struct {
// ListenUnix is similar to ListenTCP but uses a Unix domain socket.
func (c *Client) ListenUnix(socketPath string) (net.Listener, error) {
c.handleForwardsOnce.Do(c.handleForwards)
m := streamLocalChannelForwardMsg{
socketPath,
}

View file

@ -90,10 +90,19 @@ type channelForwardMsg struct {
rport uint32
}
// handleForwards starts goroutines handling forwarded connections.
// It's called on first use by (*Client).ListenTCP to not launch
// goroutines until needed.
func (c *Client) handleForwards() {
go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip"))
go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com"))
}
// ListenTCP requests the remote peer open a listening socket
// on laddr. Incoming connections will be available by calling
// Accept on the returned net.Listener.
func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) {
c.handleForwardsOnce.Do(c.handleForwards)
if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) {
return c.autoPortListenWorkaround(laddr)
}
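The new `handleForwardsOnce` field makes the forwarding goroutines lazy: they start on the first call to `ListenTCP`/`ListenUnix` instead of unconditionally in `NewClient`. A small sketch of the same `sync.Once` pattern (the type and method names below are illustrative, not taken from the package):

```go
package main

import (
	"fmt"
	"sync"
)

type forwarder struct {
	once sync.Once
}

func (f *forwarder) startHandlers() {
	// In the real client this launches the channel-handling goroutines.
	fmt.Println("forward handlers started")
}

func (f *forwarder) Listen() {
	f.once.Do(f.startHandlers) // runs startHandlers at most once
	fmt.Println("listener registered")
}

func main() {
	var f forwarder
	f.Listen()
	f.Listen() // startHandlers is not run a second time
}
```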

13
vendor/golang.org/x/oauth2/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,13 @@
language: go
go:
- tip
install:
- export GOPATH="$HOME/gopath"
- mkdir -p "$GOPATH/src/golang.org/x"
- mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
- go get -v -t -d golang.org/x/oauth2/...
script:
- go test -v golang.org/x/oauth2/...

26
vendor/golang.org/x/oauth2/CONTRIBUTING.md generated vendored Normal file
View file

@ -0,0 +1,26 @@
# Contributing to Go
Go is an open source project.
It is the work of hundreds of contributors. We appreciate your help!
## Filing issues
When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?
General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
The gophers there will answer or ask you to file an issue if you've tripped over a bug.
## Contributing code
Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
before sending patches.
Unless otherwise noted, the Go source files are distributed under
the BSD-style license found in the LICENSE file.

79
vendor/golang.org/x/oauth2/README.md generated vendored Normal file
View file

@ -0,0 +1,79 @@
# OAuth2 for Go
[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2)
The oauth2 package contains a client implementation of the OAuth 2.0 spec.
## Installation
~~~~
go get golang.org/x/oauth2
~~~~
Or you can manually git clone the repository to
`$(go env GOPATH)/src/golang.org/x/oauth2`.
See godoc for further documentation and examples.
* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
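A minimal sketch of the authorization-code flow with this package (the endpoints, credentials, and authorization code below are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",     // placeholder
		ClientSecret: "CLIENT_SECRET", // placeholder
		Scopes:       []string{"profile"},
		RedirectURL:  "https://example.com/callback",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/oauth2/auth",
			TokenURL: "https://provider.example.com/oauth2/token",
		},
	}

	// Send the user here to approve access and obtain an authorization code.
	fmt.Println("Visit:", conf.AuthCodeURL("state", oauth2.AccessTypeOffline))

	// Exchange the code for a token, then use the token-backed HTTP client.
	tok, err := conf.Exchange(context.Background(), "AUTHORIZATION_CODE")
	if err != nil {
		log.Fatal(err)
	}
	client := conf.Client(context.Background(), tok)
	resp, err := client.Get("https://provider.example.com/api/userinfo")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
```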
## App Engine
In change 96e89be (March 2015), we removed the `oauth2.Context2` type in favor
of the [`context.Context`](https://golang.org/x/net/context#Context) type from
the `golang.org/x/net/context` package, which has since been superseded by the
[`context.Context`](https://golang.org/pkg/context#Context) type from the standard `context` package.
This means it's no longer possible to use the "Classic App Engine"
`appengine.Context` type with the `oauth2` package. (You're using
Classic App Engine if you import the package `"appengine"`.)
To work around this, you may use the new `"google.golang.org/appengine"`
package. This package has almost the same API as the `"appengine"` package,
but it can be fetched with `go get` and used on "Managed VMs" as well as
Classic App Engine.
See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
for information on updating your app.
If you don't want to update your entire app to use the new App Engine packages,
you may use both sets of packages in parallel, using only the new packages
with the `oauth2` package.
```go
import (
"context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
newappengine "google.golang.org/appengine"
newurlfetch "google.golang.org/appengine/urlfetch"
"appengine"
)
func handler(w http.ResponseWriter, r *http.Request) {
var c appengine.Context = appengine.NewContext(r)
c.Infof("Logging a message with the old package")
var ctx context.Context = newappengine.NewContext(r)
client := &http.Client{
Transport: &oauth2.Transport{
Source: google.AppEngineTokenSource(ctx, "scope"),
Base: &newurlfetch.Transport{Context: ctx},
},
}
client.Get("...")
}
```
## Report Issues / Send Patches
This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html.
The main issue tracker for the oauth2 repository is located at
https://github.com/golang/oauth2/issues.

2
vendor/golang.org/x/sys/unix/.gitignore generated vendored Normal file
View file

@ -0,0 +1,2 @@
_obj/
unix.test

173
vendor/golang.org/x/sys/unix/README.md generated vendored Normal file
View file

@ -0,0 +1,173 @@
# Building `sys/unix`
The sys/unix package provides access to the raw system call interface of the
underlying operating system. See: https://godoc.org/golang.org/x/sys/unix
Porting Go to a new architecture/OS combination or adding syscalls, types, or
constants to an existing architecture/OS pair requires some manual effort;
however, there are tools that automate much of the process.
## Build Systems
There are currently two ways we generate the necessary files. We are migrating
the build system to use containers so the builds are reproducible.
This is being done on an OS-by-OS basis. Please update this documentation as
components of the build system change.
### Old Build System (currently for `GOOS != "linux" || GOARCH == "sparc64"`)
The old build system generates the Go files based on the C header files
present on your system. This means that files
for a given GOOS/GOARCH pair must be generated on a system with that OS and
architecture. This also means that the generated code can differ from system
to system, based on differences in the header files.
To avoid this, if you are using the old build system, only generate the Go
files on an installation with unmodified header files. It is also important to
keep track of which version of the OS the files were generated from (ex.
Darwin 14 vs Darwin 15). This makes it easier to track the progress of changes
and have each OS upgrade correspond to a single change.
To build the files for your current OS and architecture, make sure GOOS and
GOARCH are set correctly and run `mkall.sh`. This will generate the files for
your specific system. Running `mkall.sh -n` shows the commands that will be run.
Requirements: bash, perl, go
### New Build System (currently for `GOOS == "linux" && GOARCH != "sparc64"`)
The new build system uses a Docker container to generate the go files directly
from source checkouts of the kernel and various system libraries. This means
that on any platform that supports Docker, all the files using the new build
system can be generated at once, and generated files will not change based on
what the person running the scripts has installed on their computer.
The OS specific files for the new build system are located in the `${GOOS}`
directory, and the build is coordinated by the `${GOOS}/mkall.go` program. When
the kernel or system library updates, modify the Dockerfile at
`${GOOS}/Dockerfile` to checkout the new release of the source.
To build all the files under the new build system, you must be on an amd64/Linux
system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will
then generate all of the files for all of the GOOS/GOARCH pairs in the new build
system. Running `mkall.sh -n` shows the commands that will be run.
Requirements: bash, perl, go, docker
## Component files
This section describes the various files used in the code generation process.
It also contains instructions on how to modify these files to add a new
architecture/OS or to add additional syscalls, types, or constants. Note that
if you are using the new build system, the scripts cannot be called normally.
They must be called from within the docker container.
### asm files
The hand-written assembly file at `asm_${GOOS}_${GOARCH}.s` implements system
call dispatch. There are three entry points:
```
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
```
The first and second are the standard ones; they differ only in how many
arguments can be passed to the kernel. The third is for low-level use by the
ForkExec wrapper. Unlike the first two, it does not call into the scheduler to
let it know that a system call is running.
When porting Go to a new architecture/OS, this file must be implemented for
each GOOS/GOARCH pair.
### mksysnum
Mksysnum is a script located at `${GOOS}/mksysnum.pl` (or `mksysnum_${GOOS}.pl`
for the old system). This script takes in a list of header files containing the
syscall number declarations and parses them to produce the corresponding list of
Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated
constants.
Adding new syscall numbers is mostly done by running the build on a sufficiently
new installation of the target OS (or updating the source checkouts for the
new build system). However, depending on the OS, you may need to update the
parsing in mksysnum.
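For example, on linux/amd64 the generated `zsysnum_linux_amd64.go` contains constants along these lines (abbreviated, illustrative excerpt):

```go
const (
	SYS_READ  = 0
	SYS_WRITE = 1
	SYS_OPEN  = 2
	SYS_CLOSE = 3
	// ... one constant per syscall number follows.
)
```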
### mksyscall.pl
The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are
hand-written Go files which implement system calls (for unix, the specific OS,
or the specific OS/Architecture pair respectively) that need special handling
and list `//sys` comments giving prototypes for ones that can be generated.
The mksyscall.pl script takes the `//sys` and `//sysnb` comments and converts
them into syscalls. This requires the name of the prototype in the comment to
match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function
prototype can be exported (capitalized) or not.
Adding a new syscall often just requires adding a new `//sys` function prototype
with the desired arguments and a capitalized name so it is exported. However, if
you want the interface to the syscall to be different, often one will make an
unexported `//sys` prototype, and then write a custom wrapper in
`syscall_${GOOS}.go`.
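As an illustration, a prototype such as `//sys Chdir(path string) (err error)` in `syscall_linux.go` becomes a generated wrapper roughly like the following (simplified sketch; the exact output depends on GOOS/GOARCH and the script flags):

```go
// Sketch of the wrapper mksyscall.pl emits for
//   //sys	Chdir(path string) (err error)
// on linux (simplified).
func Chdir(path string) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	_, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
```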
### types files
For each OS, there is a hand-written Go file at `${GOOS}/types.go` (or
`types_${GOOS}.go` on the old system). This file includes standard C headers and
creates Go type aliases to the corresponding C types. The file is then fed
through godef to get the Go compatible definitions. Finally, the generated code
is fed though mkpost.go to format the code correctly and remove any hidden or
private identifiers. This cleaned-up code is written to
`ztypes_${GOOS}_${GOARCH}.go`.
The hardest part about preparing this file is figuring out which headers to
include and which symbols need to be `#define`d to get the actual data
structures that pass through to the kernel system calls. Some C libraries
preset alternate versions for binary compatibility and translate them on the
way in and out of system calls, but there is almost always a `#define` that can
get the real ones.
See `types_darwin.go` and `linux/types.go` for examples.
To add a new type, add in the necessary include statement at the top of the
file (if it is not already there) and add in a type alias line. Note that if
your type is significantly different on different architectures, you may need
some `#if/#elif` macros in your include statements.
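For instance, `linux/types.go` aliases `type Timespec C.struct_timespec`, and the cleaned-up output in `ztypes_linux_amd64.go` ends up roughly as (illustrative excerpt):

```go
type Timespec struct {
	Sec  int64
	Nsec int64
}
```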
### mkerrors.sh
This script is used to generate the system's various constants. This doesn't
just include the error numbers and error strings, but also the signal numbers
and a wide variety of miscellaneous constants. The constants come from the list
of include files in the `includes_${uname}` variable. A regex then picks out
the desired `#define` statements, and generates the corresponding Go constants.
The error numbers and strings are generated from `#include <errno.h>`, and the
signal numbers and strings are generated from `#include <signal.h>`. All of
these constants are written to `zerrors_${GOOS}_${GOARCH}.go` via a C program,
`_errors.c`, which prints out all the constants.
To add a constant, add the header that includes it to the appropriate variable.
Then, edit the regex (if necessary) to match the desired constant. Avoid making
the regex too broad to avoid matching unintended constants.
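The result is a set of Go constants plus error and signal tables; for example, `zerrors_linux_amd64.go` contains entries along these lines (abbreviated, illustrative excerpt):

```go
const (
	ENOENT       = syscall.Errno(0x2)
	ENAMETOOLONG = syscall.Errno(0x24)
)

var errorList = [...]struct {
	num  syscall.Errno
	name string
	desc string
}{
	{2, "ENOENT", "no such file or directory"},
	{36, "ENAMETOOLONG", "file name too long"},
}
```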
## Generated files
### `zerrors_${GOOS}_${GOARCH}.go`
A file containing all of the system's generated error numbers, error strings,
signal numbers, and constants. Generated by `mkerrors.sh` (see above).
### `zsyscall_${GOOS}_${GOARCH}.go`
A file containing all the generated syscalls for a specific GOOS and GOARCH.
Generated by `mksyscall.pl` (see above).
### `zsysnum_${GOOS}_${GOARCH}.go`
A list of numeric constants for all the syscall numbers of the specific GOOS
and GOARCH. Generated by mksysnum (see above).
### `ztypes_${GOOS}_${GOARCH}.go`
A file containing Go types for passing into (or returning from) syscalls.
Generated by godefs and the types file (see above).

204
vendor/golang.org/x/sys/unix/mkall.sh generated vendored Normal file
View file

@ -0,0 +1,204 @@
#!/usr/bin/env bash
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# This script runs or (given -n) prints suggested commands to generate files for
# the Architecture/OS specified by the GOARCH and GOOS environment variables.
# See README.md for more information about how the build system works.
GOOSARCH="${GOOS}_${GOARCH}"
# defaults
mksyscall="./mksyscall.pl"
mkerrors="./mkerrors.sh"
zerrors="zerrors_$GOOSARCH.go"
mksysctl=""
zsysctl="zsysctl_$GOOSARCH.go"
mksysnum=
mktypes=
run="sh"
cmd=""
case "$1" in
-syscalls)
for i in zsyscall*go
do
# Run the command line that appears in the first line
# of the generated file to regenerate it.
sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
rm _$i
done
exit 0
;;
-n)
run="cat"
cmd="echo"
shift
esac
case "$#" in
0)
;;
*)
echo 'usage: mkall.sh [-n]' 1>&2
exit 2
esac
if [[ "$GOOS" = "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then
# Use the new build system
# Files generated through docker (use $cmd so you can Ctrl-C the build or run)
$cmd docker build --tag generate:$GOOS $GOOS
$cmd docker run --interactive --tty --volume $(dirname "$(readlink -f "$0")"):/build generate:$GOOS
exit
fi
GOOSARCH_in=syscall_$GOOSARCH.go
case "$GOOSARCH" in
_* | *_ | _)
echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
exit 1
;;
aix_ppc)
mkerrors="$mkerrors -maix32"
mksyscall="./mksyscall_aix_ppc.pl -aix"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
aix_ppc64)
mkerrors="$mkerrors -maix64"
mksyscall="./mksyscall_aix_ppc64.pl -aix"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
darwin_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32"
mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
darwin_amd64)
mkerrors="$mkerrors -m64"
mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
darwin_arm)
mkerrors="$mkerrors"
mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
darwin_arm64)
mkerrors="$mkerrors -m64"
mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
dragonfly_amd64)
mkerrors="$mkerrors -m64"
mksyscall="./mksyscall.pl -dragonfly"
mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32"
mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_amd64)
mkerrors="$mkerrors -m64"
mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_arm)
mkerrors="$mkerrors"
mksyscall="./mksyscall.pl -l32 -arm"
mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
linux_sparc64)
GOOSARCH_in=syscall_linux_sparc64.go
unistd_h=/usr/include/sparc64-linux-gnu/asm/unistd.h
mkerrors="$mkerrors -m64"
mksysnum="./mksysnum_linux.pl $unistd_h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
netbsd_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32 -netbsd"
mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
netbsd_amd64)
mkerrors="$mkerrors -m64"
mksyscall="./mksyscall.pl -netbsd"
mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
netbsd_arm)
mkerrors="$mkerrors"
mksyscall="./mksyscall.pl -l32 -netbsd -arm"
mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
openbsd_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32 -openbsd"
mksysctl="./mksysctl_openbsd.pl"
mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_amd64)
mkerrors="$mkerrors -m64"
mksyscall="./mksyscall.pl -openbsd"
mksysctl="./mksysctl_openbsd.pl"
mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_arm)
mkerrors="$mkerrors"
mksyscall="./mksyscall.pl -l32 -openbsd -arm"
mksysctl="./mksysctl_openbsd.pl"
mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
solaris_amd64)
mksyscall="./mksyscall_solaris.pl"
mkerrors="$mkerrors -m64"
mksysnum=
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
*)
echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
exit 1
;;
esac
(
if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
case "$GOOS" in
*)
syscall_goos="syscall_$GOOS.go"
case "$GOOS" in
darwin | dragonfly | freebsd | netbsd | openbsd)
syscall_goos="syscall_bsd.go $syscall_goos"
;;
esac
if [ -n "$mksyscall" ]; then
if [ "$GOOSARCH" == "aix_ppc64" ]; then
# aix/ppc64 script generates files instead of writing to stdin.
echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ;
else
echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
fi
fi
esac
if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
if [ -n "$mktypes" ]; then
echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go";
fi
) | $run

656
vendor/golang.org/x/sys/unix/mkerrors.sh generated vendored Normal file
View file

@ -0,0 +1,656 @@
#!/usr/bin/env bash
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Generate Go code listing errors and other #defined constant
# values (ENAMETOOLONG etc.), by asking the preprocessor
# about the definitions.
unset LANG
export LC_ALL=C
export LC_CTYPE=C
if test -z "$GOARCH" -o -z "$GOOS"; then
echo 1>&2 "GOARCH or GOOS not defined in environment"
exit 1
fi
# Check that we are using the new build system if we should
if [[ "$GOOS" = "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then
if [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then
echo 1>&2 "In the new build system, mkerrors should not be called directly."
echo 1>&2 "See README.md"
exit 1
fi
fi
if [[ "$GOOS" = "aix" ]]; then
CC=${CC:-gcc}
else
CC=${CC:-cc}
fi
if [[ "$GOOS" = "solaris" ]]; then
# Assumes GNU versions of utilities in PATH.
export PATH=/usr/gnu/bin:$PATH
fi
uname=$(uname)
includes_AIX='
#include <net/if.h>
#include <net/netopt.h>
#include <netinet/ip_mroute.h>
#include <sys/protosw.h>
#include <sys/stropts.h>
#include <sys/mman.h>
#include <sys/poll.h>
#include <sys/termio.h>
#include <termios.h>
#include <fcntl.h>
#define AF_LOCAL AF_UNIX
'
includes_Darwin='
#define _DARWIN_C_SOURCE
#define KERNEL
#define _DARWIN_USE_64_BIT_INODE
#include <stdint.h>
#include <sys/attr.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/ptrace.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <sys/xattr.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <termios.h>
'
includes_DragonFly='
#include <sys/types.h>
#include <sys/event.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <netinet/in.h>
#include <termios.h>
#include <netinet/ip.h>
#include <net/ip_mroute/ip_mroute.h>
'
includes_FreeBSD='
#include <sys/capability.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <netinet/in.h>
#include <termios.h>
#include <netinet/ip.h>
#include <netinet/ip_mroute.h>
#include <sys/extattr.h>
#if __FreeBSD__ >= 10
#define IFT_CARP 0xf8 // IFT_CARP is deprecated in FreeBSD 10
#undef SIOCAIFADDR
#define SIOCAIFADDR _IOW(105, 26, struct oifaliasreq) // ifaliasreq contains if_data
#undef SIOCSIFPHYADDR
#define SIOCSIFPHYADDR _IOW(105, 70, struct oifaliasreq) // ifaliasreq contains if_data
#endif
'
includes_Linux='
#define _LARGEFILE_SOURCE
#define _LARGEFILE64_SOURCE
#ifndef __LP64__
#define _FILE_OFFSET_BITS 64
#endif
#define _GNU_SOURCE
// <sys/ioctl.h> is broken on powerpc64, as it fails to include definitions of
// these structures. We just include them copied from <bits/termios.h>.
#if defined(__powerpc__)
struct sgttyb {
char sg_ispeed;
char sg_ospeed;
char sg_erase;
char sg_kill;
short sg_flags;
};
struct tchars {
char t_intrc;
char t_quitc;
char t_startc;
char t_stopc;
char t_eofc;
char t_brkc;
};
struct ltchars {
char t_suspc;
char t_dsuspc;
char t_rprntc;
char t_flushc;
char t_werasc;
char t_lnextc;
};
#endif
#include <bits/sockaddr.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/xattr.h>
#include <linux/if.h>
#include <linux/if_alg.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_packet.h>
#include <linux/if_addr.h>
#include <linux/falloc.h>
#include <linux/filter.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/keyctl.h>
#include <linux/magic.h>
#include <linux/memfd.h>
#include <linux/module.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netlink.h>
#include <linux/net_namespace.h>
#include <linux/perf_event.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/rtnetlink.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/seccomp.h>
#include <linux/sockios.h>
#include <linux/wait.h>
#include <linux/icmpv6.h>
#include <linux/serial.h>
#include <linux/can.h>
#include <linux/vm_sockets.h>
#include <linux/taskstats.h>
#include <linux/genetlink.h>
#include <linux/watchdog.h>
#include <linux/hdreg.h>
#include <linux/rtc.h>
#include <linux/if_xdp.h>
#include <mtd/ubi-user.h>
#include <net/route.h>
#include <asm/termbits.h>
#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000
#endif
#ifndef PTRACE_GETREGS
#define PTRACE_GETREGS 0xc
#endif
#ifndef PTRACE_SETREGS
#define PTRACE_SETREGS 0xd
#endif
#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif
#ifdef SOL_BLUETOOTH
// SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h
// but it is already in bluetooth_linux.go
#undef SOL_BLUETOOTH
#endif
// Certain constants are missing from the fs/crypto UAPI
#define FS_KEY_DESC_PREFIX "fscrypt:"
#define FS_KEY_DESC_PREFIX_SIZE 8
#define FS_MAX_KEY_SIZE 64
// XDP socket constants do not appear to be picked up otherwise.
// Copied from samples/bpf/xdpsock_user.c.
#ifndef SOL_XDP
#define SOL_XDP 283
#endif
#ifndef AF_XDP
#define AF_XDP 44
#endif
'
includes_NetBSD='
#include <sys/types.h>
#include <sys/param.h>
#include <sys/event.h>
#include <sys/extattr.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/termios.h>
#include <sys/ttycom.h>
#include <sys/wait.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_mroute.h>
#include <netinet/if_ether.h>
// Needed since <sys/param.h> refers to it...
#define schedppq 1
'
includes_OpenBSD='
#include <sys/types.h>
#include <sys/param.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/termios.h>
#include <sys/ttycom.h>
#include <sys/unistd.h>
#include <sys/wait.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_mroute.h>
#include <netinet/if_ether.h>
#include <net/if_bridge.h>
// We keep some constants not supported in OpenBSD 5.5 and beyond for
// the promise of compatibility.
#define EMUL_ENABLED 0x1
#define EMUL_NATIVE 0x2
#define IPV6_FAITH 0x1d
#define IPV6_OPTIONS 0x1
#define IPV6_RTHDR_STRICT 0x1
#define IPV6_SOCKOPT_RESERVED1 0x3
#define SIOCGIFGENERIC 0xc020693a
#define SIOCSIFGENERIC 0x80206939
#define WALTSIG 0x4
'
includes_SunOS='
#include <limits.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/ioctl.h>
#include <sys/mkdev.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_types.h>
#include <net/route.h>
#include <netinet/in.h>
#include <termios.h>
#include <netinet/ip.h>
#include <netinet/ip_mroute.h>
'
includes='
#include <sys/types.h>
#include <sys/file.h>
#include <fcntl.h>
#include <dirent.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <errno.h>
#include <sys/signal.h>
#include <signal.h>
#include <sys/resource.h>
#include <time.h>
'
ccflags="$@"
# Write go tool cgo -godefs input.
(
echo package unix
echo
echo '/*'
indirect="includes_$(uname)"
echo "${!indirect} $includes"
echo '*/'
echo 'import "C"'
echo 'import "syscall"'
echo
echo 'const ('
# The gcc command line prints all the #defines
# it encounters while processing the input
echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags |
awk '
$1 != "#define" || $2 ~ /\(/ || $3 == "" {next}
$2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers
$2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next}
$2 ~ /^(SCM_SRCRT)$/ {next}
$2 ~ /^(MAP_FAILED)$/ {next}
$2 ~ /^ELF_.*$/ {next}# <asm/elf.h> contains ELF_ARCH, etc.
$2 ~ /^EXTATTR_NAMESPACE_NAMES/ ||
$2 ~ /^EXTATTR_NAMESPACE_[A-Z]+_STRING/ {next}
$2 !~ /^ECCAPBITS/ &&
$2 !~ /^ETH_/ &&
$2 !~ /^EPROC_/ &&
$2 !~ /^EQUIV_/ &&
$2 !~ /^EXPR_/ &&
$2 ~ /^E[A-Z0-9_]+$/ ||
$2 ~ /^B[0-9_]+$/ ||
$2 ~ /^(OLD|NEW)DEV$/ ||
$2 == "BOTHER" ||
$2 ~ /^CI?BAUD(EX)?$/ ||
$2 == "IBSHIFT" ||
$2 ~ /^V[A-Z0-9]+$/ ||
$2 ~ /^CS[A-Z0-9]/ ||
$2 ~ /^I(SIG|CANON|CRNL|UCLC|EXTEN|MAXBEL|STRIP|UTF8)$/ ||
$2 ~ /^IGN/ ||
$2 ~ /^IX(ON|ANY|OFF)$/ ||
$2 ~ /^IN(LCR|PCK)$/ ||
$2 !~ "X86_CR3_PCID_NOFLUSH" &&
$2 ~ /(^FLU?SH)|(FLU?SH$)/ ||
$2 ~ /^C(LOCAL|READ|MSPAR|RTSCTS)$/ ||
$2 == "BRKINT" ||
$2 == "HUPCL" ||
$2 == "PENDIN" ||
$2 == "TOSTOP" ||
$2 == "XCASE" ||
$2 == "ALTWERASE" ||
$2 == "NOKERNINFO" ||
$2 ~ /^PAR/ ||
$2 ~ /^SIG[^_]/ ||
$2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ ||
$2 ~ /^O?XTABS$/ ||
$2 ~ /^TC[IO](ON|OFF)$/ ||
$2 ~ /^IN_/ ||
$2 ~ /^LOCK_(SH|EX|NB|UN)$/ ||
$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ ||
$2 ~ /^TP_STATUS_/ ||
$2 ~ /^FALLOC_/ ||
$2 == "ICMPV6_FILTER" ||
$2 == "SOMAXCONN" ||
$2 == "NAME_MAX" ||
$2 == "IFNAMSIZ" ||
$2 ~ /^CTL_(HW|KERN|MAXNAME|NET|QUERY)$/ ||
$2 ~ /^KERN_(HOSTNAME|OS(RELEASE|TYPE)|VERSION)$/ ||
$2 ~ /^HW_MACHINE$/ ||
$2 ~ /^SYSCTL_VERS/ ||
$2 !~ "MNT_BITS" &&
$2 ~ /^(MS|MNT|UMOUNT)_/ ||
$2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ ||
$2 ~ /^(O|F|E?FD|NAME|S|PTRACE|PT)_/ ||
$2 ~ /^KEXEC_/ ||
$2 ~ /^LINUX_REBOOT_CMD_/ ||
$2 ~ /^LINUX_REBOOT_MAGIC[12]$/ ||
$2 ~ /^MODULE_INIT_/ ||
$2 !~ "NLA_TYPE_MASK" &&
$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
$2 ~ /^SIOC/ ||
$2 ~ /^TIOC/ ||
$2 ~ /^TCGET/ ||
$2 ~ /^TCSET/ ||
$2 ~ /^TC(FLSH|SBRKP?|XONC)$/ ||
$2 !~ "RTF_BITS" &&
$2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ ||
$2 ~ /^BIOC/ ||
$2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ ||
$2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ ||
$2 ~ /^PRIO_(PROCESS|PGRP|USER)/ ||
$2 ~ /^CLONE_[A-Z_]+/ ||
$2 !~ /^(BPF_TIMEVAL)$/ &&
$2 ~ /^(BPF|DLT)_/ ||
$2 ~ /^CLOCK_/ ||
$2 ~ /^CAN_/ ||
$2 ~ /^CAP_/ ||
$2 ~ /^ALG_/ ||
$2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE|IOC_(GET|SET)_ENCRYPTION)/ ||
$2 ~ /^GRND_/ ||
$2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ ||
$2 ~ /^KEYCTL_/ ||
$2 ~ /^PERF_EVENT_IOC_/ ||
$2 ~ /^SECCOMP_MODE_/ ||
$2 ~ /^SPLICE_/ ||
$2 ~ /^SYNC_FILE_RANGE_/ ||
$2 !~ /^AUDIT_RECORD_MAGIC/ &&
$2 !~ /IOC_MAGIC/ &&
$2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ ||
$2 ~ /^(VM|VMADDR)_/ ||
$2 ~ /^IOCTL_VM_SOCKETS_/ ||
$2 ~ /^(TASKSTATS|TS)_/ ||
$2 ~ /^CGROUPSTATS_/ ||
$2 ~ /^GENL_/ ||
$2 ~ /^STATX_/ ||
$2 ~ /^RENAME/ ||
$2 ~ /^UBI_IOC[A-Z]/ ||
$2 ~ /^UTIME_/ ||
$2 ~ /^XATTR_(CREATE|REPLACE|NO(DEFAULT|FOLLOW|SECURITY)|SHOWCOMPRESSION)/ ||
$2 ~ /^ATTR_(BIT_MAP_COUNT|(CMN|VOL|FILE)_)/ ||
$2 ~ /^FSOPT_/ ||
$2 ~ /^WDIOC_/ ||
$2 ~ /^NFN/ ||
$2 ~ /^XDP_/ ||
$2 ~ /^(HDIO|WIN|SMART)_/ ||
$2 !~ "WMESGLEN" &&
$2 ~ /^W[A-Z0-9]+$/ ||
$2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)}
$2 ~ /^__WCOREFLAG$/ {next}
$2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)}
{next}
' | sort
echo ')'
) >_const.go
# Pull out the error names for later.
errors=$(
echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' |
sort
)
# Pull out the signal names for later.
signals=$(
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
sort
)
# Again, writing regexps to a file.
echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' |
sort >_error.grep
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
sort >_signal.grep
echo '// mkerrors.sh' "$@"
echo '// Code generated by the command above; see README.md. DO NOT EDIT.'
echo
echo "// +build ${GOARCH},${GOOS}"
echo
go tool cgo -godefs -- "$@" _const.go >_error.out
cat _error.out | grep -vf _error.grep | grep -vf _signal.grep
echo
echo '// Errors'
echo 'const ('
cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= syscall.Errno(\1)/'
echo ')'
echo
echo '// Signals'
echo 'const ('
cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= syscall.Signal(\1)/'
echo ')'
# Run C program to print error and syscall strings.
(
echo -E "
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <ctype.h>
#include <string.h>
#include <signal.h>
#define nelem(x) (sizeof(x)/sizeof((x)[0]))
enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below
struct tuple {
int num;
const char *name;
};
struct tuple errors[] = {
"
for i in $errors
do
echo -E ' {'$i', "'$i'" },'
done
echo -E "
};
struct tuple signals[] = {
"
for i in $signals
do
echo -E ' {'$i', "'$i'" },'
done
# Use -E because on some systems bash builtin interprets \n itself.
echo -E '
};
static int
tuplecmp(const void *a, const void *b)
{
return ((struct tuple *)a)->num - ((struct tuple *)b)->num;
}
int
main(void)
{
int i, e;
char buf[1024], *p;
printf("\n\n// Error table\n");
printf("var errorList = [...]struct {\n");
printf("\tnum syscall.Errno\n");
printf("\tname string\n");
printf("\tdesc string\n");
printf("} {\n");
qsort(errors, nelem(errors), sizeof errors[0], tuplecmp);
for(i=0; i<nelem(errors); i++) {
e = errors[i].num;
if(i > 0 && errors[i-1].num == e)
continue;
strcpy(buf, strerror(e));
// lowercase first letter: Bad -> bad, but STREAM -> STREAM.
if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
buf[0] += a - A;
printf("\t{ %d, \"%s\", \"%s\" },\n", e, errors[i].name, buf);
}
printf("}\n\n");
printf("\n\n// Signal table\n");
printf("var signalList = [...]struct {\n");
printf("\tnum syscall.Signal\n");
printf("\tname string\n");
printf("\tdesc string\n");
printf("} {\n");
qsort(signals, nelem(signals), sizeof signals[0], tuplecmp);
for(i=0; i<nelem(signals); i++) {
e = signals[i].num;
if(i > 0 && signals[i-1].num == e)
continue;
strcpy(buf, strsignal(e));
// lowercase first letter: Bad -> bad, but STREAM -> STREAM.
if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
buf[0] += a - A;
// cut trailing : number.
p = strrchr(buf, ":"[0]);
if(p)
*p = '\0';
printf("\t{ %d, \"%s\", \"%s\" },\n", e, signals[i].name, buf);
}
printf("}\n\n");
return 0;
}
'
) >_errors.c
$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out

341
vendor/golang.org/x/sys/unix/mksyscall.pl generated vendored Normal file
View file

@ -0,0 +1,341 @@
#!/usr/bin/env perl
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# This program reads a file containing function prototypes
# (like syscall_darwin.go) and generates system call bodies.
# The prototypes are marked by lines beginning with "//sys"
# and read like func declarations if //sys is replaced by func, but:
# * The parameter lists must give a name for each argument.
# This includes return parameters.
# * The parameter lists must give a type for each argument:
# the (x, y, z int) shorthand is not allowed.
# * If the return parameter is an error number, it must be named errno.
# A line beginning with //sysnb is like //sys, except that the
# goroutine will not be suspended during the execution of the system
# call. This must only be used for system calls which can never
# block, as otherwise the system call could cause all goroutines to
# hang.
use strict;
my $cmdline = "mksyscall.pl " . join(' ', @ARGV);
my $errors = 0;
my $_32bit = "";
my $plan9 = 0;
my $openbsd = 0;
my $netbsd = 0;
my $dragonfly = 0;
my $arm = 0; # 64-bit value should use (even, odd)-pair
my $tags = ""; # build tags
if($ARGV[0] eq "-b32") {
$_32bit = "big-endian";
shift;
} elsif($ARGV[0] eq "-l32") {
$_32bit = "little-endian";
shift;
}
if($ARGV[0] eq "-plan9") {
$plan9 = 1;
shift;
}
if($ARGV[0] eq "-openbsd") {
$openbsd = 1;
shift;
}
if($ARGV[0] eq "-netbsd") {
$netbsd = 1;
shift;
}
if($ARGV[0] eq "-dragonfly") {
$dragonfly = 1;
shift;
}
if($ARGV[0] eq "-arm") {
$arm = 1;
shift;
}
if($ARGV[0] eq "-tags") {
shift;
$tags = $ARGV[0];
shift;
}
if($ARGV[0] =~ /^-/) {
print STDERR "usage: mksyscall.pl [-b32 | -l32] [-tags x,y] [file ...]\n";
exit 1;
}
# Check that we are using the new build system if we should
if($ENV{'GOOS'} eq "linux" && $ENV{'GOARCH'} ne "sparc64") {
if($ENV{'GOLANG_SYS_BUILD'} ne "docker") {
print STDERR "In the new build system, mksyscall should not be called directly.\n";
print STDERR "See README.md\n";
exit 1;
}
}
sub parseparamlist($) {
my ($list) = @_;
$list =~ s/^\s*//;
$list =~ s/\s*$//;
if($list eq "") {
return ();
}
return split(/\s*,\s*/, $list);
}
sub parseparam($) {
my ($p) = @_;
if($p !~ /^(\S*) (\S*)$/) {
print STDERR "$ARGV:$.: malformed parameter: $p\n";
$errors = 1;
return ("xx", "int");
}
return ($1, $2);
}
my $text = "";
while(<>) {
chomp;
s/\s+/ /g;
s/^\s+//;
s/\s+$//;
my $nonblock = /^\/\/sysnb /;
next if !/^\/\/sys / && !$nonblock;
# Line must be of the form
# func Open(path string, mode int, perm int) (fd int, errno error)
# Split into name, in params, out params.
if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$/) {
print STDERR "$ARGV:$.: malformed //sys declaration\n";
$errors = 1;
next;
}
my ($func, $in, $out, $sysname) = ($2, $3, $4, $5);
# Split argument lists on comma.
my @in = parseparamlist($in);
my @out = parseparamlist($out);
# Try in vain to keep people from editing this file.
# The theory is that they jump into the middle of the file
# without reading the header.
$text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n";
# Go function header.
my $out_decl = @out ? sprintf(" (%s)", join(', ', @out)) : "";
$text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl;
# Check if err return available
my $errvar = "";
foreach my $p (@out) {
my ($name, $type) = parseparam($p);
if($type eq "error") {
$errvar = $name;
last;
}
}
# Prepare arguments to Syscall.
my @args = ();
my $n = 0;
foreach my $p (@in) {
my ($name, $type) = parseparam($p);
if($type =~ /^\*/) {
push @args, "uintptr(unsafe.Pointer($name))";
} elsif($type eq "string" && $errvar ne "") {
$text .= "\tvar _p$n *byte\n";
$text .= "\t_p$n, $errvar = BytePtrFromString($name)\n";
$text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n";
push @args, "uintptr(unsafe.Pointer(_p$n))";
$n++;
} elsif($type eq "string") {
print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n";
$text .= "\tvar _p$n *byte\n";
$text .= "\t_p$n, _ = BytePtrFromString($name)\n";
push @args, "uintptr(unsafe.Pointer(_p$n))";
$n++;
} elsif($type =~ /^\[\](.*)/) {
# Convert slice into pointer, length.
# Have to be careful not to take address of &a[0] if len == 0:
# pass dummy pointer in that case.
# Used to pass nil, but some OSes or simulators reject write(fd, nil, 0).
$text .= "\tvar _p$n unsafe.Pointer\n";
$text .= "\tif len($name) > 0 {\n\t\t_p$n = unsafe.Pointer(\&${name}[0])\n\t}";
$text .= " else {\n\t\t_p$n = unsafe.Pointer(&_zero)\n\t}";
$text .= "\n";
push @args, "uintptr(_p$n)", "uintptr(len($name))";
$n++;
} elsif($type eq "int64" && ($openbsd || $netbsd)) {
push @args, "0";
if($_32bit eq "big-endian") {
push @args, "uintptr($name>>32)", "uintptr($name)";
} elsif($_32bit eq "little-endian") {
push @args, "uintptr($name)", "uintptr($name>>32)";
} else {
push @args, "uintptr($name)";
}
} elsif($type eq "int64" && $dragonfly) {
if ($func !~ /^extp(read|write)/i) {
push @args, "0";
}
if($_32bit eq "big-endian") {
push @args, "uintptr($name>>32)", "uintptr($name)";
} elsif($_32bit eq "little-endian") {
push @args, "uintptr($name)", "uintptr($name>>32)";
} else {
push @args, "uintptr($name)";
}
} elsif($type eq "int64" && $_32bit ne "") {
if(@args % 2 && $arm) {
# arm abi specifies 64-bit argument uses
# (even, odd) pair
push @args, "0"
}
if($_32bit eq "big-endian") {
push @args, "uintptr($name>>32)", "uintptr($name)";
} else {
push @args, "uintptr($name)", "uintptr($name>>32)";
}
} else {
push @args, "uintptr($name)";
}
}
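# For example (illustrative), a slice parameter "b []byte" is expanded by
# the branch above into Go code of roughly this form, with the pointer and
# length then passed as two separate uintptr arguments:
#	var _p0 unsafe.Pointer
#	if len(b) > 0 {
#		_p0 = unsafe.Pointer(&b[0])
#	} else {
#		_p0 = unsafe.Pointer(&_zero)
#	}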
# Determine which form to use; pad args with zeros.
my $asm = "Syscall";
if ($nonblock) {
if ($errvar eq "" && $ENV{'GOOS'} eq "linux") {
$asm = "RawSyscallNoError";
} else {
$asm = "RawSyscall";
}
} else {
if ($errvar eq "" && $ENV{'GOOS'} eq "linux") {
$asm = "SyscallNoError";
}
}
if(@args <= 3) {
while(@args < 3) {
push @args, "0";
}
} elsif(@args <= 6) {
$asm .= "6";
while(@args < 6) {
push @args, "0";
}
} elsif(@args <= 9) {
$asm .= "9";
while(@args < 9) {
push @args, "0";
}
} else {
print STDERR "$ARGV:$.: too many arguments to system call\n";
}
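# Illustrative result of the padding above: a call with four arguments and
# an error return is dispatched through the six-argument form, e.g.
#	r0, _, e1 := Syscall6(SYS_SOMECALL, a1, a2, a3, a4, 0, 0)
# where SYS_SOMECALL and a1..a4 are placeholders.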
# System call number.
if($sysname eq "") {
$sysname = "SYS_$func";
$sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar
$sysname =~ y/a-z/A-Z/;
}
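# Illustrative: a //sys line for EpollCreate with no explicit "= SYS_..."
# suffix therefore uses the constant SYS_EPOLL_CREATE.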
# Actual call.
my $args = join(', ', @args);
my $call = "$asm($sysname, $args)";
# Assign return values.
my $body = "";
my @ret = ("_", "_", "_");
my $do_errno = 0;
for(my $i=0; $i<@out; $i++) {
my $p = $out[$i];
my ($name, $type) = parseparam($p);
my $reg = "";
if($name eq "err" && !$plan9) {
$reg = "e1";
$ret[2] = $reg;
$do_errno = 1;
} elsif($name eq "err" && $plan9) {
$ret[0] = "r0";
$ret[2] = "e1";
next;
} else {
$reg = sprintf("r%d", $i);
$ret[$i] = $reg;
}
if($type eq "bool") {
$reg = "$reg != 0";
}
if($type eq "int64" && $_32bit ne "") {
# 64-bit number in r1:r0 or r0:r1.
if($i+2 > @out) {
print STDERR "$ARGV:$.: not enough registers for int64 return\n";
}
if($_32bit eq "big-endian") {
$reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1);
} else {
$reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i);
}
$ret[$i] = sprintf("r%d", $i);
$ret[$i+1] = sprintf("r%d", $i+1);
}
if($reg ne "e1" || $plan9) {
$body .= "\t$name = $type($reg)\n";
}
}
if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") {
$text .= "\t$call\n";
} else {
if ($errvar eq "" && $ENV{'GOOS'} eq "linux") {
# raw syscall without error on Linux, see golang.org/issue/22924
$text .= "\t$ret[0], $ret[1] := $call\n";
} else {
$text .= "\t$ret[0], $ret[1], $ret[2] := $call\n";
}
}
$text .= $body;
if ($plan9 && $ret[2] eq "e1") {
$text .= "\tif int32(r0) == -1 {\n";
$text .= "\t\terr = e1\n";
$text .= "\t}\n";
} elsif ($do_errno) {
$text .= "\tif e1 != 0 {\n";
$text .= "\t\terr = errnoErr(e1)\n";
$text .= "\t}\n";
}
$text .= "\treturn\n";
$text .= "}\n\n";
}
chomp $text;
chomp $text;
if($errors) {
exit 1;
}
print <<EOF;
// $cmdline
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build $tags
package unix
import (
"syscall"
"unsafe"
)
var _ syscall.Errno
$text
EOF
exit 0;

384
vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.pl generated vendored Normal file
View file

@ -0,0 +1,384 @@
#!/usr/bin/env perl
# Copyright 2018 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# This program reads a file containing function prototypes
# (like syscall_aix.go) and generates system call bodies.
# The prototypes are marked by lines beginning with "//sys"
# and read like func declarations if //sys is replaced by func, but:
# * The parameter lists must give a name for each argument.
# This includes return parameters.
# * The parameter lists must give a type for each argument:
# the (x, y, z int) shorthand is not allowed.
# * If the return parameter is an error number, it must be named err.
# * If the go func name needs to be different from its libc name,
# * or the function is not in libc, the name can be specified
# * at the end, after the "=" sign, like
# //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
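# Illustrative sketch (added for exposition): a declaration such as
#	//sys	Unlink(path string) (err error)
# adds the extern prototype "int unlink(uintptr_t);" to the cgo preamble
# built below and emits a Go wrapper that converts path with C.CString,
# calls C.unlink, and assigns the returned error to err when the call
# reports failure.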
use strict;
my $cmdline = "mksyscall_aix_ppc.pl " . join(' ', @ARGV);
my $errors = 0;
my $_32bit = "";
my $tags = ""; # build tags
my $aix = 0;
my $solaris = 0;
binmode STDOUT;
if($ARGV[0] eq "-b32") {
$_32bit = "big-endian";
shift;
} elsif($ARGV[0] eq "-l32") {
$_32bit = "little-endian";
shift;
}
if($ARGV[0] eq "-aix") {
$aix = 1;
shift;
}
if($ARGV[0] eq "-tags") {
shift;
$tags = $ARGV[0];
shift;
}
if($ARGV[0] =~ /^-/) {
print STDERR "usage: mksyscall_aix.pl [-b32 | -l32] [-tags x,y] [file ...]\n";
exit 1;
}
sub parseparamlist($) {
my ($list) = @_;
$list =~ s/^\s*//;
$list =~ s/\s*$//;
if($list eq "") {
return ();
}
return split(/\s*,\s*/, $list);
}
sub parseparam($) {
my ($p) = @_;
if($p !~ /^(\S*) (\S*)$/) {
print STDERR "$ARGV:$.: malformed parameter: $p\n";
$errors = 1;
return ("xx", "int");
}
return ($1, $2);
}
my $package = "";
my $text = "";
my $c_extern = "/*\n#include <stdint.h>\n#include <stddef.h>\n";
my @vars = ();
while(<>) {
chomp;
s/\s+/ /g;
s/^\s+//;
s/\s+$//;
$package = $1 if !$package && /^package (\S+)$/;
my $nonblock = /^\/\/sysnb /;
next if !/^\/\/sys / && !$nonblock;
# Line must be of the form
# func Open(path string, mode int, perm int) (fd int, err error)
# Split into name, in params, out params.
if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$/) {
print STDERR "$ARGV:$.: malformed //sys declaration\n";
$errors = 1;
next;
}
my ($nb, $func, $in, $out, $modname, $sysname) = ($1, $2, $3, $4, $5, $6);
# Split argument lists on comma.
my @in = parseparamlist($in);
my @out = parseparamlist($out);
$in = join(', ', @in);
$out = join(', ', @out);
# Try in vain to keep people from editing this file.
# The theory is that they jump into the middle of the file
# without reading the header.
$text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n";
# Check if value return, err return available
my $errvar = "";
my $retvar = "";
my $rettype = "";
foreach my $p (@out) {
my ($name, $type) = parseparam($p);
if($type eq "error") {
$errvar = $name;
} else {
$retvar = $name;
$rettype = $type;
}
}
# System call name.
#if($func ne "fcntl") {
if($sysname eq "") {
$sysname = "$func";
}
$sysname =~ s/([a-z])([A-Z])/${1}_$2/g;
$sysname =~ y/A-Z/a-z/; # All libc functions are lowercase.
my $C_rettype = "";
if($rettype eq "unsafe.Pointer") {
$C_rettype = "uintptr_t";
} elsif($rettype eq "uintptr") {
$C_rettype = "uintptr_t";
} elsif($rettype =~ /^_/) {
$C_rettype = "uintptr_t";
} elsif($rettype eq "int") {
$C_rettype = "int";
} elsif($rettype eq "int32") {
$C_rettype = "int";
} elsif($rettype eq "int64") {
$C_rettype = "long long";
} elsif($rettype eq "uint32") {
$C_rettype = "unsigned int";
} elsif($rettype eq "uint64") {
$C_rettype = "unsigned long long";
} else {
$C_rettype = "int";
}
if($sysname eq "exit") {
$C_rettype = "void";
}
# Change types to c
my @c_in = ();
foreach my $p (@in) {
my ($name, $type) = parseparam($p);
if($type =~ /^\*/) {
push @c_in, "uintptr_t";
} elsif($type eq "string") {
push @c_in, "uintptr_t";
} elsif($type =~ /^\[\](.*)/) {
push @c_in, "uintptr_t", "size_t";
} elsif($type eq "unsafe.Pointer") {
push @c_in, "uintptr_t";
} elsif($type eq "uintptr") {
push @c_in, "uintptr_t";
} elsif($type =~ /^_/) {
push @c_in, "uintptr_t";
} elsif($type eq "int") {
push @c_in, "int";
} elsif($type eq "int32") {
push @c_in, "int";
} elsif($type eq "int64") {
push @c_in, "long long";
} elsif($type eq "uint32") {
push @c_in, "unsigned int";
} elsif($type eq "uint64") {
push @c_in, "unsigned long long";
} else {
push @c_in, "int";
}
}
if ($func ne "fcntl" && $func ne "FcntlInt" && $func ne "readlen" && $func ne "writelen") {
# Imports of system calls from libc
$c_extern .= "$C_rettype $sysname";
my $c_in = join(', ', @c_in);
$c_extern .= "($c_in);\n";
}
# Library file name.
if($aix) {
if($modname eq "") {
$modname = "libc.a/shr_64.o";
} else {
print STDERR "$func: only syscall using libc are available\n";
$errors = 1;
next;
}
}
my $strconvfunc = "C.CString";
my $strconvtype = "*byte";
# Go function header.
if($out ne "") {
$out = " ($out)";
}
if($text ne "") {
$text .= "\n"
}
$text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out ;
# Prepare arguments to call.
my @args = ();
my $n = 0;
my $arg_n = 0;
foreach my $p (@in) {
my ($name, $type) = parseparam($p);
if($type =~ /^\*/) {
push @args, "C.uintptr_t(uintptr(unsafe.Pointer($name)))";
} elsif($type eq "string" && $errvar ne "") {
$text .= "\t_p$n := uintptr(unsafe.Pointer($strconvfunc($name)))\n";
push @args, "C.uintptr_t(_p$n)";
$n++;
} elsif($type eq "string") {
print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n";
$text .= "\t_p$n := uintptr(unsafe.Pointer($strconvfunc($name)))\n";
push @args, "C.uintptr_t(_p$n)";
$n++;
} elsif($type =~ /^\[\](.*)/) {
# Convert slice into pointer, length.
# Have to be careful not to take address of &a[0] if len == 0:
# pass nil in that case.
$text .= "\tvar _p$n *$1\n";
$text .= "\tif len($name) > 0 {\n\t\t_p$n = \&$name\[0]\n\t}\n";
push @args, "C.uintptr_t(uintptr(unsafe.Pointer(_p$n)))";
$n++;
$text .= "\tvar _p$n int\n";
$text .= "\t_p$n = len($name)\n";
push @args, "C.size_t(_p$n)";
$n++;
} elsif($type eq "int64" && $_32bit ne "") {
if($_32bit eq "big-endian") {
push @args, "uintptr($name >> 32)", "uintptr($name)";
} else {
push @args, "uintptr($name)", "uintptr($name >> 32)";
}
$n++;
} elsif($type eq "bool") {
$text .= "\tvar _p$n uint32\n";
$text .= "\tif $name {\n\t\t_p$n = 1\n\t} else {\n\t\t_p$n = 0\n\t}\n";
push @args, "_p$n";
$n++;
} elsif($type =~ /^_/) {
push @args, "C.uintptr_t(uintptr($name))";
} elsif($type eq "unsafe.Pointer") {
push @args, "C.uintptr_t(uintptr($name))";
} elsif($type eq "int") {
if (($arg_n == 2) && (($func eq "readlen") || ($func eq "writelen"))) {
push @args, "C.size_t($name)";
} elsif ($arg_n == 0 && $func eq "fcntl") {
push @args, "C.uintptr_t($name)";
} elsif (($arg_n == 2) && (($func eq "fcntl") || ($func eq "FcntlInt"))) {
push @args, "C.uintptr_t($name)";
} else {
push @args, "C.int($name)";
}
} elsif($type eq "int32") {
push @args, "C.int($name)";
} elsif($type eq "int64") {
push @args, "C.longlong($name)";
} elsif($type eq "uint32") {
push @args, "C.uint($name)";
} elsif($type eq "uint64") {
push @args, "C.ulonglong($name)";
} elsif($type eq "uintptr") {
push @args, "C.uintptr_t($name)";
} else {
push @args, "C.int($name)";
}
$arg_n++;
}
my $nargs = @args;
# Determine which form to use; pad args with zeros.
if ($nonblock) {
}
my $args = join(', ', @args);
my $call = "";
if ($sysname eq "exit") {
if ($errvar ne "") {
$call .= "er :=";
} else {
$call .= "";
}
} elsif ($errvar ne "") {
$call .= "r0,er :=";
} elsif ($retvar ne "") {
$call .= "r0,_ :=";
} else {
$call .= ""
}
$call .= "C.$sysname($args)";
# Assign return values.
my $body = "";
my $failexpr = "";
for(my $i=0; $i<@out; $i++) {
my $p = $out[$i];
my ($name, $type) = parseparam($p);
my $reg = "";
if($name eq "err") {
$reg = "e1";
} else {
$reg = "r0";
}
if($reg ne "e1" ) {
$body .= "\t$name = $type($reg)\n";
}
}
# verify return
if ($sysname ne "exit" && $errvar ne "") {
if ($C_rettype =~ /^uintptr/) {
$body .= "\tif \(uintptr\(r0\) ==\^uintptr\(0\) && er != nil\) {\n";
$body .= "\t\t$errvar = er\n";
$body .= "\t}\n";
} else {
$body .= "\tif \(r0 ==-1 && er != nil\) {\n";
$body .= "\t\t$errvar = er\n";
$body .= "\t}\n";
}
} elsif ($errvar ne "") {
$body .= "\tif \(er != nil\) {\n";
$body .= "\t\t$errvar = er\n";
$body .= "\t}\n";
}
$text .= "\t$call\n";
$text .= $body;
$text .= "\treturn\n";
$text .= "}\n";
}
if($errors) {
exit 1;
}
print <<EOF;
// $cmdline
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build $tags
package $package
$c_extern
*/
import "C"
import (
"unsafe"
)
EOF
print "import \"golang.org/x/sys/unix\"\n" if $package ne "unix";
chomp($_=<<EOF);
$text
EOF
print $_;
exit 0;

579
vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.pl generated vendored Normal file
View file

@ -0,0 +1,579 @@
#!/usr/bin/env perl
# Copyright 2018 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# This program reads a file containing function prototypes
# (like syscall_aix.go) and generates system call bodies.
# The prototypes are marked by lines beginning with "//sys"
# and read like func declarations if //sys is replaced by func, but:
# * The parameter lists must give a name for each argument.
# This includes return parameters.
# * The parameter lists must give a type for each argument:
# the (x, y, z int) shorthand is not allowed.
# * If the return parameter is an error number, it must be named err.
# * If the go func name needs to be different from its libc name,
# * or the function is not in libc, the name can be specified
# * at the end, after the "=" sign, like
# //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
# This program will generate three files and handle both gc and gccgo implementations:
# - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation)
# - zsyscall_aix_ppc64_gc.go: gc part with //go:cgo_import_dynamic and a call to syscall6
# - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type.
# The generated code looks like this
#
# zsyscall_aix_ppc64.go
# func asyscall(...) (n int, err error) {
# // Pointer Creation
# r1, e1 := callasyscall(...)
# // Type Conversion
# // Error Handler
# return
# }
#
# zsyscall_aix_ppc64_gc.go
# //go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o"
# //go:linkname libc_asyscall libc_asyscall
# var asyscall syscallFunc
#
# func callasyscall(...) (r1 uintptr, e1 Errno) {
# r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... )
# return
# }
#
# zsyscall_aix_ppc64_gccgo.go
# /*
# int asyscall(...)
#
# */
# import "C"
#
# func callasyscall(...) (r1 uintptr, e1 Errno) {
# r1 = uintptr(C.asyscall(...))
# e1 = syscall.GetErrno()
# return
# }
use strict;
my $cmdline = "mksyscall_aix_ppc64.pl " . join(' ', @ARGV);
my $errors = 0;
my $_32bit = "";
my $tags = ""; # build tags
my $aix = 0;
my $solaris = 0;
binmode STDOUT;
if($ARGV[0] eq "-b32") {
$_32bit = "big-endian";
shift;
} elsif($ARGV[0] eq "-l32") {
$_32bit = "little-endian";
shift;
}
if($ARGV[0] eq "-aix") {
$aix = 1;
shift;
}
if($ARGV[0] eq "-tags") {
shift;
$tags = $ARGV[0];
shift;
}
if($ARGV[0] =~ /^-/) {
print STDERR "usage: mksyscall_aix.pl [-b32 | -l32] [-tags x,y] [file ...]\n";
exit 1;
}
sub parseparamlist($) {
my ($list) = @_;
$list =~ s/^\s*//;
$list =~ s/\s*$//;
if($list eq "") {
return ();
}
return split(/\s*,\s*/, $list);
}
sub parseparam($) {
my ($p) = @_;
if($p !~ /^(\S*) (\S*)$/) {
print STDERR "$ARGV:$.: malformed parameter: $p\n";
$errors = 1;
return ("xx", "int");
}
return ($1, $2);
}
my $package = "";
# GCCGO
my $textgccgo = "";
my $c_extern = "/*\n#include <stdint.h>\n";
# GC
my $textgc = "";
my $dynimports = "";
my $linknames = "";
my @vars = ();
# COMMON
my $textcommon = "";
while(<>) {
chomp;
s/\s+/ /g;
s/^\s+//;
s/\s+$//;
$package = $1 if !$package && /^package (\S+)$/;
my $nonblock = /^\/\/sysnb /;
next if !/^\/\/sys / && !$nonblock;
# Line must be of the form
# func Open(path string, mode int, perm int) (fd int, err error)
# Split into name, in params, out params.
if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$/) {
print STDERR "$ARGV:$.: malformed //sys declaration\n";
$errors = 1;
next;
}
my ($nb, $func, $in, $out, $modname, $sysname) = ($1, $2, $3, $4, $5, $6);
# Split argument lists on comma.
my @in = parseparamlist($in);
my @out = parseparamlist($out);
$in = join(', ', @in);
$out = join(', ', @out);
if($sysname eq "") {
$sysname = "$func";
}
my $onlyCommon = 0;
if ($func eq "readlen" || $func eq "writelen" || $func eq "FcntlInt" || $func eq "FcntlFlock") {
# This function calls another syscall which is already implemented.
# Therefore, the gc and gccgo parts must not be generated.
$onlyCommon = 1
}
# Try in vain to keep people from editing this file.
# The theory is that they jump into the middle of the file
# without reading the header.
$textcommon .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n";
if (!$onlyCommon) {
$textgccgo .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n";
$textgc .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n";
}
# Check if value return, err return available
my $errvar = "";
my $retvar = "";
my $rettype = "";
foreach my $p (@out) {
my ($name, $type) = parseparam($p);
if($type eq "error") {
$errvar = $name;
} else {
$retvar = $name;
$rettype = $type;
}
}
$sysname =~ s/([a-z])([A-Z])/${1}_$2/g;
$sysname =~ y/A-Z/a-z/; # All libc functions are lowercase.
# GCCGO Prototype return type
my $C_rettype = "";
if($rettype eq "unsafe.Pointer") {
$C_rettype = "uintptr_t";
} elsif($rettype eq "uintptr") {
$C_rettype = "uintptr_t";
} elsif($rettype =~ /^_/) {
$C_rettype = "uintptr_t";
} elsif($rettype eq "int") {
$C_rettype = "int";
} elsif($rettype eq "int32") {
$C_rettype = "int";
} elsif($rettype eq "int64") {
$C_rettype = "long long";
} elsif($rettype eq "uint32") {
$C_rettype = "unsigned int";
} elsif($rettype eq "uint64") {
$C_rettype = "unsigned long long";
} else {
$C_rettype = "int";
}
if($sysname eq "exit") {
$C_rettype = "void";
}
# GCCGO Prototype arguments type
my @c_in = ();
foreach my $i (0 .. $#in) {
my ($name, $type) = parseparam($in[$i]);
if($type =~ /^\*/) {
push @c_in, "uintptr_t";
} elsif($type eq "string") {
push @c_in, "uintptr_t";
} elsif($type =~ /^\[\](.*)/) {
push @c_in, "uintptr_t", "size_t";
} elsif($type eq "unsafe.Pointer") {
push @c_in, "uintptr_t";
} elsif($type eq "uintptr") {
push @c_in, "uintptr_t";
} elsif($type =~ /^_/) {
push @c_in, "uintptr_t";
} elsif($type eq "int") {
if (($i == 0 || $i == 2) && $func eq "fcntl"){
# These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
push @c_in, "uintptr_t";
} else {
push @c_in, "int";
}
} elsif($type eq "int32") {
push @c_in, "int";
} elsif($type eq "int64") {
push @c_in, "long long";
} elsif($type eq "uint32") {
push @c_in, "unsigned int";
} elsif($type eq "uint64") {
push @c_in, "unsigned long long";
} else {
push @c_in, "int";
}
}
if (!$onlyCommon){
# GCCGO Prototype Generation
# Imports of system calls from libc
$c_extern .= "$C_rettype $sysname";
my $c_in = join(', ', @c_in);
$c_extern .= "($c_in);\n";
}
# GC Library name
if($modname eq "") {
$modname = "libc.a/shr_64.o";
} else {
print STDERR "$func: only syscall using libc are available\n";
$errors = 1;
next;
}
my $sysvarname = "libc_${sysname}";
if (!$onlyCommon){
# GC Runtime import of function to allow cross-platform builds.
$dynimports .= "//go:cgo_import_dynamic ${sysvarname} ${sysname} \"$modname\"\n";
# GC Link symbol to proc address variable.
$linknames .= "//go:linkname ${sysvarname} ${sysvarname}\n";
# GC Library proc address variable.
push @vars, $sysvarname;
}
my $strconvfunc ="BytePtrFromString";
my $strconvtype = "*byte";
# Go function header.
if($out ne "") {
$out = " ($out)";
}
if($textcommon ne "") {
$textcommon .= "\n"
}
$textcommon .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out ;
# Prepare arguments to call.
my @argscommun = (); # Arguments in the common part
my @argscall = (); # Arguments for call prototype
my @argsgc = (); # Arguments for gc call (with syscall6)
my @argsgccgo = (); # Arguments for gccgo call (with C.name_of_syscall)
my $n = 0;
my $arg_n = 0;
foreach my $p (@in) {
my ($name, $type) = parseparam($p);
if($type =~ /^\*/) {
push @argscommun, "uintptr(unsafe.Pointer($name))";
push @argscall, "$name uintptr";
push @argsgc, "$name";
push @argsgccgo, "C.uintptr_t($name)";
} elsif($type eq "string" && $errvar ne "") {
$textcommon .= "\tvar _p$n $strconvtype\n";
$textcommon .= "\t_p$n, $errvar = $strconvfunc($name)\n";
$textcommon .= "\tif $errvar != nil {\n\t\treturn\n\t}\n";
push @argscommun, "uintptr(unsafe.Pointer(_p$n))";
push @argscall, "_p$n uintptr ";
push @argsgc, "_p$n";
push @argsgccgo, "C.uintptr_t(_p$n)";
$n++;
} elsif($type eq "string") {
print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n";
$textcommon .= "\tvar _p$n $strconvtype\n";
$textcommon .= "\t_p$n, $errvar = $strconvfunc($name)\n";
$textcommon .= "\tif $errvar != nil {\n\t\treturn\n\t}\n";
push @argscommun, "uintptr(unsafe.Pointer(_p$n))";
push @argscall, "_p$n uintptr";
push @argsgc, "_p$n";
push @argsgccgo, "C.uintptr_t(_p$n)";
$n++;
} elsif($type =~ /^\[\](.*)/) {
# Convert slice into pointer, length.
# Have to be careful not to take address of &a[0] if len == 0:
# pass nil in that case.
$textcommon .= "\tvar _p$n *$1\n";
$textcommon .= "\tif len($name) > 0 {\n\t\t_p$n = \&$name\[0]\n\t}\n";
push @argscommun, "uintptr(unsafe.Pointer(_p$n))", "len($name)";
push @argscall, "_p$n uintptr", "_lenp$n int";
push @argsgc, "_p$n", "uintptr(_lenp$n)";
push @argsgccgo, "C.uintptr_t(_p$n)", "C.size_t(_lenp$n)";
$n++;
} elsif($type eq "int64" && $_32bit ne "") {
print STDERR "$ARGV:$.: $func uses int64 with 32 bits mode. Case not yet implemented\n";
# if($_32bit eq "big-endian") {
# push @args, "uintptr($name >> 32)", "uintptr($name)";
# } else {
# push @args, "uintptr($name)", "uintptr($name >> 32)";
# }
# $n++;
} elsif($type eq "bool") {
print STDERR "$ARGV:$.: $func uses bool. Case not yet implemented\n";
# $text .= "\tvar _p$n uint32\n";
# $text .= "\tif $name {\n\t\t_p$n = 1\n\t} else {\n\t\t_p$n = 0\n\t}\n";
# push @args, "_p$n";
# $n++;
} elsif($type =~ /^_/ ||$type eq "unsafe.Pointer") {
push @argscommun, "uintptr($name)";
push @argscall, "$name uintptr";
push @argsgc, "$name";
push @argsgccgo, "C.uintptr_t($name)";
} elsif($type eq "int") {
if (($arg_n == 0 || $arg_n == 2) && ($func eq "fcntl" || $func eq "FcntlInt" || $func eq "FcntlFlock")) {
# These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
push @argscommun, "uintptr($name)";
push @argscall, "$name uintptr";
push @argsgc, "$name";
push @argsgccgo, "C.uintptr_t($name)";
} else {
push @argscommun, "$name";
push @argscall, "$name int";
push @argsgc, "uintptr($name)";
push @argsgccgo, "C.int($name)";
}
} elsif($type eq "int32") {
push @argscommun, "$name";
push @argscall, "$name int32";
push @argsgc, "uintptr($name)";
push @argsgccgo, "C.int($name)";
} elsif($type eq "int64") {
push @argscommun, "$name";
push @argscall, "$name int64";
push @argsgc, "uintptr($name)";
push @argsgccgo, "C.longlong($name)";
} elsif($type eq "uint32") {
push @argscommun, "$name";
push @argscall, "$name uint32";
push @argsgc, "uintptr($name)";
push @argsgccgo, "C.uint($name)";
} elsif($type eq "uint64") {
push @argscommun, "$name";
push @argscall, "$name uint64";
push @argsgc, "uintptr($name)";
push @argsgccgo, "C.ulonglong($name)";
} elsif($type eq "uintptr") {
push @argscommun, "$name";
push @argscall, "$name uintptr";
push @argsgc, "$name";
push @argsgccgo, "C.uintptr_t($name)";
} else {
push @argscommun, "int($name)";
push @argscall, "$name int";
push @argsgc, "uintptr($name)";
push @argsgccgo, "C.int($name)";
}
$arg_n++;
}
my $nargs = @argsgc;
# COMMON function generation
my $argscommun = join(', ', @argscommun);
my $callcommun = "call$sysname($argscommun)";
my @ret = ("_", "_");
my $body = "";
my $do_errno = 0;
for(my $i=0; $i<@out; $i++) {
my $p = $out[$i];
my ($name, $type) = parseparam($p);
my $reg = "";
if($name eq "err") {
$reg = "e1";
$ret[1] = $reg;
$do_errno = 1;
} else {
$reg = "r0";
$ret[0] = $reg;
}
if($type eq "bool") {
$reg = "$reg != 0";
}
if($reg ne "e1") {
$body .= "\t$name = $type($reg)\n";
}
}
if ($ret[0] eq "_" && $ret[1] eq "_") {
$textcommon .= "\t$callcommun\n";
} else {
$textcommon .= "\t$ret[0], $ret[1] := $callcommun\n";
}
$textcommon .= $body;
if ($do_errno) {
$textcommon .= "\tif e1 != 0 {\n";
$textcommon .= "\t\terr = errnoErr(e1)\n";
$textcommon .= "\t}\n";
}
$textcommon .= "\treturn\n";
$textcommon .= "}\n";
if ($onlyCommon){
next
}
# CALL Prototype
my $callProto = sprintf "func call%s(%s) (r1 uintptr, e1 Errno) {\n", $sysname, join(', ', @argscall);
# GC function generation
my $asm = "syscall6";
if ($nonblock) {
$asm = "rawSyscall6";
}
if(@argsgc <= 6) {
while(@argsgc < 6) {
push @argsgc, "0";
}
} else {
print STDERR "$ARGV:$.: too many arguments to system call\n";
}
my $argsgc = join(', ', @argsgc);
my $callgc = "$asm(uintptr(unsafe.Pointer(&$sysvarname)), $nargs, $argsgc)";
$textgc .= $callProto;
$textgc .= "\tr1, _, e1 = $callgc\n";
$textgc .= "\treturn\n}\n";
# GCCGO function generation
my $argsgccgo = join(', ', @argsgccgo);
my $callgccgo = "C.$sysname($argsgccgo)";
$textgccgo .= $callProto;
$textgccgo .= "\tr1 = uintptr($callgccgo)\n";
$textgccgo .= "\te1 = syscall.GetErrno()\n";
$textgccgo .= "\treturn\n}\n";
}
if($errors) {
exit 1;
}
# Print zsyscall_aix_ppc64.go
open(my $fcommun, '>', 'zsyscall_aix_ppc64.go');
my $tofcommun = <<EOF;
// $cmdline
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build $tags
package $package
import (
"unsafe"
)
EOF
$tofcommun .= "import \"golang.org/x/sys/unix\"\n" if $package ne "unix";
$tofcommun .=<<EOF;
$textcommon
EOF
print $fcommun $tofcommun;
# Print zsyscall_aix_ppc64_gc.go
open(my $fgc, '>', 'zsyscall_aix_ppc64_gc.go');
my $tofgc = <<EOF;
// $cmdline
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build $tags
// +build !gccgo
package $package
import (
"unsafe"
)
EOF
$tofgc .= "import \"golang.org/x/sys/unix\"\n" if $package ne "unix";
my $vardecls = "\t" . join(",\n\t", @vars);
$vardecls .= " syscallFunc";
$tofgc .=<<EOF;
$dynimports
$linknames
type syscallFunc uintptr
var (
$vardecls
)
// Implemented in runtime/syscall_aix.go.
func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
$textgc
EOF
print $fgc $tofgc;
# Print zsyscall_aix_ppc64_gccgo.go
open(my $fgccgo, '>', 'zsyscall_aix_ppc64_gccgo.go');
my $tofgccgo = <<EOF;
// $cmdline
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build $tags
// +build gccgo
package $package
$c_extern
*/
import "C"
import (
"syscall"
)
EOF
$tofgccgo .= "import \"golang.org/x/sys/unix\"\n" if $package ne "unix";
$tofgccgo .=<<EOF;
$textgccgo
EOF
print $fgccgo $tofgccgo;
exit 0;

294
vendor/golang.org/x/sys/unix/mksyscall_solaris.pl generated vendored Normal file
View file

@ -0,0 +1,294 @@
#!/usr/bin/env perl
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# This program reads a file containing function prototypes
# (like syscall_solaris.go) and generates system call bodies.
# The prototypes are marked by lines beginning with "//sys"
# and read like func declarations if //sys is replaced by func, but:
# * The parameter lists must give a name for each argument.
# This includes return parameters.
# * The parameter lists must give a type for each argument:
# the (x, y, z int) shorthand is not allowed.
# * If the return parameter is an error number, it must be named err.
# * If the go func name needs to be different from its libc name,
# * or the function is not in libc, the name can be specified
# * at the end, after the "=" sign, like
# //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
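# Illustrative sketch (added for exposition): for the getsockopt example
# above, the script emits roughly
#	//go:cgo_import_dynamic libc_getsockopt getsockopt "libsocket.so"
#	//go:linkname procgetsockopt libc_getsockopt
#	var procgetsockopt syscallFunc
# plus a Go wrapper that calls sysvicall6 with the address of
# procgetsockopt and the prepared uintptr arguments.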
use strict;
my $cmdline = "mksyscall_solaris.pl " . join(' ', @ARGV);
my $errors = 0;
my $_32bit = "";
my $tags = ""; # build tags
binmode STDOUT;
if($ARGV[0] eq "-b32") {
$_32bit = "big-endian";
shift;
} elsif($ARGV[0] eq "-l32") {
$_32bit = "little-endian";
shift;
}
if($ARGV[0] eq "-tags") {
shift;
$tags = $ARGV[0];
shift;
}
if($ARGV[0] =~ /^-/) {
print STDERR "usage: mksyscall_solaris.pl [-b32 | -l32] [-tags x,y] [file ...]\n";
exit 1;
}
sub parseparamlist($) {
my ($list) = @_;
$list =~ s/^\s*//;
$list =~ s/\s*$//;
if($list eq "") {
return ();
}
return split(/\s*,\s*/, $list);
}
sub parseparam($) {
my ($p) = @_;
if($p !~ /^(\S*) (\S*)$/) {
print STDERR "$ARGV:$.: malformed parameter: $p\n";
$errors = 1;
return ("xx", "int");
}
return ($1, $2);
}
my $package = "";
my $text = "";
my $dynimports = "";
my $linknames = "";
my @vars = ();
while(<>) {
chomp;
s/\s+/ /g;
s/^\s+//;
s/\s+$//;
$package = $1 if !$package && /^package (\S+)$/;
my $nonblock = /^\/\/sysnb /;
next if !/^\/\/sys / && !$nonblock;
# Line must be of the form
# func Open(path string, mode int, perm int) (fd int, err error)
# Split into name, in params, out params.
if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$/) {
print STDERR "$ARGV:$.: malformed //sys declaration\n";
$errors = 1;
next;
}
my ($nb, $func, $in, $out, $modname, $sysname) = ($1, $2, $3, $4, $5, $6);
# Split argument lists on comma.
my @in = parseparamlist($in);
my @out = parseparamlist($out);
# Try in vain to keep people from editing this file.
# The theory is that they jump into the middle of the file
# without reading the header.
$text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n";
# Shared object (.so) file name.
if($modname eq "") {
$modname = "libc";
}
# System call name.
if($sysname eq "") {
$sysname = "$func";
}
# System call pointer variable name.
my $sysvarname = "proc$sysname";
my $strconvfunc = "BytePtrFromString";
my $strconvtype = "*byte";
$sysname =~ y/A-Z/a-z/; # All libc functions are lowercase.
# Runtime import of function to allow cross-platform builds.
$dynimports .= "//go:cgo_import_dynamic libc_${sysname} ${sysname} \"$modname.so\"\n";
# Link symbol to proc address variable.
$linknames .= "//go:linkname ${sysvarname} libc_${sysname}\n";
# Library proc address variable.
push @vars, $sysvarname;
# Go function header.
$out = join(', ', @out);
if($out ne "") {
$out = " ($out)";
}
if($text ne "") {
$text .= "\n"
}
$text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out;
# Check if err return available
my $errvar = "";
foreach my $p (@out) {
my ($name, $type) = parseparam($p);
if($type eq "error") {
$errvar = $name;
last;
}
}
# Prepare arguments to Syscall.
my @args = ();
my $n = 0;
foreach my $p (@in) {
my ($name, $type) = parseparam($p);
if($type =~ /^\*/) {
push @args, "uintptr(unsafe.Pointer($name))";
} elsif($type eq "string" && $errvar ne "") {
$text .= "\tvar _p$n $strconvtype\n";
$text .= "\t_p$n, $errvar = $strconvfunc($name)\n";
$text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n";
push @args, "uintptr(unsafe.Pointer(_p$n))";
$n++;
} elsif($type eq "string") {
print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n";
$text .= "\tvar _p$n $strconvtype\n";
$text .= "\t_p$n, _ = $strconvfunc($name)\n";
push @args, "uintptr(unsafe.Pointer(_p$n))";
$n++;
} elsif($type =~ /^\[\](.*)/) {
# Convert slice into pointer, length.
# Have to be careful not to take address of &a[0] if len == 0:
# pass nil in that case.
$text .= "\tvar _p$n *$1\n";
$text .= "\tif len($name) > 0 {\n\t\t_p$n = \&$name\[0]\n\t}\n";
push @args, "uintptr(unsafe.Pointer(_p$n))", "uintptr(len($name))";
$n++;
} elsif($type eq "int64" && $_32bit ne "") {
if($_32bit eq "big-endian") {
push @args, "uintptr($name >> 32)", "uintptr($name)";
} else {
push @args, "uintptr($name)", "uintptr($name >> 32)";
}
} elsif($type eq "bool") {
$text .= "\tvar _p$n uint32\n";
$text .= "\tif $name {\n\t\t_p$n = 1\n\t} else {\n\t\t_p$n = 0\n\t}\n";
push @args, "uintptr(_p$n)";
$n++;
} else {
push @args, "uintptr($name)";
}
}
my $nargs = @args;
# Determine which form to use; pad args with zeros.
my $asm = "sysvicall6";
if ($nonblock) {
$asm = "rawSysvicall6";
}
if(@args <= 6) {
while(@args < 6) {
push @args, "0";
}
} else {
print STDERR "$ARGV:$.: too many arguments to system call\n";
}
# Actual call.
my $args = join(', ', @args);
my $call = "$asm(uintptr(unsafe.Pointer(&$sysvarname)), $nargs, $args)";
# Assign return values.
my $body = "";
my $failexpr = "";
my @ret = ("_", "_", "_");
my @pout= ();
my $do_errno = 0;
for(my $i=0; $i<@out; $i++) {
my $p = $out[$i];
my ($name, $type) = parseparam($p);
my $reg = "";
if($name eq "err") {
$reg = "e1";
$ret[2] = $reg;
$do_errno = 1;
} else {
$reg = sprintf("r%d", $i);
$ret[$i] = $reg;
}
if($type eq "bool") {
$reg = "$reg != 0";
}
if($type eq "int64" && $_32bit ne "") {
# 64-bit number in r1:r0 or r0:r1.
if($i+2 > @out) {
print STDERR "$ARGV:$.: not enough registers for int64 return\n";
}
if($_32bit eq "big-endian") {
$reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1);
} else {
$reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i);
}
$ret[$i] = sprintf("r%d", $i);
$ret[$i+1] = sprintf("r%d", $i+1);
}
if($reg ne "e1") {
$body .= "\t$name = $type($reg)\n";
}
}
if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") {
$text .= "\t$call\n";
} else {
$text .= "\t$ret[0], $ret[1], $ret[2] := $call\n";
}
$text .= $body;
if ($do_errno) {
$text .= "\tif e1 != 0 {\n";
$text .= "\t\terr = e1\n";
$text .= "\t}\n";
}
$text .= "\treturn\n";
$text .= "}\n";
}
if($errors) {
exit 1;
}
print <<EOF;
// $cmdline
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build $tags
package $package
import (
"syscall"
"unsafe"
)
EOF
print "import \"golang.org/x/sys/unix\"\n" if $package ne "unix";
my $vardecls = "\t" . join(",\n\t", @vars);
$vardecls .= " syscallFunc";
chomp($_=<<EOF);
$dynimports
$linknames
var (
$vardecls
)
$text
EOF
print $_;
exit 0;

265
vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl generated vendored Normal file
View file

@ -0,0 +1,265 @@
#!/usr/bin/env perl
# Copyright 2011 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Parse the header files for OpenBSD and generate a Go usable sysctl MIB.
#
# Build a MIB with each entry being an array containing the level, type and
# a hash that will contain additional entries if the current entry is a node.
# We then walk this MIB and create a flattened sysctl name to OID hash.
#
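# Illustrative output (added for exposition): each flattened sysctl name is
# emitted as one mibentry in the generated sysctlMib slice, e.g. an entry of
# roughly the form
#	{ "kern.ostype", []_C_int{ 1, 1 } },
# (the OID values shown here are illustrative).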
use strict;
if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
print STDERR "GOARCH or GOOS not defined in environment\n";
exit 1;
}
my $debug = 0;
my %ctls = ();
my @headers = qw (
sys/sysctl.h
sys/socket.h
sys/tty.h
sys/malloc.h
sys/mount.h
sys/namei.h
sys/sem.h
sys/shm.h
sys/vmmeter.h
uvm/uvmexp.h
uvm/uvm_param.h
uvm/uvm_swap_encrypt.h
ddb/db_var.h
net/if.h
net/if_pfsync.h
net/pipex.h
netinet/in.h
netinet/icmp_var.h
netinet/igmp_var.h
netinet/ip_ah.h
netinet/ip_carp.h
netinet/ip_divert.h
netinet/ip_esp.h
netinet/ip_ether.h
netinet/ip_gre.h
netinet/ip_ipcomp.h
netinet/ip_ipip.h
netinet/pim_var.h
netinet/tcp_var.h
netinet/udp_var.h
netinet6/in6.h
netinet6/ip6_divert.h
netinet6/pim6_var.h
netinet/icmp6.h
netmpls/mpls.h
);
my @ctls = qw (
kern
vm
fs
net
#debug # Special handling required
hw
#machdep # Arch specific
user
ddb
#vfs # Special handling required
fs.posix
kern.forkstat
kern.intrcnt
kern.malloc
kern.nchstats
kern.seminfo
kern.shminfo
kern.timecounter
kern.tty
kern.watchdog
net.bpf
net.ifq
net.inet
net.inet.ah
net.inet.carp
net.inet.divert
net.inet.esp
net.inet.etherip
net.inet.gre
net.inet.icmp
net.inet.igmp
net.inet.ip
net.inet.ip.ifq
net.inet.ipcomp
net.inet.ipip
net.inet.mobileip
net.inet.pfsync
net.inet.pim
net.inet.tcp
net.inet.udp
net.inet6
net.inet6.divert
net.inet6.ip6
net.inet6.icmp6
net.inet6.pim6
net.inet6.tcp6
net.inet6.udp6
net.mpls
net.mpls.ifq
net.key
net.pflow
net.pfsync
net.pipex
net.rt
vm.swapencrypt
#vfsgenctl # Special handling required
);
# Node name "fixups"
my %ctl_map = (
"ipproto" => "net.inet",
"net.inet.ipproto" => "net.inet",
"net.inet6.ipv6proto" => "net.inet6",
"net.inet6.ipv6" => "net.inet6.ip6",
"net.inet.icmpv6" => "net.inet6.icmp6",
"net.inet6.divert6" => "net.inet6.divert",
"net.inet6.tcp6" => "net.inet.tcp",
"net.inet6.udp6" => "net.inet.udp",
"mpls" => "net.mpls",
"swpenc" => "vm.swapencrypt"
);
# Node mappings
my %node_map = (
"net.inet.ip.ifq" => "net.ifq",
"net.inet.pfsync" => "net.pfsync",
"net.mpls.ifq" => "net.ifq"
);
my $ctlname;
my %mib = ();
my %sysctl = ();
my $node;
sub debug() {
print STDERR "$_[0]\n" if $debug;
}
# Walk the MIB and build a sysctl name to OID mapping.
sub build_sysctl() {
my ($node, $name, $oid) = @_;
my %node = %{$node};
my @oid = @{$oid};
foreach my $key (sort keys %node) {
my @node = @{$node{$key}};
my $nodename = $name.($name ne '' ? '.' : '').$key;
my @nodeoid = (@oid, $node[0]);
if ($node[1] eq 'CTLTYPE_NODE') {
if (exists $node_map{$nodename}) {
$node = \%mib;
$ctlname = $node_map{$nodename};
foreach my $part (split /\./, $ctlname) {
$node = \%{@{$$node{$part}}[2]};
}
} else {
$node = $node[2];
}
&build_sysctl($node, $nodename, \@nodeoid);
} elsif ($node[1] ne '') {
$sysctl{$nodename} = \@nodeoid;
}
}
}
foreach my $ctl (@ctls) {
$ctls{$ctl} = $ctl;
}
# Build MIB
foreach my $header (@headers) {
&debug("Processing $header...");
open HEADER, "/usr/include/$header" ||
print STDERR "Failed to open $header\n";
while (<HEADER>) {
if ($_ =~ /^#define\s+(CTL_NAMES)\s+{/ ||
$_ =~ /^#define\s+(CTL_(.*)_NAMES)\s+{/ ||
$_ =~ /^#define\s+((.*)CTL_NAMES)\s+{/) {
if ($1 eq 'CTL_NAMES') {
# Top level.
$node = \%mib;
} else {
# Node.
my $nodename = lc($2);
if ($header =~ /^netinet\//) {
$ctlname = "net.inet.$nodename";
} elsif ($header =~ /^netinet6\//) {
$ctlname = "net.inet6.$nodename";
} elsif ($header =~ /^net\//) {
$ctlname = "net.$nodename";
} else {
$ctlname = "$nodename";
$ctlname =~ s/^(fs|net|kern)_/$1\./;
}
if (exists $ctl_map{$ctlname}) {
$ctlname = $ctl_map{$ctlname};
}
if (not exists $ctls{$ctlname}) {
&debug("Ignoring $ctlname...");
next;
}
# Walk down from the top of the MIB.
$node = \%mib;
foreach my $part (split /\./, $ctlname) {
if (not exists $$node{$part}) {
&debug("Missing node $part");
$$node{$part} = [ 0, '', {} ];
}
$node = \%{@{$$node{$part}}[2]};
}
}
# Populate current node with entries.
my $i = -1;
while (defined($_) && $_ !~ /^}/) {
$_ = <HEADER>;
$i++ if $_ =~ /{.*}/;
next if $_ !~ /{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}/;
$$node{$1} = [ $i, $2, {} ];
}
}
}
close HEADER;
}
&build_sysctl(\%mib, "", []);
print <<EOF;
// mksysctl_openbsd.pl
// Code generated by the command above; DO NOT EDIT.
// +build $ENV{'GOARCH'},$ENV{'GOOS'}
package unix;
type mibentry struct {
ctlname string
ctloid []_C_int
}
var sysctlMib = []mibentry {
EOF
foreach my $name (sort keys %sysctl) {
my @oid = @{$sysctl{$name}};
print "\t{ \"$name\", []_C_int{ ", join(', ', @oid), " } }, \n";
}
print <<EOF;
}
EOF

39
vendor/golang.org/x/sys/unix/mksysnum_darwin.pl generated vendored Normal file
View file

@ -0,0 +1,39 @@
#!/usr/bin/env perl
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Generate system call table for Darwin from sys/syscall.h
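# Illustrative (added for exposition): a header line such as
#	#define SYS_read 3
# is emitted as "SYS_READ = 3;" inside the generated const block.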
use strict;
if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
print STDERR "GOARCH or GOOS not defined in environment\n";
exit 1;
}
my $command = "mksysnum_darwin.pl " . join(' ', @ARGV);
print <<EOF;
// $command
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build $ENV{'GOARCH'},$ENV{'GOOS'}
package unix
const (
EOF
while(<>){
if(/^#define\s+SYS_(\w+)\s+([0-9]+)/){
my $name = $1;
my $num = $2;
$name =~ y/a-z/A-Z/;
print " SYS_$name = $num;"
}
}
print <<EOF;
)
EOF

50
vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl generated vendored Normal file
View file

@ -0,0 +1,50 @@
#!/usr/bin/env perl
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Generate system call table for DragonFly from master list
# (for example, /usr/src/sys/kern/syscalls.master).
use strict;
if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
print STDERR "GOARCH or GOOS not defined in environment\n";
exit 1;
}
my $command = "mksysnum_dragonfly.pl " . join(' ', @ARGV);
print <<EOF;
// $command
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build $ENV{'GOARCH'},$ENV{'GOOS'}
package unix
const (
EOF
while(<>){
if(/^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$/){
my $num = $1;
my $proto = $2;
my $name = "SYS_$3";
$name =~ y/a-z/A-Z/;
# There are multiple entries for enosys and nosys, so comment them out.
if($name =~ /^SYS_E?NOSYS$/){
$name = "// $name";
}
if($name eq 'SYS_SYS_EXIT'){
$name = 'SYS_EXIT';
}
print " $name = $num; // $proto\n";
}
}
print <<EOF;
)
EOF

50
vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl generated vendored Normal file
View file

@ -0,0 +1,50 @@
#!/usr/bin/env perl
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Generate system call table for FreeBSD from master list
# (for example, /usr/src/sys/kern/syscalls.master).
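# Illustrative (added for exposition): a master-list entry roughly like
#	3 AUE_NULL STD { ssize_t read(int fd, void *buf, size_t nbyte); }
# becomes "SYS_READ = 3; // { ssize_t read(int fd, void *buf, size_t nbyte); }".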
use strict;
if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
print STDERR "GOARCH or GOOS not defined in environment\n";
exit 1;
}
my $command = "mksysnum_freebsd.pl " . join(' ', @ARGV);
print <<EOF;
// $command
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build $ENV{'GOARCH'},$ENV{'GOOS'}
package unix
const (
EOF
while(<>){
if(/^([0-9]+)\s+\S+\s+(?:NO)?STD\s+({ \S+\s+(\w+).*)$/){
my $num = $1;
my $proto = $2;
my $name = "SYS_$3";
$name =~ y/a-z/A-Z/;
# There are multiple entries for enosys and nosys, so comment them out.
if($name =~ /^SYS_E?NOSYS$/){
$name = "// $name";
}
if($name eq 'SYS_SYS_EXIT'){
$name = 'SYS_EXIT';
}
print " $name = $num; // $proto\n";
}
}
print <<EOF;
)
EOF

58
vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl generated vendored Normal file
View file

@ -0,0 +1,58 @@
#!/usr/bin/env perl
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Generate system call table for NetBSD from master list
# (for example, /usr/src/sys/kern/syscalls.master).
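# Illustrative (added for exposition): a pipe-delimited entry roughly like
#	3 STD { ssize_t|sys||read(int fd, void *buf, size_t nbyte); }
# yields "SYS_READ = 3; // { ssize_t|sys||read(int fd, void *buf, size_t nbyte); }".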
use strict;
if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
print STDERR "GOARCH or GOOS not defined in environment\n";
exit 1;
}
my $command = "mksysnum_netbsd.pl " . join(' ', @ARGV);
print <<EOF;
// $command
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build $ENV{'GOARCH'},$ENV{'GOOS'}
package unix
const (
EOF
my $line = '';
while(<>){
if($line =~ /^(.*)\\$/) {
# Handle continuation
$line = $1;
$_ =~ s/^\s+//;
$line .= $_;
} else {
# New line
$line = $_;
}
next if $line =~ /\\$/;
if($line =~ /^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$/) {
my $num = $1;
my $proto = $6;
my $compat = $8;
my $name = "$7_$9";
$name = "$7_$11" if $11 ne '';
$name =~ y/a-z/A-Z/;
if($compat eq '' || $compat eq '13' || $compat eq '30' || $compat eq '50') {
print " $name = $num; // $proto\n";
}
}
}
print <<EOF;
)
EOF

50
vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl generated vendored Normal file
View file

@ -0,0 +1,50 @@
#!/usr/bin/env perl
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Generate system call table for OpenBSD from master list
# (for example, /usr/src/sys/kern/syscalls.master).
use strict;
if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
print STDERR "GOARCH or GOOS not defined in environment\n";
exit 1;
}
my $command = "mksysnum_openbsd.pl " . join(' ', @ARGV);
print <<EOF;
// $command
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build $ENV{'GOARCH'},$ENV{'GOOS'}
package unix
const (
EOF
while(<>){
if(/^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$/){
my $num = $1;
my $proto = $3;
my $name = $4;
$name =~ y/a-z/A-Z/;
# There are multiple entries for enosys and nosys, so comment them out.
if($name =~ /^SYS_E?NOSYS$/){
$name = "// $name";
}
if($name eq 'SYS_SYS_EXIT'){
$name = 'SYS_EXIT';
}
print " $name = $num; // $proto\n";
}
}
print <<EOF;
)
EOF

View file

@ -133,7 +133,10 @@ var consts = map[string]string{
// locales is taken from
// https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm.
var locales = []struct{ tag, name string }{
{"und", "windows-1252"}, // The default value.
// The default value. Explicitly state latin to benefit from the exact
// script option, while still making 1252 the default encoding for languages
// written in Latin script.
{"und_Latn", "windows-1252"},
{"ar", "windows-1256"},
{"ba", "windows-1251"},
{"be", "windows-1251"},

View file

@ -50,7 +50,7 @@ func LanguageDefault(tag language.Tag) string {
for _, t := range strings.Split(locales, " ") {
tags = append(tags, language.MustParse(t))
}
matcher = language.NewMatcher(tags)
matcher = language.NewMatcher(tags, language.PreferSameScript(true))
})
_, i, _ := matcher.Match(tag)
return canonical[localeMap[i]] // Default is Windows-1252.

View file

@ -313,7 +313,7 @@ var nameMap = map[string]htmlEncoding{
}
var localeMap = []htmlEncoding{
windows1252, // und
windows1252, // und_Latn
windows1256, // ar
windows1251, // ba
windows1251, // be
@ -349,4 +349,4 @@ var localeMap = []htmlEncoding{
big5, // zh-hant
}
const locales = "und ar ba be bg cs el et fa he hr hu ja kk ko ku ky lt lv mk pl ru sah sk sl sr tg th tr tt uk vi zh-hans zh-hant"
const locales = "und_Latn ar ba be bg cs el et fa he hr hu ja kk ko ku ky lt lv mk pl ru sah sk sl sr tg th tr tt uk vi zh-hans zh-hant"

View file

@ -1,351 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gen
import (
"bytes"
"encoding/gob"
"fmt"
"hash"
"hash/fnv"
"io"
"log"
"os"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
// This file contains utilities for generating code.
// TODO: other write methods like:
// - slices, maps, types, etc.
// CodeWriter is a utility for writing structured code. It computes the content
// hash and size of written content. It ensures there are newlines between
// written code blocks.
type CodeWriter struct {
buf bytes.Buffer
Size int
Hash hash.Hash32 // content hash
gob *gob.Encoder
// For comments we skip the usual one-line separator if they are followed by
// a code block.
skipSep bool
}
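// Illustrative usage (added for exposition; the names and values below are
// hypothetical):
//
//	w := NewCodeWriter()
//	w.WriteComment("exampleTable holds illustrative data.")
//	w.WriteConst("exampleVersion", "1.0")
//	w.WriteVar("exampleTable", []uint16{1, 2, 3})
//	w.WriteGoFile("tables.go", "mypkg")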
func (w *CodeWriter) Write(p []byte) (n int, err error) {
return w.buf.Write(p)
}
// NewCodeWriter returns a new CodeWriter.
func NewCodeWriter() *CodeWriter {
h := fnv.New32()
return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)}
}
// WriteGoFile appends the buffer with the total size of all created structures
// and writes it as a Go file to the given file with the given package name.
func (w *CodeWriter) WriteGoFile(filename, pkg string) {
f, err := os.Create(filename)
if err != nil {
log.Fatalf("Could not create file %s: %v", filename, err)
}
defer f.Close()
if _, err = w.WriteGo(f, pkg); err != nil {
log.Fatalf("Error writing file %s: %v", filename, err)
}
}
// WriteGo appends the buffer with the total size of all created structures and
// writes it as a Go file to the given writer with the given package name.
func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) {
sz := w.Size
w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32())
defer w.buf.Reset()
return WriteGo(out, pkg, w.buf.Bytes())
}
func (w *CodeWriter) printf(f string, x ...interface{}) {
fmt.Fprintf(w, f, x...)
}
func (w *CodeWriter) insertSep() {
if w.skipSep {
w.skipSep = false
return
}
// Use at least two newlines to ensure a blank line between the previous
// block and the next one. WriteGoFile will remove extraneous newlines.
w.printf("\n\n")
}
// WriteComment writes a comment block. All line starts are prefixed with "//".
// Initial empty lines are gobbled. The indentation for the first line is
// stripped from consecutive lines.
func (w *CodeWriter) WriteComment(comment string, args ...interface{}) {
s := fmt.Sprintf(comment, args...)
s = strings.Trim(s, "\n")
// Use at least two newlines to ensure a blank line between the previous
// block and the next one. WriteGoFile will remove extraneous newlines.
w.printf("\n\n// ")
w.skipSep = true
// strip first indent level.
sep := "\n"
for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] {
sep += s[:1]
}
strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s)
w.printf("\n")
}
func (w *CodeWriter) writeSizeInfo(size int) {
w.printf("// Size: %d bytes\n", size)
}
// WriteConst writes a constant of the given name and value.
func (w *CodeWriter) WriteConst(name string, x interface{}) {
w.insertSep()
v := reflect.ValueOf(x)
switch v.Type().Kind() {
case reflect.String:
w.printf("const %s %s = ", name, typeName(x))
w.WriteString(v.String())
w.printf("\n")
default:
w.printf("const %s = %#v\n", name, x)
}
}
// WriteVar writes a variable of the given name and value.
func (w *CodeWriter) WriteVar(name string, x interface{}) {
w.insertSep()
v := reflect.ValueOf(x)
oldSize := w.Size
sz := int(v.Type().Size())
w.Size += sz
switch v.Type().Kind() {
case reflect.String:
w.printf("var %s %s = ", name, typeName(x))
w.WriteString(v.String())
case reflect.Struct:
w.gob.Encode(x)
fallthrough
case reflect.Slice, reflect.Array:
w.printf("var %s = ", name)
w.writeValue(v)
w.writeSizeInfo(w.Size - oldSize)
default:
w.printf("var %s %s = ", name, typeName(x))
w.gob.Encode(x)
w.writeValue(v)
w.writeSizeInfo(w.Size - oldSize)
}
w.printf("\n")
}
func (w *CodeWriter) writeValue(v reflect.Value) {
x := v.Interface()
switch v.Kind() {
case reflect.String:
w.WriteString(v.String())
case reflect.Array:
// Don't double count: callers of WriteArray count on the size being
// added, so we need to discount it here.
w.Size -= int(v.Type().Size())
w.writeSlice(x, true)
case reflect.Slice:
w.writeSlice(x, false)
case reflect.Struct:
w.printf("%s{\n", typeName(v.Interface()))
t := v.Type()
for i := 0; i < v.NumField(); i++ {
w.printf("%s: ", t.Field(i).Name)
w.writeValue(v.Field(i))
w.printf(",\n")
}
w.printf("}")
default:
w.printf("%#v", x)
}
}
// WriteString writes a string literal.
func (w *CodeWriter) WriteString(s string) {
s = strings.Replace(s, `\`, `\\`, -1)
io.WriteString(w.Hash, s) // content hash
w.Size += len(s)
const maxInline = 40
if len(s) <= maxInline {
w.printf("%q", s)
return
}
// We will render the string as a multi-line string.
const maxWidth = 80 - 4 - len(`"`) - len(`" +`)
// When starting on its own line, go fmt indents line 2+ an extra level.
n, max := maxWidth, maxWidth-4
// As per https://golang.org/issue/18078, the compiler has trouble
// compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN,
// for large N. We insert redundant, explicit parentheses to work around
// that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 +
// ... + s127) + etc + (etc + ... + sN).
explicitParens, extraComment := len(s) > 128*1024, ""
if explicitParens {
w.printf(`(`)
extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078"
}
// Print "" +\n, if a string does not start on its own line.
b := w.buf.Bytes()
if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' {
w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment)
n, max = maxWidth, maxWidth
}
w.printf(`"`)
for sz, p, nLines := 0, 0, 0; p < len(s); {
var r rune
r, sz = utf8.DecodeRuneInString(s[p:])
out := s[p : p+sz]
chars := 1
if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' {
switch sz {
case 1:
out = fmt.Sprintf("\\x%02x", s[p])
case 2, 3:
out = fmt.Sprintf("\\u%04x", r)
case 4:
out = fmt.Sprintf("\\U%08x", r)
}
chars = len(out)
}
if n -= chars; n < 0 {
nLines++
if explicitParens && nLines&63 == 63 {
w.printf("\") + (\"")
}
w.printf("\" +\n\"")
n = max - len(out)
}
w.printf("%s", out)
p += sz
}
w.printf(`"`)
if explicitParens {
w.printf(`)`)
}
}
// WriteSlice writes a slice value.
func (w *CodeWriter) WriteSlice(x interface{}) {
w.writeSlice(x, false)
}
// WriteArray writes an array value.
func (w *CodeWriter) WriteArray(x interface{}) {
w.writeSlice(x, true)
}
func (w *CodeWriter) writeSlice(x interface{}, isArray bool) {
v := reflect.ValueOf(x)
w.gob.Encode(v.Len())
w.Size += v.Len() * int(v.Type().Elem().Size())
name := typeName(x)
if isArray {
name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:])
}
if isArray {
w.printf("%s{\n", name)
} else {
w.printf("%s{ // %d elements\n", name, v.Len())
}
switch kind := v.Type().Elem().Kind(); kind {
case reflect.String:
for _, s := range x.([]string) {
w.WriteString(s)
w.printf(",\n")
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
// nLine and nBlock are the number of elements per line and block.
nLine, nBlock, format := 8, 64, "%d,"
switch kind {
case reflect.Uint8:
format = "%#02x,"
case reflect.Uint16:
format = "%#04x,"
case reflect.Uint32:
nLine, nBlock, format = 4, 32, "%#08x,"
case reflect.Uint, reflect.Uint64:
nLine, nBlock, format = 4, 32, "%#016x,"
case reflect.Int8:
nLine = 16
}
n := nLine
for i := 0; i < v.Len(); i++ {
if i%nBlock == 0 && v.Len() > nBlock {
w.printf("// Entry %X - %X\n", i, i+nBlock-1)
}
x := v.Index(i).Interface()
w.gob.Encode(x)
w.printf(format, x)
if n--; n == 0 {
n = nLine
w.printf("\n")
}
}
w.printf("\n")
case reflect.Struct:
zero := reflect.Zero(v.Type().Elem()).Interface()
for i := 0; i < v.Len(); i++ {
x := v.Index(i).Interface()
w.gob.EncodeValue(v)
if !reflect.DeepEqual(zero, x) {
line := fmt.Sprintf("%#v,\n", x)
line = line[strings.IndexByte(line, '{'):]
w.printf("%d: ", i)
w.printf(line)
}
}
case reflect.Array:
for i := 0; i < v.Len(); i++ {
w.printf("%d: %#v,\n", i, v.Index(i).Interface())
}
default:
panic("gen: slice elem type not supported")
}
w.printf("}")
}
// WriteType writes a definition of the type of the given value and returns the
// type name.
func (w *CodeWriter) WriteType(x interface{}) string {
t := reflect.TypeOf(x)
w.printf("type %s struct {\n", t.Name())
for i := 0; i < t.NumField(); i++ {
w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type)
}
w.printf("}\n")
return t.Name()
}
// typeName returns the name of the go type of x.
func typeName(x interface{}) string {
t := reflect.ValueOf(x).Type()
return strings.Replace(fmt.Sprint(t), "main.", "", 1)
}

View file

@ -1,281 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gen contains common code for the various code generation tools in the
// text repository. Its usage ensures consistency between tools.
//
// This package defines command line flags that are common to most generation
// tools. The flags allow for specifying specific Unicode and CLDR versions
// in the public Unicode data repository (http://www.unicode.org/Public).
//
// A local Unicode data mirror can be set through the flag -local or the
// environment variable UNICODE_DIR. The former takes precedence. The local
// directory should follow the same structure as the public repository.
//
// IANA data can also optionally be mirrored by putting it in the iana directory
// rooted at the top of the local mirror. Beware, though, that IANA data is not
// versioned. So it is up to the developer to use the right version.
package gen // import "golang.org/x/text/internal/gen"
import (
"bytes"
"flag"
"fmt"
"go/build"
"go/format"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"path/filepath"
"sync"
"unicode"
"golang.org/x/text/unicode/cldr"
)
var (
url = flag.String("url",
"http://www.unicode.org/Public",
"URL of Unicode database directory")
iana = flag.String("iana",
"http://www.iana.org",
"URL of the IANA repository")
unicodeVersion = flag.String("unicode",
getEnv("UNICODE_VERSION", unicode.Version),
"unicode version to use")
cldrVersion = flag.String("cldr",
getEnv("CLDR_VERSION", cldr.Version),
"cldr version to use")
)
func getEnv(name, def string) string {
if v := os.Getenv(name); v != "" {
return v
}
return def
}
// Init performs common initialization for a gen command. It parses the flags
// and sets up the standard logging parameters.
func Init() {
log.SetPrefix("")
log.SetFlags(log.Lshortfile)
flag.Parse()
}
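For orientation, here is a rough sketch of how a generator built on this package might tie these pieces together. Only the gen.* calls reflect the API in this file; the buildTables helper, the output file name tables.go, and the package name mypkg are hypothetical placeholders.
// Sketch (not part of the vendored file): a typical generator main.
package main

import (
	"bytes"

	"golang.org/x/text/internal/gen"
)

func main() {
	gen.Init() // parses -url, -iana, -unicode, -cldr and sets up logging

	w := &bytes.Buffer{}
	gen.WriteUnicodeVersion(w) // emit the UnicodeVersion constant
	buildTables(w)             // hypothetical: write the package-specific tables

	// Prepend the standard header and package clause, gofmt, and write the file.
	gen.WriteGoFile("tables.go", "mypkg", w.Bytes())
}

// buildTables is a hypothetical placeholder for the actual table generation.
func buildTables(w *bytes.Buffer) {}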
const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package %s
`
// UnicodeVersion reports the requested Unicode version.
func UnicodeVersion() string {
return *unicodeVersion
}
// CLDRVersion reports the requested CLDR version.
func CLDRVersion() string {
return *cldrVersion
}
// IsLocal reports whether data files are available locally.
func IsLocal() bool {
dir, err := localReadmeFile()
if err != nil {
return false
}
if _, err = os.Stat(dir); err != nil {
return false
}
return true
}
// OpenUCDFile opens the requested UCD file. The file is specified relative to
// the public Unicode root directory. It will call log.Fatal if there are any
// errors.
func OpenUCDFile(file string) io.ReadCloser {
return openUnicode(path.Join(*unicodeVersion, "ucd", file))
}
// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there
// are any errors.
func OpenCLDRCoreZip() io.ReadCloser {
return OpenUnicodeFile("cldr", *cldrVersion, "core.zip")
}
// OpenUnicodeFile opens the requested file of the requested category from the
// root of the Unicode data archive. The file is specified relative to the
// public Unicode root directory. If version is "", it will use the default
// Unicode version. It will call log.Fatal if there are any errors.
func OpenUnicodeFile(category, version, file string) io.ReadCloser {
if version == "" {
version = UnicodeVersion()
}
return openUnicode(path.Join(category, version, file))
}
// OpenIANAFile opens the requested IANA file. The file is specified relative
// to the IANA root, which is typically either http://www.iana.org or the
// iana directory in the local mirror. It will call log.Fatal if there are any
// errors.
func OpenIANAFile(path string) io.ReadCloser {
return Open(*iana, "iana", path)
}
var (
dirMutex sync.Mutex
localDir string
)
const permissions = 0755
func localReadmeFile() (string, error) {
p, err := build.Import("golang.org/x/text", "", build.FindOnly)
if err != nil {
return "", fmt.Errorf("Could not locate package: %v", err)
}
return filepath.Join(p.Dir, "DATA", "README"), nil
}
func getLocalDir() string {
dirMutex.Lock()
defer dirMutex.Unlock()
readme, err := localReadmeFile()
if err != nil {
log.Fatal(err)
}
dir := filepath.Dir(readme)
if _, err := os.Stat(readme); err != nil {
if err := os.MkdirAll(dir, permissions); err != nil {
log.Fatalf("Could not create directory: %v", err)
}
ioutil.WriteFile(readme, []byte(readmeTxt), permissions)
}
return dir
}
const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT.
This directory contains downloaded files used to generate the various tables
in the golang.org/x/text subrepo.
Note that the language subtag repo (iana/assignments/language-subtag-registry)
and all other files in the iana subdirectory are not versioned and will need
to be periodically manually updated. The easiest way to do this is to remove
the entire iana directory. This is mostly of concern when updating the language
package.
`
// Open opens subdir/path if a local directory is specified and the file exists,
// where subdir is a directory relative to the local root, or fetches it from
// urlRoot/path otherwise. It will call log.Fatal if there are any errors.
func Open(urlRoot, subdir, path string) io.ReadCloser {
file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path))
return open(file, urlRoot, path)
}
func openUnicode(path string) io.ReadCloser {
file := filepath.Join(getLocalDir(), filepath.FromSlash(path))
return open(file, *url, path)
}
// TODO: automatically periodically update non-versioned files.
func open(file, urlRoot, path string) io.ReadCloser {
if f, err := os.Open(file); err == nil {
return f
}
r := get(urlRoot, path)
defer r.Close()
b, err := ioutil.ReadAll(r)
if err != nil {
log.Fatalf("Could not download file: %v", err)
}
os.MkdirAll(filepath.Dir(file), permissions)
if err := ioutil.WriteFile(file, b, permissions); err != nil {
log.Fatalf("Could not create file: %v", err)
}
return ioutil.NopCloser(bytes.NewReader(b))
}
func get(root, path string) io.ReadCloser {
url := root + "/" + path
fmt.Printf("Fetching %s...", url)
defer fmt.Println(" done.")
resp, err := http.Get(url)
if err != nil {
log.Fatalf("HTTP GET: %v", err)
}
if resp.StatusCode != 200 {
log.Fatalf("Bad GET status for %q: %q", url, resp.Status)
}
return resp.Body
}
// TODO: use Write*Version in all applicable packages.
// WriteUnicodeVersion writes a constant for the Unicode version from which the
// tables are generated.
func WriteUnicodeVersion(w io.Writer) {
fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n")
fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion())
}
// WriteCLDRVersion writes a constant for the CLDR version from which the
// tables are generated.
func WriteCLDRVersion(w io.Writer) {
fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n")
fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion())
}
// WriteGoFile prepends a standard file comment and package statement to the
// given bytes, applies gofmt, and writes them to a file with the given name.
// It will call log.Fatal if there are any errors.
func WriteGoFile(filename, pkg string, b []byte) {
w, err := os.Create(filename)
if err != nil {
log.Fatalf("Could not create file %s: %v", filename, err)
}
defer w.Close()
if _, err = WriteGo(w, pkg, b); err != nil {
log.Fatalf("Error writing file %s: %v", filename, err)
}
}
// WriteGo prepends a standard file comment and package statement to the given
// bytes, applies gofmt, and writes them to w.
func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) {
src := []byte(fmt.Sprintf(header, pkg))
src = append(src, b...)
formatted, err := format.Source(src)
if err != nil {
// Print the generated code even in case of an error so that the
// returned error can be meaningfully interpreted.
n, _ = w.Write(src)
return n, err
}
return w.Write(formatted)
}
// Repackage rewrites a Go file from belonging to package main to belonging to
// the given package.
func Repackage(inFile, outFile, pkg string) {
src, err := ioutil.ReadFile(inFile)
if err != nil {
log.Fatalf("reading %s: %v", inFile, err)
}
const toDelete = "package main\n\n"
i := bytes.Index(src, []byte(toDelete))
if i < 0 {
log.Fatalf("Could not find %q in %s.", toDelete, inFile)
}
w := &bytes.Buffer{}
w.Write(src[i+len(toDelete):])
WriteGoFile(outFile, pkg, w.Bytes())
}


@@ -1,58 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package triegen
// This file defines Compacter and its implementations.
import "io"
// A Compacter generates an alternative, more space-efficient way to store a
// trie value block. A trie value block holds all possible values for the last
// byte of a UTF-8 encoded rune. Excluding ASCII characters, a trie value block
// always has 64 values, as a UTF-8 encoding ends with a byte in [0x80, 0xC0).
type Compacter interface {
// Size reports whether the Compacter can encode the given block and, if so,
// the size of the encoded block. len(v) is always 64.
Size(v []uint64) (sz int, ok bool)
// Store stores the block using the Compacter's compression method.
// It returns a handle with which the block can be retrieved.
// len(v) is always 64.
Store(v []uint64) uint32
// Print writes the data structures associated to the given store to w.
Print(w io.Writer) error
// Handler returns the name of a function that gets called during trie
// lookup for blocks generated by the Compacter. The function should be of
// the form func (n uint32, b byte) uint64, where n is the index returned by
// the Compacter's Store method and b is the last byte of the UTF-8
// encoding, where 0x80 <= b < 0xC0, for which to do the lookup in the
// block.
Handler() string
}
// simpleCompacter is the default Compacter used by builder. It implements a
// normal trie block.
type simpleCompacter builder
func (b *simpleCompacter) Size([]uint64) (sz int, ok bool) {
return blockSize * b.ValueSize, true
}
func (b *simpleCompacter) Store(v []uint64) uint32 {
h := uint32(len(b.ValueBlocks) - blockOffset)
b.ValueBlocks = append(b.ValueBlocks, v)
return h
}
func (b *simpleCompacter) Print(io.Writer) error {
// Structures are printed in print.go.
return nil
}
func (b *simpleCompacter) Handler() string {
panic("Handler should be special-cased for this Compacter")
}


@@ -1,251 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package triegen
import (
"bytes"
"fmt"
"io"
"strings"
"text/template"
)
// print writes all the data structures as well as the code necessary to use the
// trie to w.
func (b *builder) print(w io.Writer) error {
b.Stats.NValueEntries = len(b.ValueBlocks) * blockSize
b.Stats.NValueBytes = len(b.ValueBlocks) * blockSize * b.ValueSize
b.Stats.NIndexEntries = len(b.IndexBlocks) * blockSize
b.Stats.NIndexBytes = len(b.IndexBlocks) * blockSize * b.IndexSize
b.Stats.NHandleBytes = len(b.Trie) * 2 * b.IndexSize
// If we only have one root trie, all starter blocks are at position 0 and
// we can access the arrays directly.
if len(b.Trie) == 1 {
// At this point we cannot refer to the generated tables directly.
b.ASCIIBlock = b.Name + "Values"
b.StarterBlock = b.Name + "Index"
} else {
// Otherwise we need to have explicit starter indexes in the trie
// structure.
b.ASCIIBlock = "t.ascii"
b.StarterBlock = "t.utf8Start"
}
b.SourceType = "[]byte"
if err := lookupGen.Execute(w, b); err != nil {
return err
}
b.SourceType = "string"
if err := lookupGen.Execute(w, b); err != nil {
return err
}
if err := trieGen.Execute(w, b); err != nil {
return err
}
for _, c := range b.Compactions {
if err := c.c.Print(w); err != nil {
return err
}
}
return nil
}
func printValues(n int, values []uint64) string {
w := &bytes.Buffer{}
boff := n * blockSize
fmt.Fprintf(w, "\t// Block %#x, offset %#x", n, boff)
var newline bool
for i, v := range values {
if i%6 == 0 {
newline = true
}
if v != 0 {
if newline {
fmt.Fprintf(w, "\n")
newline = false
}
fmt.Fprintf(w, "\t%#02x:%#04x, ", boff+i, v)
}
}
return w.String()
}
func printIndex(b *builder, nr int, n *node) string {
w := &bytes.Buffer{}
boff := nr * blockSize
fmt.Fprintf(w, "\t// Block %#x, offset %#x", nr, boff)
var newline bool
for i, c := range n.children {
if i%8 == 0 {
newline = true
}
if c != nil {
v := b.Compactions[c.index.compaction].Offset + uint32(c.index.index)
if v != 0 {
if newline {
fmt.Fprintf(w, "\n")
newline = false
}
fmt.Fprintf(w, "\t%#02x:%#02x, ", boff+i, v)
}
}
}
return w.String()
}
var (
trieGen = template.Must(template.New("trie").Funcs(template.FuncMap{
"printValues": printValues,
"printIndex": printIndex,
"title": strings.Title,
"dec": func(x int) int { return x - 1 },
"psize": func(n int) string {
return fmt.Sprintf("%d bytes (%.2f KiB)", n, float64(n)/1024)
},
}).Parse(trieTemplate))
lookupGen = template.Must(template.New("lookup").Parse(lookupTemplate))
)
// TODO: consider the return type of lookup. It could be uint64, even if the
// internal value type is smaller. We will have to verify this with the
// performance of unicode/norm, which is very sensitive to such changes.
const trieTemplate = `{{$b := .}}{{$multi := gt (len .Trie) 1}}
// {{.Name}}Trie. Total size: {{psize .Size}}. Checksum: {{printf "%08x" .Checksum}}.
type {{.Name}}Trie struct { {{if $multi}}
ascii []{{.ValueType}} // index for ASCII bytes
utf8Start []{{.IndexType}} // index for UTF-8 bytes >= 0xC0
{{end}}}
func new{{title .Name}}Trie(i int) *{{.Name}}Trie { {{if $multi}}
h := {{.Name}}TrieHandles[i]
return &{{.Name}}Trie{ {{.Name}}Values[uint32(h.ascii)<<6:], {{.Name}}Index[uint32(h.multi)<<6:] }
}
type {{.Name}}TrieHandle struct {
ascii, multi {{.IndexType}}
}
// {{.Name}}TrieHandles: {{len .Trie}} handles, {{.Stats.NHandleBytes}} bytes
var {{.Name}}TrieHandles = [{{len .Trie}}]{{.Name}}TrieHandle{
{{range .Trie}} { {{.ASCIIIndex}}, {{.StarterIndex}} }, // {{printf "%08x" .Checksum}}: {{.Name}}
{{end}}}{{else}}
return &{{.Name}}Trie{}
}
{{end}}
// lookupValue determines the type of block n and looks up the value for b.
func (t *{{.Name}}Trie) lookupValue(n uint32, b byte) {{.ValueType}}{{$last := dec (len .Compactions)}} {
switch { {{range $i, $c := .Compactions}}
{{if eq $i $last}}default{{else}}case n < {{$c.Cutoff}}{{end}}:{{if ne $i 0}}
n -= {{$c.Offset}}{{end}}
return {{print $b.ValueType}}({{$c.Handler}}){{end}}
}
}
// {{.Name}}Values: {{len .ValueBlocks}} blocks, {{.Stats.NValueEntries}} entries, {{.Stats.NValueBytes}} bytes
// The third block is the zero block.
var {{.Name}}Values = [{{.Stats.NValueEntries}}]{{.ValueType}} {
{{range $i, $v := .ValueBlocks}}{{printValues $i $v}}
{{end}}}
// {{.Name}}Index: {{len .IndexBlocks}} blocks, {{.Stats.NIndexEntries}} entries, {{.Stats.NIndexBytes}} bytes
// Block 0 is the zero block.
var {{.Name}}Index = [{{.Stats.NIndexEntries}}]{{.IndexType}} {
{{range $i, $v := .IndexBlocks}}{{printIndex $b $i $v}}
{{end}}}
`
// TODO: consider allowing zero-length strings after evaluating performance with
// unicode/norm.
const lookupTemplate = `
// lookup{{if eq .SourceType "string"}}String{{end}} returns the trie value for the first UTF-8 encoding in s and
// the width in bytes of this encoding. The size will be 0 if s does not
// hold enough bytes to complete the encoding. len(s) must be greater than 0.
func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}(s {{.SourceType}}) (v {{.ValueType}}, sz int) {
c0 := s[0]
switch {
case c0 < 0x80: // is ASCII
return {{.ASCIIBlock}}[c0], 1
case c0 < 0xC2:
return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
case c0 < 0xE0: // 2-byte UTF-8
if len(s) < 2 {
return 0, 0
}
i := {{.StarterBlock}}[c0]
c1 := s[1]
if c1 < 0x80 || 0xC0 <= c1 {
return 0, 1 // Illegal UTF-8: not a continuation byte.
}
return t.lookupValue(uint32(i), c1), 2
case c0 < 0xF0: // 3-byte UTF-8
if len(s) < 3 {
return 0, 0
}
i := {{.StarterBlock}}[c0]
c1 := s[1]
if c1 < 0x80 || 0xC0 <= c1 {
return 0, 1 // Illegal UTF-8: not a continuation byte.
}
o := uint32(i)<<6 + uint32(c1)
i = {{.Name}}Index[o]
c2 := s[2]
if c2 < 0x80 || 0xC0 <= c2 {
return 0, 2 // Illegal UTF-8: not a continuation byte.
}
return t.lookupValue(uint32(i), c2), 3
case c0 < 0xF8: // 4-byte UTF-8
if len(s) < 4 {
return 0, 0
}
i := {{.StarterBlock}}[c0]
c1 := s[1]
if c1 < 0x80 || 0xC0 <= c1 {
return 0, 1 // Illegal UTF-8: not a continuation byte.
}
o := uint32(i)<<6 + uint32(c1)
i = {{.Name}}Index[o]
c2 := s[2]
if c2 < 0x80 || 0xC0 <= c2 {
return 0, 2 // Illegal UTF-8: not a continuation byte.
}
o = uint32(i)<<6 + uint32(c2)
i = {{.Name}}Index[o]
c3 := s[3]
if c3 < 0x80 || 0xC0 <= c3 {
return 0, 3 // Illegal UTF-8: not a continuation byte.
}
return t.lookupValue(uint32(i), c3), 4
}
// Illegal rune
return 0, 1
}
// lookup{{if eq .SourceType "string"}}String{{end}}Unsafe returns the trie value for the first UTF-8 encoding in s.
// s must start with a full and valid UTF-8 encoded rune.
func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}Unsafe(s {{.SourceType}}) {{.ValueType}} {
c0 := s[0]
if c0 < 0x80 { // is ASCII
return {{.ASCIIBlock}}[c0]
}
i := {{.StarterBlock}}[c0]
if c0 < 0xE0 { // 2-byte UTF-8
return t.lookupValue(uint32(i), s[1])
}
i = {{.Name}}Index[uint32(i)<<6+uint32(s[1])]
if c0 < 0xF0 { // 3-byte UTF-8
return t.lookupValue(uint32(i), s[2])
}
i = {{.Name}}Index[uint32(i)<<6+uint32(s[2])]
if c0 < 0xF8 { // 4-byte UTF-8
return t.lookupValue(uint32(i), s[3])
}
return 0
}
`


@@ -1,494 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package triegen implements a code generator for a trie for associating
// unsigned integer values with UTF-8 encoded runes.
//
// Many of the go.text packages use tries for storing per-rune information. A
// trie is especially useful if many of the runes have the same value. If this
// is the case, many blocks can be expected to be shared allowing for
// information on many runes to be stored in little space.
//
// As most of the lookups are done directly on []byte slices, the tries use the
// UTF-8 bytes directly for the lookup. This saves a conversion from UTF-8 to
// runes and contributes a little bit to better performance. It also naturally
// provides a fast path for ASCII.
//
// Space is also an issue. There are many code points defined in Unicode and as
// a result tables can get quite large. So every byte counts. The triegen
// package automatically chooses the smallest integer values to represent the
// tables. Compacters allow further compression of the trie by allowing for
// alternative representations of individual trie blocks.
//
// triegen allows generating multiple tries as a single structure. This is
// useful when, for example, one wants to generate tries for several languages
// that have a lot of values in common. Some existing libraries for
// internationalization store all per-language data as a dynamically loadable
// chunk. The go.text packages are designed with the assumption that the user
// typically wants to compile in support for all supported languages, in line
// with the approach common to Go to create a single standalone binary. The
// multi-root trie approach can give significant storage savings in this
// scenario.
//
// triegen generates both tables and code. The code is optimized to use the
// automatically chosen data types. The following code is generated for a Trie
// or multiple Tries named "foo":
// - type fooTrie
// The trie type.
//
// - func newFooTrie(x int) *fooTrie
// Trie constructor, where x is the index of the trie passed to Gen.
//
// - func (t *fooTrie) lookup(s []byte) (v uintX, sz int)
// The lookup method, where uintX is automatically chosen.
//
// - func lookupString, lookupUnsafe and lookupStringUnsafe
// Variants of the above.
//
// - var fooValues and fooIndex and any tables generated by Compacters.
// The core trie data.
//
// - var fooTrieHandles
// Indexes of starter blocks in case of multiple trie roots.
//
// It is recommended that users test the generated trie by checking the returned
// value for every rune. Such exhaustive tests are possible as the number of
// runes in Unicode is limited.
package triegen // import "golang.org/x/text/internal/triegen"
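To make the generated API described above concrete, the following is a minimal sketch of a generator for a single trie named "foo". It relies only on NewTrie, (*Trie).Insert, and (*Trie).Gen as defined later in this file; the output file name and the inserted rune/value pairs are arbitrary illustrations.
// Sketch (not part of the vendored file): generating fooTrie tables.
package main

import (
	"log"
	"os"

	"golang.org/x/text/internal/triegen"
)

func main() {
	t := triegen.NewTrie("foo")
	// Associate non-zero values with a few runes (zero values need not be inserted).
	t.Insert('A', 1)
	t.Insert('Ω', 2)
	t.Insert('世', 3)

	f, err := os.Create("foo_trie.go") // arbitrary output name
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Gen writes the tables plus the lookup methods for fooTrie. In the x/text
	// tools the output is normally routed through gen.WriteGoFile to prepend
	// the package header.
	if _, err := t.Gen(f); err != nil {
		log.Fatal(err)
	}
}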
// TODO: Arguably, the internally optimized data types would not have to be
// exposed in the generated API. We could also investigate not generating the
// code, but using it through a package. We would have to investigate the impact
// on performance of making such a change, though. For packages like unicode/norm,
// small changes like this could tank performance.
import (
"encoding/binary"
"fmt"
"hash/crc64"
"io"
"log"
"unicode/utf8"
)
// builder builds a set of tries for associating values with runes. The set of
// tries can share common index and value blocks.
type builder struct {
Name string
// ValueType is the type of the trie values looked up.
ValueType string
// ValueSize is the byte size of the ValueType.
ValueSize int
// IndexType is the type of trie index values used for all UTF-8 bytes of
// a rune except the last one.
IndexType string
// IndexSize is the byte size of the IndexType.
IndexSize int
// SourceType is used when generating the lookup functions. If the user
// requests StringSupport, all lookup functions will be generated for
// string input as well.
SourceType string
Trie []*Trie
IndexBlocks []*node
ValueBlocks [][]uint64
Compactions []compaction
Checksum uint64
ASCIIBlock string
StarterBlock string
indexBlockIdx map[uint64]int
valueBlockIdx map[uint64]nodeIndex
asciiBlockIdx map[uint64]int
// Stats are used to fill out the template.
Stats struct {
NValueEntries int
NValueBytes int
NIndexEntries int
NIndexBytes int
NHandleBytes int
}
err error
}
// A nodeIndex encodes the index of a node, which is defined by the compaction
// which stores it and an index within the compaction. For internal nodes, the
// compaction is always 0.
type nodeIndex struct {
compaction int
index int
}
// compaction keeps track of stats used for the compaction.
type compaction struct {
c Compacter
blocks []*node
maxHandle uint32
totalSize int
// Used by template-based generator and thus exported.
Cutoff uint32
Offset uint32
Handler string
}
func (b *builder) setError(err error) {
if b.err == nil {
b.err = err
}
}
// An Option can be passed to Gen.
type Option func(b *builder) error
// Compact configures the trie generator to use the given Compacter.
func Compact(c Compacter) Option {
return func(b *builder) error {
b.Compactions = append(b.Compactions, compaction{
c: c,
Handler: c.Handler() + "(n, b)"})
return nil
}
}
// Gen writes Go code for a shared trie lookup structure to w for the given
// Tries. The generated trie type will be called nameTrie. newNameTrie(x) will
// return the *nameTrie for tries[x]. A value can be looked up by using one of
// the various lookup methods defined on nameTrie. It returns the table size of
// the generated trie.
func Gen(w io.Writer, name string, tries []*Trie, opts ...Option) (sz int, err error) {
// The index contains two dummy blocks, followed by the zero block. The zero
// block is at offset 0x80, so that the offset for the zero block for
// continuation bytes is 0.
b := &builder{
Name: name,
Trie: tries,
IndexBlocks: []*node{{}, {}, {}},
Compactions: []compaction{{
Handler: name + "Values[n<<6+uint32(b)]",
}},
// The 0 key in indexBlockIdx and valueBlockIdx is the hash of the zero
// block.
indexBlockIdx: map[uint64]int{0: 0},
valueBlockIdx: map[uint64]nodeIndex{0: {}},
asciiBlockIdx: map[uint64]int{},
}
b.Compactions[0].c = (*simpleCompacter)(b)
for _, f := range opts {
if err := f(b); err != nil {
return 0, err
}
}
b.build()
if b.err != nil {
return 0, b.err
}
if err = b.print(w); err != nil {
return 0, err
}
return b.Size(), nil
}
// A Trie represents a single root node of a trie. A builder may build several
// overlapping tries at once.
type Trie struct {
root *node
hiddenTrie
}
// hiddenTrie contains values we want to be visible to the template generator,
// but hidden from the API documentation.
type hiddenTrie struct {
Name string
Checksum uint64
ASCIIIndex int
StarterIndex int
}
// NewTrie returns a new trie root.
func NewTrie(name string) *Trie {
return &Trie{
&node{
children: make([]*node, blockSize),
values: make([]uint64, utf8.RuneSelf),
},
hiddenTrie{Name: name},
}
}
// Gen is a convenience wrapper around the Gen func passing t as the only trie
// and uses the name passed to NewTrie. It returns the size of the generated
// tables.
func (t *Trie) Gen(w io.Writer, opts ...Option) (sz int, err error) {
return Gen(w, t.Name, []*Trie{t}, opts...)
}
// node is a node of the intermediate trie structure.
type node struct {
// children holds this node's children. It is always of length 64.
// A child node may be nil.
children []*node
// values contains the values of this node. If it is non-nil, this node is
// either a root or leaf node:
// For root nodes, len(values) == 128 and it maps the bytes in [0x00, 0x7F].
// For leaf nodes, len(values) == 64 and it maps the bytes in [0x80, 0xBF].
values []uint64
index nodeIndex
}
// Insert associates value with the given rune. Insert will panic if a non-zero
// value is passed for an invalid rune.
func (t *Trie) Insert(r rune, value uint64) {
if value == 0 {
return
}
s := string(r)
if []rune(s)[0] != r && value != 0 {
// Note: The UCD tables will always assign what amounts to a zero value
// to a surrogate. Allowing a zero value for an illegal rune allows
// users to iterate over [0..MaxRune] without having to explicitly
// exclude surrogates, which would be tedious.
panic(fmt.Sprintf("triegen: non-zero value for invalid rune %U", r))
}
if len(s) == 1 {
// It is a root node value (ASCII).
t.root.values[s[0]] = value
return
}
n := t.root
for ; len(s) > 1; s = s[1:] {
if n.children == nil {
n.children = make([]*node, blockSize)
}
p := s[0] % blockSize
c := n.children[p]
if c == nil {
c = &node{}
n.children[p] = c
}
if len(s) > 2 && c.values != nil {
log.Fatalf("triegen: insert(%U): found internal node with values", r)
}
n = c
}
if n.values == nil {
n.values = make([]uint64, blockSize)
}
if n.children != nil {
log.Fatalf("triegen: insert(%U): found leaf node that also has child nodes", r)
}
n.values[s[0]-0x80] = value
}
// Size returns the number of bytes the generated trie will take to store. It
// needs to be exported as it is used in the templates.
func (b *builder) Size() int {
// Index blocks.
sz := len(b.IndexBlocks) * blockSize * b.IndexSize
// Skip the first compaction, which represents the normal value blocks, as
// its totalSize does not account for the ASCII blocks, which are managed
// separately.
sz += len(b.ValueBlocks) * blockSize * b.ValueSize
for _, c := range b.Compactions[1:] {
sz += c.totalSize
}
// TODO: this computation does not account for the fixed overhead of using
// a compaction, either code or data. As for data, though, the typical
// overhead of data is in the order of bytes (2 bytes for cases). Further,
// the savings of using a compaction should anyway be substantial for it to
// be worth it.
// For multi-root tries, we also need to account for the handles.
if len(b.Trie) > 1 {
sz += 2 * b.IndexSize * len(b.Trie)
}
return sz
}
func (b *builder) build() {
// Compute the sizes of the values.
var vmax uint64
for _, t := range b.Trie {
vmax = maxValue(t.root, vmax)
}
b.ValueType, b.ValueSize = getIntType(vmax)
// Compute all block allocations.
// TODO: first compute the ASCII blocks for all tries and then the other
// nodes. ASCII blocks are more restricted in placement, as they require two
// blocks to be placed consecutively. Processing them first may improve
// sharing (at least one zero block can be expected to be saved.)
for _, t := range b.Trie {
b.Checksum += b.buildTrie(t)
}
// Compute the offsets for all the Compacters.
offset := uint32(0)
for i := range b.Compactions {
c := &b.Compactions[i]
c.Offset = offset
offset += c.maxHandle + 1
c.Cutoff = offset
}
// Compute the sizes of indexes.
// TODO: different byte positions could have different sizes. So far we have
// not found a case where this is beneficial.
imax := uint64(b.Compactions[len(b.Compactions)-1].Cutoff)
for _, ib := range b.IndexBlocks {
if x := uint64(ib.index.index); x > imax {
imax = x
}
}
b.IndexType, b.IndexSize = getIntType(imax)
}
func maxValue(n *node, max uint64) uint64 {
if n == nil {
return max
}
for _, c := range n.children {
max = maxValue(c, max)
}
for _, v := range n.values {
if max < v {
max = v
}
}
return max
}
func getIntType(v uint64) (string, int) {
switch {
case v < 1<<8:
return "uint8", 1
case v < 1<<16:
return "uint16", 2
case v < 1<<32:
return "uint32", 4
}
return "uint64", 8
}
const (
blockSize = 64
// Subtract two blocks to offset 0x80, the first continuation byte.
blockOffset = 2
// Subtract three blocks to offset 0xC0, the first non-ASCII starter.
rootBlockOffset = 3
)
var crcTable = crc64.MakeTable(crc64.ISO)
func (b *builder) buildTrie(t *Trie) uint64 {
n := t.root
// Get the ASCII offset. For the first trie, the ASCII block will be at
// position 0.
hasher := crc64.New(crcTable)
binary.Write(hasher, binary.BigEndian, n.values)
hash := hasher.Sum64()
v, ok := b.asciiBlockIdx[hash]
if !ok {
v = len(b.ValueBlocks)
b.asciiBlockIdx[hash] = v
b.ValueBlocks = append(b.ValueBlocks, n.values[:blockSize], n.values[blockSize:])
if v == 0 {
// Add the zero block at position 2 so that it will be assigned a
// zero reference in the lookup blocks.
// TODO: always do this? This would allow us to remove a check from
// the trie lookup, but at the expense of extra space. Analyze
// performance for unicode/norm.
b.ValueBlocks = append(b.ValueBlocks, make([]uint64, blockSize))
}
}
t.ASCIIIndex = v
// Compute remaining offsets.
t.Checksum = b.computeOffsets(n, true)
// We already subtracted the normal blockOffset from the index. Subtract the
// difference for starter bytes.
t.StarterIndex = n.index.index - (rootBlockOffset - blockOffset)
return t.Checksum
}
func (b *builder) computeOffsets(n *node, root bool) uint64 {
// For the first trie, the root lookup block will be at position 3, which is
// the offset for UTF-8 non-ASCII starter bytes.
first := len(b.IndexBlocks) == rootBlockOffset
if first {
b.IndexBlocks = append(b.IndexBlocks, n)
}
// We special-case the cases where all values recursively are 0. This allows
// for the use of a zero block to which all such values can be directed.
hash := uint64(0)
if n.children != nil || n.values != nil {
hasher := crc64.New(crcTable)
for _, c := range n.children {
var v uint64
if c != nil {
v = b.computeOffsets(c, false)
}
binary.Write(hasher, binary.BigEndian, v)
}
binary.Write(hasher, binary.BigEndian, n.values)
hash = hasher.Sum64()
}
if first {
b.indexBlockIdx[hash] = rootBlockOffset - blockOffset
}
// Compacters don't apply to internal nodes.
if n.children != nil {
v, ok := b.indexBlockIdx[hash]
if !ok {
v = len(b.IndexBlocks) - blockOffset
b.IndexBlocks = append(b.IndexBlocks, n)
b.indexBlockIdx[hash] = v
}
n.index = nodeIndex{0, v}
} else {
h, ok := b.valueBlockIdx[hash]
if !ok {
bestI, bestSize := 0, blockSize*b.ValueSize
for i, c := range b.Compactions[1:] {
if sz, ok := c.c.Size(n.values); ok && bestSize > sz {
bestI, bestSize = i+1, sz
}
}
c := &b.Compactions[bestI]
c.totalSize += bestSize
v := c.c.Store(n.values)
if c.maxHandle < v {
c.maxHandle = v
}
h = nodeIndex{bestI, int(v)}
b.valueBlockIdx[hash] = h
}
n.index = h
}
return hash
}


@@ -1,376 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ucd provides a parser for Unicode Character Database files, the
// format of which is defined in http://www.unicode.org/reports/tr44/. See
// http://www.unicode.org/Public/UCD/latest/ucd/ for example files.
//
// It currently does not support substitutions of missing fields.
package ucd // import "golang.org/x/text/internal/ucd"
import (
"bufio"
"bytes"
"errors"
"io"
"log"
"regexp"
"strconv"
"strings"
)
// UnicodeData.txt fields.
const (
CodePoint = iota
Name
GeneralCategory
CanonicalCombiningClass
BidiClass
DecompMapping
DecimalValue
DigitValue
NumericValue
BidiMirrored
Unicode1Name
ISOComment
SimpleUppercaseMapping
SimpleLowercaseMapping
SimpleTitlecaseMapping
)
// Parse calls f for each entry in the given reader of a UCD file. It will close
// the reader upon return. It will call log.Fatal if any error occurred.
//
// This implements the most common usage pattern of using Parser.
func Parse(r io.ReadCloser, f func(p *Parser)) {
defer r.Close()
p := New(r)
for p.Next() {
f(p)
}
if err := p.Err(); err != nil {
r.Close() // os.Exit will cause defers not to be called.
log.Fatal(err)
}
}
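As an illustration of this usage pattern, the sketch below collects the general category of every code point in UnicodeData.txt. It assumes the companion gen package for opening the file; the printed "Lu" is simply the category expected for 'A' in standard UCD data.
// Sketch (not part of the vendored file): reading UnicodeData.txt with Parse.
package main

import (
	"fmt"

	"golang.org/x/text/internal/gen"
	"golang.org/x/text/internal/ucd"
)

func main() {
	categories := map[rune]string{}
	ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) {
		categories[p.Rune(0)] = p.String(ucd.GeneralCategory)
	})
	fmt.Println(categories['A']) // expected: Lu
}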
// An Option is used to configure a Parser.
type Option func(p *Parser)
func keepRanges(p *Parser) {
p.keepRanges = true
}
var (
// KeepRanges prevents the expansion of ranges. The raw ranges can be
// obtained by calling Range(0) on the parser.
KeepRanges Option = keepRanges
)
// The Part option registers a handler for lines starting with a '@'. The text
// after a '@' is available as the first field. Comments are handled as usual.
func Part(f func(p *Parser)) Option {
return func(p *Parser) {
p.partHandler = f
}
}
// The CommentHandler option passes comments that appear on a line by themselves
// to the given handler.
func CommentHandler(f func(s string)) Option {
return func(p *Parser) {
p.commentHandler = f
}
}
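A sketch of how these options might be used: the parser below keeps the raw ranges of Blocks.txt instead of expanding them rune by rune. The file name and its field layout are those of the standard UCD Blocks.txt; gen.OpenUCDFile is again assumed for input.
// Sketch (not part of the vendored file): a manual Next loop with KeepRanges.
package main

import (
	"fmt"
	"log"

	"golang.org/x/text/internal/gen"
	"golang.org/x/text/internal/ucd"
)

func main() {
	r := gen.OpenUCDFile("Blocks.txt")
	defer r.Close()

	p := ucd.New(r, ucd.KeepRanges)
	for p.Next() {
		first, last := p.Range(0)          // raw range, e.g. 0000..007F
		fmt.Printf("%U..%U %s\n", first, last, p.String(1)) // block name
	}
	if err := p.Err(); err != nil {
		log.Fatal(err)
	}
}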
// A Parser parses Unicode Character Database (UCD) files.
type Parser struct {
scanner *bufio.Scanner
keepRanges bool // Don't expand rune ranges in field 0.
err error
comment []byte
field [][]byte
// parsedRange is needed in case Range(0) is called more than once for one
// field. In some cases this requires scanning ahead.
parsedRange bool
rangeStart, rangeEnd rune
partHandler func(p *Parser)
commentHandler func(s string)
}
func (p *Parser) setError(err error) {
if p.err == nil {
p.err = err
}
}
func (p *Parser) getField(i int) []byte {
if i >= len(p.field) {
return nil
}
return p.field[i]
}
// Err returns a non-nil error if any error occurred during parsing.
func (p *Parser) Err() error {
return p.err
}
// New returns a Parser for the given Reader.
func New(r io.Reader, o ...Option) *Parser {
p := &Parser{
scanner: bufio.NewScanner(r),
}
for _, f := range o {
f(p)
}
return p
}
// Next parses the next line in the file. It returns true if a line was parsed
// and false if it reached the end of the file.
func (p *Parser) Next() bool {
if !p.keepRanges && p.rangeStart < p.rangeEnd {
p.rangeStart++
return true
}
p.comment = nil
p.field = p.field[:0]
p.parsedRange = false
for p.scanner.Scan() {
b := p.scanner.Bytes()
if len(b) == 0 {
continue
}
if b[0] == '#' {
if p.commentHandler != nil {
p.commentHandler(strings.TrimSpace(string(b[1:])))
}
continue
}
// Parse line
if i := bytes.IndexByte(b, '#'); i != -1 {
p.comment = bytes.TrimSpace(b[i+1:])
b = b[:i]
}
if b[0] == '@' {
if p.partHandler != nil {
p.field = append(p.field, bytes.TrimSpace(b[1:]))
p.partHandler(p)
p.field = p.field[:0]
}
p.comment = nil
continue
}
for {
i := bytes.IndexByte(b, ';')
if i == -1 {
p.field = append(p.field, bytes.TrimSpace(b))
break
}
p.field = append(p.field, bytes.TrimSpace(b[:i]))
b = b[i+1:]
}
if !p.keepRanges {
p.rangeStart, p.rangeEnd = p.getRange(0)
}
return true
}
p.setError(p.scanner.Err())
return false
}
func parseRune(b []byte) (rune, error) {
if len(b) > 2 && b[0] == 'U' && b[1] == '+' {
b = b[2:]
}
x, err := strconv.ParseUint(string(b), 16, 32)
return rune(x), err
}
func (p *Parser) parseRune(b []byte) rune {
x, err := parseRune(b)
p.setError(err)
return x
}
// Rune parses and returns field i as a rune.
func (p *Parser) Rune(i int) rune {
if i > 0 || p.keepRanges {
return p.parseRune(p.getField(i))
}
return p.rangeStart
}
// Runes interprets and returns field i as a sequence of runes.
func (p *Parser) Runes(i int) (runes []rune) {
add := func(b []byte) {
if b = bytes.TrimSpace(b); len(b) > 0 {
runes = append(runes, p.parseRune(b))
}
}
for b := p.getField(i); ; {
i := bytes.IndexByte(b, ' ')
if i == -1 {
add(b)
break
}
add(b[:i])
b = b[i+1:]
}
return
}
var (
errIncorrectLegacyRange = errors.New("ucd: unmatched <* First>")
// reRange matches one line of a legacy rune range.
reRange = regexp.MustCompile("^([0-9A-F]*);<([^,]*), ([^>]*)>(.*)$")
)
// Range parses and returns field i as a rune range. A range is inclusive at
// both ends. If the field only has one rune, first and last will be identical.
// It supports the legacy format for ranges used in UnicodeData.txt.
func (p *Parser) Range(i int) (first, last rune) {
if !p.keepRanges {
return p.rangeStart, p.rangeStart
}
return p.getRange(i)
}
func (p *Parser) getRange(i int) (first, last rune) {
b := p.getField(i)
if k := bytes.Index(b, []byte("..")); k != -1 {
return p.parseRune(b[:k]), p.parseRune(b[k+2:])
}
// The first field may not be a rune, in which case we may ignore any error
// and set the range as 0..0.
x, err := parseRune(b)
if err != nil {
// Disable range parsing henceforth. This ensures that an error will be
// returned if the user subsequently tries to parse this field as
// a Rune.
p.keepRanges = true
}
// Special case for UnicodeData that was retained for backwards compatibility.
if i == 0 && len(p.field) > 1 && bytes.HasSuffix(p.field[1], []byte("First>")) {
if p.parsedRange {
return p.rangeStart, p.rangeEnd
}
mf := reRange.FindStringSubmatch(p.scanner.Text())
if mf == nil || !p.scanner.Scan() {
p.setError(errIncorrectLegacyRange)
return x, x
}
// Using Bytes would be more efficient here, but Text is a lot easier
// and this is not a frequent case.
ml := reRange.FindStringSubmatch(p.scanner.Text())
if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] {
p.setError(errIncorrectLegacyRange)
return x, x
}
p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Bytes()[:len(ml[1])])
p.parsedRange = true
return p.rangeStart, p.rangeEnd
}
return x, x
}
// bools recognizes all valid UCD boolean values.
var bools = map[string]bool{
"": false,
"N": false,
"No": false,
"F": false,
"False": false,
"Y": true,
"Yes": true,
"T": true,
"True": true,
}
// Bool parses and returns field i as a boolean value.
func (p *Parser) Bool(i int) bool {
b := p.getField(i)
for s, v := range bools {
if bstrEq(b, s) {
return v
}
}
p.setError(strconv.ErrSyntax)
return false
}
// Int parses and returns field i as an integer value.
func (p *Parser) Int(i int) int {
x, err := strconv.ParseInt(string(p.getField(i)), 10, 64)
p.setError(err)
return int(x)
}
// Uint parses and returns field i as an unsigned integer value.
func (p *Parser) Uint(i int) uint {
x, err := strconv.ParseUint(string(p.getField(i)), 10, 64)
p.setError(err)
return uint(x)
}
// Float parses and returns field i as a decimal value.
func (p *Parser) Float(i int) float64 {
x, err := strconv.ParseFloat(string(p.getField(i)), 64)
p.setError(err)
return x
}
// String parses and returns field i as a string value.
func (p *Parser) String(i int) string {
return string(p.getField(i))
}
// Strings parses and returns field i as a space-separated list of strings.
func (p *Parser) Strings(i int) []string {
ss := strings.Split(string(p.getField(i)), " ")
for i, s := range ss {
ss[i] = strings.TrimSpace(s)
}
return ss
}
// Comment returns the comments for the current line.
func (p *Parser) Comment() string {
return string(p.comment)
}
var errUndefinedEnum = errors.New("ucd: undefined enum value")
// Enum interprets and returns field i as a value that must be one of the values
// in enum.
func (p *Parser) Enum(i int, enum ...string) string {
b := p.getField(i)
for _, s := range enum {
if bstrEq(b, s) {
return s
}
}
p.setError(errUndefinedEnum)
return ""
}
func bstrEq(b []byte, s string) bool {
if len(b) != len(s) {
return false
}
for i, c := range b {
if c != s[i] {
return false
}
}
return true
}

16
vendor/golang.org/x/text/language/Makefile generated vendored Normal file

@@ -0,0 +1,16 @@
# Copyright 2013 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
CLEANFILES+=maketables
maketables: maketables.go
go build $^
tables: maketables
./maketables > tables.go
gofmt -w -s tables.go
# Build (but do not run) maketables during testing,
# just to make sure it still compiles.
testshort: maketables

102
vendor/golang.org/x/text/language/doc.go generated vendored Normal file

@@ -0,0 +1,102 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package language implements BCP 47 language tags and related functionality.
//
// The most important function of package language is to match a list of
// user-preferred languages to a list of supported languages.
// It alleviates the developer of dealing with the complexity of this process
// and provides the user with the best experience
// (see https://blog.golang.org/matchlang).
//
//
// Matching preferred against supported languages
//
// A Matcher for an application that supports English, Australian English,
// Danish, and standard Mandarin can be created as follows:
//
// var matcher = language.NewMatcher([]language.Tag{
// language.English, // The first language is used as fallback.
// language.MustParse("en-AU"),
// language.Danish,
// language.Chinese,
// })
//
// This list of supported languages is typically implied by the languages for
// which there exist translations of the user interface.
//
// User-preferred languages usually come as a comma-separated list of BCP 47
// language tags.
// MatchStrings finds the best matches for such strings:
//
// func handler(w http.ResponseWriter, r *http.Request) {
// lang, _ := r.Cookie("lang")
// accept := r.Header.Get("Accept-Language")
// tag, _ := language.MatchStrings(matcher, lang.String(), accept)
//
// // tag should now be used for the initialization of any
// // locale-specific service.
// }
//
// The Matcher's Match method can be used to match Tags directly.
//
// Matchers are aware of the intricacies of equivalence between languages, such
// as deprecated subtags, legacy tags, macro languages, mutual
// intelligibility between scripts and languages, and transparently passing
// BCP 47 user configuration.
// For instance, it will know that a reader of Danish can read Norwegian Bokmål
// and will know that Cantonese ("yue") is a good match for "zh-HK".
//
//
// Using match results
//
// To guarantee a consistent experience to the user, it is important to
// use the same language tag for the selection of any locale-specific services.
// For example, it is utterly confusing to substitute spelled-out numbers
// or dates in one language in text of another language.
// More subtly confusing is using the wrong sorting order or casing
// algorithm for a certain language.
//
// All the packages in x/text that provide locale-specific services
// (e.g. collate, cases) should be initialized with the tag that was
// obtained at the start of an interaction with the user.
//
// Note that the Tag that is returned by Match and MatchStrings may differ from any
// of the supported languages, as it may contain carried over settings from
// the user tags.
// This may be inconvenient when your application has some additional
// locale-specific data for your supported languages.
// Match and MatchStrings both return the index of the matched supported tag
// to simplify associating such data with the matched tag.
//
//
// Canonicalization
//
// If one uses the Matcher to compare languages one does not need to
// worry about canonicalization.
//
// The meaning of a Tag varies per application. The language package
// therefore delays canonicalization and preserves information as much
// as possible. The Matcher, however, will always take into account that
// two different tags may represent the same language.
//
// By default, only legacy and deprecated tags are converted into their
// canonical equivalent. All other information is preserved. This approach makes
// the confidence scores more accurate and allows matchers to distinguish
// between variants that are otherwise lost.
//
// As a consequence, two tags that should be treated as identical according to
// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The
// Matcher handles such distinctions, though, and is aware of the
// equivalence relations. The CanonType type can be used to alter the
// canonicalization form.
//
// References
//
// BCP 47 - Tags for Identifying Languages http://tools.ietf.org/html/bcp47
//
package language // import "golang.org/x/text/language"
// TODO: explanation on how to match languages for your own locale-specific
// service.
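As a sketch of the index-based association described under "Using match results" above: the greetings slice is a hypothetical stand-in for per-language application data, while the language.* calls are the package API documented in this file.
// Sketch (not part of the vendored file): using the matched index to select data.
package main

import (
	"fmt"

	"golang.org/x/text/language"
)

// Hypothetical per-language data, aligned with the supported tags below.
var greetings = []string{"Hello", "G'day", "Hej", "你好"}

var supported = []language.Tag{
	language.English,            // index 0, also the fallback
	language.MustParse("en-AU"), // index 1
	language.Danish,             // index 2
	language.Chinese,            // index 3
}

var matcher = language.NewMatcher(supported)

func main() {
	// The returned index lines up with the supported slice above.
	_, index := language.MatchStrings(matcher, "da, en-GB;q=0.8")
	fmt.Println(greetings[index]) // expected: Hej
}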


@@ -698,8 +698,8 @@ func (b *builder) computeRegionGroups() {
b.groups[group] = index(len(b.groups))
}
}
if len(b.groups) > 32 {
log.Fatalf("only 32 groups supported, found %d", len(b.groups))
if len(b.groups) > 64 {
log.Fatalf("only 64 groups supported, found %d", len(b.groups))
}
b.writeConst("nRegionGroups", len(b.groups))
}
@@ -1417,20 +1417,27 @@ func (b *builder) writeMatchData() {
}
b.writeSlice("regionToGroups", regionToGroups)
// maps language id to in- and out-of-group region.
paradigmLocales := [][3]uint16{}
locales := strings.Split(lm[0].ParadigmLocales[0].Locales, " ")
for i := 0; i < len(locales); i += 2 {
x := [3]uint16{}
for j := 0; j < 2; j++ {
pc := strings.SplitN(locales[i+j], "-", 2)
x[0] = b.langIndex(pc[0])
if len(pc) == 2 {
x[1+j] = uint16(b.region.index(pc[1]))
}
}
paradigmLocales = append(paradigmLocales, x)
}
b.writeSlice("paradigmLocales", paradigmLocales)
b.writeType(mutualIntelligibility{})
b.writeType(scriptIntelligibility{})
b.writeType(regionIntelligibility{})
matchLang := []mutualIntelligibility{{
// TODO: remove once CLDR is fixed.
want: uint16(b.langIndex("sr")),
have: uint16(b.langIndex("hr")),
distance: uint8(5),
}, {
want: uint16(b.langIndex("sr")),
have: uint16(b.langIndex("bs")),
distance: uint8(5),
}}
matchLang := []mutualIntelligibility{}
matchScript := []scriptIntelligibility{}
matchRegion := []regionIntelligibility{}
// Convert the languageMatch entries in lists keyed by desired language.
@@ -1490,8 +1497,14 @@ func (b *builder) writeMatchData() {
if desired == supported && desired == "*_*_*" {
continue
}
if desired != supported { // (Weird but correct.)
log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
if desired != supported {
// This is now supported by CLDR, but only one case, which
// should already be covered by paradigm locales. For instance,
// test case "und, en, en-GU, en-IN, en-GB ; en-ZA ; en-GB" in
// testdata/CLDRLocaleMatcherTest.txt tests this.
if supported != "en_*_GB" {
log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
}
continue
}
ri := regionIntelligibility{
@@ -1559,7 +1572,7 @@ func (b *builder) writeRegionInclusionData() {
}
}
regionContainment := make([]uint32, len(b.groups))
regionContainment := make([]uint64, len(b.groups))
for _, g := range b.groups {
l := containment[g]
@@ -1577,10 +1590,10 @@ func (b *builder) writeRegionInclusionData() {
b.writeSlice("regionContainment", regionContainment)
regionInclusion := make([]uint8, len(b.region.s))
bvs := make(map[uint32]index)
bvs := make(map[uint64]index)
// Make the first bitvector positions correspond with the groups.
for r, i := range b.groups {
bv := uint32(1 << i)
bv := uint64(1 << i)
for _, g := range mm[r] {
bv |= 1 << g
}
@@ -1589,7 +1602,7 @@
}
for r := 1; r < len(b.region.s); r++ {
if _, ok := b.groups[r]; !ok {
bv := uint32(0)
bv := uint64(0)
for _, g := range mm[r] {
bv |= 1 << g
}
@@ -1604,9 +1617,9 @@
}
}
b.writeSlice("regionInclusion", regionInclusion)
regionInclusionBits := make([]uint32, len(bvs))
regionInclusionBits := make([]uint64, len(bvs))
for k, v := range bvs {
regionInclusionBits[v] = uint32(k)
regionInclusionBits[v] = uint64(k)
}
// Add bit vectors for increasingly large distances until a fixed point is reached.
regionInclusionNext := []uint8{}


@@ -49,7 +49,7 @@ func main() {
defer func() {
buf := &bytes.Buffer{}
if _, err = w.WriteGo(buf, "language"); err != nil {
if _, err = w.WriteGo(buf, "language", ""); err != nil {
log.Fatalf("Error formatting file index.go: %v", err)
}

File diff suppressed because it is too large.


@@ -2,105 +2,10 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run maketables.go gen_common.go -output tables.go
//go:generate go run gen.go gen_common.go -output tables.go
//go:generate go run gen_index.go
// Package language implements BCP 47 language tags and related functionality.
//
// The Tag type, which is used to represent languages, is agnostic to the
// meaning of its subtags. Tags are not fully canonicalized to preserve
// information that may be valuable in certain contexts. As a consequence, two
// different tags may represent identical languages.
//
// Initializing language- or locale-specific components usually consists of
// two steps. The first step is to select a display language based on the
// preferred languages of the user and the languages supported by an application.
// The second step is to create the language-specific services based on
// this selection. Each is discussed in more details below.
//
// Matching preferred against supported languages
//
// An application may support various languages. This list is typically limited
// by the languages for which there exist translations of the user interface.
// Similarly, a user may provide a list of preferred languages which is limited
// by the languages understood by this user.
// An application should use a Matcher to find the best supported language based
// on the user's preferred list.
// Matchers are aware of the intricacies of equivalence between languages.
// The default Matcher implementation takes into account things such as
// deprecated subtags, legacy tags, and mutual intelligibility between scripts
// and languages.
//
// A Matcher for English, Australian English, Danish, and standard Mandarin can
// be defined as follows:
//
// var matcher = language.NewMatcher([]language.Tag{
// language.English, // The first language is used as fallback.
// language.MustParse("en-AU"),
// language.Danish,
// language.Chinese,
// })
//
// The following code selects the best match for someone speaking Spanish and
// Norwegian:
//
// preferred := []language.Tag{ language.Spanish, language.Norwegian }
// tag, _, _ := matcher.Match(preferred...)
//
// In this case, the best match is Danish, as Danish is a sufficiently close
// match to Norwegian that there is no need to fall back to the default.
// See ParseAcceptLanguage on how to handle the Accept-Language HTTP header.
//
// Selecting language-specific services
//
// One should always use the Tag returned by the Matcher to create an instance
// of any of the language-specific services provided by the text repository.
// This prevents the mixing of languages, such as having a different language for
// messages and display names, as well as improper casing or sorting order for
// the selected language.
// Using the returned Tag also allows user-defined settings, such as collation
// order or numbering system to be transparently passed as options.
//
// If you have language-specific data in your application, however, it will in
// most cases suffice to use the index returned by the matcher to identify
// the user language.
// The following loop provides an alternative in case this is not sufficient:
//
// supported := map[language.Tag]data{
// language.English: enData,
// language.MustParse("en-AU"): enAUData,
// language.Danish: daData,
// language.Chinese: zhData,
// }
// tag, _, _ := matcher.Match(preferred...)
// for ; tag != language.Und; tag = tag.Parent() {
// if v, ok := supported[tag]; ok {
// return v
// }
// }
// return enData // should not reach here
//
// Repeatedly taking the Parent of the tag returned by Match will eventually
// match one of the tags used to initialize the Matcher.
//
// Canonicalization
//
// By default, only legacy and deprecated tags are converted into their
// canonical equivalent. All other information is preserved. This approach makes
// the confidence scores more accurate and allows matchers to distinguish
// between variants that are otherwise lost.
//
// As a consequence, two tags that should be treated as identical according to
// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The
// Matchers will handle such distinctions, though, and are aware of the
// equivalence relations. The CanonType type can be used to alter the
// canonicalization form.
//
// References
//
// BCP 47 - Tags for Identifying Languages
// http://tools.ietf.org/html/bcp47
package language // import "golang.org/x/text/language"
package language
// TODO: Remove above NOTE after:
// - verifying that tables are dropped correctly (most notably matcher tables).
@@ -129,8 +34,15 @@ const (
// specific language or locale. All language tag values are guaranteed to be
// well-formed.
type Tag struct {
lang langID
region regionID
lang langID
region regionID
// TODO: we will soon run out of positions for script. Idea: instead of
// storing lang, region, and script codes, store only the compact index and
// have a lookup table from this code to its expansion. This greatly speeds up
// table lookups and common variant cases.
// This will also immediately free up 3 extra bytes. Also, the pVariant
// field can now be moved to the lookup table, as the compact index uniquely
// determines the offset of a possible variant.
script scriptID
pVariant byte // offset in str, includes preceding '-'
pExt uint16 // offset of first extension, includes preceding '-'
@@ -387,6 +299,26 @@ func (t Tag) String() string {
return string(buf[:t.genCoreBytes(buf[:])])
}
// MarshalText implements encoding.TextMarshaler.
func (t Tag) MarshalText() (text []byte, err error) {
if t.str != "" {
text = append(text, t.str...)
} else if t.script == 0 && t.region == 0 {
text = append(text, t.lang.String()...)
} else {
buf := [maxCoreSize]byte{}
text = buf[:t.genCoreBytes(buf[:])]
}
return text, nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (t *Tag) UnmarshalText(text []byte) error {
tag, err := Raw.Parse(string(text))
*t = tag
return err
}
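These methods let a Tag round-trip through text-based encoders such as encoding/json, which fall back to TextMarshaler/TextUnmarshaler; a small sketch, with expected output noted in comments:
// Sketch (not part of the vendored file): JSON round-trip via TextMarshaler.
package main

import (
	"encoding/json"
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	b, _ := json.Marshal(language.MustParse("en-AU"))
	fmt.Println(string(b)) // expected: "en-AU"

	var t language.Tag
	_ = json.Unmarshal(b, &t)
	fmt.Println(t) // expected: en-AU
}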
// Base returns the base language of the language tag. If the base language is
// unspecified, an attempt will be made to infer it from the context.
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.


@@ -6,6 +6,39 @@ package language
import "errors"
// A MatchOption configures a Matcher.
type MatchOption func(*matcher)
// PreferSameScript will, in the absence of a match, result in the first
// preferred tag with the same script as a supported tag to match this supported
// tag. The default is currently true, but this may change in the future.
func PreferSameScript(preferSame bool) MatchOption {
return func(m *matcher) { m.preferSameScript = preferSame }
}
// TODO(v1.0.0): consider making Matcher a concrete type, instead of interface.
// There doesn't seem to be too much need for multiple types.
// Making it a concrete type allows MatchStrings to be a method, which will
// improve its discoverability.
// MatchStrings parses and matches the given strings until one of them matches
// the language in the Matcher. A string may be an Accept-Language header as
// handled by ParseAcceptLanguage. The default language is returned if no
// other language matched.
func MatchStrings(m Matcher, lang ...string) (tag Tag, index int) {
for _, accept := range lang {
desired, _, err := ParseAcceptLanguage(accept)
if err != nil {
continue
}
if tag, index, conf := m.Match(desired...); conf != No {
return tag, index
}
}
tag, index, _ = m.Match()
return
}
// Matcher is the interface that wraps the Match method.
//
// Match returns the best match for any of the given tags, along with
@@ -36,25 +69,45 @@ func Comprehends(speaker, alternative Tag) Confidence {
// matched tag in t, but is augmented with the Unicode extension ('u') of the
// corresponding preferred tag. This allows user locale options to be passed
// transparently.
func NewMatcher(t []Tag) Matcher {
return newMatcher(t)
func NewMatcher(t []Tag, options ...MatchOption) Matcher {
return newMatcher(t, options)
}
func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) {
match, w, c := m.getBest(want...)
if match == nil {
t = m.default_.tag
} else {
if match != nil {
t, index = match.tag, match.index
} else {
// TODO: this should be an option
t = m.default_.tag
if m.preferSameScript {
outer:
for _, w := range want {
script, _ := w.Script()
if script.scriptID == 0 {
// Don't do anything if there is no script, such as with
// private subtags.
continue
}
for i, h := range m.supported {
if script.scriptID == h.maxScript {
t, index = h.tag, i
break outer
}
}
}
}
// TODO: select first language tag based on script.
}
if w.region != 0 && t.region != 0 && t.region.contains(w.region) {
t, _ = Raw.Compose(t, Region{w.region})
}
// Copy options from the user-provided tag into the result tag. This is hard
// to do after the fact, so we do it here.
// TODO: consider also adding in variants that are compatible with the
// matched language.
// TODO: Add back region if it is non-ambiguous? Or create another tag to
// preserve the region?
if u, ok := w.Extension('u'); ok {
t, _ = Raw.Compose(t, u)
// TODO: add in alternative variants to -u-va-.
// TODO: add preferred region to -u-rg-.
if e := w.Extensions(); len(e) > 0 {
t, _ = Raw.Compose(t, e)
}
return t, index, c
}
@@ -91,7 +144,7 @@ var ErrMissingLikelyTagsData = errors.New("missing likely tags data")
// addLikelySubtags sets subtags to their most likely value, given the locale.
// In most cases this means setting fields for unknown values, but in some
// cases it may alter a value. It returns a ErrMissingLikelyTagsData error
// cases it may alter a value. It returns an ErrMissingLikelyTagsData error
// if the given locale cannot be expanded.
func (t Tag) addLikelySubtags() (Tag, error) {
id, err := addTags(t)
@ -300,8 +353,9 @@ func minimizeTags(t Tag) (Tag, error) {
// 1) compute the match between the two tags.
// 2) if the match is better than the previous best match, replace it
// with the new match. (see next section)
// b) if the current best match is above a certain threshold, return this
// match without proceeding to the next tag in "desired". [See Note 1]
// b) if the current best match is Exact and pin is true, the result will be
// frozen to the language found thus far, although better matches may
// still be found for the same language.
// 3) If the best match so far is below a certain threshold, return "default".
//
// Ranking:
@ -350,9 +404,6 @@ func minimizeTags(t Tag) (Tag, error) {
// found wins.
//
// Notes:
// [1] Note that even if we may not have a perfect match, if a match is above a
// certain threshold, it is considered a better match than any other match
// to a tag later in the list of preferred language tags.
// [2] In practice, as matching of Exact is done in a separate phase from
// matching the other levels, we reuse the Exact level to mean MaxExact in
// the second phase. As a consequence, we only need the levels defined by
@ -388,16 +439,18 @@ func minimizeTags(t Tag) (Tag, error) {
// matcher keeps a set of supported language tags, indexed by language.
type matcher struct {
default_ *haveTag
index map[langID]*matchHeader
passSettings bool
default_ *haveTag
supported []*haveTag
index map[langID]*matchHeader
passSettings bool
preferSameScript bool
}
// matchHeader has the lists of tags for exact matches and matches based on
// maximized and canonicalized tags for a given language.
type matchHeader struct {
exact []*haveTag
max []*haveTag
haveTags []*haveTag
original bool
}
// haveTag holds a supported Tag and its maximized script and region. The maximized
@ -427,7 +480,7 @@ type haveTag struct {
func makeHaveTag(tag Tag, index int) (haveTag, langID) {
max := tag
if tag.lang != 0 {
if tag.lang != 0 || tag.region != 0 || tag.script != 0 {
max, _ = max.canonicalize(All)
max, _ = addTags(max)
max.remakeString()
@ -452,29 +505,27 @@ func altScript(l langID, s scriptID) scriptID {
// addIfNew adds a haveTag to the list of tags only if it is a unique tag.
// Tags that have the same maximized values are linked by index.
func (h *matchHeader) addIfNew(n haveTag, exact bool) {
h.original = h.original || exact
// Don't add new exact matches.
for _, v := range h.exact {
for _, v := range h.haveTags {
if v.tag.equalsRest(n.tag) {
return
}
}
if exact {
h.exact = append(h.exact, &n)
}
// Allow duplicate maximized tags, but create a linked list to allow quickly
// comparing the equivalents and bail out.
for i, v := range h.max {
for i, v := range h.haveTags {
if v.maxScript == n.maxScript &&
v.maxRegion == n.maxRegion &&
v.tag.variantOrPrivateTagStr() == n.tag.variantOrPrivateTagStr() {
for h.max[i].nextMax != 0 {
i = int(h.max[i].nextMax)
for h.haveTags[i].nextMax != 0 {
i = int(h.haveTags[i].nextMax)
}
h.max[i].nextMax = uint16(len(h.max))
h.haveTags[i].nextMax = uint16(len(h.haveTags))
break
}
}
h.max = append(h.max, &n)
h.haveTags = append(h.haveTags, &n)
}
// header returns the matchHeader for the given language. It creates one if
@ -501,9 +552,13 @@ func toConf(d uint8) Confidence {
// newMatcher builds an index for the given supported tags and returns it as
// a matcher. It also expands the index by considering various equivalence classes
// for a given tag.
func newMatcher(supported []Tag) *matcher {
func newMatcher(supported []Tag, options []MatchOption) *matcher {
m := &matcher{
index: make(map[langID]*matchHeader),
index: make(map[langID]*matchHeader),
preferSameScript: true,
}
for _, o := range options {
o(m)
}
if len(supported) == 0 {
m.default_ = &haveTag{}
@ -514,25 +569,28 @@ func newMatcher(supported []Tag) *matcher {
for i, tag := range supported {
pair, _ := makeHaveTag(tag, i)
m.header(tag.lang).addIfNew(pair, true)
m.supported = append(m.supported, &pair)
}
m.default_ = m.header(supported[0].lang).exact[0]
m.default_ = m.header(supported[0].lang).haveTags[0]
// Keep these in two different loops to support the case that two equivalent
// languages are distinguished, such as iw and he.
for i, tag := range supported {
pair, max := makeHaveTag(tag, i)
if max != tag.lang {
m.header(max).addIfNew(pair, false)
m.header(max).addIfNew(pair, true)
}
}
// update is used to add indexes in the map for equivalent languages.
// If force is true, the update will also apply to derived entries. To
// avoid applying a "transitive closure", use false.
update := func(want, have uint16, conf Confidence, force bool) {
// update will only add entries to original indexes, thus not computing any
// transitive relations.
update := func(want, have uint16, conf Confidence) {
if hh := m.index[langID(have)]; hh != nil {
if !force && len(hh.exact) == 0 {
if !hh.original {
return
}
hw := m.header(langID(want))
for _, ht := range hh.max {
for _, ht := range hh.haveTags {
v := *ht
if conf < v.conf {
v.conf = conf
@ -541,7 +599,7 @@ func newMatcher(supported []Tag) *matcher {
if v.altScript != 0 {
v.altScript = altScript(langID(want), v.maxScript)
}
hw.addIfNew(v, conf == Exact && len(hh.exact) > 0)
hw.addIfNew(v, conf == Exact && hh.original)
}
}
}
@ -549,9 +607,9 @@ func newMatcher(supported []Tag) *matcher {
// Add entries for languages with mutual intelligibility as defined by CLDR's
// languageMatch data.
for _, ml := range matchLang {
update(ml.want, ml.have, toConf(ml.distance), false)
update(ml.want, ml.have, toConf(ml.distance))
if !ml.oneway {
update(ml.have, ml.want, toConf(ml.distance), false)
update(ml.have, ml.want, toConf(ml.distance))
}
}
@ -561,10 +619,6 @@ func newMatcher(supported []Tag) *matcher {
// (their canonicalization simply substitutes a different language code, but
// nothing else), the match confidence is Exact, otherwise it is High.
for i, lm := range langAliasMap {
if lm.from == _sh {
continue
}
// If deprecated codes match and there is no fiddling with the script
// or region, we consider it an exact match.
conf := Exact
@ -572,9 +626,9 @@ func newMatcher(supported []Tag) *matcher {
if !isExactEquivalent(langID(lm.from)) {
conf = High
}
update(lm.to, lm.from, conf, true)
update(lm.to, lm.from, conf)
}
update(lm.from, lm.to, conf, true)
update(lm.from, lm.to, conf)
}
return m
}
@ -583,28 +637,29 @@ func newMatcher(supported []Tag) *matcher {
// account the order of preference of the given tags.
func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
best := bestMatch{}
for _, w := range want {
for i, w := range want {
var max Tag
// Check for exact match first.
h := m.index[w.lang]
if w.lang != 0 {
// Base language is defined.
if h == nil {
continue
}
for i := range h.exact {
have := h.exact[i]
if have.tag.equalsRest(w) {
return have, w, Exact
}
// Base language is defined.
max, _ = w.canonicalize(Legacy | Deprecated | Macro)
// A region that is added through canonicalization is stronger than
// a maximized region: set it in the original (e.g. mo -> ro-MD).
if w.region != max.region {
w.region = max.region
}
max, _ = w.canonicalize(Legacy | Deprecated)
// TODO: should we do the same for scripts?
// See test case: en, sr, nl ; sh ; sr
max, _ = addTags(max)
} else {
// Base language is not defined.
if h != nil {
for i := range h.exact {
have := h.exact[i]
for i := range h.haveTags {
have := h.haveTags[i]
if have.tag.equalsRest(w) {
return have, w, Exact
}
@ -620,16 +675,23 @@ func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
continue
}
}
pin := true
for _, t := range want[i+1:] {
if w.lang == t.lang {
pin = false
break
}
}
// Check for match based on maximized tag.
for i := range h.max {
have := h.max[i]
best.update(have, w, max.script, max.region)
for i := range h.haveTags {
have := h.haveTags[i]
best.update(have, w, max.script, max.region, pin)
if best.conf == Exact {
for have.nextMax != 0 {
have = h.max[have.nextMax]
best.update(have, w, max.script, max.region)
have = h.haveTags[have.nextMax]
best.update(have, w, max.script, max.region, pin)
}
return best.have, best.want, High
return best.have, best.want, best.conf
}
}
}
@ -644,42 +706,68 @@ func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
// bestMatch accumulates the best match so far.
type bestMatch struct {
have *haveTag
want Tag
conf Confidence
have *haveTag
want Tag
conf Confidence
pinnedRegion regionID
pinLanguage bool
sameRegionGroup bool
// Cached results from applying tie-breaking rules.
origLang bool
origReg bool
regDist uint8
origScript bool
parentDist uint8 // 255 if have is not an ancestor of want tag.
origLang bool
origReg bool
paradigmReg bool
regGroupDist uint8
origScript bool
}
// update updates the existing best match if the new pair is considered to be a
// better match.
// To determine if the given pair is a better match, it first computes the rough
// confidence level. If this surpasses the current match, it will replace it and
// update the tie-breaker rule cache. If there is a tie, it proceeds with applying
// a series of tie-breaker rules. If there is no conclusive winner after applying
// the tie-breaker rules, it leaves the current match as the preferred match.
func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion regionID) {
// better match. To determine if the given pair is a better match, it first
// computes the rough confidence level. If this surpasses the current match, it
// will replace it and update the tie-breaker rule cache. If there is a tie, it
// proceeds with applying a series of tie-breaker rules. If there is no
// conclusive winner after applying the tie-breaker rules, it leaves the current
// match as the preferred match.
//
// If pin is true and have and tag are a strong match, it will henceforth only
// consider matches for this language. This corresponds to the notion that most
// users have a strong preference for the first defined language. A user can
// still prefer a second language over a dialect of the preferred language by
// explicitly specifying dialects, e.g. "en, nl, en-GB". In this case pin should
// be false.
func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion regionID, pin bool) {
// Bail if the maximum attainable confidence is below that of the current best match.
c := have.conf
if c < m.conf {
return
}
if have.maxScript != maxScript {
// Don't change the language once we already have found an exact match.
if m.pinLanguage && tag.lang != m.want.lang {
return
}
// Pin the region group if we are comparing tags for the same language.
if tag.lang == m.want.lang && m.sameRegionGroup {
_, sameGroup := regionGroupDist(m.pinnedRegion, have.maxRegion, have.maxScript, m.want.lang)
if !sameGroup {
return
}
}
if c == Exact && have.maxScript == maxScript {
// If there is another language and then another entry of this language,
// don't pin anything, otherwise pin the language.
m.pinLanguage = pin
}
if have.tag.equalsRest(tag) {
} else if have.maxScript != maxScript {
// There is usually very little comprehension between different scripts.
// In a few cases there may still be Low comprehension. This possibility is
// pre-computed and stored in have.altScript.
// In a few cases there may still be Low comprehension. This possibility
// is pre-computed and stored in have.altScript.
if Low < m.conf || have.altScript != maxScript {
return
}
c = Low
} else if have.maxRegion != maxRegion {
// There is usually a small difference between languages across regions.
// We use the region distance (below) to disambiguate between equal matches.
if High < c {
// There is usually a small difference between languages across regions.
c = High
}
}
@ -715,10 +803,17 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion
beaten = true
}
// Next we prefer smaller distances between regions, as defined by regionDist.
regDist := regionDist(have.maxRegion, maxRegion, tag.lang)
if !beaten && m.regDist != regDist {
if regDist > m.regDist {
regGroupDist, sameGroup := regionGroupDist(have.maxRegion, maxRegion, maxScript, tag.lang)
if !beaten && m.regGroupDist != regGroupDist {
if regGroupDist > m.regGroupDist {
return
}
beaten = true
}
paradigmReg := isParadigmLocale(tag.lang, have.maxRegion)
if !beaten && m.paradigmReg != paradigmReg {
if !paradigmReg {
return
}
beaten = true
@ -733,77 +828,52 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion
beaten = true
}
// Finally we prefer tags which have a closer parent relationship.
parentDist := parentDistance(have.tag.region, tag)
if !beaten && m.parentDist != parentDist {
if parentDist > m.parentDist {
return
}
beaten = true
}
// Update m to the newly found best match.
if beaten {
m.have = have
m.want = tag
m.conf = c
m.pinnedRegion = maxRegion
m.sameRegionGroup = sameGroup
m.origLang = origLang
m.origReg = origReg
m.paradigmReg = paradigmReg
m.origScript = origScript
m.regDist = regDist
m.parentDist = parentDist
m.regGroupDist = regGroupDist
}
}
// parentDistance returns the number of times Parent must be called before the
// regions match. It is assumed that it has already been checked that lang and
// script are identical. If haveRegion does not occur in the ancestor chain of
// tag, it returns 255.
func parentDistance(haveRegion regionID, tag Tag) uint8 {
p := tag.Parent()
d := uint8(1)
for haveRegion != p.region {
if p.region == 0 {
return 255
}
p = p.Parent()
d++
}
return d
}
// regionDist wraps regionDistance with some exceptions to the algorithmic distance.
func regionDist(a, b regionID, lang langID) uint8 {
if lang == _en {
// Two variants of non-US English are close to each other, regardless of distance.
if a != _US && b != _US {
return 2
func isParadigmLocale(lang langID, r regionID) bool {
for _, e := range paradigmLocales {
if langID(e[0]) == lang && (r == regionID(e[1]) || r == regionID(e[2])) {
return true
}
}
return uint8(regionDistance(a, b))
return false
}
// regionDistance computes the distance between two regions based on the
// distance in the graph of region containments as defined in CLDR. It iterates
// over increasingly inclusive sets of groups, represented as bit vectors, until
// the source bit vector has bits in common with the destination vector.
func regionDistance(a, b regionID) int {
if a == b {
return 0
// regionGroupDist computes the distance between two regions based on their
// CLDR grouping.
func regionGroupDist(a, b regionID, script scriptID, lang langID) (dist uint8, same bool) {
const defaultDistance = 4
aGroup := uint(regionToGroups[a]) << 1
bGroup := uint(regionToGroups[b]) << 1
for _, ri := range matchRegion {
if langID(ri.lang) == lang && (ri.script == 0 || scriptID(ri.script) == script) {
group := uint(1 << (ri.group &^ 0x80))
if 0x80&ri.group == 0 {
if aGroup&bGroup&group != 0 { // Both regions are in the group.
return ri.distance, ri.distance == defaultDistance
}
} else {
if (aGroup|bGroup)&group == 0 { // Both regions are not in the group.
return ri.distance, ri.distance == defaultDistance
}
}
}
}
p, q := regionInclusion[a], regionInclusion[b]
if p < nRegionGroups {
p, q = q, p
}
set := regionInclusionBits
if q < nRegionGroups && set[p]&(1<<q) != 0 {
return 1
}
d := 2
for goal := set[q]; set[p]&goal == 0; p = regionInclusionNext[p] {
d++
}
return d
return defaultDistance, true
}
func (t Tag) variants() string {
@ -850,4 +920,14 @@ func init() {
notEquivalent = append(notEquivalent, langID(lm.from))
}
}
// Maximize undefined regions of paradigm locales.
for i, v := range paradigmLocales {
max, _ := addTags(Tag{lang: langID(v[0])})
if v[1] == 0 {
paradigmLocales[i][1] = uint16(max.region)
}
if v[2] == 0 {
paradigmLocales[i][2] = uint16(max.region)
}
}
}

View file

@ -765,7 +765,7 @@ func nextExtension(s string, p int) int {
var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight")
// ParseAcceptLanguage parses the contents of a Accept-Language header as
// ParseAcceptLanguage parses the contents of an Accept-Language header as
// defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and
// a list of corresponding quality weights. It is more permissive than RFC 2616
// and may return non-nil slices even if the input is not valid.
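
A brief sketch of the parse step on its own (header value chosen arbitrarily; import path assumed to be golang.org/x/text/language):

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	tags, weights, err := language.ParseAcceptLanguage("da, en-GB;q=0.8, en;q=0.7")
	if err != nil {
		fmt.Println("bad header:", err)
		return
	}
	fmt.Println(tags)    // likely [da en-GB en]
	fmt.Println(weights) // likely [1 0.8 0.7]
}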

File diff suppressed because it is too large

View file

@ -1,100 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"encoding/xml"
"regexp"
"strconv"
)
// Elem is implemented by every XML element.
type Elem interface {
setEnclosing(Elem)
setName(string)
enclosing() Elem
GetCommon() *Common
}
type hidden struct {
CharData string `xml:",chardata"`
Alias *struct {
Common
Source string `xml:"source,attr"`
Path string `xml:"path,attr"`
} `xml:"alias"`
Def *struct {
Common
Choice string `xml:"choice,attr,omitempty"`
Type string `xml:"type,attr,omitempty"`
} `xml:"default"`
}
// Common holds several of the most common attributes and sub elements
// of an XML element.
type Common struct {
XMLName xml.Name
name string
enclElem Elem
Type string `xml:"type,attr,omitempty"`
Reference string `xml:"reference,attr,omitempty"`
Alt string `xml:"alt,attr,omitempty"`
ValidSubLocales string `xml:"validSubLocales,attr,omitempty"`
Draft string `xml:"draft,attr,omitempty"`
hidden
}
// Default returns the default type to select from the enclosed list
// or "" if no default value is specified.
func (e *Common) Default() string {
if e.Def == nil {
return ""
}
if e.Def.Choice != "" {
return e.Def.Choice
} else if e.Def.Type != "" {
// Type is still used by the default element in collation.
return e.Def.Type
}
return ""
}
// GetCommon returns e. It is provided such that Common implements Elem.
func (e *Common) GetCommon() *Common {
return e
}
// Data returns the character data accumulated for this element.
func (e *Common) Data() string {
e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode)
return e.CharData
}
func (e *Common) setName(s string) {
e.name = s
}
func (e *Common) enclosing() Elem {
return e.enclElem
}
func (e *Common) setEnclosing(en Elem) {
e.enclElem = en
}
// Escape characters that can be escaped without further escaping the string.
var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`)
// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string.
// It assumes the input string is correctly formatted.
func replaceUnicode(s string) string {
if s[1] == '#' {
r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32)
return string(r)
}
r, _, _, _ := strconv.UnquoteChar(s, 0)
return string(r)
}

View file

@ -1,130 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run makexml.go -output xml.go
// Package cldr provides a parser for LDML and related XML formats.
// This package is intended to be used by the table generation tools
// for the various internationalization-related packages.
// As the XML types are generated from the CLDR DTD, and as the CLDR standard
// is periodically amended, this package may change considerably over time.
// This mostly means that data may appear and disappear between versions.
// That is, old code should keep compiling for newer versions, but data
// may have moved or changed.
// CLDR version 22 is the first version supported by this package.
// Older versions may not work.
package cldr // import "golang.org/x/text/unicode/cldr"
import (
"fmt"
"sort"
)
// CLDR provides access to parsed data of the Unicode Common Locale Data Repository.
type CLDR struct {
parent map[string][]string
locale map[string]*LDML
resolved map[string]*LDML
bcp47 *LDMLBCP47
supp *SupplementalData
}
func makeCLDR() *CLDR {
return &CLDR{
parent: make(map[string][]string),
locale: make(map[string]*LDML),
resolved: make(map[string]*LDML),
bcp47: &LDMLBCP47{},
supp: &SupplementalData{},
}
}
// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned.
func (cldr *CLDR) BCP47() *LDMLBCP47 {
return nil
}
// Draft indicates the draft level of an element.
type Draft int
const (
Approved Draft = iota
Contributed
Provisional
Unconfirmed
)
var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""}
// ParseDraft returns the Draft value corresponding to the given string. The
// empty string corresponds to Approved.
func ParseDraft(level string) (Draft, error) {
if level == "" {
return Approved, nil
}
for i, s := range drafts {
if level == s {
return Unconfirmed - Draft(i), nil
}
}
return Approved, fmt.Errorf("cldr: unknown draft level %q", level)
}
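
A sketch of how the ordering of Draft levels is typically used; the helper name below is hypothetical, and the import path golang.org/x/text/unicode/cldr is assumed from the package name:

package example

import "golang.org/x/text/unicode/cldr"

// isAtLeastContributed reports whether a draft attribute value denotes
// "contributed" quality or better (i.e. Approved or Contributed).
func isAtLeastContributed(level string) bool {
	d, err := cldr.ParseDraft(level)
	return err == nil && d <= cldr.Contributed
}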
func (d Draft) String() string {
return drafts[len(drafts)-1-int(d)]
}
// SetDraftLevel sets which draft levels to include in the evaluated LDML.
// Any draft element for which the draft level is higher than lev will be excluded.
// If multiple draft levels are available for a single element, the one with the
// lowest draft level will be selected, unless preferDraft is true, in which case
// the highest draft will be chosen.
// It is assumed that the underlying LDML is canonicalized.
func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) {
// TODO: implement
cldr.resolved = make(map[string]*LDML)
}
// RawLDML returns the LDML XML for id in unresolved form.
// id must be one of the strings returned by Locales.
func (cldr *CLDR) RawLDML(loc string) *LDML {
return cldr.locale[loc]
}
// LDML returns the fully resolved LDML XML for loc, which must be one of
// the strings returned by Locales.
func (cldr *CLDR) LDML(loc string) (*LDML, error) {
return cldr.resolve(loc)
}
// Supplemental returns the parsed supplemental data. If no such data was parsed,
// nil is returned.
func (cldr *CLDR) Supplemental() *SupplementalData {
return cldr.supp
}
// Locales returns the locales for which there exist files.
// Valid sublocales for which there is no file are not included.
// The root locale is always sorted first.
func (cldr *CLDR) Locales() []string {
loc := []string{"root"}
hasRoot := false
for l, _ := range cldr.locale {
if l == "root" {
hasRoot = true
continue
}
loc = append(loc, l)
}
sort.Strings(loc[1:])
if !hasRoot {
return loc[1:]
}
return loc
}
// Get fills in the fields of x based on the XPath path.
func Get(e Elem, path string) (res Elem, err error) {
return walkXPath(e, path)
}

View file

@ -1,359 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"bufio"
"encoding/xml"
"errors"
"fmt"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
// RuleProcessor can be passed to Collator's Process method, which
// parses the rules and calls the respective method for each rule found.
type RuleProcessor interface {
Reset(anchor string, before int) error
Insert(level int, str, context, extend string) error
Index(id string)
}
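
A minimal sketch of a RuleProcessor implementation (the type name and output format are made up); an instance like this could be passed to Collation's Process method to dump the parsed tailoring rules:

package example

import (
	"fmt"

	"golang.org/x/text/unicode/cldr"
)

// printProcessor logs every reset, insertion, and index rule it receives.
type printProcessor struct{}

func (printProcessor) Reset(anchor string, before int) error {
	fmt.Printf("reset at %q (before level %d)\n", anchor, before)
	return nil
}

func (printProcessor) Insert(level int, str, context, extend string) error {
	fmt.Printf("insert %q at level %d (context %q, extend %q)\n", str, level, context, extend)
	return nil
}

func (printProcessor) Index(id string) {
	fmt.Printf("index section %q\n", id)
}

// Compile-time check that printProcessor satisfies cldr.RuleProcessor.
var _ cldr.RuleProcessor = printProcessor{}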
const (
// cldrIndex is a Unicode-reserved sentinel value used to mark the start
// of a grouping within an index.
// We ignore any rule that starts with this rune.
// See http://unicode.org/reports/tr35/#Collation_Elements for details.
cldrIndex = "\uFDD0"
// specialAnchor is the format in which to represent logical reset positions,
// such as "first tertiary ignorable".
specialAnchor = "<%s/>"
)
// Process parses the rules for the tailorings of this collation
// and calls the respective methods of p for each rule found.
func (c Collation) Process(p RuleProcessor) (err error) {
if len(c.Cr) > 0 {
if len(c.Cr) > 1 {
return fmt.Errorf("multiple cr elements, want 0 or 1")
}
return processRules(p, c.Cr[0].Data())
}
if c.Rules.Any != nil {
return c.processXML(p)
}
return errors.New("no tailoring data")
}
// processRules parses rules in the Collation Rule Syntax defined in
// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings.
func processRules(p RuleProcessor, s string) (err error) {
chk := func(s string, e error) string {
if err == nil {
err = e
}
return s
}
i := 0 // Save the line number for use after the loop.
scanner := bufio.NewScanner(strings.NewReader(s))
for ; scanner.Scan() && err == nil; i++ {
for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) {
level := 5
var ch byte
switch ch, s = s[0], s[1:]; ch {
case '&': // followed by <anchor> or '[' <key> ']'
if s = skipSpace(s); consume(&s, '[') {
s = chk(parseSpecialAnchor(p, s))
} else {
s = chk(parseAnchor(p, 0, s))
}
case '<': // sort relation '<'{1,4}, optionally followed by '*'.
for level = 1; consume(&s, '<'); level++ {
}
if level > 4 {
err = fmt.Errorf("level %d > 4", level)
}
fallthrough
case '=': // identity relation, optionally followed by *.
if consume(&s, '*') {
s = chk(parseSequence(p, level, s))
} else {
s = chk(parseOrder(p, level, s))
}
default:
chk("", fmt.Errorf("illegal operator %q", ch))
break
}
}
}
if chk("", scanner.Err()); err != nil {
return fmt.Errorf("%d: %v", i, err)
}
return nil
}
// parseSpecialAnchor parses the anchor syntax which is either of the form
// ['before' <level>] <anchor>
// or
// [<label>]
// The starting '[' should already be consumed.
func parseSpecialAnchor(p RuleProcessor, s string) (tail string, err error) {
i := strings.IndexByte(s, ']')
if i == -1 {
return "", errors.New("unmatched bracket")
}
a := strings.TrimSpace(s[:i])
s = s[i+1:]
if strings.HasPrefix(a, "before ") {
l, err := strconv.ParseUint(skipSpace(a[len("before "):]), 10, 3)
if err != nil {
return s, err
}
return parseAnchor(p, int(l), s)
}
return s, p.Reset(fmt.Sprintf(specialAnchor, a), 0)
}
func parseAnchor(p RuleProcessor, level int, s string) (tail string, err error) {
anchor, s, err := scanString(s)
if err != nil {
return s, err
}
return s, p.Reset(anchor, level)
}
func parseOrder(p RuleProcessor, level int, s string) (tail string, err error) {
var value, context, extend string
if value, s, err = scanString(s); err != nil {
return s, err
}
if strings.HasPrefix(value, cldrIndex) {
p.Index(value[len(cldrIndex):])
return
}
if consume(&s, '|') {
if context, s, err = scanString(s); err != nil {
return s, errors.New("missing string after context")
}
}
if consume(&s, '/') {
if extend, s, err = scanString(s); err != nil {
return s, errors.New("missing string after extension")
}
}
return s, p.Insert(level, value, context, extend)
}
// scanString scans a single input string.
func scanString(s string) (str, tail string, err error) {
if s = skipSpace(s); s == "" {
return s, s, errors.New("missing string")
}
buf := [16]byte{} // small but enough to hold most cases.
value := buf[:0]
for s != "" {
if consume(&s, '\'') {
i := strings.IndexByte(s, '\'')
if i == -1 {
return "", "", errors.New(`unmatched single quote`)
}
if i == 0 {
value = append(value, '\'')
} else {
value = append(value, s[:i]...)
}
s = s[i+1:]
continue
}
r, sz := utf8.DecodeRuneInString(s)
if unicode.IsSpace(r) || strings.ContainsRune("&<=#", r) {
break
}
value = append(value, s[:sz]...)
s = s[sz:]
}
return string(value), skipSpace(s), nil
}
func parseSequence(p RuleProcessor, level int, s string) (tail string, err error) {
if s = skipSpace(s); s == "" {
return s, errors.New("empty sequence")
}
last := rune(0)
for s != "" {
r, sz := utf8.DecodeRuneInString(s)
s = s[sz:]
if r == '-' {
// We have a range. The first element was already written.
if last == 0 {
return s, errors.New("range without starter value")
}
r, sz = utf8.DecodeRuneInString(s)
s = s[sz:]
if r == utf8.RuneError || r < last {
return s, fmt.Errorf("invalid range %q-%q", last, r)
}
for i := last + 1; i <= r; i++ {
if err := p.Insert(level, string(i), "", ""); err != nil {
return s, err
}
}
last = 0
continue
}
if unicode.IsSpace(r) || unicode.IsPunct(r) {
break
}
// normal case
if err := p.Insert(level, string(r), "", ""); err != nil {
return s, err
}
last = r
}
return s, nil
}
func skipSpace(s string) string {
return strings.TrimLeftFunc(s, unicode.IsSpace)
}
// consume returns whether the next byte is ch. If so, it gobbles it by
// updating s.
func consume(s *string, ch byte) (ok bool) {
if *s == "" || (*s)[0] != ch {
return false
}
*s = (*s)[1:]
return true
}
// The following code parses Collation rules of CLDR version 24 and before.
var lmap = map[byte]int{
'p': 1,
's': 2,
't': 3,
'i': 5,
}
type rulesElem struct {
Rules struct {
Common
Any []*struct {
XMLName xml.Name
rule
} `xml:",any"`
} `xml:"rules"`
}
type rule struct {
Value string `xml:",chardata"`
Before string `xml:"before,attr"`
Any []*struct {
XMLName xml.Name
rule
} `xml:",any"`
}
var emptyValueError = errors.New("cldr: empty rule value")
func (r *rule) value() (string, error) {
// Convert hexadecimal Unicode codepoint notation to a string.
s := charRe.ReplaceAllStringFunc(r.Value, replaceUnicode)
r.Value = s
if s == "" {
if len(r.Any) != 1 {
return "", emptyValueError
}
r.Value = fmt.Sprintf(specialAnchor, r.Any[0].XMLName.Local)
r.Any = nil
} else if len(r.Any) != 0 {
return "", fmt.Errorf("cldr: XML elements found in collation rule: %v", r.Any)
}
return r.Value, nil
}
func (r rule) process(p RuleProcessor, name, context, extend string) error {
v, err := r.value()
if err != nil {
return err
}
switch name {
case "p", "s", "t", "i":
if strings.HasPrefix(v, cldrIndex) {
p.Index(v[len(cldrIndex):])
return nil
}
if err := p.Insert(lmap[name[0]], v, context, extend); err != nil {
return err
}
case "pc", "sc", "tc", "ic":
level := lmap[name[0]]
for _, s := range v {
if err := p.Insert(level, string(s), context, extend); err != nil {
return err
}
}
default:
return fmt.Errorf("cldr: unsupported tag: %q", name)
}
return nil
}
// processXML parses the format of CLDR versions 24 and older.
func (c Collation) processXML(p RuleProcessor) (err error) {
// Collation is generated and defined in xml.go.
var v string
for _, r := range c.Rules.Any {
switch r.XMLName.Local {
case "reset":
level := 0
switch r.Before {
case "primary", "1":
level = 1
case "secondary", "2":
level = 2
case "tertiary", "3":
level = 3
case "":
default:
return fmt.Errorf("cldr: unknown level %q", r.Before)
}
v, err = r.value()
if err == nil {
err = p.Reset(v, level)
}
case "x":
var context, extend string
for _, r1 := range r.Any {
v, err = r1.value()
switch r1.XMLName.Local {
case "context":
context = v
case "extend":
extend = v
}
}
for _, r1 := range r.Any {
if t := r1.XMLName.Local; t == "context" || t == "extend" {
continue
}
r1.rule.process(p, r1.XMLName.Local, context, extend)
}
default:
err = r.rule.process(p, r.XMLName.Local, "", "")
}
if err != nil {
return err
}
}
return nil
}

View file

@ -1,171 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"archive/zip"
"bytes"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
)
// A Decoder loads an archive of CLDR data.
type Decoder struct {
dirFilter []string
sectionFilter []string
loader Loader
cldr *CLDR
curLocale string
}
// SetSectionFilter takes a list of top-level LDML element names to which
// evaluation of LDML should be limited. It automatically calls SetDirFilter.
func (d *Decoder) SetSectionFilter(filter ...string) {
d.sectionFilter = filter
// TODO: automatically set dir filter
}
// SetDirFilter limits the loading of LDML XML files to the specified directories.
// Note that sections may be split across directories differently for different CLDR versions.
// For more robust code, use SetSectionFilter.
func (d *Decoder) SetDirFilter(dir ...string) {
d.dirFilter = dir
}
// A Loader provides access to the files of a CLDR archive.
type Loader interface {
Len() int
Path(i int) string
Reader(i int) (io.ReadCloser, error)
}
var fileRe = regexp.MustCompile(".*/(.*)/(.*)\\.xml")
// Decode loads and decodes the files represented by l.
func (d *Decoder) Decode(l Loader) (cldr *CLDR, err error) {
d.cldr = makeCLDR()
for i := 0; i < l.Len(); i++ {
fname := l.Path(i)
if m := fileRe.FindStringSubmatch(fname); m != nil {
if len(d.dirFilter) > 0 && !in(d.dirFilter, m[1]) {
continue
}
var r io.Reader
if r, err = l.Reader(i); err == nil {
err = d.decode(m[1], m[2], r)
}
if err != nil {
return nil, err
}
}
}
d.cldr.finalize(d.sectionFilter)
return d.cldr, nil
}
func (d *Decoder) decode(dir, id string, r io.Reader) error {
var v interface{}
var l *LDML
cldr := d.cldr
switch {
case dir == "supplemental":
v = cldr.supp
case dir == "transforms":
return nil
case dir == "bcp47":
v = cldr.bcp47
case dir == "validity":
return nil
default:
ok := false
if v, ok = cldr.locale[id]; !ok {
l = &LDML{}
v, cldr.locale[id] = l, l
}
}
x := xml.NewDecoder(r)
if err := x.Decode(v); err != nil {
log.Printf("%s/%s: %v", dir, id, err)
return err
}
if l != nil {
if l.Identity == nil {
return fmt.Errorf("%s/%s: missing identity element", dir, id)
}
// TODO: verify when CLDR bug http://unicode.org/cldr/trac/ticket/8970
// is resolved.
// path := strings.Split(id, "_")
// if lang := l.Identity.Language.Type; lang != path[0] {
// return fmt.Errorf("%s/%s: language was %s; want %s", dir, id, lang, path[0])
// }
}
return nil
}
type pathLoader []string
func makePathLoader(path string) (pl pathLoader, err error) {
err = filepath.Walk(path, func(path string, _ os.FileInfo, err error) error {
pl = append(pl, path)
return err
})
return pl, err
}
func (pl pathLoader) Len() int {
return len(pl)
}
func (pl pathLoader) Path(i int) string {
return pl[i]
}
func (pl pathLoader) Reader(i int) (io.ReadCloser, error) {
return os.Open(pl[i])
}
// DecodePath loads CLDR data from the given path.
func (d *Decoder) DecodePath(path string) (cldr *CLDR, err error) {
loader, err := makePathLoader(path)
if err != nil {
return nil, err
}
return d.Decode(loader)
}
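
A usage sketch combining the section filter with DecodePath (the path is hypothetical; import path assumed to be golang.org/x/text/unicode/cldr):

package main

import (
	"fmt"
	"log"

	"golang.org/x/text/unicode/cldr"
)

func main() {
	var d cldr.Decoder
	// Restrict evaluation to the "numbers" section of each LDML file.
	d.SetSectionFilter("numbers")
	data, err := d.DecodePath("testdata/core") // an unpacked CLDR core archive
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(data.Locales())
}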
type zipLoader struct {
r *zip.Reader
}
func (zl zipLoader) Len() int {
return len(zl.r.File)
}
func (zl zipLoader) Path(i int) string {
return zl.r.File[i].Name
}
func (zl zipLoader) Reader(i int) (io.ReadCloser, error) {
return zl.r.File[i].Open()
}
// DecodeZip loads CLDR data from the zip archive for which r is the source.
func (d *Decoder) DecodeZip(r io.Reader) (cldr *CLDR, err error) {
buffer, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
archive, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
if err != nil {
return nil, err
}
return d.Decode(zipLoader{archive})
}

View file

@ -1,400 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// This tool generates types for the various XML formats of CLDR.
package main
import (
"archive/zip"
"bytes"
"encoding/xml"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"regexp"
"strings"
"golang.org/x/text/internal/gen"
)
var outputFile = flag.String("output", "xml.go", "output file name")
func main() {
flag.Parse()
r := gen.OpenCLDRCoreZip()
buffer, err := ioutil.ReadAll(r)
if err != nil {
log.Fatal("Could not read zip file")
}
r.Close()
z, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
if err != nil {
log.Fatalf("Could not read zip archive: %v", err)
}
var buf bytes.Buffer
version := gen.CLDRVersion()
for _, dtd := range files {
for _, f := range z.File {
if strings.HasSuffix(f.Name, dtd.file+".dtd") {
r, err := f.Open()
failOnError(err)
b := makeBuilder(&buf, dtd)
b.parseDTD(r)
b.resolve(b.index[dtd.top[0]])
b.write()
if b.version != "" && version != b.version {
println(f.Name)
log.Fatalf("main: inconsistent versions: found %s; want %s", b.version, version)
}
break
}
}
}
fmt.Fprintln(&buf, "// Version is the version of CLDR from which the XML definitions are generated.")
fmt.Fprintf(&buf, "const Version = %q\n", version)
gen.WriteGoFile(*outputFile, "cldr", buf.Bytes())
}
func failOnError(err error) {
if err != nil {
log.New(os.Stderr, "", log.Lshortfile).Output(2, err.Error())
os.Exit(1)
}
}
// configuration data per DTD type
type dtd struct {
file string // base file name
root string // Go name of the root XML element
top []string // create a different type for this section
skipElem []string // hard-coded or deprecated elements
skipAttr []string // attributes to exclude
predefined []string // hard-coded elements exist of the form <name>Elem
forceRepeat []string // elements to make slices despite DTD
}
var files = []dtd{
{
file: "ldmlBCP47",
root: "LDMLBCP47",
top: []string{"ldmlBCP47"},
skipElem: []string{
"cldrVersion", // deprecated, not used
},
},
{
file: "ldmlSupplemental",
root: "SupplementalData",
top: []string{"supplementalData"},
skipElem: []string{
"cldrVersion", // deprecated, not used
},
forceRepeat: []string{
"plurals", // data defined in plurals.xml and ordinals.xml
},
},
{
file: "ldml",
root: "LDML",
top: []string{
"ldml", "collation", "calendar", "timeZoneNames", "localeDisplayNames", "numbers",
},
skipElem: []string{
"cp", // not used anywhere
"special", // not used anywhere
"fallback", // deprecated, not used
"alias", // in Common
"default", // in Common
},
skipAttr: []string{
"hiraganaQuarternary", // typo in DTD, correct version included as well
},
predefined: []string{"rules"},
},
}
var comments = map[string]string{
"ldmlBCP47": `
// LDMLBCP47 holds information on allowable values for various variables in LDML.
`,
"supplementalData": `
// SupplementalData holds information relevant for internationalization
// and proper use of CLDR, but that is not contained in the locale hierarchy.
`,
"ldml": `
// LDML is the top-level type for locale-specific data.
`,
"collation": `
// Collation contains rules that specify a certain sort-order,
// as a tailoring of the root order.
// The parsed rules are obtained by passing a RuleProcessor to Collation's
// Process method.
`,
"calendar": `
// Calendar specifies the fields used for formatting and parsing dates and times.
// The month and quarter names are identified numerically, starting at 1.
// The day (of the week) names are identified with short strings, since there is
// no universally-accepted numeric designation.
`,
"dates": `
// Dates contains information regarding the format and parsing of dates and times.
`,
"localeDisplayNames": `
// LocaleDisplayNames specifies localized display names for scripts, languages,
// countries, currencies, and variants.
`,
"numbers": `
// Numbers supplies information for formatting and parsing numbers and currencies.
`,
}
type element struct {
name string // XML element name
category string // elements contained by this element
signature string // category + attrKey*
attr []*attribute // attributes supported by this element.
sub []struct { // parsed and evaluated sub elements of this element.
e *element
repeat bool // true if the element needs to be a slice
}
resolved bool // prevent multiple resolutions of this element.
}
type attribute struct {
name string
key string
list []string
tag string // Go tag
}
var (
reHead = regexp.MustCompile(` *(\w+) +([\w\-]+)`)
reAttr = regexp.MustCompile(` *(\w+) *(?:(\w+)|\(([\w\- \|]+)\)) *(?:#([A-Z]*) *(?:\"([\.\d+])\")?)? *("[\w\-:]*")?`)
reElem = regexp.MustCompile(`^ *(EMPTY|ANY|\(.*\)[\*\+\?]?) *$`)
reToken = regexp.MustCompile(`\w\-`)
)
// builder is used to read in the DTD files from CLDR and generate Go code
// to be used with the encoding/xml package.
type builder struct {
w io.Writer
index map[string]*element
elem []*element
info dtd
version string
}
func makeBuilder(w io.Writer, d dtd) builder {
return builder{
w: w,
index: make(map[string]*element),
elem: []*element{},
info: d,
}
}
// parseDTD parses a DTD file.
func (b *builder) parseDTD(r io.Reader) {
for d := xml.NewDecoder(r); ; {
t, err := d.Token()
if t == nil {
break
}
failOnError(err)
dir, ok := t.(xml.Directive)
if !ok {
continue
}
m := reHead.FindSubmatch(dir)
dir = dir[len(m[0]):]
ename := string(m[2])
el, elementFound := b.index[ename]
switch string(m[1]) {
case "ELEMENT":
if elementFound {
log.Fatal("parseDTD: duplicate entry for element %q", ename)
}
m := reElem.FindSubmatch(dir)
if m == nil {
log.Fatalf("parseDTD: invalid element %q", string(dir))
}
if len(m[0]) != len(dir) {
log.Fatal("parseDTD: invalid element %q", string(dir), len(dir), len(m[0]), string(m[0]))
}
s := string(m[1])
el = &element{
name: ename,
category: s,
}
b.index[ename] = el
case "ATTLIST":
if !elementFound {
log.Fatalf("parseDTD: unknown element %q", ename)
}
s := string(dir)
m := reAttr.FindStringSubmatch(s)
if m == nil {
log.Fatal(fmt.Errorf("parseDTD: invalid attribute %q", string(dir)))
}
if m[4] == "FIXED" {
b.version = m[5]
} else {
switch m[1] {
case "draft", "references", "alt", "validSubLocales", "standard" /* in Common */ :
case "type", "choice":
default:
el.attr = append(el.attr, &attribute{
name: m[1],
key: s,
list: reToken.FindAllString(m[3], -1),
})
el.signature = fmt.Sprintf("%s=%s+%s", el.signature, m[1], m[2])
}
}
}
}
}
var reCat = regexp.MustCompile(`[ ,\|]*(?:(\(|\)|\#?[\w_-]+)([\*\+\?]?))?`)
// resolve takes a parsed element and converts it into structured data
// that can be used to generate the XML code.
func (b *builder) resolve(e *element) {
if e.resolved {
return
}
b.elem = append(b.elem, e)
e.resolved = true
s := e.category
found := make(map[string]bool)
sequenceStart := []int{}
for len(s) > 0 {
m := reCat.FindStringSubmatch(s)
if m == nil {
log.Fatalf("%s: invalid category string %q", e.name, s)
}
repeat := m[2] == "*" || m[2] == "+" || in(b.info.forceRepeat, m[1])
switch m[1] {
case "":
case "(":
sequenceStart = append(sequenceStart, len(e.sub))
case ")":
if len(sequenceStart) == 0 {
log.Fatalf("%s: unmatched closing parenthesis", e.name)
}
for i := sequenceStart[len(sequenceStart)-1]; i < len(e.sub); i++ {
e.sub[i].repeat = e.sub[i].repeat || repeat
}
sequenceStart = sequenceStart[:len(sequenceStart)-1]
default:
if in(b.info.skipElem, m[1]) {
} else if sub, ok := b.index[m[1]]; ok {
if !found[sub.name] {
e.sub = append(e.sub, struct {
e *element
repeat bool
}{sub, repeat})
found[sub.name] = true
b.resolve(sub)
}
} else if m[1] == "#PCDATA" || m[1] == "ANY" {
} else if m[1] != "EMPTY" {
log.Fatalf("resolve:%s: element %q not found", e.name, m[1])
}
}
s = s[len(m[0]):]
}
}
// return true if s is contained in set.
func in(set []string, s string) bool {
for _, v := range set {
if v == s {
return true
}
}
return false
}
var repl = strings.NewReplacer("-", " ", "_", " ")
// title puts the first character and each character following '-' or '_' in
// title case and removes all occurrences of '-' and '_'.
func title(s string) string {
return strings.Replace(strings.Title(repl.Replace(s)), " ", "", -1)
}
// writeElem generates Go code for a single element, recursively.
func (b *builder) writeElem(tab int, e *element) {
p := func(f string, x ...interface{}) {
f = strings.Replace(f, "\n", "\n"+strings.Repeat("\t", tab), -1)
fmt.Fprintf(b.w, f, x...)
}
if len(e.sub) == 0 && len(e.attr) == 0 {
p("Common")
return
}
p("struct {")
tab++
p("\nCommon")
for _, attr := range e.attr {
if !in(b.info.skipAttr, attr.name) {
p("\n%s string `xml:\"%s,attr\"`", title(attr.name), attr.name)
}
}
for _, sub := range e.sub {
if in(b.info.predefined, sub.e.name) {
p("\n%sElem", sub.e.name)
continue
}
if in(b.info.skipElem, sub.e.name) {
continue
}
p("\n%s ", title(sub.e.name))
if sub.repeat {
p("[]")
}
p("*")
if in(b.info.top, sub.e.name) {
p(title(sub.e.name))
} else {
b.writeElem(tab, sub.e)
}
p(" `xml:\"%s\"`", sub.e.name)
}
tab--
p("\n}")
}
// write generates the Go XML code.
func (b *builder) write() {
for i, name := range b.info.top {
e := b.index[name]
if e != nil {
fmt.Fprintf(b.w, comments[name])
name := title(e.name)
if i == 0 {
name = b.info.root
}
fmt.Fprintf(b.w, "type %s ", name)
b.writeElem(0, e)
fmt.Fprint(b.w, "\n")
}
}
}

View file

@ -1,602 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
// This file implements the various inheritance constructs defined by LDML.
// See http://www.unicode.org/reports/tr35/#Inheritance_and_Validity
// for more details.
import (
"fmt"
"log"
"reflect"
"regexp"
"sort"
"strings"
)
// fieldIter iterates over fields in a struct. It includes
// fields of embedded structs.
type fieldIter struct {
v reflect.Value
index, n []int
}
func iter(v reflect.Value) fieldIter {
if v.Kind() != reflect.Struct {
log.Panicf("value %v must be a struct", v)
}
i := fieldIter{
v: v,
index: []int{0},
n: []int{v.NumField()},
}
i.descent()
return i
}
func (i *fieldIter) descent() {
for f := i.field(); f.Anonymous && f.Type.NumField() > 0; f = i.field() {
i.index = append(i.index, 0)
i.n = append(i.n, f.Type.NumField())
}
}
func (i *fieldIter) done() bool {
return len(i.index) == 1 && i.index[0] >= i.n[0]
}
func skip(f reflect.StructField) bool {
return !f.Anonymous && (f.Name[0] < 'A' || f.Name[0] > 'Z')
}
func (i *fieldIter) next() {
for {
k := len(i.index) - 1
i.index[k]++
if i.index[k] < i.n[k] {
if !skip(i.field()) {
break
}
} else {
if k == 0 {
return
}
i.index = i.index[:k]
i.n = i.n[:k]
}
}
i.descent()
}
func (i *fieldIter) value() reflect.Value {
return i.v.FieldByIndex(i.index)
}
func (i *fieldIter) field() reflect.StructField {
return i.v.Type().FieldByIndex(i.index)
}
type visitor func(v reflect.Value) error
var stopDescent = fmt.Errorf("do not recurse")
func (f visitor) visit(x interface{}) error {
return f.visitRec(reflect.ValueOf(x))
}
// visit recursively calls f on all nodes in v.
func (f visitor) visitRec(v reflect.Value) error {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return nil
}
return f.visitRec(v.Elem())
}
if err := f(v); err != nil {
if err == stopDescent {
return nil
}
return err
}
switch v.Kind() {
case reflect.Struct:
for i := iter(v); !i.done(); i.next() {
if err := f.visitRec(i.value()); err != nil {
return err
}
}
case reflect.Slice:
for i := 0; i < v.Len(); i++ {
if err := f.visitRec(v.Index(i)); err != nil {
return err
}
}
}
return nil
}
// getPath is used for error reporting purposes only.
func getPath(e Elem) string {
if e == nil {
return "<nil>"
}
if e.enclosing() == nil {
return e.GetCommon().name
}
if e.GetCommon().Type == "" {
return fmt.Sprintf("%s.%s", getPath(e.enclosing()), e.GetCommon().name)
}
return fmt.Sprintf("%s.%s[type=%s]", getPath(e.enclosing()), e.GetCommon().name, e.GetCommon().Type)
}
// xmlName returns the xml name of the element or attribute
func xmlName(f reflect.StructField) (name string, attr bool) {
tags := strings.Split(f.Tag.Get("xml"), ",")
for _, s := range tags {
attr = attr || s == "attr"
}
return tags[0], attr
}
func findField(v reflect.Value, key string) (reflect.Value, error) {
v = reflect.Indirect(v)
for i := iter(v); !i.done(); i.next() {
if n, _ := xmlName(i.field()); n == key {
return i.value(), nil
}
}
return reflect.Value{}, fmt.Errorf("cldr: no field %q in element %#v", key, v.Interface())
}
var xpathPart = regexp.MustCompile(`(\pL+)(?:\[@(\pL+)='([\w-]+)'\])?`)
func walkXPath(e Elem, path string) (res Elem, err error) {
for _, c := range strings.Split(path, "/") {
if c == ".." {
if e = e.enclosing(); e == nil {
panic("path ..")
return nil, fmt.Errorf(`cldr: ".." moves past root in path %q`, path)
}
continue
} else if c == "" {
continue
}
m := xpathPart.FindStringSubmatch(c)
if len(m) == 0 || len(m[0]) != len(c) {
return nil, fmt.Errorf("cldr: syntax error in path component %q", c)
}
v, err := findField(reflect.ValueOf(e), m[1])
if err != nil {
return nil, err
}
switch v.Kind() {
case reflect.Slice:
i := 0
if m[2] != "" || v.Len() > 1 {
if m[2] == "" {
m[2] = "type"
if m[3] = e.GetCommon().Default(); m[3] == "" {
return nil, fmt.Errorf("cldr: type selector or default value needed for element %s", m[1])
}
}
for ; i < v.Len(); i++ {
vi := v.Index(i)
key, err := findField(vi.Elem(), m[2])
if err != nil {
return nil, err
}
key = reflect.Indirect(key)
if key.Kind() == reflect.String && key.String() == m[3] {
break
}
}
}
if i == v.Len() || v.Index(i).IsNil() {
return nil, fmt.Errorf("no %s found with %s==%s", m[1], m[2], m[3])
}
e = v.Index(i).Interface().(Elem)
case reflect.Ptr:
if v.IsNil() {
return nil, fmt.Errorf("cldr: element %q not found within element %q", m[1], e.GetCommon().name)
}
var ok bool
if e, ok = v.Interface().(Elem); !ok {
return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
} else if m[2] != "" || m[3] != "" {
return nil, fmt.Errorf("cldr: no type selector allowed for element %s", m[1])
}
default:
return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
}
}
return e, nil
}
const absPrefix = "//ldml/"
func (cldr *CLDR) resolveAlias(e Elem, src, path string) (res Elem, err error) {
if src != "locale" {
if !strings.HasPrefix(path, absPrefix) {
return nil, fmt.Errorf("cldr: expected absolute path, found %q", path)
}
path = path[len(absPrefix):]
if e, err = cldr.resolve(src); err != nil {
return nil, err
}
}
return walkXPath(e, path)
}
func (cldr *CLDR) resolveAndMergeAlias(e Elem) error {
alias := e.GetCommon().Alias
if alias == nil {
return nil
}
a, err := cldr.resolveAlias(e, alias.Source, alias.Path)
if err != nil {
return fmt.Errorf("%v: error evaluating path %q: %v", getPath(e), alias.Path, err)
}
// Ensure alias node was already evaluated. TODO: avoid double evaluation.
err = cldr.resolveAndMergeAlias(a)
v := reflect.ValueOf(e).Elem()
for i := iter(reflect.ValueOf(a).Elem()); !i.done(); i.next() {
if vv := i.value(); vv.Kind() != reflect.Ptr || !vv.IsNil() {
if _, attr := xmlName(i.field()); !attr {
v.FieldByIndex(i.index).Set(vv)
}
}
}
return err
}
func (cldr *CLDR) aliasResolver() visitor {
return func(v reflect.Value) (err error) {
if e, ok := v.Addr().Interface().(Elem); ok {
err = cldr.resolveAndMergeAlias(e)
if err == nil && blocking[e.GetCommon().name] {
return stopDescent
}
}
return err
}
}
// elements within blocking elements do not inherit.
// Taken from CLDR's supplementalMetaData.xml.
var blocking = map[string]bool{
"identity": true,
"supplementalData": true,
"cldrTest": true,
"collation": true,
"transform": true,
}
// Distinguishing attributes affect inheritance; two elements with different
// distinguishing attributes are treated as different for purposes of inheritance,
// except when such attributes occur in the indicated elements.
// Taken from CLDR's supplementalMetaData.xml.
var distinguishing = map[string][]string{
"key": nil,
"request_id": nil,
"id": nil,
"registry": nil,
"alt": nil,
"iso4217": nil,
"iso3166": nil,
"mzone": nil,
"from": nil,
"to": nil,
"type": []string{
"abbreviationFallback",
"default",
"mapping",
"measurementSystem",
"preferenceOrdering",
},
"numberSystem": nil,
}
func in(set []string, s string) bool {
for _, v := range set {
if v == s {
return true
}
}
return false
}
// attrKey computes a key based on the distinguishing attributes of
// an element and its values.
func attrKey(v reflect.Value, exclude ...string) string {
parts := []string{}
ename := v.Interface().(Elem).GetCommon().name
v = v.Elem()
for i := iter(v); !i.done(); i.next() {
if name, attr := xmlName(i.field()); attr {
if except, ok := distinguishing[name]; ok && !in(exclude, name) && !in(except, ename) {
v := i.value()
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if v.IsValid() {
parts = append(parts, fmt.Sprintf("%s=%s", name, v.String()))
}
}
}
}
sort.Strings(parts)
return strings.Join(parts, ";")
}
// Key returns a key for e derived from all distinguishing attributes
// except those specified by exclude.
func Key(e Elem, exclude ...string) string {
return attrKey(reflect.ValueOf(e), exclude...)
}
// linkEnclosing sets the enclosing element as well as the name
// for all sub-elements of child, recursively.
func linkEnclosing(parent, child Elem) {
child.setEnclosing(parent)
v := reflect.ValueOf(child).Elem()
for i := iter(v); !i.done(); i.next() {
vf := i.value()
if vf.Kind() == reflect.Slice {
for j := 0; j < vf.Len(); j++ {
linkEnclosing(child, vf.Index(j).Interface().(Elem))
}
} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
linkEnclosing(child, vf.Interface().(Elem))
}
}
}
func setNames(e Elem, name string) {
e.setName(name)
v := reflect.ValueOf(e).Elem()
for i := iter(v); !i.done(); i.next() {
vf := i.value()
name, _ = xmlName(i.field())
if vf.Kind() == reflect.Slice {
for j := 0; j < vf.Len(); j++ {
setNames(vf.Index(j).Interface().(Elem), name)
}
} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
setNames(vf.Interface().(Elem), name)
}
}
}
// deepCopy copies elements of v recursively. All elements of v that may
// be modified by inheritance are explicitly copied.
func deepCopy(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Ptr:
if v.IsNil() || v.Elem().Kind() != reflect.Struct {
return v
}
nv := reflect.New(v.Elem().Type())
nv.Elem().Set(v.Elem())
deepCopyRec(nv.Elem(), v.Elem())
return nv
case reflect.Slice:
nv := reflect.MakeSlice(v.Type(), v.Len(), v.Len())
for i := 0; i < v.Len(); i++ {
deepCopyRec(nv.Index(i), v.Index(i))
}
return nv
}
panic("deepCopy: must be called with pointer or slice")
}
// deepCopyRec is only called by deepCopy.
func deepCopyRec(nv, v reflect.Value) {
if v.Kind() == reflect.Struct {
t := v.Type()
for i := 0; i < v.NumField(); i++ {
if name, attr := xmlName(t.Field(i)); name != "" && !attr {
deepCopyRec(nv.Field(i), v.Field(i))
}
}
} else {
nv.Set(deepCopy(v))
}
}
// newNode is used to insert a missing node during inheritance.
func (cldr *CLDR) newNode(v, enc reflect.Value) reflect.Value {
n := reflect.New(v.Type())
for i := iter(v); !i.done(); i.next() {
if name, attr := xmlName(i.field()); name == "" || attr {
n.Elem().FieldByIndex(i.index).Set(i.value())
}
}
n.Interface().(Elem).GetCommon().setEnclosing(enc.Addr().Interface().(Elem))
return n
}
// v, parent must be pointers to struct
func (cldr *CLDR) inheritFields(v, parent reflect.Value) (res reflect.Value, err error) {
t := v.Type()
nv := reflect.New(t)
nv.Elem().Set(v)
for i := iter(v); !i.done(); i.next() {
vf := i.value()
f := i.field()
name, attr := xmlName(f)
if name == "" || attr {
continue
}
pf := parent.FieldByIndex(i.index)
if blocking[name] {
if vf.IsNil() {
vf = pf
}
nv.Elem().FieldByIndex(i.index).Set(deepCopy(vf))
continue
}
switch f.Type.Kind() {
case reflect.Ptr:
if f.Type.Elem().Kind() == reflect.Struct {
if !vf.IsNil() {
if vf, err = cldr.inheritStructPtr(vf, pf); err != nil {
return reflect.Value{}, err
}
vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
nv.Elem().FieldByIndex(i.index).Set(vf)
} else if !pf.IsNil() {
n := cldr.newNode(pf.Elem(), v)
if vf, err = cldr.inheritStructPtr(n, pf); err != nil {
return reflect.Value{}, err
}
vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
nv.Elem().FieldByIndex(i.index).Set(vf)
}
}
case reflect.Slice:
vf, err := cldr.inheritSlice(nv.Elem(), vf, pf)
if err != nil {
return reflect.Zero(t), err
}
nv.Elem().FieldByIndex(i.index).Set(vf)
}
}
return nv, nil
}
func root(e Elem) *LDML {
for ; e.enclosing() != nil; e = e.enclosing() {
}
return e.(*LDML)
}
// inheritStructPtr first merges possible aliases in with v and then inherits
// any underspecified elements from parent.
func (cldr *CLDR) inheritStructPtr(v, parent reflect.Value) (r reflect.Value, err error) {
if !v.IsNil() {
e := v.Interface().(Elem).GetCommon()
alias := e.Alias
if alias == nil && !parent.IsNil() {
alias = parent.Interface().(Elem).GetCommon().Alias
}
if alias != nil {
a, err := cldr.resolveAlias(v.Interface().(Elem), alias.Source, alias.Path)
if a != nil {
if v, err = cldr.inheritFields(v.Elem(), reflect.ValueOf(a).Elem()); err != nil {
return reflect.Value{}, err
}
}
}
if !parent.IsNil() {
return cldr.inheritFields(v.Elem(), parent.Elem())
}
} else if parent.IsNil() {
panic("should not reach here")
}
return v, nil
}
// Must be slice of struct pointers.
func (cldr *CLDR) inheritSlice(enc, v, parent reflect.Value) (res reflect.Value, err error) {
t := v.Type()
index := make(map[string]reflect.Value)
if !v.IsNil() {
for i := 0; i < v.Len(); i++ {
vi := v.Index(i)
key := attrKey(vi)
index[key] = vi
}
}
if !parent.IsNil() {
for i := 0; i < parent.Len(); i++ {
vi := parent.Index(i)
key := attrKey(vi)
if w, ok := index[key]; ok {
index[key], err = cldr.inheritStructPtr(w, vi)
} else {
n := cldr.newNode(vi.Elem(), enc)
index[key], err = cldr.inheritStructPtr(n, vi)
}
index[key].Interface().(Elem).setEnclosing(enc.Addr().Interface().(Elem))
if err != nil {
return v, err
}
}
}
keys := make([]string, 0, len(index))
for k, _ := range index {
keys = append(keys, k)
}
sort.Strings(keys)
sl := reflect.MakeSlice(t, len(index), len(index))
for i, k := range keys {
sl.Index(i).Set(index[k])
}
return sl, nil
}
func parentLocale(loc string) string {
parts := strings.Split(loc, "_")
if len(parts) == 1 {
return "root"
}
parts = parts[:len(parts)-1]
key := strings.Join(parts, "_")
return key
}
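// exampleParentLocale is an illustrative sketch added in review, not part of
// the upstream file: it shows the plain truncation scheme used here, which
// trims one "_" segment per step and bottoms out at "root".
func exampleParentLocale() {
	fmt.Println(parentLocale("de_Latn_CH")) // "de_Latn"
	fmt.Println(parentLocale("de_Latn"))    // "de"
	fmt.Println(parentLocale("de"))         // "root"
}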
func (cldr *CLDR) resolve(loc string) (res *LDML, err error) {
if r := cldr.resolved[loc]; r != nil {
return r, nil
}
x := cldr.RawLDML(loc)
if x == nil {
return nil, fmt.Errorf("cldr: unknown locale %q", loc)
}
var v reflect.Value
if loc == "root" {
x = deepCopy(reflect.ValueOf(x)).Interface().(*LDML)
linkEnclosing(nil, x)
err = cldr.aliasResolver().visit(x)
} else {
key := parentLocale(loc)
var parent *LDML
for ; cldr.locale[key] == nil; key = parentLocale(key) {
}
if parent, err = cldr.resolve(key); err != nil {
return nil, err
}
v, err = cldr.inheritFields(reflect.ValueOf(x).Elem(), reflect.ValueOf(parent).Elem())
x = v.Interface().(*LDML)
linkEnclosing(nil, x)
}
if err != nil {
return nil, err
}
cldr.resolved[loc] = x
return x, err
}
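// exampleResolve is an illustrative sketch, not part of the upstream file.
// Callers normally reach resolve through the exported LDML method after
// decoding; the locale name below is only an example.
func exampleResolve(data *CLDR) (*LDML, error) {
	// Resolving "de_CH" fills in anything missing locally from "de" and
	// ultimately from "root", with aliases applied along the way.
	return data.LDML("de_CH")
}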
// finalize finalizes the initialization of the raw LDML structs. It also
// removes unwanted fields, as specified by filter, so that they will not
// be unnecessarily evaluated.
func (cldr *CLDR) finalize(filter []string) {
for _, x := range cldr.locale {
if filter != nil {
v := reflect.ValueOf(x).Elem()
t := v.Type()
for i := 0; i < v.NumField(); i++ {
f := t.Field(i)
name, _ := xmlName(f)
if name != "" && name != "identity" && !in(filter, name) {
v.Field(i).Set(reflect.Zero(f.Type))
}
}
}
linkEnclosing(nil, x) // for resolving aliases and paths
setNames(x, "ldml")
}
}
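// exampleSectionFilter is an illustrative sketch, not part of the upstream
// file. The filter handed to finalize normally comes from the decoder; the
// SetSectionFilter and DecodePath calls and the path below are assumptions
// made for the example.
func exampleSectionFilter() (*CLDR, error) {
	d := Decoder{}
	// Keep only these top-level LDML sections; everything else (except
	// identity) is zeroed by finalize and never resolved.
	d.SetSectionFilter("localeDisplayNames", "numbers")
	return d.DecodePath("testdata/common")
}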

View file

@ -1,144 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"fmt"
"reflect"
"sort"
)
// Slice provides utilities for modifying slices of elements.
// It can be wrapped around any slice whose element type implements the
// Elem interface.
type Slice struct {
ptr reflect.Value
typ reflect.Type
}
// Value returns the reflect.Value of the underlying slice.
func (s *Slice) Value() reflect.Value {
return s.ptr.Elem()
}
// MakeSlice wraps a pointer to a slice of Elems.
// It replaces the array pointed to by the slice so that subsequent modifications
// do not alter the data in a CLDR type.
// It panics if an incorrect type is passed.
func MakeSlice(slicePtr interface{}) Slice {
ptr := reflect.ValueOf(slicePtr)
if ptr.Kind() != reflect.Ptr {
panic(fmt.Sprintf("MakeSlice: argument must be pointer to slice, found %v", ptr.Type()))
}
sl := ptr.Elem()
if sl.Kind() != reflect.Slice {
panic(fmt.Sprintf("MakeSlice: argument must point to a slice, found %v", sl.Type()))
}
intf := reflect.TypeOf((*Elem)(nil)).Elem()
if !sl.Type().Elem().Implements(intf) {
panic(fmt.Sprintf("MakeSlice: element type of slice (%v) does not implement Elem", sl.Type().Elem()))
}
nsl := reflect.MakeSlice(sl.Type(), sl.Len(), sl.Len())
reflect.Copy(nsl, sl)
sl.Set(nsl)
return Slice{
ptr: ptr,
typ: sl.Type().Elem().Elem(),
}
}
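// exampleMakeSlice is an illustrative sketch, not part of the upstream file:
// MakeSlice copies the backing array, so the wrapper can be filtered without
// mutating the data it was taken from. The literal elements are contrived.
func exampleMakeSlice() Slice {
	langs := []*Common{{Type: "de"}, {Type: "fr"}, {Type: "nl"}}
	s := MakeSlice(&langs)
	// s.Value() now refers to a private copy of the backing array; later
	// Filter/Select calls through s update langs in place without touching
	// the data the elements originally came from.
	return s
}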
func (s Slice) indexForAttr(a string) []int {
for i := iter(reflect.Zero(s.typ)); !i.done(); i.next() {
if n, _ := xmlName(i.field()); n == a {
return i.index
}
}
panic(fmt.Sprintf("MakeSlice: no attribute %q for type %v", a, s.typ))
}
// Filter filters s to only include elements for which fn returns true.
func (s Slice) Filter(fn func(e Elem) bool) {
k := 0
sl := s.Value()
for i := 0; i < sl.Len(); i++ {
vi := sl.Index(i)
if fn(vi.Interface().(Elem)) {
sl.Index(k).Set(vi)
k++
}
}
sl.Set(sl.Slice(0, k))
}
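// exampleFilter is an illustrative sketch, not part of the upstream file:
// Filter truncates the wrapped slice in place, keeping only elements the
// predicate accepts. Reading the draft attribute through Common's Draft
// field is an assumption made for this example.
func exampleFilter(s Slice) {
	// Keep approved and unmarked data; drop everything still in draft.
	s.Filter(func(e Elem) bool {
		d := e.GetCommon().Draft
		return d == "" || d == "approved"
	})
}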
// Group finds elements in s for which fn returns the same value and groups
// them into a new Slice per distinct value.
func (s Slice) Group(fn func(e Elem) string) []Slice {
m := make(map[string][]reflect.Value)
sl := s.Value()
for i := 0; i < sl.Len(); i++ {
vi := sl.Index(i)
key := fn(vi.Interface().(Elem))
m[key] = append(m[key], vi)
}
keys := []string{}
	for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
res := []Slice{}
for _, k := range keys {
nsl := reflect.New(sl.Type())
nsl.Elem().Set(reflect.Append(nsl.Elem(), m[k]...))
res = append(res, MakeSlice(nsl.Interface()))
}
return res
}
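// exampleGroup is an illustrative sketch, not part of the upstream file:
// Group returns one Slice per distinct key; here elements sharing the same
// "type" attribute end up in the same group.
func exampleGroup(s Slice) []Slice {
	return s.Group(func(e Elem) string { return e.GetCommon().Type })
}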
// SelectAnyOf filters s to contain only elements for which attr matches
// any of the values.
func (s Slice) SelectAnyOf(attr string, values ...string) {
index := s.indexForAttr(attr)
s.Filter(func(e Elem) bool {
vf := reflect.ValueOf(e).Elem().FieldByIndex(index)
return in(values, vf.String())
})
}
// SelectOnePerGroup filters s to include at most one element e per group of
// elements matching Key(e, a), where e has an attribute a that matches any
// of the values in v.
// If more than one element in a group matches a value in v, preference
// is given to the element that matches the first value in v.
func (s Slice) SelectOnePerGroup(a string, v []string) {
index := s.indexForAttr(a)
grouped := s.Group(func(e Elem) string { return Key(e, a) })
sl := s.Value()
sl.Set(sl.Slice(0, 0))
for _, g := range grouped {
e := reflect.Value{}
found := len(v)
gsl := g.Value()
for i := 0; i < gsl.Len(); i++ {
vi := gsl.Index(i).Elem().FieldByIndex(index)
j := 0
for ; j < len(v) && v[j] != vi.String(); j++ {
}
if j < found {
found = j
e = gsl.Index(i)
}
}
if found < len(v) {
sl.Set(reflect.Append(sl, e))
}
}
}
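// exampleSelectOnePerGroup is an illustrative sketch, not part of the
// upstream file: it keeps at most one element per Key(e, "alt") group,
// preferring the alt="short" variant and falling back to the unmarked one.
func exampleSelectOnePerGroup(s Slice) {
	s.SelectOnePerGroup("alt", []string{"short", ""})
}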
// SelectDraft drops all elements from the list with a draft level smaller than d
// and selects the highest draft level of the remaining.
// This method assumes that the input CLDR is canonicalized.
func (s Slice) SelectDraft(d Draft) {
s.SelectOnePerGroup("draft", drafts[len(drafts)-2-int(d):])
}
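// exampleSelectDraft is an illustrative sketch, not part of the upstream
// file: with Approved (assumed to be the highest Draft level defined by this
// package), only approved or unmarked values survive, and the best remaining
// draft variant is kept per group.
func exampleSelectDraft(s Slice) {
	s.SelectDraft(Approved)
}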

File diff suppressed because it is too large

View file

@ -795,7 +795,7 @@ func makeTables() {
}
fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
gen.WriteGoFile("tables.go", "norm", w.Bytes())
gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes())
}
func printChars() {
@ -972,5 +972,5 @@ func printTestdata() {
}
}
fmt.Fprintln(w, "}")
gen.WriteGoFile("data_test.go", "norm", w.Bytes())
gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes())
}

7653 vendor/golang.org/x/text/unicode/norm/tables10.0.0.go generated vendored Normal file

File diff suppressed because it is too large

View file

@ -1,5 +1,7 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// +build !go1.10
package norm
const (

View file

@ -40,7 +40,7 @@ func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)
}
func flushTransform(rb *reorderBuffer) bool {
// Write out (must fully fit in dst, or else it is a ErrShortDst).
// Write out (must fully fit in dst, or else it is an ErrShortDst).
if len(rb.out) < rb.nrune*utf8.UTFMax {
return false
}