1
0
Fork 0
forked from forgejo/forgejo

Vendor Update Go Libs (#13166)

* update github.com/alecthomas/chroma v0.8.0 -> v0.8.1

* github.com/blevesearch/bleve v1.0.10 -> v1.0.12

* editorconfig-core-go v2.1.1 -> v2.3.7

* github.com/gliderlabs/ssh v0.2.2 -> v0.3.1

* migrate editorconfig.ParseBytes to Parse

* github.com/shurcooL/vfsgen to 0d455de96546

* github.com/go-git/go-git/v5 v5.1.0 -> v5.2.0

* github.com/google/uuid v1.1.1 -> v1.1.2

* github.com/huandu/xstrings v1.3.0 -> v1.3.2

* github.com/klauspost/compress v1.10.11 -> v1.11.1

* github.com/markbates/goth v1.61.2 -> v1.65.0

* github.com/mattn/go-sqlite3 v1.14.0 -> v1.14.4

* github.com/mholt/archiver v3.3.0 -> v3.3.2

* github.com/microcosm-cc/bluemonday 4f7140c49acb -> v1.0.4

* github.com/minio/minio-go v7.0.4 -> v7.0.5

* github.com/olivere/elastic v7.0.9 -> v7.0.20

* github.com/urfave/cli v1.20.0 -> v1.22.4

* github.com/prometheus/client_golang v1.1.0 -> v1.8.0

* github.com/xanzy/go-gitlab v0.37.0 -> v0.38.1

* mvdan.cc/xurls v2.1.0 -> v2.2.0

Co-authored-by: Lauris BH <lauris@nix.lv>
This commit is contained in:
6543 2020-10-16 07:06:27 +02:00 committed by GitHub
parent 91f2afdb54
commit 12a1f914f4
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
656 changed files with 52967 additions and 25229 deletions

View file

@ -1,38 +0,0 @@
# Generated by FOSSA CLI (https://github.com/fossas/fossa-cli)
# Visit https://fossa.io to learn more
version: 1
cli:
server: https://app.fossa.io
fetcher: git
project: git@github.com:olivere/elastic.git
analyze:
modules:
- name: github.com/olivere/elastic
path: .
target: github.com/olivere/elastic
type: go
- name: github.com/olivere/elastic/config
path: ./config
target: github.com/olivere/elastic/config
type: go
- name: github.com/olivere/elastic/uritemplates
path: ./uritemplates
target: github.com/olivere/elastic/uritemplates
type: go
- name: github.com/olivere/elastic/trace/opencensus
path: ./trace/opencensus
target: github.com/olivere/elastic/trace/opencensus
type: go
- name: github.com/olivere/elastic/trace/opentracing
path: ./trace/opentracing
target: github.com/olivere/elastic/trace/opentracing
type: go
- name: github.com/olivere/elastic/aws
path: ./aws
target: github.com/olivere/elastic/aws
type: go
- name: github.com/olivere/elastic/aws/v4
path: ./aws/v4
target: github.com/olivere/elastic/aws/v4
type: go

View file

@ -21,6 +21,8 @@ _testmain.go
*.exe
.envrc
/.vscode/
/.idea/
/debug.test
@ -28,6 +30,7 @@ _testmain.go
/cluster-test/cluster-test
/cluster-test/*.log
/cluster-test/es-chaos-monkey
/dist
/go.sum
/spec
/tmp

View file

@ -1,32 +0,0 @@
sudo: required
language: go
go:
- "1.12.x"
- "1.13.x"
- tip
matrix:
allow_failures:
- go: tip
env:
- GO111MODULE=on
- GO111MODULE=off
addons:
apt:
update: true
packages:
- docker-ce
services:
- docker
before_install:
- if [[ "$TRAVIS_OS_NAME" == "linux" && ! $(which nc) ]] ; then sudo apt-get install -y netcat ; fi
- sudo sysctl -w vm.max_map_count=262144
- docker-compose pull
- docker-compose up -d
- go get -u github.com/google/go-cmp/cmp
- go get -u github.com/fortytw2/leaktest
- go get . ./aws/... ./config/... ./trace/... ./uritemplates/...
- while ! nc -z localhost 9200; do sleep 1; done
- while ! nc -z localhost 9210; do sleep 1; done
install: true # ignore the go get -t -v ./...
script:
- go test -race -deprecations -strict-decoder -v . ./aws/... ./config/... ./trace/... ./uritemplates/...

View file

@ -17,6 +17,7 @@ Alex [@akotlar](https://github.com/akotlar)
Alexander Sack [@asac](https://github.com/asac)
Alexandre Olivier [@aliphen](https://github.com/aliphen)
Alexey Sharov [@nizsheanez](https://github.com/nizsheanez)
Aman Jain [@amanjain97](https://github.com/amanjain97)
Anders [@ANerd](https://github.com/ANerd)
AndreKR [@AndreKR](https://github.com/AndreKR)
André Bierlein [@ligustah](https://github.com/ligustah)
@ -39,6 +40,7 @@ Bryan Conklin [@bmconklin](https://github.com/bmconklin)
Bruce Zhou [@brucez-isell](https://github.com/brucez-isell)
Carl Dunham [@carldunham](https://github.com/carldunham)
Carl Johan Gustavsson [@cjgu](https://github.com/cjgu)
Carson [@carson0321](https://github.com/carson0321)
Cat [@cat-turner](https://github.com/cat-turner)
César Jiménez [@cesarjimenez](https://github.com/cesarjimenez)
cforbes [@cforbes](https://github.com/cforbes)
@ -55,14 +57,17 @@ Connor Peet [@connor4312](https://github.com/connor4312)
Conrad Pankoff [@deoxxa](https://github.com/deoxxa)
Corey Scott [@corsc](https://github.com/corsc)
Chris Petersen [@ex-nerd](https://github.com/ex-nerd)
czxichen [@czxichen](https://github.com/czxichen)
Daniel Barrett [@shendaras](https://github.com/shendaras)
Daniel Heckrath [@DanielHeckrath](https://github.com/DanielHeckrath)
Daniel Imfeld [@dimfeld](https://github.com/dimfeld)
Daniel Santos [@danlsgiga](https://github.com/danlsgiga)
David Emanuel Buchmann [@wuurrd](https://github.com/wuurrd)
Devin Christensen [@quixoten](https://github.com/quixoten)
diacone [@diacone](https://github.com/diacone)
Diego Becciolini [@itizir](https://github.com/itizir)
Dwayne Schultz [@myshkin5](https://github.com/myshkin5)
Elizabeth Jarrett [@mejarrett](https://github.com/mejarrett)
Elliot Williams [@elliotwms](https://github.com/elliotwms)
Ellison Leão [@ellisonleao](https://github.com/ellisonleao)
Emil Gedda [@EmilGedda](https://github.com/EmilGedda)
@ -84,12 +89,14 @@ Guillaume J. Charmes [@creack](https://github.com/creack)
Guiseppe [@gm42](https://github.com/gm42)
Han Yu [@MoonighT](https://github.com/MoonighT)
Harmen [@alicebob](https://github.com/alicebob)
Haroldo Vélez [@Haroldov](https://github.com/Haroldov)
Harrison Wright [@wright8191](https://github.com/wright8191)
Henry Clifford [@hcliff](https://github.com/hcliff)
Henry Stern [@hstern](https://github.com/hstern)
Igor Dubinskiy [@idubinskiy](https://github.com/idubinskiy)
initialcontext [@initialcontext](https://github.com/initialcontext)
Isaac Saldana [@isaldana](https://github.com/isaldana)
Ishan Jain [@ishanjain28](https://github.com/ishanjain28)
J Barkey Wolf [@jjhbw](https://github.com/jjhbw)
Jack Lindamood [@cep21](https://github.com/cep21)
Jacob [@jdelgad](https://github.com/jdelgad)
@ -113,6 +120,7 @@ Josh Chorlton [@jchorl](https://github.com/jchorl)
Jpnock [@Jpnock](https://github.com/Jpnock)
jun [@coseyo](https://github.com/coseyo)
Junpei Tsuji [@jun06t](https://github.com/jun06t)
Karen Yang [@kyangtt](https://github.com/kyangtt)
kartlee [@kartlee](https://github.com/kartlee)
Keith Hatton [@khatton-ft](https://github.com/khatton-ft)
kel [@liketic](https://github.com/liketic)
@ -142,6 +150,7 @@ navins [@ishare](https://github.com/ishare)
Naoya Tsutsumi [@tutuming](https://github.com/tutuming)
Nathan Lacey [@nlacey](https://github.com/nlacey)
NeoCN [@NeoCN](https://github.com/NeoCN)
Nguyen Xuan Dung [@dungnx](https://github.com/dungnx)
Nicholas Wolff [@nwolff](https://github.com/nwolff)
Nick K [@utrack](https://github.com/utrack)
Nick Whyte [@nickw444](https://github.com/nickw444)
@ -150,6 +159,7 @@ okhowang [@okhowang](https://github.com/okhowang)
Orne Brocaar [@brocaar](https://github.com/brocaar)
Paul [@eyeamera](https://github.com/eyeamera)
Paul Oldenburg [@lr-paul](https://github.com/lr-paul)
Pedro [@otherview](https://github.com/otherview)
Pete C [@peteclark-ft](https://github.com/peteclark-ft)
Peter Nagy [@nagypeterjob](https://github.com/nagypeterjob)
Paolo [@ppiccolo](https://github.com/ppiccolo)

3
vendor/github.com/olivere/elastic/v7/Makefile generated vendored Normal file
View file

@ -0,0 +1,3 @@
# Run the full test suite with the race detector enabled, covering the
# root package and all sub-packages (aws, config, trace, uritemplates).
# The -deprecations and -strict-decoder flags are custom flags defined
# by this package's test setup.
.PHONY: test
test:
	go test -race -deprecations -strict-decoder -v . ./aws/... ./config/... ./trace/... ./uritemplates/...

View file

@ -5,10 +5,9 @@
Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the
[Go](http://www.golang.org/) programming language.
[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v6)](https://travis-ci.org/olivere/elastic)
[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/github.com/olivere/elastic)
[![Build Status](https://github.com/olivere/elastic/workflows/Test/badge.svg)](https://github.com/olivere/elastic/actions)
[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://pkg.go.dev/github.com/olivere/elastic/v7?tab=doc)
[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Folivere%2Felastic.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Folivere%2Felastic?ref=badge_shield)
See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic.
@ -107,11 +106,9 @@ to rewrite your application big time. More often than not it's renaming APIs
and adding/removing features so that Elastic is in sync with Elasticsearch.
Elastic has been used in production starting with Elasticsearch 0.90 up to recent 7.x
versions. Furthermore, we use [Travis CI](https://travis-ci.org/)
to test Elastic with the most recent versions of Elasticsearch and Go.
See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml)
file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic)
for the results.
versions.
We recently switched to [GitHub Actions for testing](https://github.com/olivere/elastic/actions).
Before that, we used [Travis CI](https://travis-ci.org/olivere/elastic) successfully for years.
Elasticsearch has quite a few features. Most of them are implemented
by Elastic. I add features and APIs as required. It's straightforward
@ -259,7 +256,7 @@ Here are a few tips on how to get used to Elastic:
- [x] Indices Segments
- [ ] Indices Recovery
- [ ] Indices Shard Stores
- [ ] Clear Cache
- [x] Clear Cache
- [x] Flush
- [x] Synced Flush
- [x] Refresh
@ -362,10 +359,10 @@ Here are a few tips on how to get used to Elastic:
- [x] Script Score Query
- [x] Percolate Query
- Span queries
- [ ] Span Term Query
- [x] Span Term Query
- [ ] Span Multi Term Query
- [ ] Span First Query
- [ ] Span Near Query
- [x] Span First Query
- [x] Span Near Query
- [ ] Span Or Query
- [ ] Span Not Query
- [ ] Span Containing Query
@ -427,6 +424,3 @@ by Joshua Tacoma,
MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
or the LICENSE file provided in the repository for details.
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Folivere%2Felastic.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Folivere%2Felastic?ref=badge_large)

View file

@ -5,7 +5,7 @@
package elastic
// AcknowledgedResponse is returned from various APIs. It simply indicates
// whether the operation is ack'd or not.
// whether the operation is acknowledged or not.
type AcknowledgedResponse struct {
Acknowledged bool `json:"acknowledged"`
ShardsAcknowledged bool `json:"shards_acknowledged"`

View file

@ -137,7 +137,7 @@ func (s *BulkService) Timeout(timeout string) *BulkService {
// Refresh controls when changes made by this request are made visible
// to search. The allowed values are: "true" (refresh the relevant
// primary and replica shards immediately), "wait_for" (wait for the
// changes to be made visible by a refresh before reying), or "false"
// changes to be made visible by a refresh before replying), or "false"
// (no refresh related actions). The default value is "false".
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html

View file

@ -202,11 +202,7 @@ func easyjson8092efb6DecodeGithubComOlivereElasticV71(in *jlexer.Lexer, out *bul
in.Skip()
} else {
in.Delim('{')
if !in.IsDelim('}') {
*out = make(bulkDeleteRequestCommand)
} else {
*out = nil
}
*out = make(bulkDeleteRequestCommand)
for !in.IsDelim('}') {
key := string(in.String())
in.WantColon()

View file

@ -242,11 +242,7 @@ func easyjson9de0fcbfDecodeGithubComOlivereElasticV71(in *jlexer.Lexer, out *bul
in.Skip()
} else {
in.Delim('{')
if !in.IsDelim('}') {
*out = make(bulkIndexRequestCommand)
} else {
*out = nil
}
*out = make(bulkIndexRequestCommand)
for !in.IsDelim('}') {
key := string(in.String())
in.WantColon()

View file

@ -429,11 +429,7 @@ func easyjson1ed00e60DecodeGithubComOlivereElasticV72(in *jlexer.Lexer, out *bul
in.Skip()
} else {
in.Delim('{')
if !in.IsDelim('}') {
*out = make(bulkUpdateRequestCommand)
} else {
*out = nil
}
*out = make(bulkUpdateRequestCommand)
for !in.IsDelim('}') {
key := string(in.String())
in.WantColon()

View file

@ -4,7 +4,10 @@
package elastic
import "net/url"
import (
"net/url"
"strings"
)
// canonicalize takes a list of URLs and returns its canonicalized form, i.e.
// remove anything but scheme, userinfo, host, path, and port.
@ -14,15 +17,17 @@ import "net/url"
// Example:
// http://127.0.0.1:9200/?query=1 -> http://127.0.0.1:9200
// http://127.0.0.1:9200/db1/ -> http://127.0.0.1:9200/db1
// http://127.0.0.1:9200/db1/// -> http://127.0.0.1:9200/db1
func canonicalize(rawurls ...string) []string {
var canonicalized []string
for _, rawurl := range rawurls {
u, err := url.Parse(rawurl)
if err == nil {
if u.Scheme == "http" || u.Scheme == "https" {
// Trim trailing slashes
for len(u.Path) > 0 && u.Path[len(u.Path)-1] == '/' {
u.Path = u.Path[0 : len(u.Path)-1]
// Trim trailing slashes. Notice that strings.TrimSuffix will only remove the last slash,
// not all slashes from the suffix, so we'll loop over the path to remove all slashes.
for strings.HasSuffix(u.Path, "/") {
u.Path = u.Path[:len(u.Path)-1]
}
u.Fragment = ""
u.RawQuery = ""

View file

@ -189,6 +189,22 @@ func (s *CatIndicesService) buildURL() (string, url.Values, error) {
params.Set("master_timeout", s.masterTimeout)
}
if len(s.columns) > 0 {
// loop through all columns and apply alias if needed
for i, column := range s.columns {
if fullValueRaw, isAliased := catIndicesResponseRowAliasesMap[column]; isAliased {
// alias can be translated to multiple fields,
// so if the translated value contains a comma, then replace the first value
// and append the others
if strings.Contains(fullValueRaw, ",") {
fullValues := strings.Split(fullValueRaw, ",")
s.columns[i] = fullValues[0]
s.columns = append(s.columns, fullValues[1:]...)
} else {
s.columns[i] = fullValueRaw
}
}
}
params.Set("h", strings.Join(s.columns, ","))
}
if s.health != "" {
@ -372,3 +388,144 @@ type CatIndicesResponseRow struct {
MemoryTotal string `json:"memory.total"` // total user memory on primaries & replicas, e.g. "1.5kb"
PriMemoryTotal string `json:"pri.memory.total"` // total user memory on primaries, e.g. "1.5kb"
}
// catIndicesResponseRowAliasesMap holds the global map for columns aliases
// the map is used by CatIndicesService.buildURL
// for backwards compatibility some fields are able to have the same aliases
// that means that one alias can be translated to different columns (from different elastic versions)
// example for understanding: rto -> RefreshTotal, RefreshExternalTotal
var catIndicesResponseRowAliasesMap = map[string]string{
"qce": "query_cache.evictions",
"searchFetchTime": "search.fetch_time",
"memoryTotal": "memory.total",
"requestCacheEvictions": "request_cache.evictions",
"ftt": "flush.total_time",
"iic": "indexing.index_current",
"mtt": "merges.total_time",
"scti": "search.scroll_time",
"searchScrollTime": "search.scroll_time",
"segmentsCount": "segments.count",
"getTotal": "get.total",
"sfti": "search.fetch_time",
"searchScrollCurrent": "search.scroll_current",
"svmm": "segments.version_map_memory",
"warmerTotalTime": "warmer.total_time",
"r": "rep",
"indexingIndexTime": "indexing.index_time",
"refreshTotal": "refresh.total,refresh.external_total",
"scc": "search.scroll_current",
"suggestTime": "suggest.time",
"idc": "indexing.delete_current",
"rti": "refresh.time,refresh.external_time",
"sfto": "search.fetch_total",
"completionSize": "completion.size",
"mt": "merges.total",
"segmentsVersionMapMemory": "segments.version_map_memory",
"rto": "refresh.total,refresh.external_total",
"id": "uuid",
"dd": "docs.deleted",
"docsDeleted": "docs.deleted",
"fielddataMemory": "fielddata.memory_size",
"getTime": "get.time",
"getExistsTime": "get.exists_time",
"mtd": "merges.total_docs",
"rli": "refresh.listeners",
"h": "health",
"cds": "creation.date.string",
"rcmc": "request_cache.miss_count",
"iif": "indexing.index_failed",
"warmerCurrent": "warmer.current",
"gti": "get.time",
"indexingIndexFailed": "indexing.index_failed",
"mts": "merges.total_size",
"sqti": "search.query_time",
"segmentsIndexWriterMemory": "segments.index_writer_memory",
"iiti": "indexing.index_time",
"iito": "indexing.index_total",
"cd": "creation.date",
"gc": "get.current",
"searchFetchTotal": "search.fetch_total",
"sqc": "search.query_current",
"segmentsMemory": "segments.memory",
"dc": "docs.count",
"qcm": "query_cache.memory_size",
"queryCacheMemory": "query_cache.memory_size",
"mergesTotalDocs": "merges.total_docs",
"searchOpenContexts": "search.open_contexts",
"shards.primary": "pri",
"cs": "completion.size",
"mergesTotalTIme": "merges.total_time",
"wtt": "warmer.total_time",
"mergesCurrentSize": "merges.current_size",
"mergesTotal": "merges.total",
"refreshTime": "refresh.time,refresh.external_time",
"wc": "warmer.current",
"p": "pri",
"idti": "indexing.delete_time",
"searchQueryCurrent": "search.query_current",
"warmerTotal": "warmer.total",
"suggestTotal": "suggest.total",
"tm": "memory.total",
"ss": "store.size",
"ft": "flush.total",
"getExistsTotal": "get.exists_total",
"scto": "search.scroll_total",
"s": "status",
"queryCacheEvictions": "query_cache.evictions",
"rce": "request_cache.evictions",
"geto": "get.exists_total",
"refreshListeners": "refresh.listeners",
"suto": "suggest.total",
"storeSize": "store.size",
"gmti": "get.missing_time",
"indexingIdexCurrent": "indexing.index_current",
"searchFetchCurrent": "search.fetch_current",
"idx": "index",
"fm": "fielddata.memory_size",
"geti": "get.exists_time",
"indexingDeleteCurrent": "indexing.delete_current",
"mergesCurrentDocs": "merges.current_docs",
"sth": "search.throttled",
"flushTotal": "flush.total",
"sfc": "search.fetch_current",
"wto": "warmer.total",
"suti": "suggest.time",
"shardsReplica": "rep",
"mergesCurrent": "merges.current",
"mcs": "merges.current_size",
"so": "search.open_contexts",
"i": "index",
"siwm": "segments.index_writer_memory",
"sfbm": "segments.fixed_bitset_memory",
"fe": "fielddata.evictions",
"requestCacheMissCount": "request_cache.miss_count",
"idto": "indexing.delete_total",
"mergesTotalSize": "merges.total_size",
"suc": "suggest.current",
"suggestCurrent": "suggest.current",
"flushTotalTime": "flush.total_time",
"getMissingTotal": "get.missing_total",
"sqto": "search.query_total",
"searchScrollTotal": "search.scroll_total",
"fixedBitsetMemory": "segments.fixed_bitset_memory",
"getMissingTime": "get.missing_time",
"indexingDeleteTotal": "indexing.delete_total",
"mcd": "merges.current_docs",
"docsCount": "docs.count",
"gto": "get.total",
"mc": "merges.current",
"fielddataEvictions": "fielddata.evictions",
"rcm": "request_cache.memory_size",
"requestCacheHitCount": "request_cache.hit_count",
"gmto": "get.missing_total",
"searchQueryTime": "search.query_time",
"shards.replica": "rep",
"requestCacheMemory": "request_cache.memory_size",
"rchc": "request_cache.hit_count",
"getCurrent": "get.current",
"indexingIndexTotal": "indexing.index_total",
"sc": "segments.count,segments.memory",
"shardsPrimary": "pri",
"indexingDeleteTime": "indexing.delete_time",
"searchQueryTotal": "search.query_total",
}

389
vendor/github.com/olivere/elastic/v7/cat_shards.go generated vendored Normal file
View file

@ -0,0 +1,389 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"context"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/olivere/elastic/v7/uritemplates"
)
// CatShardsService returns the list of shards plus some additional
// information about them.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.6/cat-shards.html
// for details.
type CatShardsService struct {
	client        *Client     // client used to perform the request
	pretty        *bool       // pretty format the returned JSON response
	human         *bool       // return human readable values for statistics
	errorTrace    *bool       // include the stack trace of returned errors
	filterPath    []string    // list of filters used to reduce the response
	index         []string    // indices to restrict the request to (default: all)
	bytes         string      // b, k, kb, m, mb, g, gb, t, tb, p, or pb
	local         *bool       // if true, do not retrieve the state from master node
	masterTimeout string      // explicit operation timeout for connection to master node
	columns       []string    // columns to return ("h" query string parameter)
	time          string      // d, h, m, s, ms, micros, or nanos
	sort          []string    // list of columns for sort order ("s" query string parameter)
	headers       http.Header // custom headers to add to the request
}
// NewCatShardsService creates a new CatShardsService.
func NewCatShardsService(client *Client) *CatShardsService {
	s := new(CatShardsService)
	s.client = client
	return s
}
// Pretty tells Elasticsearch whether to return a formatted (indented)
// JSON response.
func (s *CatShardsService) Pretty(pretty bool) *CatShardsService {
	s.pretty = &pretty
	return s
}
// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb" instead of a raw byte count.
func (s *CatShardsService) Human(human bool) *CatShardsService {
	s.human = &human
	return s
}
// ErrorTrace specifies whether to include the stack trace of returned errors
// in the response.
func (s *CatShardsService) ErrorTrace(errorTrace bool) *CatShardsService {
	s.errorTrace = &errorTrace
	return s
}
// FilterPath specifies a list of filters used to reduce the response
// returned by Elasticsearch.
func (s *CatShardsService) FilterPath(filterPath ...string) *CatShardsService {
	s.filterPath = filterPath
	return s
}
// Header adds a single header to the request, creating the underlying
// header map on first use.
func (s *CatShardsService) Header(name string, value string) *CatShardsService {
	if s.headers == nil {
		s.headers = make(http.Header)
	}
	s.headers.Add(name, value)
	return s
}
// Headers specifies the headers of the request, replacing any headers
// previously set via Header.
func (s *CatShardsService) Headers(headers http.Header) *CatShardsService {
	s.headers = headers
	return s
}
// Index is the name of the index to list (by default all indices are returned).
func (s *CatShardsService) Index(index ...string) *CatShardsService {
	s.index = index
	return s
}
// Bytes represents the unit in which to display byte values.
// Valid values are: "b", "k", "kb", "m", "mb", "g", "gb", "t", "tb", "p" or "pb".
func (s *CatShardsService) Bytes(bytes string) *CatShardsService {
	s.bytes = bytes
	return s
}
// Local indicates to return local information, i.e. do not retrieve
// the state from master node (default: false).
func (s *CatShardsService) Local(local bool) *CatShardsService {
	s.local = &local
	return s
}
// MasterTimeout is the explicit operation timeout for connection to master node.
func (s *CatShardsService) MasterTimeout(masterTimeout string) *CatShardsService {
	s.masterTimeout = masterTimeout
	return s
}
// Columns specifies which columns to return in the response
// (the "h" query string parameter).
//
// To get a list of all possible columns to return, run the following command
// in your terminal:
//
// Example:
//	curl 'http://localhost:9200/_cat/shards?help'
//
// You can use Columns("*") to return all possible columns. That might take
// a little longer than the default set of columns.
func (s *CatShardsService) Columns(columns ...string) *CatShardsService {
	s.columns = columns
	return s
}
// Sort is a list of fields to sort by (the "s" query string parameter).
func (s *CatShardsService) Sort(fields ...string) *CatShardsService {
	s.sort = fields
	return s
}
// Time specifies the unit in which time values are formatted,
// e.g. "d", "h", "m", "s", "ms", "micros", or "nanos".
func (s *CatShardsService) Time(time string) *CatShardsService {
	s.time = time
	return s
}
// buildURL builds the URL for the operation and the accompanying query
// string parameters. It never modifies the service's configuration.
func (s *CatShardsService) buildURL() (string, url.Values, error) {
	// Build URL
	var (
		path string
		err  error
	)

	if len(s.index) > 0 {
		path, err = uritemplates.Expand("/_cat/shards/{index}", map[string]string{
			"index": strings.Join(s.index, ","),
		})
	} else {
		path = "/_cat/shards"
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{
		"format": []string{"json"}, // always returns as JSON
	}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.bytes != "" {
		params.Set("bytes", s.bytes)
	}
	if s.time != "" {
		params.Set("time", s.time)
	}
	if v := s.local; v != nil {
		params.Set("local", fmt.Sprint(*v))
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	if len(s.columns) > 0 {
		// Loop through all columns and apply aliases where needed.
		// Work on a copy so that expanding aliases does not mutate the
		// service's configured column list (s.columns) as a side effect.
		columns := append([]string(nil), s.columns...)
		for i, column := range columns {
			if fullValueRaw, isAliased := catShardsResponseRowAliasesMap[column]; isAliased {
				// An alias can be translated to multiple fields,
				// so if the translated value contains a comma, then replace
				// the current entry with the first field and append the others.
				// Note: range evaluates len(columns) once, so the appended
				// (already-translated) entries are not re-processed.
				if strings.Contains(fullValueRaw, ",") {
					fullValues := strings.Split(fullValueRaw, ",")
					columns[i] = fullValues[0]
					columns = append(columns, fullValues[1:]...)
				} else {
					columns[i] = fullValueRaw
				}
			}
		}
		params.Set("h", strings.Join(columns, ","))
	}
	if len(s.sort) > 0 {
		params.Set("s", strings.Join(s.sort, ","))
	}
	return path, params, nil
}
// Do executes the operation: it builds the request URL, performs a GET
// against the _cat/shards endpoint, and decodes the JSON body into a
// CatShardsResponse.
func (s *CatShardsService) Do(ctx context.Context) (CatShardsResponse, error) {
	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "GET",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	var ret CatShardsResponse
	if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
		return nil, err
	}
	return ret, nil
}
// -- Result of a get request.

// CatShardsResponse is the outcome of CatShardsService.Do,
// one row per shard.
type CatShardsResponse []CatShardsResponseRow
// CatShardsResponseRow specifies the data returned for one shard
// of a CatShardsResponse. Notice that not all of these fields might
// be filled; that depends on the number of columns chosen in the
// request (see CatShardsService.Columns).
type CatShardsResponseRow struct {
	Index                     string `json:"index"`                            // index name
	UUID                      string `json:"uuid"`                             // index uuid
	Shard                     int    `json:"shard,string"`                     // shard number, e.g. 1
	Prirep                    string `json:"prirep"`                           // "r" for replica, "p" for primary
	State                     string `json:"state"`                            // STARTED, INITIALIZING, RELOCATING, or UNASSIGNED
	Docs                      int64  `json:"docs,string"`                      // number of documents, e.g. 142847
	Store                     string `json:"store"`                            // size, e.g. "40mb"
	IP                        string `json:"ip"`                               // IP address
	ID                        string `json:"id"`
	Node                      string `json:"node"` // Node name
	SyncID                    string `json:"sync_id"`
	UnassignedReason          string `json:"unassigned.reason"`
	UnassignedAt              string `json:"unassigned.at"`
	UnassignedFor             string `json:"unassigned.for"`
	UnassignedDetails         string `json:"unassigned.details"`
	RecoverysourceType        string `json:"recoverysource.type"`
	CompletionSize            string `json:"completion.size"`                  // size of completion on primaries & replicas
	FielddataMemorySize       string `json:"fielddata.memory_size"`            // used fielddata cache on primaries & replicas
	FielddataEvictions        int    `json:"fielddata.evictions,string"`       // fielddata evictions on primaries & replicas
	QueryCacheMemorySize      string `json:"query_cache.memory_size"`          // used query cache on primaries & replicas
	QueryCacheEvictions       int    `json:"query_cache.evictions,string"`     // query cache evictions on primaries & replicas
	FlushTotal                int    `json:"flush.total,string"`               // number of flushes on primaries & replicas
	FlushTotalTime            string `json:"flush.total_time"`                 // time spent in flush on primaries & replicas
	GetCurrent                int    `json:"get.current,string"`               // number of current get ops on primaries & replicas
	GetTime                   string `json:"get.time"`                         // time spent in get on primaries & replicas
	GetTotal                  int    `json:"get.total,string"`                 // number of get ops on primaries & replicas
	GetExistsTime             string `json:"get.exists_time"`                  // time spent in successful gets on primaries & replicas
	GetExistsTotal            int    `json:"get.exists_total,string"`          // number of successful gets on primaries & replicas
	GetMissingTime            string `json:"get.missing_time"`                 // time spent in failed gets on primaries & replicas
	GetMissingTotal           int    `json:"get.missing_total,string"`         // number of failed gets on primaries & replicas
	IndexingDeleteCurrent     int    `json:"indexing.delete_current,string"`   // number of current deletions on primaries & replicas
	IndexingDeleteTime        string `json:"indexing.delete_time"`             // time spent in deletions on primaries & replicas
	IndexingDeleteTotal       int    `json:"indexing.delete_total,string"`     // number of delete ops on primaries & replicas
	IndexingIndexCurrent      int    `json:"indexing.index_current,string"`    // number of current indexing on primaries & replicas
	IndexingIndexTime         string `json:"indexing.index_time"`              // time spent in indexing on primaries & replicas
	IndexingIndexTotal        int    `json:"indexing.index_total,string"`      // number of index ops on primaries & replicas
	IndexingIndexFailed       int    `json:"indexing.index_failed,string"`     // number of failed indexing ops on primaries & replicas
	MergesCurrent             int    `json:"merges.current,string"`            // number of current merges on primaries & replicas
	MergesCurrentDocs         int    `json:"merges.current_docs,string"`       // number of current merging docs on primaries & replicas
	MergesCurrentSize         string `json:"merges.current_size"`              // size of current merges on primaries & replicas
	MergesTotal               int    `json:"merges.total,string"`              // number of completed merge ops on primaries & replicas
	MergesTotalDocs           int    `json:"merges.total_docs,string"`         // docs merged on primaries & replicas
	MergesTotalSize           string `json:"merges.total_size"`                // size merged on primaries & replicas
	MergesTotalTime           string `json:"merges.total_time"`                // time spent in merges on primaries & replicas
	RefreshTotal              int    `json:"refresh.total,string"`             // total refreshes on primaries & replicas
	RefreshExternalTotal      int    `json:"refresh.external_total,string"`    // total external refreshes on primaries & replicas
	RefreshTime               string `json:"refresh.time"`                     // time spent in refreshes on primaries & replicas
	RefreshExternalTime       string `json:"refresh.external_time"`            // external time spent in refreshes on primaries & replicas
	RefreshListeners          int    `json:"refresh.listeners,string"`         // number of pending refresh listeners on primaries & replicas
	SearchFetchCurrent        int    `json:"search.fetch_current,string"`      // current fetch phase ops on primaries & replicas
	SearchFetchTime           string `json:"search.fetch_time"`                // time spent in fetch phase on primaries & replicas
	SearchFetchTotal          int    `json:"search.fetch_total,string"`        // total fetch ops on primaries & replicas
	SearchOpenContexts        int    `json:"search.open_contexts,string"`      // open search contexts on primaries & replicas
	SearchQueryCurrent        int    `json:"search.query_current,string"`      // current query phase ops on primaries & replicas
	SearchQueryTime           string `json:"search.query_time"`                // time spent in query phase on primaries & replicas, e.g. "0s"
	SearchQueryTotal          int    `json:"search.query_total,string"`        // total query phase ops on primaries & replicas
	SearchScrollCurrent       int    `json:"search.scroll_current,string"`     // open scroll contexts on primaries & replicas
	SearchScrollTime          string `json:"search.scroll_time"`               // time scroll contexts held open on primaries & replicas, e.g. "0s"
	SearchScrollTotal         int    `json:"search.scroll_total,string"`       // completed scroll contexts on primaries & replicas
	SearchThrottled           bool   `json:"search.throttled,string"`          // indicates if the index is search throttled
	SegmentsCount             int    `json:"segments.count,string"`            // number of segments on primaries & replicas
	SegmentsMemory            string `json:"segments.memory"`                  // memory used by segments on primaries & replicas, e.g. "1.3kb"
	SegmentsIndexWriterMemory string `json:"segments.index_writer_memory"`     // memory used by index writer on primaries & replicas, e.g. "0b"
	SegmentsVersionMapMemory  string `json:"segments.version_map_memory"`      // memory used by version map on primaries & replicas, e.g. "0b"
	SegmentsFixedBitsetMemory string `json:"segments.fixed_bitset_memory"`     // memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields on primaries & replicas, e.g. "0b"
	SeqNoMax                  int    `json:"seq_no.max,string"`
	SeqNoLocalCheckpoint      int    `json:"seq_no.local_checkpoint,string"`
	SeqNoGlobalCheckpoint     int    `json:"seq_no.global_checkpoint,string"`
	WarmerCurrent             int    `json:"warmer.current,string"`            // current warmer ops on primaries & replicas
	WarmerTotal               int    `json:"warmer.total,string"`              // total warmer ops on primaries & replicas
	WarmerTotalTime           string `json:"warmer.total_time"`                // time spent in warmers on primaries & replicas, e.g. "47s"
	PathData                  string `json:"path.data"`
	PathState                 string `json:"path.state"`
}
// catShardsResponseRowAliasesMap holds the global mapping from short column
// aliases accepted by the cat shards API (e.g. "sqc") to their full column
// names (e.g. "search.query_current"). The map is used by
// CatShardsService.buildURL to translate user-supplied column aliases.
//
// For backwards compatibility some columns share an alias across Elasticsearch
// versions, i.e. one alias could be translated to different columns; only one
// target can be kept here.
// Example: "rto" -> RefreshTotal, RefreshExternalTotal.
var catShardsResponseRowAliasesMap = map[string]string{
	"sync_id": "sync_id",
	"ur":      "unassigned.reason",
	"ua":      "unassigned.at",
	"uf":      "unassigned.for",
	"ud":      "unassigned.details",
	"rs":      "recoverysource.type",
	"cs":      "completion.size",
	"fm":      "fielddata.memory_size",
	"fe":      "fielddata.evictions",
	"qcm":     "query_cache.memory_size",
	"qce":     "query_cache.evictions",
	"ft":      "flush.total",
	"ftt":     "flush.total_time",
	"gc":      "get.current",
	"gti":     "get.time",
	"gto":     "get.total",
	"geti":    "get.exists_time",
	"geto":    "get.exists_total",
	"gmti":    "get.missing_time",
	"gmto":    "get.missing_total",
	"idc":     "indexing.delete_current",
	"idti":    "indexing.delete_time",
	"idto":    "indexing.delete_total",
	"iic":     "indexing.index_current",
	"iiti":    "indexing.index_time",
	"iito":    "indexing.index_total",
	"iif":     "indexing.index_failed",
	"mc":      "merges.current",
	"mcd":     "merges.current_docs",
	"mcs":     "merges.current_size",
	"mt":      "merges.total",
	"mtd":     "merges.total_docs",
	"mts":     "merges.total_size",
	"mtt":     "merges.total_time",
	"rto":     "refresh.total",
	"rti":     "refresh.time",
	// Newer Elasticsearch versions reuse these aliases for the external
	// refresh counters; kept here as a record of the conflict.
	// "rto": "refresh.external_total",
	// "rti": "refresh.external_time",
	"rli":  "refresh.listeners",
	"sfc":  "search.fetch_current",
	"sfti": "search.fetch_time",
	"sfto": "search.fetch_total",
	"so":   "search.open_contexts",
	"sqc":  "search.query_current",
	"sqti": "search.query_time",
	"sqto": "search.query_total",
	"scc":  "search.scroll_current",
	"scti": "search.scroll_time",
	"scto": "search.scroll_total",
	"sc":   "segments.count",
	"sm":   "segments.memory",
	"siwm": "segments.index_writer_memory",
	"svmm": "segments.version_map_memory",
	"sfbm": "segments.fixed_bitset_memory",
	"sqm":  "seq_no.max",
	"sql":  "seq_no.local_checkpoint",
	"sqg":  "seq_no.global_checkpoint",
	"wc":   "warmer.current",
	"wto":  "warmer.total",
	"wtt":  "warmer.total_time",
}

View file

@ -14,7 +14,6 @@ import (
"net/http/httputil"
"net/url"
"os"
"regexp"
"strings"
"sync"
"time"
@ -26,7 +25,7 @@ import (
const (
// Version is the current version of Elastic.
Version = "7.0.9"
Version = "7.0.20"
// DefaultURL is the default endpoint of Elasticsearch on the local machine.
// It is used e.g. when initializing a new Client without a specific URL.
@ -140,7 +139,6 @@ type Client struct {
snifferCallback SnifferCallback // callback to modify the sniffing decision
snifferStop chan bool // notify sniffer to stop, and notify back
decoder Decoder // used to decode data sent from Elasticsearch
basicAuth bool // indicates whether to send HTTP Basic Auth credentials
basicAuthUsername string // username for HTTP Basic Auth
basicAuthPassword string // password for HTTP Basic Auth
sendGetBodyAs string // override for when sending a GET with a body
@ -266,11 +264,10 @@ func NewSimpleClient(options ...ClientOptionFunc) (*Client, error) {
c.urls = canonicalize(c.urls...)
// If the URLs have auth info, use them here as an alternative to SetBasicAuth
if !c.basicAuth {
if c.basicAuthUsername == "" && c.basicAuthPassword == "" {
for _, urlStr := range c.urls {
u, err := url.Parse(urlStr)
if err == nil && u.User != nil {
c.basicAuth = true
c.basicAuthUsername = u.User.Username()
c.basicAuthPassword, _ = u.User.Password()
break
@ -352,11 +349,10 @@ func DialContext(ctx context.Context, options ...ClientOptionFunc) (*Client, err
c.urls = canonicalize(c.urls...)
// If the URLs have auth info, use them here as an alternative to SetBasicAuth
if !c.basicAuth {
if c.basicAuthUsername == "" && c.basicAuthPassword == "" {
for _, urlStr := range c.urls {
u, err := url.Parse(urlStr)
if err == nil && u.User != nil {
c.basicAuth = true
c.basicAuthUsername = u.User.Username()
c.basicAuthPassword, _ = u.User.Password()
break
@ -465,11 +461,9 @@ func configToOptions(cfg *config.Config) ([]ClientOptionFunc, error) {
if cfg.Sniff != nil {
options = append(options, SetSniff(*cfg.Sniff))
}
/*
if cfg.Healthcheck != nil {
options = append(options, SetHealthcheck(*cfg.Healthcheck))
}
*/
if cfg.Healthcheck != nil {
options = append(options, SetHealthcheck(*cfg.Healthcheck))
}
}
return options, nil
}
@ -493,7 +487,6 @@ func SetBasicAuth(username, password string) ClientOptionFunc {
return func(c *Client) error {
c.basicAuthUsername = username
c.basicAuthPassword = password
c.basicAuth = c.basicAuthUsername != "" || c.basicAuthPassword != ""
return nil
}
}
@ -509,6 +502,12 @@ func SetURL(urls ...string) ClientOptionFunc {
default:
c.urls = urls
}
// Check URLs
for _, urlStr := range c.urls {
if _, err := url.Parse(urlStr); err != nil {
return err
}
}
return nil
}
}
@ -817,8 +816,6 @@ func (c *Client) Stop() {
c.infof("elastic: client stopped")
}
var logDeprecation = func(*http.Request, *http.Response) {}
// errorf logs to the error log.
func (c *Client) errorf(format string, args ...interface{}) {
if c.errorlog != nil {
@ -967,7 +964,7 @@ func (c *Client) sniffNode(ctx context.Context, url string) []*conn {
}
c.mu.RLock()
if c.basicAuth {
if c.basicAuthUsername != "" || c.basicAuthPassword != "" {
req.SetBasicAuth(c.basicAuthUsername, c.basicAuthPassword)
}
c.mu.RUnlock()
@ -996,25 +993,24 @@ func (c *Client) sniffNode(ctx context.Context, url string) []*conn {
return nodes
}
// reSniffHostAndPort is used to extract hostname and port from a result
// from a Nodes Info API (example: "inet[/127.0.0.1:9200]").
var reSniffHostAndPort = regexp.MustCompile(`\/([^:]*):([0-9]+)\]`)
// extractHostname returns the URL from the http.publish_address setting.
func (c *Client) extractHostname(scheme, address string) string {
if strings.HasPrefix(address, "inet") {
m := reSniffHostAndPort.FindStringSubmatch(address)
if len(m) == 3 {
return fmt.Sprintf("%s://%s:%s", scheme, m[1], m[2])
}
var (
host string
port string
addrs = strings.Split(address, "/")
ports = strings.Split(address, ":")
)
if len(addrs) > 1 {
host = addrs[0]
} else {
host = strings.Split(addrs[0], ":")[0]
}
s := address
if idx := strings.Index(s, "/"); idx >= 0 {
s = s[idx+1:]
}
if !strings.Contains(s, ":") {
return ""
}
return fmt.Sprintf("%s://%s", scheme, s)
port = ports[len(ports)-1]
return fmt.Sprintf("%s://%s:%s", scheme, host, port)
}
// updateConns updates the clients' connections with new information
@ -1082,7 +1078,8 @@ func (c *Client) healthcheck(parentCtx context.Context, timeout time.Duration, f
c.mu.RUnlock()
return
}
basicAuth := c.basicAuth
headers := c.headers
basicAuth := c.basicAuthUsername != "" || c.basicAuthPassword != ""
basicAuthUsername := c.basicAuthUsername
basicAuthPassword := c.basicAuthPassword
c.mu.RUnlock()
@ -1108,6 +1105,13 @@ func (c *Client) healthcheck(parentCtx context.Context, timeout time.Duration, f
if basicAuth {
req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
}
if len(headers) > 0 {
for key, values := range headers {
for _, v := range values {
req.Header.Add(key, v)
}
}
}
res, err := c.c.Do((*http.Request)(req).WithContext(ctx))
if res != nil {
status = res.StatusCode
@ -1144,7 +1148,8 @@ func (c *Client) healthcheck(parentCtx context.Context, timeout time.Duration, f
func (c *Client) startupHealthcheck(parentCtx context.Context, timeout time.Duration) error {
c.mu.Lock()
urls := c.urls
basicAuth := c.basicAuth
headers := c.headers
basicAuth := c.basicAuthUsername != "" || c.basicAuthPassword != ""
basicAuthUsername := c.basicAuthUsername
basicAuthPassword := c.basicAuthPassword
c.mu.Unlock()
@ -1162,14 +1167,23 @@ func (c *Client) startupHealthcheck(parentCtx context.Context, timeout time.Dura
if basicAuth {
req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
}
if len(headers) > 0 {
for key, values := range headers {
for _, v := range values {
req.Header.Add(key, v)
}
}
}
ctx, cancel := context.WithTimeout(parentCtx, timeout)
defer cancel()
req = req.WithContext(ctx)
res, err := c.c.Do(req)
if err == nil && res != nil && res.StatusCode >= 200 && res.StatusCode < 300 {
return nil
} else if err != nil {
if err != nil {
lastErr = err
} else if res.StatusCode >= 200 && res.StatusCode < 300 {
return nil
} else if res.StatusCode == http.StatusUnauthorized {
lastErr = &Error{Status: res.StatusCode}
}
}
select {
@ -1183,7 +1197,7 @@ func (c *Client) startupHealthcheck(parentCtx context.Context, timeout time.Dura
}
}
if lastErr != nil {
if IsContextErr(lastErr) {
if IsContextErr(lastErr) || IsUnauthorized(lastErr) {
return lastErr
}
return errors.Wrapf(ErrNoClient, "health check timeout: %v", lastErr)
@ -1270,11 +1284,12 @@ func (c *Client) PerformRequest(ctx context.Context, opt PerformRequestOptions)
c.mu.RLock()
timeout := c.healthcheckTimeout
basicAuth := c.basicAuth
basicAuth := c.basicAuthUsername != "" || c.basicAuthPassword != ""
basicAuthUsername := c.basicAuthUsername
basicAuthPassword := c.basicAuthPassword
sendGetBodyAs := c.sendGetBodyAs
gzipEnabled := c.gzipEnabled
healthcheckEnabled := c.healthcheckEnabled
retrier := c.retrier
if opt.Retrier != nil {
retrier = opt.Retrier
@ -1307,6 +1322,10 @@ func (c *Client) PerformRequest(ctx context.Context, opt PerformRequestOptions)
if !retried {
// Force a healtcheck as all connections seem to be dead.
c.healthcheck(ctx, timeout, false)
if healthcheckEnabled {
retried = true
continue
}
}
wait, ok, rerr := retrier.Retry(ctx, n, nil, nil, err)
if rerr != nil {
@ -1664,6 +1683,11 @@ func (c *Client) SyncedFlush(indices ...string) *IndicesSyncedFlushService {
return NewIndicesSyncedFlushService(c).Index(indices...)
}
// ClearCache clears caches for one or more indices.
func (c *Client) ClearCache(indices ...string) *IndicesClearCacheService {
return NewIndicesClearCacheService(c).Index(indices...)
}
// Alias enables the caller to add and/or remove aliases.
func (c *Client) Alias() *AliasService {
return NewAliasService(c)
@ -1750,6 +1774,11 @@ func (c *Client) CatIndices() *CatIndicesService {
return NewCatIndicesService(c)
}
// CatShards returns information about shards.
func (c *Client) CatShards() *CatShardsService {
return NewCatShardsService(c)
}
// -- Ingest APIs --
// IngestPutPipeline adds pipelines and updates existing pipelines in
@ -1830,10 +1859,10 @@ func (c *Client) TasksGetTask() *TasksGetTaskService {
// -- Snapshot and Restore --
// TODO Snapshot Delete
// TODO Snapshot Get
// TODO Snapshot Restore
// TODO Snapshot Status
// SnapshotStatus returns information about the status of a snapshot.
func (c *Client) SnapshotStatus() *SnapshotStatusService {
return NewSnapshotStatusService(c)
}
// SnapshotCreate creates a snapshot.
func (c *Client) SnapshotCreate(repository string, snapshot string) *SnapshotCreateService {

View file

@ -262,7 +262,7 @@ type clusterBlock struct {
type clusterStateMetadata struct {
ClusterUUID string `json:"cluster_uuid"`
ClusterUUIDCommitted string `json:"cluster_uuid_committed"`
ClusterUUIDCommitted bool `json:"cluster_uuid_committed"`
ClusterCoordination *clusterCoordinationMetaData `json:"cluster_coordination"`
Templates map[string]*indexTemplateMetaData `json:"templates"` // template name -> index template metadata
Indices map[string]*indexMetaData `json:"indices"` // index name _> meta data

View file

@ -35,6 +35,7 @@ type CountService struct {
df string
expandWildcards string
ignoreUnavailable *bool
ignoreThrottled *bool
lenient *bool
lowercaseExpandedTerms *bool
minScore interface{}
@ -163,6 +164,13 @@ func (s *CountService) IgnoreUnavailable(ignoreUnavailable bool) *CountService {
return s
}
// IgnoreThrottled indicates whether specified concrete, expanded or aliased
// indices should be ignored when throttled.
func (s *CountService) IgnoreThrottled(ignoreThrottled bool) *CountService {
s.ignoreThrottled = &ignoreThrottled
return s
}
// Lenient specifies whether format-based query failures (such as
// providing text to a numeric field) should be ignored.
func (s *CountService) Lenient(lenient bool) *CountService {
@ -291,6 +299,9 @@ func (s *CountService) buildURL() (string, url.Values, error) {
if s.ignoreUnavailable != nil {
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
}
if s.ignoreThrottled != nil {
params.Set("ignore_throttled", fmt.Sprintf("%v", *s.ignoreThrottled))
}
if s.lenient != nil {
params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
}

View file

@ -2,12 +2,13 @@ version: '3'
services:
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.4.2
image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.9.0
hostname: elasticsearch
environment:
- cluster.name=elasticsearch
- bootstrap.memory_lock=true
- discovery.type=single-node
# - http.publish_host=localhost
# - http.host=0.0.0.0
# - transport.host=127.0.0.1
# - network.host=_local_
@ -27,16 +28,16 @@ services:
ports:
- 9200:9200
platinum:
image: docker.elastic.co/elasticsearch/elasticsearch:7.4.2
image: docker.elastic.co/elasticsearch/elasticsearch:7.9.0
hostname: elasticsearch-platinum
environment:
- cluster.name=platinum
- bootstrap.memory_lock=true
- discovery.type=single-node
- xpack.ilm.enabled=true
- xpack.license.self_generated.type=trial
- xpack.security.enabled=true
- xpack.watcher.enabled=true
# - http.publish_host=localhost
# - http.host=0.0.0.0
# - transport.host=127.0.0.1
# - network.host=_local_

View file

@ -82,6 +82,21 @@ type ErrorDetails struct {
CausedBy map[string]interface{} `json:"caused_by,omitempty"`
RootCause []*ErrorDetails `json:"root_cause,omitempty"`
FailedShards []map[string]interface{} `json:"failed_shards,omitempty"`
// ScriptException adds the information in the following block.
ScriptStack []string `json:"script_stack,omitempty"` // from ScriptException
Script string `json:"script,omitempty"` // from ScriptException
Lang string `json:"lang,omitempty"` // from ScriptException
Position *ScriptErrorPosition `json:"position,omitempty"` // from ScriptException (7.7+)
}
// ScriptErrorPosition specifies the position of the error
// in a script. It is used in ErrorDetails for scripting errors
// (reported by Elasticsearch 7.7+).
type ScriptErrorPosition struct {
	Offset int `json:"offset"` // offset of the error within the script, as reported by Elasticsearch
	Start  int `json:"start"`  // start of the offending section
	End    int `json:"end"`    // end of the offending section
}
// Error returns a string representation of the error.
@ -92,6 +107,20 @@ func (e *Error) Error() string {
return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status))
}
// ErrorReason returns the reason of an error that Elasticsearch reported.
// It yields a non-empty string only when err is a *Error carrying
// ErrorDetails with a Reason; any other value of err (including nil)
// results in "".
func ErrorReason(err error) string {
	if e, ok := err.(*Error); ok && e != nil && e.Details != nil {
		return e.Details.Reason
	}
	return ""
}
// IsContextErr returns true if the error is from a context that was canceled or deadline exceeded
func IsContextErr(err error) bool {
if err == context.Canceled || err == context.DeadlineExceeded {
@ -137,6 +166,15 @@ func IsConflict(err interface{}) bool {
return IsStatusCode(err, http.StatusConflict)
}
// IsUnauthorized returns true if the given error indicates that
// Elasticsearch returned HTTP status 401 (Unauthorized). This happens
// e.g. when the cluster is configured to require HTTP Basic Auth.
// The err parameter can be of type *elastic.Error, elastic.Error,
// *http.Response or int (indicating the HTTP status code).
func IsUnauthorized(err interface{}) bool {
	return IsStatusCode(err, http.StatusUnauthorized)
}
// IsForbidden returns true if the given error indicates that Elasticsearch
// returned HTTP status 403. This happens e.g. due to a missing license.
// The err parameter can be of type *elastic.Error, elastic.Error,

View file

@ -1,18 +1,16 @@
module github.com/olivere/elastic/v7
go 1.12
go 1.14
require (
github.com/aws/aws-sdk-go v1.25.25
github.com/aws/aws-sdk-go v1.34.13
github.com/fortytw2/leaktest v1.3.0
github.com/golang/mock v1.2.0 // indirect
github.com/google/go-cmp v0.3.1
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e
github.com/opentracing/opentracing-go v1.1.0
github.com/pkg/errors v0.8.1
github.com/google/go-cmp v0.5.2
github.com/mailru/easyjson v0.7.6
github.com/opentracing/opentracing-go v1.2.0
github.com/pkg/errors v0.9.1
github.com/smartystreets/assertions v1.1.1 // indirect
github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9
go.opencensus.io v0.22.1
google.golang.org/api v0.3.1 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/yaml.v2 v2.2.2 // indirect
github.com/smartystreets/gunit v1.4.2 // indirect
go.opencensus.io v0.22.4
)

View file

@ -0,0 +1,240 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"context"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/olivere/elastic/v7/uritemplates"
)
// IndicesClearCacheService allows to clear either all caches or specific cached associated
// with one or more indices.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.6/indices-clearcache.html
// for details.
type IndicesClearCacheService struct {
	client            *Client
	pretty            *bool       // pretty format the returned JSON response
	human             *bool       // return human readable values for statistics
	errorTrace        *bool       // include the stack trace of returned errors
	filterPath        []string    // list of filters used to reduce the response
	headers           http.Header // custom request-level HTTP headers
	index             []string    // indices whose caches should be cleared; empty means all
	ignoreUnavailable *bool       // ignore unavailable (missing or closed) indices
	allowNoIndices    *bool       // allow wildcard expressions resolving to no indices
	expandWildcards   string      // expand wildcards to open, closed indices, or both
	fieldData         *bool       // clear the fielddata cache
	fields            string      // comma-separated fields to limit fielddata clearing to
	query             *bool       // clear only the query cache
	request           *bool       // clear only the request cache
}
// NewIndicesClearCacheService initializes a new instance of
// IndicesClearCacheService.
func NewIndicesClearCacheService(client *Client) *IndicesClearCacheService {
return &IndicesClearCacheService{client: client}
}
// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesClearCacheService) Pretty(pretty bool) *IndicesClearCacheService {
s.pretty = &pretty
return s
}
// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesClearCacheService) Human(human bool) *IndicesClearCacheService {
s.human = &human
return s
}
// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesClearCacheService) ErrorTrace(errorTrace bool) *IndicesClearCacheService {
s.errorTrace = &errorTrace
return s
}
// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesClearCacheService) FilterPath(filterPath ...string) *IndicesClearCacheService {
s.filterPath = filterPath
return s
}
// Header adds a header to the request.
func (s *IndicesClearCacheService) Header(name string, value string) *IndicesClearCacheService {
if s.headers == nil {
s.headers = http.Header{}
}
s.headers.Add(name, value)
return s
}
// Headers specifies the headers of the request.
func (s *IndicesClearCacheService) Headers(headers http.Header) *IndicesClearCacheService {
s.headers = headers
return s
}
// Index is the comma-separated list or wildcard expression of index names used to clear cache.
func (s *IndicesClearCacheService) Index(indices ...string) *IndicesClearCacheService {
s.index = append(s.index, indices...)
return s
}
// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesClearCacheService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesClearCacheService {
s.ignoreUnavailable = &ignoreUnavailable
return s
}
// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices. (This includes `_all` string or when no indices
// have been specified).
func (s *IndicesClearCacheService) AllowNoIndices(allowNoIndices bool) *IndicesClearCacheService {
s.allowNoIndices = &allowNoIndices
return s
}
// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *IndicesClearCacheService) ExpandWildcards(expandWildcards string) *IndicesClearCacheService {
s.expandWildcards = expandWildcards
return s
}
// FieldData indicates whether to clear the fields cache.
// Use the fields parameter to clear the cache of specific fields only.
func (s *IndicesClearCacheService) FieldData(fieldData bool) *IndicesClearCacheService {
s.fieldData = &fieldData
return s
}
// Fields indicates comma-separated list of field names used to limit the fielddata parameter.
// Defaults to all fields.
func (s *IndicesClearCacheService) Fields(fields string) *IndicesClearCacheService {
s.fields = fields
return s
}
// Query indicates whether to clear only query cache.
func (s *IndicesClearCacheService) Query(queryCache bool) *IndicesClearCacheService {
s.query = &queryCache
return s
}
// Request indicates whether to clear only request cache.
func (s *IndicesClearCacheService) Request(requestCache bool) *IndicesClearCacheService {
s.request = &requestCache
return s
}
// buildURL builds the endpoint path and query string parameters for the
// clear-cache operation from the configured service options.
func (s *IndicesClearCacheService) buildURL() (string, url.Values, error) {
	// Build URL: scope to the given indices if any, otherwise clear globally.
	var path string
	var err error
	if len(s.index) > 0 {
		path, err = uritemplates.Expand("/{index}/_cache/clear", map[string]string{
			"index": strings.Join(s.index, ","),
		})
	} else {
		path = "/_cache/clear"
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if v := s.allowNoIndices; v != nil {
		params.Set("allow_no_indices", fmt.Sprint(*v))
	}
	if v := s.expandWildcards; v != "" {
		params.Set("expand_wildcards", v)
	}
	if v := s.ignoreUnavailable; v != nil {
		params.Set("ignore_unavailable", fmt.Sprint(*v))
	}
	if len(s.index) > 0 {
		// Render the index list as a comma-separated string; the previous
		// fmt.Sprintf("%v", s.index) produced Go slice syntax like "[a b]".
		params.Set("index", strings.Join(s.index, ","))
	}
	// Bug fix: "fielddata" must be driven by s.fieldData; it previously
	// reused s.ignoreUnavailable by mistake.
	if v := s.fieldData; v != nil {
		params.Set("fielddata", fmt.Sprint(*v))
	}
	if s.fields != "" {
		params.Set("fields", s.fields)
	}
	if v := s.query; v != nil {
		params.Set("query", fmt.Sprint(*v))
	}
	if v := s.request; v != nil {
		params.Set("request", fmt.Sprint(*v))
	}
	return path, params, nil
}
// Validate checks if the operation is valid.
// The clear-cache call has no required parameters, so validation never fails.
func (s *IndicesClearCacheService) Validate() error {
	return nil
}
// Do executes the clear-cache operation against the cluster and returns the
// decoded response, or an error if validation, the HTTP request, or the
// response decoding fails.
func (s *IndicesClearCacheService) Do(ctx context.Context) (*IndicesClearCacheResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}
	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}
	// Get HTTP response; the clear-cache endpoint is invoked via POST.
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "POST",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}
	// Return operation response
	ret := new(IndicesClearCacheResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}
// IndicesClearCacheResponse is the response of IndicesClearCacheService.Do.
type IndicesClearCacheResponse struct {
	Shards *ShardsInfo `json:"_shards"` // shard-level success/failure counts for the operation
}

View file

@ -159,6 +159,9 @@ func (s *IndicesSyncedFlushService) Validate() error {
}
// Do executes the service.
//
// Deprecated: Synced flush is deprecated and will be removed in 8.0.
// Use flush at _/flush or /{index}/_flush instead.
func (s *IndicesSyncedFlushService) Do(ctx context.Context) (*IndicesSyncedFlushResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {

View file

@ -291,7 +291,7 @@ func (item *MultiGetItem) Source() (interface{}, error) {
source["_source"] = src
}
if item.routing != "" {
source["_routing"] = item.routing
source["routing"] = item.routing
}
if len(item.storedFields) > 0 {
source["stored_fields"] = strings.Join(item.storedFields, ",")

View file

@ -120,7 +120,7 @@ func (s *PingService) HttpHeadOnly(httpHeadOnly bool) *PingService {
// server, and an error.
func (s *PingService) Do(ctx context.Context) (*PingResult, int, error) {
s.client.mu.RLock()
basicAuth := s.client.basicAuth
basicAuth := s.client.basicAuthUsername != "" || s.client.basicAuthPassword != ""
basicAuthUsername := s.client.basicAuthUsername
basicAuthPassword := s.client.basicAuthPassword
defaultHeaders := s.client.headers

View file

@ -365,7 +365,7 @@ func (s *ReindexService) DoAsync(ctx context.Context) (*StartTaskResult, error)
return nil, err
}
// DoAsync only makes sense with WaitForCompletion set to true
// DoAsync only makes sense with WaitForCompletion set to false
if s.waitForCompletion != nil && *s.waitForCompletion {
return nil, fmt.Errorf("cannot start a task with WaitForCompletion set to true")
}
@ -623,13 +623,13 @@ func (ri *ReindexRemoteInfo) Source() (interface{}, error) {
return res, nil
}
// -source Destination of Reindex --
// -- Destination of Reindex --
// ReindexDestination is the destination of a Reindex API call.
// It is basically the meta data of a BulkIndexRequest.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-reindex.html
// fsourcer details.
// for details.
type ReindexDestination struct {
index string
typ string

View file

@ -1,3 +0,0 @@
#!/bin/sh
VERSION=${VERSION:=6.4.0}
docker run --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch-oss:$VERSION elasticsearch -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_

View file

@ -1,2 +0,0 @@
#!/bin/sh
go test . ./aws/... ./config/... ./trace/... ./uritemplates/...

View file

@ -41,6 +41,7 @@ type ScrollService struct {
routing string
preference string
ignoreUnavailable *bool
ignoreThrottled *bool
allowNoIndices *bool
expandWildcards string
maxResponseSize int64
@ -116,6 +117,9 @@ func (s *ScrollService) Index(indices ...string) *ScrollService {
}
// Type sets the name of one or more types to iterate over.
//
// Deprecated: Types are in the process of being removed. Instead of using a type, prefer to
// filter on a field on the document.
func (s *ScrollService) Type(types ...string) *ScrollService {
if s.types == nil {
s.types = make([]string, 0)
@ -269,6 +273,13 @@ func (s *ScrollService) IgnoreUnavailable(ignoreUnavailable bool) *ScrollService
return s
}
// IgnoreThrottled indicates whether specified concrete, expanded or aliased
// indices should be ignored when throttled.
func (s *ScrollService) IgnoreThrottled(ignoreThrottled bool) *ScrollService {
s.ignoreThrottled = &ignoreThrottled
return s
}
// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices. (This includes `_all` string
// or when no indices have been specified).
@ -291,6 +302,28 @@ func (s *ScrollService) MaxResponseSize(maxResponseSize int64) *ScrollService {
return s
}
// NoStoredFields indicates that no stored fields should be loaded, resulting in only
// id and type to be returned per field.
func (s *ScrollService) NoStoredFields() *ScrollService {
s.ss = s.ss.NoStoredFields()
return s
}
// StoredField adds a single field to load and return (note, must be stored) as
// part of the search request. If none are specified, the source of the
// document will be returned.
func (s *ScrollService) StoredField(fieldName string) *ScrollService {
s.ss = s.ss.StoredField(fieldName)
return s
}
// StoredFields sets the fields to load and return as part of the search request.
// If none are specified, the source of the document will be returned.
func (s *ScrollService) StoredFields(fields ...string) *ScrollService {
s.ss = s.ss.StoredFields(fields...)
return s
}
// ScrollId specifies the identifier of a scroll in action.
func (s *ScrollService) ScrollId(scrollId string) *ScrollService {
s.mu.Lock()
@ -471,6 +504,9 @@ func (s *ScrollService) buildFirstURL() (string, url.Values, error) {
if s.ignoreUnavailable != nil {
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
}
if s.ignoreThrottled != nil {
params.Set("ignore_throttled", fmt.Sprintf("%v", *s.ignoreThrottled))
}
return path, params, nil
}
@ -588,3 +624,31 @@ func (s *ScrollService) bodyNext() (interface{}, error) {
s.mu.RUnlock()
return body, nil
}
// DocvalueField adds a single field to load from the field data cache
// and return as part of the search.
func (s *ScrollService) DocvalueField(docvalueField string) *ScrollService {
s.ss = s.ss.DocvalueField(docvalueField)
return s
}
// DocvalueFieldWithFormat adds a single field to load from the field data cache
// and return as part of the search.
func (s *ScrollService) DocvalueFieldWithFormat(docvalueField DocvalueField) *ScrollService {
s.ss = s.ss.DocvalueFieldWithFormat(docvalueField)
return s
}
// DocvalueFields adds one or more fields to load from the field data cache
// and return as part of the search.
func (s *ScrollService) DocvalueFields(docvalueFields ...string) *ScrollService {
s.ss = s.ss.DocvalueFields(docvalueFields...)
return s
}
// DocvalueFieldsWithFormat adds one or more fields to load from the field data cache
// and return as part of the search.
func (s *ScrollService) DocvalueFieldsWithFormat(docvalueFields ...DocvalueField) *ScrollService {
s.ss = s.ss.DocvalueFieldsWithFormat(docvalueFields...)
return s
}

View file

@ -375,6 +375,19 @@ func (s *SearchService) SearchAfter(sortValues ...interface{}) *SearchService {
return s
}
// DefaultRescoreWindowSize sets the rescore window size for rescores
// that don't specify their window.
func (s *SearchService) DefaultRescoreWindowSize(defaultRescoreWindowSize int) *SearchService {
s.searchSource = s.searchSource.DefaultRescoreWindowSize(defaultRescoreWindowSize)
return s
}
// Rescorer adds a rescorer to the search.
func (s *SearchService) Rescorer(rescore *Rescore) *SearchService {
s.searchSource = s.searchSource.Rescorer(rescore)
return s
}
// IgnoreUnavailable indicates whether the specified concrete indices
// should be ignored when unavailable (missing or closed).
func (s *SearchService) IgnoreUnavailable(ignoreUnavailable bool) *SearchService {
@ -628,20 +641,20 @@ func (s *SearchService) Do(ctx context.Context) (*SearchResult, error) {
// SearchResult is the result of a search in Elasticsearch.
type SearchResult struct {
Header http.Header `json:"-"`
TookInMillis int64 `json:"took,omitempty"` // search time in milliseconds
TerminatedEarly bool `json:"terminated_early,omitempty"` // request terminated early
NumReducePhases int `json:"num_reduce_phases,omitempty"`
Clusters []*SearchResultCluster `json:"_clusters,omitempty"` // 6.1.0+
ScrollId string `json:"_scroll_id,omitempty"` // only used with Scroll and Scan operations
Hits *SearchHits `json:"hits,omitempty"` // the actual search hits
Suggest SearchSuggest `json:"suggest,omitempty"` // results from suggesters
Aggregations Aggregations `json:"aggregations,omitempty"` // results from aggregations
TimedOut bool `json:"timed_out,omitempty"` // true if the search timed out
Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet
Profile *SearchProfile `json:"profile,omitempty"` // profiling results, if optional Profile API was active for this search
Shards *ShardsInfo `json:"_shards,omitempty"` // shard information
Status int `json:"status,omitempty"` // used in MultiSearch
Header http.Header `json:"-"`
TookInMillis int64 `json:"took,omitempty"` // search time in milliseconds
TerminatedEarly bool `json:"terminated_early,omitempty"` // request terminated early
NumReducePhases int `json:"num_reduce_phases,omitempty"`
Clusters *SearchResultCluster `json:"_clusters,omitempty"` // 6.1.0+
ScrollId string `json:"_scroll_id,omitempty"` // only used with Scroll and Scan operations
Hits *SearchHits `json:"hits,omitempty"` // the actual search hits
Suggest SearchSuggest `json:"suggest,omitempty"` // results from suggesters
Aggregations Aggregations `json:"aggregations,omitempty"` // results from aggregations
TimedOut bool `json:"timed_out,omitempty"` // true if the search timed out
Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet
Profile *SearchProfile `json:"profile,omitempty"` // profiling results, if optional Profile API was active for this search
Shards *ShardsInfo `json:"_shards,omitempty"` // shard information
Status int `json:"status,omitempty"` // used in MultiSearch
}
// SearchResultCluster holds information about a search response
@ -704,6 +717,30 @@ type TotalHits struct {
Relation string `json:"relation"` // how the value should be interpreted: accurate ("eq") or a lower bound ("gte")
}
// UnmarshalJSON into TotalHits, accepting both the new response structure
// in ES 7.x as well as the older response structure in earlier versions.
// The latter can be enabled with RestTotalHitsAsInt(true).
func (h *TotalHits) UnmarshalJSON(data []byte) error {
	// Absent or JSON null input leaves h at its zero value.
	if data == nil || string(data) == "null" {
		return nil
	}
	// First try the ES 7.x object shape: {"value": 123, "relation": "eq"}.
	var v struct {
		Value int64 `json:"value"` // value of the total hit count
		Relation string `json:"relation"` // how the value should be interpreted: accurate ("eq") or a lower bound ("gte")
	}
	if err := json.Unmarshal(data, &v); err != nil {
		// Fall back to the legacy shape: a bare integer
		// (as produced with RestTotalHitsAsInt(true) or by older versions).
		var count int64
		if err2 := json.Unmarshal(data, &count); err2 != nil {
			return err // return inner error
		}
		h.Value = count
		h.Relation = "eq" // a plain integer denotes an exact count
		return nil
	}
	*h = v
	return nil
}
// SearchHit is a single hit.
type SearchHit struct {
Score *float64 `json:"_score,omitempty"` // computed score

View file

@ -98,6 +98,22 @@ func (a Aggregations) WeightedAvg(name string) (*AggregationValueMetric, bool) {
return nil, false
}
// MedianAbsoluteDeviation returns median absolute deviation aggregation results.
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.6/search-aggregations-metrics-median-absolute-deviation-aggregation.html
// for details.
func (a Aggregations) MedianAbsoluteDeviation(name string) (*AggregationValueMetric, bool) {
	raw, found := a[name]
	if !found {
		// No aggregation under that name in the response.
		return nil, false
	}
	agg := new(AggregationValueMetric)
	if raw == nil {
		// Present but empty payload: report an empty metric.
		return agg, true
	}
	if err := json.Unmarshal(raw, agg); err != nil {
		// Payload exists but could not be decoded.
		return nil, false
	}
	return agg, true
}
// ValueCount returns value-count aggregation results.
// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-valuecount-aggregation.html
func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) {
@ -368,6 +384,21 @@ func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignifica
return nil, false
}
// RareTerms returns rare terms aggregation results.
// See: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-rare-terms-aggregation.html
func (a Aggregations) RareTerms(name string) (*AggregationBucketKeyItems, bool) {
	if raw, found := a[name]; found {
		agg := new(AggregationBucketKeyItems)
		if raw == nil {
			// Aggregation present but without a payload: empty result.
			return agg, true
		}
		if err := json.Unmarshal(raw, agg); err == nil {
			return agg, true
		}
	}
	// Not found, or the payload failed to decode.
	return nil, false
}
// Sampler returns sampler aggregation results.
// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-sampler-aggregation.html
func (a Aggregations) Sampler(name string) (*AggregationSingleBucket, bool) {
@ -1406,9 +1437,8 @@ func (a *AggregationBucketAdjacencyMatrix) UnmarshalJSON(data []byte) error {
type AggregationBucketHistogramItems struct {
Aggregations
Buckets []*AggregationBucketHistogramItem //`json:"buckets"`
Interval interface{} // `json:"interval"` // can be numeric or a string
Meta map[string]interface{} // `json:"meta,omitempty"`
Buckets []*AggregationBucketHistogramItem // `json:"buckets"`
Meta map[string]interface{} // `json:"meta,omitempty"`
}
// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItems structure.
@ -1420,9 +1450,6 @@ func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error {
if v, ok := aggs["buckets"]; ok && v != nil {
json.Unmarshal(v, &a.Buckets)
}
if v, ok := aggs["interval"]; ok && v != nil {
json.Unmarshal(v, &a.Interval)
}
if v, ok := aggs["meta"]; ok && v != nil {
json.Unmarshal(v, &a.Meta)
}

View file

@ -92,9 +92,8 @@ func (a *DateHistogramAggregation) FixedInterval(fixedInterval string) *DateHist
// CalendarInterval by which the aggregation gets processed.
//
// Allowed values are: "year", "1y", "quarter", "1q", "month", "1M",
// "week", "1w", "day", "1d", "hour", "1h", "minute", "1m", "second",
// or "1s". It also supports time settings like "1.5h".
// Allowed values are: "year" ("1y", "y"), "quarter" ("1q", "q"),
// "month" ("1M", "M"), "week" ("1w", "w"), "day" ("d", "1d")
//
// These units are calendar-aware, meaning they respect leap
// additions, variable days per month etc. This is mutually

View file

@ -19,6 +19,8 @@ import "errors"
type FiltersAggregation struct {
	unnamedFilters []Query // filters added without an explicit name
	namedFilters map[string]Query // filters with an explicit bucket name (see FilterWithName)
	otherBucket *bool // optional "other_bucket" flag; nil means unset
	otherBucketKey string // optional "other_bucket_key"; "" means unset
	subAggregations map[string]Aggregation // nested sub-aggregations by name
	meta map[string]interface{} // optional metadata echoed back in the response
}
@ -55,6 +57,20 @@ func (a *FiltersAggregation) FilterWithName(name string, filter Query) *FiltersA
return a
}
// OtherBucket indicates whether to include a bucket for documents not
// matching any filter.
// It is serialized as the "other_bucket" option; leaving it unset (nil)
// keeps the server-side default.
func (a *FiltersAggregation) OtherBucket(otherBucket bool) *FiltersAggregation {
	a.otherBucket = &otherBucket
	return a
}
// OtherBucketKey sets the key to use for the bucket for documents not
// matching any filter.
// It is serialized as the "other_bucket_key" option; an empty string
// leaves the option unset.
func (a *FiltersAggregation) OtherBucketKey(key string) *FiltersAggregation {
	a.otherBucketKey = key
	return a
}
// SubAggregation adds a sub-aggregation to this aggregation.
func (a *FiltersAggregation) SubAggregation(name string, subAggregation Aggregation) *FiltersAggregation {
a.subAggregations[name] = subAggregation
@ -116,6 +132,13 @@ func (a *FiltersAggregation) Source() (interface{}, error) {
filters["filters"] = dict
}
if v := a.otherBucket; v != nil {
filters["other_bucket"] = *v
}
if v := a.otherBucketKey; v != "" {
filters["other_bucket_key"] = v
}
// AggregationBuilder (SubAggregations)
if len(a.subAggregations) > 0 {
aggsMap := make(map[string]interface{})

View file

@ -0,0 +1,156 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
// RareTermsAggregation is a multi-bucket value source based aggregation
// which finds "rare" terms — terms that are at the long-tail of the distribution
// and are not frequent. Conceptually, this is like a terms aggregation that
// is sorted by _count ascending.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-rare-terms-aggregation.html
// for details.
type RareTermsAggregation struct {
	field string // field to aggregate on
	subAggregations map[string]Aggregation // nested sub-aggregations by name
	meta map[string]interface{} // optional metadata echoed back in the response
	includeExclude *TermsAggregationIncludeExclude // optional include/exclude term filters
	maxDocCount *int // "max_doc_count" option; nil means unset
	precision *float64 // "precision" option; nil means unset
	missing interface{} // "missing" option; nil means unset
}

// NewRareTermsAggregation initializes a new RareTermsAggregation.
func NewRareTermsAggregation() *RareTermsAggregation {
	return &RareTermsAggregation{
		subAggregations: make(map[string]Aggregation),
	}
}

// Field sets the field to aggregate on.
func (a *RareTermsAggregation) Field(field string) *RareTermsAggregation {
	a.field = field
	return a
}

// SubAggregation adds a sub-aggregation to this aggregation.
func (a *RareTermsAggregation) SubAggregation(name string, subAggregation Aggregation) *RareTermsAggregation {
	a.subAggregations[name] = subAggregation
	return a
}

// Meta sets the meta data to be included in the aggregation response.
func (a *RareTermsAggregation) Meta(metaData map[string]interface{}) *RareTermsAggregation {
	a.meta = metaData
	return a
}

// MaxDocCount sets the "max_doc_count" option of the aggregation.
func (a *RareTermsAggregation) MaxDocCount(maxDocCount int) *RareTermsAggregation {
	a.maxDocCount = &maxDocCount
	return a
}

// Precision sets the "precision" option of the aggregation.
func (a *RareTermsAggregation) Precision(precision float64) *RareTermsAggregation {
	a.precision = &precision
	return a
}

// Missing sets the value to use for documents without the field
// (serialized as the "missing" option).
func (a *RareTermsAggregation) Missing(missing interface{}) *RareTermsAggregation {
	a.missing = missing
	return a
}

// Include sets a regular expression of terms to include.
func (a *RareTermsAggregation) Include(regexp string) *RareTermsAggregation {
	if a.includeExclude == nil {
		a.includeExclude = &TermsAggregationIncludeExclude{}
	}
	a.includeExclude.Include = regexp
	return a
}

// IncludeValues appends exact values to include.
func (a *RareTermsAggregation) IncludeValues(values ...interface{}) *RareTermsAggregation {
	if a.includeExclude == nil {
		a.includeExclude = &TermsAggregationIncludeExclude{}
	}
	a.includeExclude.IncludeValues = append(a.includeExclude.IncludeValues, values...)
	return a
}

// Exclude sets a regular expression of terms to exclude.
func (a *RareTermsAggregation) Exclude(regexp string) *RareTermsAggregation {
	if a.includeExclude == nil {
		a.includeExclude = &TermsAggregationIncludeExclude{}
	}
	a.includeExclude.Exclude = regexp
	return a
}

// ExcludeValues appends exact values to exclude.
func (a *RareTermsAggregation) ExcludeValues(values ...interface{}) *RareTermsAggregation {
	if a.includeExclude == nil {
		a.includeExclude = &TermsAggregationIncludeExclude{}
	}
	a.includeExclude.ExcludeValues = append(a.includeExclude.ExcludeValues, values...)
	return a
}

// IncludeExclude replaces the include/exclude clause wholesale,
// overriding anything set via Include/Exclude and their *Values variants.
func (a *RareTermsAggregation) IncludeExclude(includeExclude *TermsAggregationIncludeExclude) *RareTermsAggregation {
	a.includeExclude = includeExclude
	return a
}

// Source returns the JSON-serializable aggregation request.
func (a *RareTermsAggregation) Source() (interface{}, error) {
	// Example:
	// {
	//   "aggregations" : {
	//     "genres" : {
	//       "rare_terms" : { "field" : "genre" }
	//     }
	//   }
	// }
	//
	// This method returns only the
	//   "rare_terms" : { "field" : "genre" }
	// part.

	source := make(map[string]interface{})
	opts := make(map[string]interface{})
	source["rare_terms"] = opts

	if a.field != "" {
		opts["field"] = a.field
	}
	if a.maxDocCount != nil {
		opts["max_doc_count"] = *a.maxDocCount
	}
	if a.precision != nil {
		opts["precision"] = *a.precision
	}
	if a.missing != nil {
		opts["missing"] = a.missing
	}

	// Include/Exclude
	if ie := a.includeExclude; ie != nil {
		if err := ie.MergeInto(opts); err != nil {
			return nil, err
		}
	}

	// AggregationBuilder (SubAggregations)
	if len(a.subAggregations) > 0 {
		aggsMap := make(map[string]interface{})
		source["aggregations"] = aggsMap
		for name, aggregate := range a.subAggregations {
			src, err := aggregate.Source()
			if err != nil {
				return nil, err
			}
			aggsMap[name] = src
		}
	}

	// Add Meta data if available
	if len(a.meta) > 0 {
		source["meta"] = a.meta
	}

	return source, nil
}

View file

@ -0,0 +1,119 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
// MedianAbsoluteDeviationAggregation is a measure of variability.
// It is a robust statistic, meaning that it is useful for describing data
// that may have outliers, or may not be normally distributed.
// For such data it can be more descriptive than standard deviation.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.6/search-aggregations-metrics-median-absolute-deviation-aggregation.html
// for details.
type MedianAbsoluteDeviationAggregation struct {
	field string // field to compute the metric on
	compression *float64 // "compression" option; nil means unset
	script *Script // optional script computing the values instead of a field
	format string // "format" option; "" means unset
	missing interface{} // "missing" option; nil means unset
	subAggregations map[string]Aggregation // nested sub-aggregations by name
	meta map[string]interface{} // optional metadata echoed back in the response
}

// NewMedianAbsoluteDeviationAggregation initializes a new
// MedianAbsoluteDeviationAggregation.
func NewMedianAbsoluteDeviationAggregation() *MedianAbsoluteDeviationAggregation {
	return &MedianAbsoluteDeviationAggregation{
		subAggregations: make(map[string]Aggregation),
	}
}

// Field sets the field to compute the metric on.
func (a *MedianAbsoluteDeviationAggregation) Field(field string) *MedianAbsoluteDeviationAggregation {
	a.field = field
	return a
}

// Compression sets the "compression" option of the aggregation.
func (a *MedianAbsoluteDeviationAggregation) Compression(compression float64) *MedianAbsoluteDeviationAggregation {
	a.compression = &compression
	return a
}

// Script sets a script to compute the values from, instead of a plain field.
func (a *MedianAbsoluteDeviationAggregation) Script(script *Script) *MedianAbsoluteDeviationAggregation {
	a.script = script
	return a
}

// Format sets the "format" option of the aggregation.
func (a *MedianAbsoluteDeviationAggregation) Format(format string) *MedianAbsoluteDeviationAggregation {
	a.format = format
	return a
}

// Missing sets the value to use for documents without the field
// (serialized as the "missing" option).
func (a *MedianAbsoluteDeviationAggregation) Missing(missing interface{}) *MedianAbsoluteDeviationAggregation {
	a.missing = missing
	return a
}

// SubAggregation adds a sub-aggregation to this aggregation.
func (a *MedianAbsoluteDeviationAggregation) SubAggregation(name string, subAggregation Aggregation) *MedianAbsoluteDeviationAggregation {
	a.subAggregations[name] = subAggregation
	return a
}

// Meta sets the meta data to be included in the aggregation response.
func (a *MedianAbsoluteDeviationAggregation) Meta(metaData map[string]interface{}) *MedianAbsoluteDeviationAggregation {
	a.meta = metaData
	return a
}

// Source returns the JSON-serializable aggregation request.
func (a *MedianAbsoluteDeviationAggregation) Source() (interface{}, error) {
	// Example:
	//	{
	//    "aggs" : {
	//      "review_variability" : { "median_absolute_deviation" : { "field" : "rating" } }
	//    }
	//	}
	// This method returns only the { "median_absolute_deviation" : { "field" : "rating" } } part.

	source := make(map[string]interface{})
	opts := make(map[string]interface{})
	source["median_absolute_deviation"] = opts

	// ValuesSourceAggregationBuilder
	if a.field != "" {
		opts["field"] = a.field
	}
	if v := a.compression; v != nil {
		opts["compression"] = *v
	}
	if a.script != nil {
		src, err := a.script.Source()
		if err != nil {
			return nil, err
		}
		opts["script"] = src
	}
	if a.format != "" {
		opts["format"] = a.format
	}
	if a.missing != nil {
		opts["missing"] = a.missing
	}

	// AggregationBuilder (SubAggregations)
	if len(a.subAggregations) > 0 {
		aggsMap := make(map[string]interface{})
		source["aggregations"] = aggsMap
		for name, aggregate := range a.subAggregations {
			src, err := aggregate.Source()
			if err != nil {
				return nil, err
			}
			aggsMap[name] = src
		}
	}

	// Add Meta data if available
	if len(a.meta) > 0 {
		source["meta"] = a.meta
	}

	return source, nil
}

View file

@ -8,7 +8,7 @@ package elastic
// Note, this query uses the _uid field.
//
// For more details, see
// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-ids-query.html
// https://www.elastic.co/guide/en/elasticsearch/reference/7.6/query-dsl-ids-query.html
type IdsQuery struct {
types []string
values []string
@ -18,7 +18,8 @@ type IdsQuery struct {
// NewIdsQuery creates and initializes a new ids query.
//
// Deprecated: Types are in the process of being removed, prefer to filter on a field instead.
// Notice that types are in the process of being removed.
// You should filter on a field instead.
func NewIdsQuery(types ...string) *IdsQuery {
return &IdsQuery{
types: types,

View file

@ -0,0 +1,52 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
// IntervalQueryRule represents the generic matching interval rule interface.
// Interval Rule is actually just a Query, but may be used only inside
// IntervalQuery. An extra method is added just to shield its
// implementations (*Rule objects) from other query objects.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html
// for details.
type IntervalQueryRule interface {
	Query

	// isIntervalQueryRule is never actually called, and is used just for Rule to
	// differ from standard Query.
	isIntervalQueryRule() bool
}

// IntervalQuery returns documents based on the order and proximity of matching terms.
//
// For more details, see
// https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html
type IntervalQuery struct {
	field string // field the rule applies to
	rule IntervalQueryRule // top-level matching rule
}

// NewIntervalQuery creates and initializes a new IntervalQuery.
func NewIntervalQuery(field string, rule IntervalQueryRule) *IntervalQuery {
	return &IntervalQuery{field: field, rule: rule}
}

// Source returns the JSON-serializable content of the query.
func (q *IntervalQuery) Source() (interface{}, error) {
	// {
	//   "intervals" : { ... }
	// }
	source := make(map[string]interface{})
	params := make(map[string]interface{})
	source["intervals"] = params

	// The rule's source is nested under the field name.
	src, err := q.rule.Source()
	if err != nil {
		return nil, err
	}
	params[q.field] = src

	return source, nil
}

View file

@ -0,0 +1,175 @@
package elastic
var (
	_ IntervalQueryRule = (*IntervalQueryFilter)(nil)
)

// IntervalQueryFilter specifies filters used in some
// IntervalQueryRule implementations, e.g. IntervalQueryRuleAllOf.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html#interval_filter
// for details.
type IntervalQueryFilter struct {
	after IntervalQueryRule
	before IntervalQueryRule
	containedBy IntervalQueryRule
	containing IntervalQueryRule
	overlapping IntervalQueryRule
	notContainedBy IntervalQueryRule
	notContaining IntervalQueryRule
	notOverlapping IntervalQueryRule
	script *Script
}

// NewIntervalQueryFilter initializes and creates a new
// IntervalQueryFilter.
func NewIntervalQueryFilter() *IntervalQueryFilter {
	return &IntervalQueryFilter{}
}

// After specifies the query to be used to return intervals that follow
// an interval from the filter rule.
func (r *IntervalQueryFilter) After(after IntervalQueryRule) *IntervalQueryFilter {
	r.after = after
	return r
}

// Before specifies the query to be used to return intervals that occur
// before an interval from the filter rule.
func (r *IntervalQueryFilter) Before(before IntervalQueryRule) *IntervalQueryFilter {
	r.before = before
	return r
}

// ContainedBy specifies the query to be used to return intervals contained
// by an interval from the filter rule.
func (r *IntervalQueryFilter) ContainedBy(containedBy IntervalQueryRule) *IntervalQueryFilter {
	r.containedBy = containedBy
	return r
}

// Containing specifies the query to be used to return intervals that contain an
// interval from the filter rule.
func (r *IntervalQueryFilter) Containing(containing IntervalQueryRule) *IntervalQueryFilter {
	r.containing = containing
	return r
}

// Overlapping specifies the query to be used to return intervals that overlap
// with an interval from the filter rule.
func (r *IntervalQueryFilter) Overlapping(overlapping IntervalQueryRule) *IntervalQueryFilter {
	r.overlapping = overlapping
	return r
}

// NotContainedBy specifies the query to be used to return intervals that are NOT
// contained by an interval from the filter rule.
func (r *IntervalQueryFilter) NotContainedBy(notContainedBy IntervalQueryRule) *IntervalQueryFilter {
	r.notContainedBy = notContainedBy
	return r
}

// NotContaining specifies the query to be used to return intervals that do NOT
// contain an interval from the filter rule.
func (r *IntervalQueryFilter) NotContaining(notContaining IntervalQueryRule) *IntervalQueryFilter {
	r.notContaining = notContaining
	return r
}

// NotOverlapping specifies the query to be used to return intervals that do NOT
// overlap with an interval from the filter rule.
func (r *IntervalQueryFilter) NotOverlapping(notOverlapping IntervalQueryRule) *IntervalQueryFilter {
	r.notOverlapping = notOverlapping
	return r
}

// Script allows a script to be used to return matching documents. The script
// must return a boolean value, true or false.
func (r *IntervalQueryFilter) Script(script *Script) *IntervalQueryFilter {
	r.script = script
	return r
}

// Source returns the JSON-serializable content of the filter.
// Only the clauses that were actually set are emitted.
func (r *IntervalQueryFilter) Source() (interface{}, error) {
	source := make(map[string]interface{})

	if r.before != nil {
		src, err := r.before.Source()
		if err != nil {
			return nil, err
		}
		source["before"] = src
	}
	if r.after != nil {
		src, err := r.after.Source()
		if err != nil {
			return nil, err
		}
		source["after"] = src
	}
	if r.containedBy != nil {
		src, err := r.containedBy.Source()
		if err != nil {
			return nil, err
		}
		source["contained_by"] = src
	}
	if r.containing != nil {
		src, err := r.containing.Source()
		if err != nil {
			return nil, err
		}
		source["containing"] = src
	}
	if r.overlapping != nil {
		src, err := r.overlapping.Source()
		if err != nil {
			return nil, err
		}
		source["overlapping"] = src
	}
	if r.notContainedBy != nil {
		src, err := r.notContainedBy.Source()
		if err != nil {
			return nil, err
		}
		source["not_contained_by"] = src
	}
	if r.notContaining != nil {
		src, err := r.notContaining.Source()
		if err != nil {
			return nil, err
		}
		source["not_containing"] = src
	}
	if r.notOverlapping != nil {
		src, err := r.notOverlapping.Source()
		if err != nil {
			return nil, err
		}
		source["not_overlapping"] = src
	}
	if r.script != nil {
		src, err := r.script.Source()
		if err != nil {
			return nil, err
		}
		source["script"] = src
	}

	return source, nil
}

// isIntervalQueryRule implements the marker interface.
func (r *IntervalQueryFilter) isIntervalQueryRule() bool {
	return true
}

View file

@ -0,0 +1,82 @@
package elastic
var (
	_ IntervalQueryRule = (*IntervalQueryRuleAllOf)(nil)
)

// IntervalQueryRuleAllOf is an implementation of IntervalQueryRule.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html#intervals-all_of
// for details.
type IntervalQueryRuleAllOf struct {
	intervals []IntervalQueryRule // sub-rules that must all match
	maxGaps *int // "max_gaps" option; nil means unset
	ordered *bool // "ordered" option; nil means unset
	filter *IntervalQueryFilter // optional "filter" clause
}

// NewIntervalQueryRuleAllOf initializes and returns a new instance
// of IntervalQueryRuleAllOf.
func NewIntervalQueryRuleAllOf(intervals ...IntervalQueryRule) *IntervalQueryRuleAllOf {
	return &IntervalQueryRuleAllOf{intervals: intervals}
}

// MaxGaps specifies the maximum number of positions between the matching
// terms. Terms further apart than this are considered matches. Defaults to -1.
func (r *IntervalQueryRuleAllOf) MaxGaps(maxGaps int) *IntervalQueryRuleAllOf {
	r.maxGaps = &maxGaps
	return r
}

// Ordered, if true, indicates that matching terms must appear in their specified
// order. Defaults to false.
func (r *IntervalQueryRuleAllOf) Ordered(ordered bool) *IntervalQueryRuleAllOf {
	r.ordered = &ordered
	return r
}

// Filter adds an additional interval filter.
func (r *IntervalQueryRuleAllOf) Filter(filter *IntervalQueryFilter) *IntervalQueryRuleAllOf {
	r.filter = filter
	return r
}

// Source returns the JSON-serializable content of the rule.
func (r *IntervalQueryRuleAllOf) Source() (interface{}, error) {
	source := make(map[string]interface{})

	// "intervals" is always emitted; a non-nil slice serializes
	// to [] (not null) when no sub-rules were given.
	intervalSources := make([]interface{}, 0)
	for _, interval := range r.intervals {
		src, err := interval.Source()
		if err != nil {
			return nil, err
		}

		intervalSources = append(intervalSources, src)
	}
	source["intervals"] = intervalSources

	if r.ordered != nil {
		source["ordered"] = *r.ordered
	}
	if r.maxGaps != nil {
		source["max_gaps"] = *r.maxGaps
	}
	if r.filter != nil {
		src, err := r.filter.Source()
		if err != nil {
			return nil, err
		}

		source["filter"] = src
	}

	return map[string]interface{}{
		"all_of": source,
	}, nil
}

// isIntervalQueryRule implements the marker interface.
func (r *IntervalQueryRuleAllOf) isIntervalQueryRule() bool {
	return true
}

View file

@ -0,0 +1,60 @@
package elastic
var (
	_ IntervalQueryRule = (*IntervalQueryRuleAnyOf)(nil)
)

// IntervalQueryRuleAnyOf is an implementation of IntervalQueryRule.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html#intervals-any_of
// for details.
type IntervalQueryRuleAnyOf struct {
	intervals []IntervalQueryRule // sub-rules of which any may match
	filter    *IntervalQueryFilter // optional "filter" clause
}

// NewIntervalQueryRuleAnyOf initializes and returns a new instance
// of IntervalQueryRuleAnyOf.
func NewIntervalQueryRuleAnyOf(intervals ...IntervalQueryRule) *IntervalQueryRuleAnyOf {
	return &IntervalQueryRuleAnyOf{intervals: intervals}
}

// Filter adds an additional interval filter.
func (r *IntervalQueryRuleAnyOf) Filter(filter *IntervalQueryFilter) *IntervalQueryRuleAnyOf {
	r.filter = filter
	return r
}

// Source returns the JSON-serializable content of the rule.
func (r *IntervalQueryRuleAnyOf) Source() (interface{}, error) {
	source := make(map[string]interface{})

	// Use a non-nil slice so that "intervals" serializes to []
	// rather than null when no sub-rules were given, consistent
	// with IntervalQueryRuleAllOf.
	intervalSources := make([]interface{}, 0)
	for _, interval := range r.intervals {
		src, err := interval.Source()
		if err != nil {
			return nil, err
		}

		intervalSources = append(intervalSources, src)
	}
	source["intervals"] = intervalSources

	if r.filter != nil {
		src, err := r.filter.Source()
		if err != nil {
			return nil, err
		}

		source["filter"] = src
	}

	return map[string]interface{}{
		"any_of": source,
	}, nil
}

// isIntervalQueryRule implements the marker interface.
func (r *IntervalQueryRuleAnyOf) isIntervalQueryRule() bool {
	return true
}

View file

@ -0,0 +1,94 @@
package elastic
var (
	_ IntervalQueryRule = (*IntervalQueryRuleMatch)(nil)
)

// IntervalQueryRuleMatch is an implementation of IntervalQueryRule.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html#intervals-match
// for details.
type IntervalQueryRuleMatch struct {
	query string // text to match, serialized as "query"
	maxGaps *int // "max_gaps" option; nil means unset
	ordered *bool // "ordered" option; nil means unset
	analyzer string // "analyzer" option; "" means unset
	useField string // "use_field" option; "" means unset
	filter *IntervalQueryFilter // optional "filter" clause
}

// NewIntervalQueryRuleMatch initializes and returns a new instance
// of IntervalQueryRuleMatch.
func NewIntervalQueryRuleMatch(query string) *IntervalQueryRuleMatch {
	return &IntervalQueryRuleMatch{query: query}
}

// MaxGaps specifies the maximum number of positions between the matching
// terms. Terms further apart than this are considered matches. Defaults to -1.
func (r *IntervalQueryRuleMatch) MaxGaps(maxGaps int) *IntervalQueryRuleMatch {
	r.maxGaps = &maxGaps
	return r
}

// Ordered, if true, indicates that matching terms must appear in their specified
// order. Defaults to false.
func (r *IntervalQueryRuleMatch) Ordered(ordered bool) *IntervalQueryRuleMatch {
	r.ordered = &ordered
	return r
}

// Analyzer specifies the analyzer used to analyze terms in the query.
func (r *IntervalQueryRuleMatch) Analyzer(analyzer string) *IntervalQueryRuleMatch {
	r.analyzer = analyzer
	return r
}

// UseField, if specified, matches the intervals from this field rather than
// the top-level field.
func (r *IntervalQueryRuleMatch) UseField(useField string) *IntervalQueryRuleMatch {
	r.useField = useField
	return r
}

// Filter adds an additional interval filter.
func (r *IntervalQueryRuleMatch) Filter(filter *IntervalQueryFilter) *IntervalQueryRuleMatch {
	r.filter = filter
	return r
}

// Source returns the JSON-serializable content of the rule.
// Optional clauses are only emitted when set.
func (r *IntervalQueryRuleMatch) Source() (interface{}, error) {
	source := make(map[string]interface{})

	source["query"] = r.query

	if r.ordered != nil {
		source["ordered"] = *r.ordered
	}
	if r.maxGaps != nil {
		source["max_gaps"] = *r.maxGaps
	}
	if r.analyzer != "" {
		source["analyzer"] = r.analyzer
	}
	if r.useField != "" {
		source["use_field"] = r.useField
	}
	if r.filter != nil {
		filterRuleSource, err := r.filter.Source()
		if err != nil {
			return nil, err
		}

		source["filter"] = filterRuleSource
	}

	return map[string]interface{}{
		"match": source,
	}, nil
}

// isIntervalQueryRule implements the marker interface.
func (r *IntervalQueryRuleMatch) isIntervalQueryRule() bool {
	return true
}

View file

@ -0,0 +1,57 @@
package elastic
var (
	_ IntervalQueryRule = (*IntervalQueryRulePrefix)(nil)
)

// IntervalQueryRulePrefix is an implementation of IntervalQueryRule.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html#intervals-prefix
// for details.
type IntervalQueryRulePrefix struct {
	prefix   string // prefix the terms must start with
	analyzer string // "analyzer" option; "" means unset
	useField string // "use_field" option; "" means unset
}

// NewIntervalQueryRulePrefix initializes and returns a new instance
// of IntervalQueryRulePrefix.
func NewIntervalQueryRulePrefix(prefix string) *IntervalQueryRulePrefix {
	return &IntervalQueryRulePrefix{prefix: prefix}
}

// Analyzer specifies the analyzer used to analyze terms in the query.
func (r *IntervalQueryRulePrefix) Analyzer(analyzer string) *IntervalQueryRulePrefix {
	r.analyzer = analyzer
	return r
}

// UseField, if specified, matches the intervals from this field rather than
// the top-level field.
func (r *IntervalQueryRulePrefix) UseField(useField string) *IntervalQueryRulePrefix {
	r.useField = useField
	return r
}

// Source returns the JSON-serializable content of the rule.
func (r *IntervalQueryRulePrefix) Source() (interface{}, error) {
	source := make(map[string]interface{})

	// The Elasticsearch intervals "prefix" rule expects the term under
	// the key "prefix"; "query" is only valid for the "match" rule.
	source["prefix"] = r.prefix

	if r.analyzer != "" {
		source["analyzer"] = r.analyzer
	}
	if r.useField != "" {
		source["use_field"] = r.useField
	}

	return map[string]interface{}{
		"prefix": source,
	}, nil
}

// isIntervalQueryRule implements the marker interface.
func (r *IntervalQueryRulePrefix) isIntervalQueryRule() bool {
	return true
}

View file

@ -0,0 +1,57 @@
package elastic
var (
	_ IntervalQueryRule = (*IntervalQueryRuleWildcard)(nil)
)

// IntervalQueryRuleWildcard is an implementation of IntervalQueryRule.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html#intervals-wildcard
// for details.
type IntervalQueryRuleWildcard struct {
	pattern  string
	analyzer string
	useField string
}

// NewIntervalQueryRuleWildcard initializes and returns a new instance
// of IntervalQueryRuleWildcard.
func NewIntervalQueryRuleWildcard(pattern string) *IntervalQueryRuleWildcard {
	return &IntervalQueryRuleWildcard{pattern: pattern}
}

// Analyzer specifies the analyzer used to analyze terms in the query.
func (r *IntervalQueryRuleWildcard) Analyzer(analyzer string) *IntervalQueryRuleWildcard {
	r.analyzer = analyzer
	return r
}

// UseField, if specified, matches the intervals from this field rather than
// the top-level field.
func (r *IntervalQueryRuleWildcard) UseField(useField string) *IntervalQueryRuleWildcard {
	r.useField = useField
	return r
}

// Source returns the JSON-serializable content of the rule,
// e.g. {"wildcard": {"pattern": "..."}} with optional
// "analyzer" and "use_field" settings.
func (r *IntervalQueryRuleWildcard) Source() (interface{}, error) {
	opts := map[string]interface{}{
		"pattern": r.pattern,
	}
	if r.analyzer != "" {
		opts["analyzer"] = r.analyzer
	}
	if r.useField != "" {
		opts["use_field"] = r.useField
	}
	return map[string]interface{}{
		"wildcard": opts,
	}, nil
}

// isIntervalQueryRule implements the marker interface.
func (r *IntervalQueryRuleWildcard) isIntervalQueryRule() bool {
	return true
}

View file

@ -394,7 +394,7 @@ func (item *MoreLikeThisQueryItem) Source() (interface{}, error) {
source["fields"] = item.fields
}
if item.routing != "" {
source["_routing"] = item.routing
source["routing"] = item.routing
}
if item.fsc != nil {
src, err := item.fsc.Source()

View file

@ -39,7 +39,6 @@ type MultiMatchQuery struct {
func NewMultiMatchQuery(text interface{}, fields ...string) *MultiMatchQuery {
q := &MultiMatchQuery{
text: text,
fields: make([]string, 0),
fieldBoosts: make(map[string]*float64),
}
q.fields = append(q.fields, fields...)
@ -62,28 +61,19 @@ func (q *MultiMatchQuery) FieldWithBoost(field string, boost float64) *MultiMatc
// Type can be "best_fields", "boolean", "most_fields", "cross_fields",
// "phrase", "phrase_prefix" or "bool_prefix"
func (q *MultiMatchQuery) Type(typ string) *MultiMatchQuery {
var zero = float64(0.0)
var one = float64(1.0)
switch strings.ToLower(typ) {
default: // best_fields / boolean
q.typ = "best_fields"
q.tieBreaker = &zero
case "most_fields":
q.typ = "most_fields"
q.tieBreaker = &one
case "cross_fields":
q.typ = "cross_fields"
q.tieBreaker = &zero
case "phrase":
q.typ = "phrase"
q.tieBreaker = &zero
case "phrase_prefix":
q.typ = "phrase_prefix"
q.tieBreaker = &zero
case "bool_prefix":
q.typ = "bool_prefix"
q.tieBreaker = &zero
}
return q
}
@ -209,19 +199,21 @@ func (q *MultiMatchQuery) Source() (interface{}, error) {
multiMatch["query"] = q.text
if len(q.fields) > 0 {
var fields []string
for _, field := range q.fields {
if boost, found := q.fieldBoosts[field]; found {
if boost != nil {
fields = append(fields, fmt.Sprintf("%s^%f", field, *boost))
} else {
fields = append(fields, field)
}
var fields []string
for _, field := range q.fields {
if boost, found := q.fieldBoosts[field]; found {
if boost != nil {
fields = append(fields, fmt.Sprintf("%s^%f", field, *boost))
} else {
fields = append(fields, field)
}
} else {
fields = append(fields, field)
}
}
if fields == nil {
multiMatch["fields"] = []string{}
} else {
multiMatch["fields"] = fields
}

View file

@ -0,0 +1,61 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
// PinnedQuery is a query that promotes selected documents to rank higher than those matching a given query.
//
// For more details, see:
// https://www.elastic.co/guide/en/elasticsearch/reference/7.8/query-dsl-pinned-query.html
type PinnedQuery struct {
	ids     []string
	organic Query
}

// NewPinnedQuery creates and initializes a new pinned query.
func NewPinnedQuery() *PinnedQuery {
	return &PinnedQuery{}
}

// Ids sets an array of document IDs listed in the order they are to appear in results.
func (q *PinnedQuery) Ids(ids ...string) *PinnedQuery {
	q.ids = ids
	return q
}

// Organic sets a choice of query used to rank documents which will be ranked below the "pinned" document ids.
func (q *PinnedQuery) Organic(query Query) *PinnedQuery {
	q.organic = query
	return q
}

// Source returns the JSON-serializable content of the query, e.g.:
//
//	{
//	  "pinned": {
//	    "ids": [ "1", "4", "100" ],
//	    "organic": { "match": { "description": "iphone" } }
//	  }
//	}
func (q *PinnedQuery) Source() (interface{}, error) {
	params := make(map[string]interface{})
	if len(q.ids) > 0 {
		params["ids"] = q.ids
	}
	if q.organic != nil {
		organicSrc, err := q.organic.Source()
		if err != nil {
			return nil, err
		}
		params["organic"] = organicSrc
	}
	return map[string]interface{}{"pinned": params}, nil
}

View file

@ -0,0 +1,74 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
// SpanFirstQuery spans near the beginning of a field.
// The span first query maps to Lucene SpanFirstQuery.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.7/query-dsl-span-first-query.html
// for details.
type SpanFirstQuery struct {
	match     Query    // inner span query, e.g. a SpanTermQuery
	end       int      // maximum end position of the match
	boost     *float64 // optional boost factor
	queryName string   // optional name for matched_filters per hit
}

// NewSpanFirstQuery creates a new SpanFirstQuery.
func NewSpanFirstQuery(query Query, end int) *SpanFirstQuery {
	q := new(SpanFirstQuery)
	q.match = query
	q.end = end
	return q
}

// Match sets the query, e.g. a SpanTermQuery.
func (q *SpanFirstQuery) Match(query Query) *SpanFirstQuery {
	q.match = query
	return q
}

// End specifies the maximum end position of the match, which needs to be positive.
func (q *SpanFirstQuery) End(end int) *SpanFirstQuery {
	q.end = end
	return q
}

// Boost sets the boost for this query.
func (q *SpanFirstQuery) Boost(boost float64) *SpanFirstQuery {
	q.boost = &boost
	return q
}

// QueryName sets the query name for the filter that can be used when
// searching for matched_filters per hit.
func (q *SpanFirstQuery) QueryName(queryName string) *SpanFirstQuery {
	q.queryName = queryName
	return q
}

// Source returns the JSON body.
func (q *SpanFirstQuery) Source() (interface{}, error) {
	inner := make(map[string]interface{})
	if q.match != nil {
		src, err := q.match.Source()
		if err != nil {
			return nil, err
		}
		inner["match"] = src
	}
	inner["end"] = q.end
	if q.boost != nil {
		inner["boost"] = *q.boost
	}
	if q.queryName != "" {
		inner["query_name"] = q.queryName
	}
	return map[string]interface{}{"span_first": inner}, nil
}

View file

@ -0,0 +1,98 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
// SpanNearQuery matches spans which are near one another. One can specify slop,
// the maximum number of intervening unmatched positions, as well as whether
// matches are required to be in-order. The span near query maps to Lucene SpanNearQuery.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.7/query-dsl-span-near-query.html
// for details.
type SpanNearQuery struct {
	clauses   []Query  // span clauses that must appear near each other
	slop      *int     // maximum number of intervening unmatched positions
	inOrder   *bool    // whether clauses must match in the given order
	boost     *float64 // optional boost factor
	queryName string   // optional name for matched_filters per hit
}

// NewSpanNearQuery creates a new SpanNearQuery.
func NewSpanNearQuery(clauses ...Query) *SpanNearQuery {
	q := new(SpanNearQuery)
	q.clauses = clauses
	return q
}

// Add clauses to use in the query.
func (q *SpanNearQuery) Add(clauses ...Query) *SpanNearQuery {
	q.clauses = append(q.clauses, clauses...)
	return q
}

// Clauses to use in the query.
func (q *SpanNearQuery) Clauses(clauses ...Query) *SpanNearQuery {
	q.clauses = clauses
	return q
}

// Slop controls the maximum number of intervening unmatched positions permitted.
func (q *SpanNearQuery) Slop(slop int) *SpanNearQuery {
	q.slop = &slop
	return q
}

// InOrder, when true, the spans from each clause must be in the same order as
// in Clauses and must be non-overlapping. Defaults to true.
func (q *SpanNearQuery) InOrder(inOrder bool) *SpanNearQuery {
	q.inOrder = &inOrder
	return q
}

// Boost sets the boost for this query.
func (q *SpanNearQuery) Boost(boost float64) *SpanNearQuery {
	q.boost = &boost
	return q
}

// QueryName sets the query name for the filter that can be used when
// searching for matched_filters per hit.
func (q *SpanNearQuery) QueryName(queryName string) *SpanNearQuery {
	q.queryName = queryName
	return q
}

// Source returns the JSON body.
func (q *SpanNearQuery) Source() (interface{}, error) {
	inner := make(map[string]interface{})
	if len(q.clauses) > 0 {
		srcs := make([]interface{}, 0, len(q.clauses))
		for _, clause := range q.clauses {
			src, err := clause.Source()
			if err != nil {
				return nil, err
			}
			srcs = append(srcs, src)
		}
		inner["clauses"] = srcs
	}
	if q.slop != nil {
		inner["slop"] = *q.slop
	}
	if q.inOrder != nil {
		inner["in_order"] = *q.inOrder
	}
	if q.boost != nil {
		inner["boost"] = *q.boost
	}
	if q.queryName != "" {
		inner["query_name"] = q.queryName
	}
	return map[string]interface{}{"span_near": inner}, nil
}

View file

@ -0,0 +1,70 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
// SpanTermQuery matches spans containing a term. The span term query maps to Lucene SpanTermQuery.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.7/query-dsl-span-term-query.html
// for details.
type SpanTermQuery struct {
	field     string      // field to match the term against
	value     interface{} // term value
	boost     *float64    // optional boost factor
	queryName string      // optional name for matched_filters per hit
}

// NewSpanTermQuery creates a new SpanTermQuery. When passing values, the first one
// is used to initialize the value.
func NewSpanTermQuery(field string, value ...interface{}) *SpanTermQuery {
	q := &SpanTermQuery{field: field}
	if len(value) > 0 {
		q.value = value[0]
	}
	return q
}

// Field name to match the term against.
func (q *SpanTermQuery) Field(field string) *SpanTermQuery {
	q.field = field
	return q
}

// Value of the term.
func (q *SpanTermQuery) Value(value interface{}) *SpanTermQuery {
	q.value = value
	return q
}

// Boost sets the boost for this query.
func (q *SpanTermQuery) Boost(boost float64) *SpanTermQuery {
	q.boost = &boost
	return q
}

// QueryName sets the query name for the filter that can be used when
// searching for matched_filters per hit.
func (q *SpanTermQuery) QueryName(queryName string) *SpanTermQuery {
	q.queryName = queryName
	return q
}

// Source returns the JSON body.
func (q *SpanTermQuery) Source() (interface{}, error) {
	term := map[string]interface{}{"value": q.value}
	if q.boost != nil {
		term["boost"] = *q.boost
	}
	if q.queryName != "" {
		term["query_name"] = q.queryName
	}
	return map[string]interface{}{
		"span_term": map[string]interface{}{q.field: term},
	}, nil
}

View file

@ -29,6 +29,19 @@ func NewTermsQuery(name string, values ...interface{}) *TermsQuery {
return q
}
// NewTermsQueryFromStrings creates and initializes a new TermsQuery
// from strings.
func NewTermsQueryFromStrings(name string, values ...string) *TermsQuery {
	q := &TermsQuery{
		name: name,
		// Pre-size the slice: the final length is known, so avoid the
		// repeated growth/copy of element-by-element appends.
		values: make([]interface{}, 0, len(values)),
	}
	for _, v := range values {
		q.values = append(q.values, v)
	}
	return q
}
// TermsLookup adds terms lookup details to the query.
func (q *TermsQuery) TermsLookup(lookup *TermsLookup) *TermsQuery {
q.termsLookup = lookup

View file

@ -36,8 +36,8 @@ type SearchSource struct {
suggesters []Suggester // suggest
rescores []*Rescore // rescore
defaultRescoreWindowSize *int
indexBoosts map[string]float64 // indices_boost
stats []string // stats
indexBoosts IndexBoosts // indices_boost
stats []string // stats
innerHits map[string]*InnerHit
collapse *CollapseBuilder // collapse
profile bool // profile
@ -50,7 +50,6 @@ func NewSearchSource() *SearchSource {
from: -1,
size: -1,
aggregations: make(map[string]Aggregation),
indexBoosts: make(map[string]float64),
innerHits: make(map[string]*InnerHit),
}
}
@ -340,7 +339,13 @@ func (s *SearchSource) ScriptFields(scriptFields ...*ScriptField) *SearchSource
// IndexBoost sets the boost that a specific index will receive when the
// query is executed against it.
func (s *SearchSource) IndexBoost(index string, boost float64) *SearchSource {
s.indexBoosts[index] = boost
s.indexBoosts = append(s.indexBoosts, IndexBoost{Index: index, Boost: boost})
return s
}
// IndexBoosts sets the boosts for specific indices.
func (s *SearchSource) IndexBoosts(boosts ...IndexBoost) *SearchSource {
s.indexBoosts = append(s.indexBoosts, boosts...)
return s
}
@ -465,7 +470,11 @@ func (s *SearchSource) Source() (interface{}, error) {
source["slice"] = src
}
if len(s.indexBoosts) > 0 {
source["indices_boost"] = s.indexBoosts
src, err := s.indexBoosts.Source()
if err != nil {
return nil, err
}
source["indices_boost"] = src
}
if len(s.aggregations) > 0 {
aggsMap := make(map[string]interface{})
@ -590,3 +599,34 @@ func (s *SearchSource) Source() (interface{}, error) {
return source, nil
}
// -- IndexBoosts --

// IndexBoost specifies an index by some boost factor.
type IndexBoost struct {
	Index string
	Boost float64
}

// Source generates a JSON-serializable output for IndexBoost.
func (b IndexBoost) Source() (interface{}, error) {
	return map[string]interface{}{b.Index: b.Boost}, nil
}

// IndexBoosts is a slice of IndexBoost entities.
type IndexBoosts []IndexBoost

// Source generates a JSON-serializable output for IndexBoosts.
func (b IndexBoosts) Source() (interface{}, error) {
	var out []interface{}
	for _, entry := range b {
		src, err := entry.Source()
		if err != nil {
			return nil, err
		}
		out = append(out, src)
	}
	return out, nil
}

View file

@ -1,3 +1,7 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (

258
vendor/github.com/olivere/elastic/v7/snapshot_status.go generated vendored Normal file
View file

@ -0,0 +1,258 @@
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/olivere/elastic/v7/uritemplates"
)
// SnapshotStatusService returns information about the status of a snapshot.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/modules-snapshots.html
// for details.
type SnapshotStatusService struct {
	client *Client // client used to perform the HTTP request

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	repository        string   // snapshot repository name; optional (empty means all repositories)
	snapshot          []string // snapshot names; optional (empty means all snapshots)
	masterTimeout     string   // explicit operation timeout for connection to master node
	ignoreUnavailable *bool    // whether to ignore unavailable snapshots; NOTE(review): read in buildURL but no setter method is exposed — confirm whether one is missing
}

// NewSnapshotStatusService creates a new SnapshotStatusService.
func NewSnapshotStatusService(client *Client) *SnapshotStatusService {
	return &SnapshotStatusService{
		client: client,
	}
}

// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *SnapshotStatusService) Pretty(pretty bool) *SnapshotStatusService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *SnapshotStatusService) Human(human bool) *SnapshotStatusService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *SnapshotStatusService) ErrorTrace(errorTrace bool) *SnapshotStatusService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *SnapshotStatusService) FilterPath(filterPath ...string) *SnapshotStatusService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *SnapshotStatusService) Header(name string, value string) *SnapshotStatusService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
// It replaces any headers previously set via Header.
func (s *SnapshotStatusService) Headers(headers http.Header) *SnapshotStatusService {
	s.headers = headers
	return s
}

// Repository is the repository name.
func (s *SnapshotStatusService) Repository(repository string) *SnapshotStatusService {
	s.repository = repository
	return s
}

// Snapshot is the list of snapshot names. If not set, defaults to all snapshots.
// Repeated calls append to the list.
func (s *SnapshotStatusService) Snapshot(snapshots ...string) *SnapshotStatusService {
	s.snapshot = append(s.snapshot, snapshots...)
	return s
}

// MasterTimeout specifies an explicit operation timeout for connection to master node.
func (s *SnapshotStatusService) MasterTimeout(masterTimeout string) *SnapshotStatusService {
	s.masterTimeout = masterTimeout
	return s
}
// buildURL builds the URL for the operation.
func (s *SnapshotStatusService) buildURL() (string, url.Values, error) {
	var (
		path string
		err  error
	)
	// Pick the most specific endpoint for the given repository/snapshot combination.
	switch {
	case s.repository != "" && len(s.snapshot) > 0:
		path, err = uritemplates.Expand("/_snapshot/{repository}/{snapshot}/_status", map[string]string{
			"repository": s.repository,
			"snapshot":   strings.Join(s.snapshot, ","),
		})
	case s.repository != "":
		path, err = uritemplates.Expand("/_snapshot/{repository}/_status", map[string]string{
			"repository": s.repository,
		})
	default:
		path, err = uritemplates.Expand("/_snapshot/_status", nil)
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters.
	params := url.Values{}
	if s.pretty != nil {
		params.Set("pretty", fmt.Sprint(*s.pretty))
	}
	if s.human != nil {
		params.Set("human", fmt.Sprint(*s.human))
	}
	if s.errorTrace != nil {
		params.Set("error_trace", fmt.Sprint(*s.errorTrace))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprint(*s.ignoreUnavailable))
	}
	return path, params, nil
}
// Validate checks if the operation is valid.
//
// Validation only fails if snapshot names were provided but no repository was
// provided.
func (s *SnapshotStatusService) Validate() error {
	if s.repository == "" && len(s.snapshot) > 0 {
		return fmt.Errorf("snapshots were specified but repository is missing")
	}
	return nil
}
// Do executes the operation.
func (s *SnapshotStatusService) Do(ctx context.Context) (*SnapshotStatusResponse, error) {
	// Fail fast if the parameter combination is invalid.
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Build the request URL and query-string parameters.
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Issue the HTTP GET request.
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "GET",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Decode the response body into the typed response.
	var ret SnapshotStatusResponse
	if err := json.Unmarshal(res.Body, &ret); err != nil {
		return nil, err
	}
	return &ret, nil
}
// SnapshotStatusResponse is the response from SnapshotStatusService.Do.
type SnapshotStatusResponse struct {
	// Snapshots holds one status entry per snapshot.
	Snapshots []SnapshotStatus `json:"snapshots"`
}

// SnapshotStatus describes a single snapshot: its identity, overall state,
// aggregate shard counters and statistics, and per-index details.
type SnapshotStatus struct {
	Snapshot           string                         `json:"snapshot"`
	Repository         string                         `json:"repository"`
	UUID               string                         `json:"uuid"`
	State              string                         `json:"state"`
	IncludeGlobalState bool                           `json:"include_global_state"`
	ShardsStats        SnapshotShardsStats            `json:"shards_stats"`
	Stats              SnapshotStats                  `json:"stats"`
	Indices            map[string]SnapshotIndexStatus `json:"indices"`
}

// SnapshotShardsStats counts the shards participating in a snapshot by stage.
type SnapshotShardsStats struct {
	Initializing int `json:"initializing"`
	Started      int `json:"started"`
	Finalizing   int `json:"finalizing"`
	Done         int `json:"done"`
	Failed       int `json:"failed"`
	Total        int `json:"total"`
}

// SnapshotStats aggregates file counts, sizes, and timing information,
// as reported for a snapshot, an index, or a single shard.
type SnapshotStats struct {
	// Incremental covers only the files that had to be copied for this snapshot.
	Incremental struct {
		FileCount   int    `json:"file_count"`
		Size        string `json:"size"`
		SizeInBytes int64  `json:"size_in_bytes"`
	} `json:"incremental"`

	// Processed covers the files processed so far.
	Processed struct {
		FileCount   int    `json:"file_count"`
		Size        string `json:"size"`
		SizeInBytes int64  `json:"size_in_bytes"`
	} `json:"processed"`

	// Total covers all files referenced by the snapshot.
	Total struct {
		FileCount   int    `json:"file_count"`
		Size        string `json:"size"`
		SizeInBytes int64  `json:"size_in_bytes"`
	} `json:"total"`

	StartTime         string `json:"start_time"`
	StartTimeInMillis int64  `json:"start_time_in_millis"`
	Time              string `json:"time"`
	TimeInMillis      int64  `json:"time_in_millis"`

	// NOTE(review): the fields below appear to mirror older flat stats
	// fields — confirm they are still populated by the target
	// Elasticsearch version.
	NumberOfFiles    int    `json:"number_of_files"`
	ProcessedFiles   int    `json:"processed_files"`
	TotalSize        string `json:"total_size"`
	TotalSizeInBytes int64  `json:"total_size_in_bytes"`
}

// SnapshotIndexStatus describes the snapshot status of a single index.
type SnapshotIndexStatus struct {
	ShardsStats SnapshotShardsStats                 `json:"shards_stats"`
	Stats       SnapshotStats                       `json:"stats"`
	Shards      map[string]SnapshotIndexShardStatus `json:"shards"`
}

// SnapshotIndexShardStatus describes the snapshot status of a single shard.
type SnapshotIndexShardStatus struct {
	Stage  string        `json:"stage"` // initializing, started, finalize, done, or failed
	Stats  SnapshotStats `json:"stats"`
	Node   string        `json:"node"`
	Reason string        `json:"reason"` // reason for failure
}

View file

@ -217,7 +217,7 @@ func (q *TermSuggester) Source(includeName bool) (interface{}, error) {
suggester["prefix_length"] = *q.prefixLength
}
if q.minWordLength != nil {
suggester["min_word_len"] = *q.minWordLength
suggester["min_word_length"] = *q.minWordLength
}
if q.minDocFreq != nil {
suggester["min_doc_freq"] = *q.minDocFreq

View file

@ -162,7 +162,8 @@ func (s *TasksGetTaskService) Do(ctx context.Context) (*TasksGetTaskResponse, er
}
type TasksGetTaskResponse struct {
Header http.Header `json:"-"`
Completed bool `json:"completed"`
Task *TaskInfo `json:"task,omitempty"`
Header http.Header `json:"-"`
Completed bool `json:"completed"`
Task *TaskInfo `json:"task,omitempty"`
Error *ErrorDetails `json:"error,omitempty"`
}

View file

@ -203,7 +203,7 @@ func (s *UpdateByQueryService) AbortOnVersionConflict() *UpdateByQueryService {
return s
}
// ProceedOnVersionConflict aborts the request on version conflicts.
// ProceedOnVersionConflict won't abort the request on version conflicts.
// It is an alias to setting Conflicts("proceed").
func (s *UpdateByQueryService) ProceedOnVersionConflict() *UpdateByQueryService {
s.conflicts = "proceed"

View file

@ -12,7 +12,7 @@
// values := make(map[string]interface{})
// values["user"] = "jtacoma"
// values["repo"] = "uritemplates"
// expanded, _ := template.ExpandString(values)
// expanded, _ := template.Expand(values)
// fmt.Printf(expanded)
//
package uritemplates

View file

@ -181,6 +181,6 @@ type XPackSecurityApplicationPrivileges struct {
type XPackSecurityIndicesPermissions struct {
Names []string `json:"names"`
Privileges []string `json:"privileges"`
FieldSecurity interface{} `json:"field_security"`
FieldSecurity interface{} `json:"field_security,omitempty"`
Query string `json:"query"`
}

View file

@ -186,8 +186,8 @@ type XPackWatchActionStatus struct {
}
type XPackWatchActionAckStatus struct {
Timestamp time.Time `json:"timestamp"`
AckStatusState string `json:"ack_status_state"`
Timestamp time.Time `json:"timestamp"`
State string `json:"state"`
}
type XPackWatchActionExecutionState struct {
@ -202,13 +202,13 @@ type XPackWatchActionThrottle struct {
}
type XPackWatch struct {
Trigger map[string]map[string]interface{} `json:"trigger"`
Input map[string]map[string]interface{} `json:"input"`
Condition map[string]map[string]interface{} `json:"condition"`
Transform map[string]interface{} `json:"transform,omitempty"`
ThrottlePeriod string `json:"throttle_period,omitempty"`
ThrottlePeriodInMillis int64 `json:"throttle_period_in_millis,omitempty"`
Actions map[string]*XPackWatchActionStatus `json:"actions"`
Metadata map[string]interface{} `json:"metadata,omitempty"`
Status *XPackWatchStatus `json:"status,omitempty"`
Trigger map[string]map[string]interface{} `json:"trigger"`
Input map[string]map[string]interface{} `json:"input"`
Condition map[string]map[string]interface{} `json:"condition"`
Transform map[string]interface{} `json:"transform,omitempty"`
ThrottlePeriod string `json:"throttle_period,omitempty"`
ThrottlePeriodInMillis int64 `json:"throttle_period_in_millis,omitempty"`
Actions map[string]map[string]interface{} `json:"actions"`
Metadata map[string]interface{} `json:"metadata,omitempty"`
Status *XPackWatchStatus `json:"status,omitempty"`
}