Query for the name set from 20151216 to 20151217:
{
"from": 0,
"size": 200,
"query": {
"bool": {
"must": {
"range": {
"DATE": {
"from": 20151216,
"to": 2015121617,
"include_lower": true,
"include_upper": true
}
}
}
}
},
"_source": {
"includes": [
"NAME"
],
"excludes": []
}
}
And the same query for the next day (20151217 to 20151218):
{
"from": 0,
"size": 200,
"query": {
"bool": {
"must": {
"range": {
"DATE": {
"from": 20151217,
"to": 2015121618,
"include_lower": true,
"include_upper": true
}
}
}
}
},
"_source": {
"includes": [
"NAME"
],
"excludes": []
}
}
In MySQL I would use the following SQL to solve my problem:
SELECT NAME FROM Table1 WHERE DATE BETWEEN 20151216 AND 20151217 INTERSECT SELECT NAME FROM Table1 WHERE DATE BETWEEN 20151217 AND 20151218
How can Elasticsearch find the intersection of two search results, the way MySQL does here?
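Elasticsearch has no direct INTERSECT, but one way to approximate it in a single request is a terms aggregation on NAME with one filter sub-aggregation per day and a bucket_selector that keeps only the names counted in both ranges. This is only a sketch under assumptions the question does not state: NAME is a keyword / not_analyzed field, index1 is a placeholder index name, and the cluster is Elasticsearch 5+ (the bucket_selector script uses Painless; older versions would use Groovy syntax).
POST /index1/_search
{
  "size": 0,
  "query": {
    "range": { "DATE": { "gte": 20151216, "lte": 20151218 } }
  },
  "aggs": {
    "names": {
      "terms": { "field": "NAME", "size": 200 },
      "aggs": {
        "day1": { "filter": { "range": { "DATE": { "gte": 20151216, "lte": 20151217 } } } },
        "day2": { "filter": { "range": { "DATE": { "gte": 20151217, "lte": 20151218 } } } },
        "in_both": {
          "bucket_selector": {
            "buckets_path": { "d1": "day1._count", "d2": "day2._count" },
            "script": "params.d1 > 0 && params.d2 > 0"
          }
        }
      }
    }
  }
}
The remaining bucket keys under names are the NAME values that occur in both date ranges, which is what the SQL INTERSECT would return.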
I am trying to set up an automated Kibana alert that takes in data from a defined extraction query. I get all the information I want; however, the response returns the values without rounding them (up to 12 decimal places). Where in the extraction query, and what, do I have to specify to round this value?
{
"size": 0,
"query": {
"bool": {
"filter": [
{
"match_all": {
"boost": 1
}
},
{
"range": {
"#timestamp": {
"from": "{{period_end}}||-24h",
"to": "{{period_end}}",
"include_lower": true,
"include_upper": true,
"format": "epoch_millis",
"boost": 1
}
}
}
],
"adjust_pure_negative": true,
"boost": 1
}
},
"_source": {
"includes": [],
"excludes": []
},
"stored_fields": "*",
"docvalue_fields": [
{
"field": "#timestamp",
"format": "date_time"
},
{
"field": "timestamp",
"format": "date_time"
}
],
"script_fields": {},
"aggregations": {
"2": {
"terms": {
"field": "tag.country.keyword",
"size": 20,
"min_doc_count": 1,
"shard_min_doc_count": 0,
"show_term_doc_count_error": false,
"order": [
{
"1": "desc"
},
{
"_key": "asc"
}
]
},
"aggregations": {
"1": {
"avg": {
"field": "my_field"
}
}
}
}
}
}
Here, I'm talking about the "avg" aggregation at the very bottom. As I understand it, right below the "field" key I should specify a "script" key defining the rounding function I want to use. Can anybody help me come up with the correct function?
I'm not sure what to put in the "script" key to make the rounding work.
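For what it's worth, a "script" on the avg aggregation itself would round each document value before averaging, not the final average. A sketch of an alternative, under my assumption (not from the original alert) that a bucket_script pipeline aggregation named rounded_avg may be added next to aggregation "1" inside the terms aggregation "2":
"aggregations": {
  "1": {
    "avg": { "field": "my_field" }
  },
  "rounded_avg": {
    "bucket_script": {
      "buckets_path": { "avg": "1" },
      "script": "Math.round(params.avg * 100.0) / 100.0"
    }
  }
}
Each country bucket then carries a rounded_avg value (here rounded to two decimal places), which the alert could reference instead of aggregation "1".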
I'm trying to implement facets with a date range aggregation in the current version of Amazon Elasticsearch Service (version 7.10). The key I want the article documents to be grouped by is publishedAt, which is a date. I want one bucket where publishedAt is in the past, which means published; one where it is in the future, which means scheduled; and one for all articles without a publishedAt, which are drafts. published and scheduled work as they should.
For drafts I can't use a filter or date range, as the values are null. So I want to make use of the "Missing Values" feature, which should treat documents with publishedAt = null as if they had the date given in the missing field. Unfortunately it has no effect on the results, even if I change the missing date so that it would fall into published or scheduled.
My request:
GET https://es.amazonaws.com/articles/_search
{
"size": 10,
"aggs": {
"facet_bucket_all": {
"aggs": {
"channel": {
"terms": {
"field": "channel.keyword",
"size": 5
}
},
"brand": {
"terms": {
"field": "brand.keyword",
"size": 5
}
},
"articleStatus": {
"date_range": {
"field": "publishedAt",
"format": "dd-MM-yyyy",
"missing": "01-07-1886",
"ranges": [
{ "key": "published", "from": "now-99y/M", "to": "now/M" },
{ "key": "scheduled", "from": "now+1s/M", "to": "now+99y/M" },
{ "key": "drafts", "from": "01-01-1886", "to": "31-12-1886" }
]
}
}
},
"filter": {
"bool": {
"must": []
}
}
},
"facet_bucket_publishedAt": {
"aggs": {},
"filter": {
"bool": {
"must": []
}
}
},
"facet_bucket_author": {
"aggs": {
"author": {
"terms": {
"field": "author",
"size": 10
}
}
},
"filter": {
"bool": {
"must": []
}
}
}
},
"query": {
"bool": {
"filter": [
{
"range": {
"publishedAt": {
"lte": "2021-08-09T09:52:19.975Z"
}
}
}
]
}
},
"from": 0,
"sort": [
{
"_score": "desc"
}
]
}
And in the result, the drafts are empty:
"articleStatus": {
"buckets": [
{
"key": "published",
"from": -1.496448E12,
"from_as_string": "01-08-1922",
"to": 1.627776E12,
"to_as_string": "01-08-2021",
"doc_count": 47920
},
{
"key": "scheduled",
"from": 1.627776E12,
"from_as_string": "01-08-2021",
"to": 4.7519136E12,
"to_as_string": "01-08-2120",
"doc_count": 3
},
{
"key": "drafts",
"from": 1.67252256E13,
"from_as_string": "01-01-1886",
"to": 1.67566752E13,
"to_as_string": "31-12-1886",
"doc_count": 0
}
]
}
SearchKit added this part to the query:
"query": {
"bool": {
"filter": [
{
"range": {
"publishedAt": {
"lte": "2021-08-09T09:52:19.975Z"
}
}
}
]
}
}
This had to be removed, because it filters out the null values before the missing setting can do its job.
Now I get the correct result:
"articleStatus": {
"buckets": [
{
"key": "drafts",
"from": -2.650752E12,
"from_as_string": "01-01-1886",
"to": -2.6193024E12,
"to_as_string": "31-12-1886",
"doc_count": 7
},
{
"key": "published",
"from": -1.496448E12,
"from_as_string": "01-08-1922",
"to": 1.627776E12,
"to_as_string": "01-08-2021",
"doc_count": 47920
},
{
"key": "scheduled",
"from": 1.627776E12,
"from_as_string": "01-08-2021",
"to": 4.7519136E12,
"to_as_string": "01-08-2120",
"doc_count": 3
}
]
}
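If that lte filter is still needed for the hit list, a hedged alternative (my suggestion, not something SearchKit produces) is to widen it with a should clause that also lets documents without publishedAt through, so the missing setting still gets to see them:
"query": {
  "bool": {
    "filter": [
      {
        "bool": {
          "should": [
            { "range": { "publishedAt": { "lte": "2021-08-09T09:52:19.975Z" } } },
            { "bool": { "must_not": { "exists": { "field": "publishedAt" } } } }
          ]
        }
      }
    ]
  }
}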
I'm having some problems querying with Elasticsearch; basically, I want to know the Elasticsearch equivalent of a SQL query.
What's the equivalent Elasticsearch syntax for this SQL query?
SELECT *
FROM people
WHERE name LIKE '$name%'
AND ( gender = 0 OR age < 18 )
AND id IN( 1, 2, 3 )
AND id NOT IN( 4, 5, 6 )
AND dead = 0
ORDER BY status desc,
TIME desc
The * is just for the example.
Using a bool query (and nested bool queries) allows you to express the same things as the SQL request.
A terms query matches when any one of its elements is found (OR semantics).
POST /index_name/people/_search
{
"query": {
"bool": {
"must": [
{
"match": {
"name": "John"
}
}
],
"filter": [
{
"bool": {
"must": [
{
"terms": {
"id": [
1,
2,
3
]
}
},
{
"term": {
"dead": 0
}
},
{
"bool": {
"should": [
{
"term": {
"gender": 0
}
},
{
"range": {
"age": {
"lt": 18
}
}
}
]
}
}
],
"must_not": [
{
"terms": {
"id": [
4,
5,
6
]
}
}
]
}
}
]
}
},
"sort": [
{
"status": {
"order": "desc"
},
"time": {
"order": "desc"
}
}
]
}
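One caveat about the name condition: LIKE '$name%' in the SQL is a prefix match, while the plain match above is a full-text match on "John". If the prefix semantics matter, the match could be swapped for a match_phrase_prefix (or a prefix query on a not_analyzed/keyword field) — a sketch, with "Joh" as a made-up prefix:
"must": [
  {
    "match_phrase_prefix": {
      "name": "Joh"
    }
  }
]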
I'm using the ELK stack and I'm trying to find out how to visualize all logs except those from specific IP ranges (for example 10.0.0.0/8). Is there any way to negate this filter query:
{"wildcard":{"src_address":"10.*"}}
I put it in Buckets -> Split Bars -> Aggregation -> Filters and I would like to negate this query so that I get all logs except those from 10.0.0.0/8.
This is the whole JSON request:
{
"query": {
"filtered": {
"query": {
"query_string": {
"query": "low_level_category:\"user_authentication_failure\" AND NOT src_address:\"10.*\"",
"analyze_wildcard": true
}
},
"filter": {
"bool": {
"must": [
{
"range": {
"#timestamp": {
"gte": 1474384885044,
"lte": 1474989685044,
"format": "epoch_millis"
}
}
}
],
"must_not": []
}
}
}
},
"size": 0,
"aggs": {
"2": {
"date_histogram": {
"field": "#timestamp",
"interval": "3h",
"time_zone": "Europe/Berlin",
"min_doc_count": 200,
"extended_bounds": {
"min": 1474384885043,
"max": 1474989685043
}
},
"aggs": {
"3": {
"terms": {
"field": "src_address.raw",
"size": 5,
"order": {
"_count": "desc"
}
}
}
}
}
}
}
Thanks
You can input this in the Kibana search box and it should get you what you need:
NOT src_address:10.*
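If you would rather keep raw query DSL in the Filters aggregation or in the request body, the same negation can be written (a sketch of the equivalent bool form) by wrapping the wildcard in must_not:
{
  "bool": {
    "must_not": {
      "wildcard": { "src_address": "10.*" }
    }
  }
}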
I have two indexes, index1 and index2, and both have two types, type1 and type2, with the same names in Elasticsearch (please assume that we have a valid business reason behind this).
I would like to search index1/type1 and index2/type2.
Here is my query:
POST _search
{
"query": {
"indices": {
"indices": ["index1","index2"],
"query": {
"filtered":{
"query":{
"multi_match": {
"query": "test",
"type": "cross_fields",
"fields": ["_all"]
}
},
"filter":{
"or":{
"filters":[
{
"terms":{
"_index":["index1"], // how can i make this work?
"_type": ["type1"]
}
},
{
"terms":{
"_index":["index2"], // how can i make this work?
"_type": ["type2"]
}
}
]
}
}
}
},
"no_match_query":"none"
}
}
}
You can use the indices and type filters inside a bool filter to filter on type and index.
The query would look something along these lines:
POST index1,index2/_search
{
"query": {
"filtered": {
"query": {
"multi_match": {
"query": "test",
"type": "cross_fields",
"fields": [
"_all"
]
}
},
"filter": {
"bool": {
"should": [
{
"indices": {
"index": "index1",
"filter": {
"type": {
"value": "type1"
}
},
"no_match_filter": "none"
}
},
{
"indices": {
"index": "index2",
"filter": {
"type": {
"value": "type2"
}
},
"no_match_filter": "none"
}
}
]
}
}
}
}
}
Passing the index names in the URL (for example index1,index2/_search) is good practice; otherwise you risk executing the query across all indices in the cluster.
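As an aside, a similar result can usually be reached without the indices filter by filtering on the built-in _index meta-field directly, assuming your Elasticsearch version supports term queries on _index. A sketch of that variant (my alternative, not the original answer) for the bool/should part of the filter:
"filter": {
  "bool": {
    "should": [
      {
        "bool": {
          "must": [
            { "term": { "_index": "index1" } },
            { "type": { "value": "type1" } }
          ]
        }
      },
      {
        "bool": {
          "must": [
            { "term": { "_index": "index2" } },
            { "type": { "value": "type2" } }
          ]
        }
      }
    ]
  }
}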