I am querying Elasticsearch using a status field and a range, but I am getting an error:
"type": "parsing_exception","reason": "[status] query malformed, no
start_object after query name"
The query looks as below:
{
"_source": {
"includes": []
},
"query": {
"bool": {
"must": [
{
"status": "IN_PROGRESS"
},
{
"range": {
"requestDate": {
"gte": "2018-10-01T08:00:00.000Z",
}
}
}
]
}
},
"sort": {
"requestDate": {
"order": "desc"
}
}
}
The error means that you haven't specified a query type (such as term or match) for the status field. So if status is a text datatype, you should perform a match query:
{
"_source": {
"includes": []
},
"query": {
"bool": {
"must": [
{
"match":{ "status": "IN_PROGRESS"
}},
{
"range": {
"requestDate": {
"gte": "2018-10-01T08:00:00.000Z",
}
}
}
]
}
},
"sort": {
"requestDate": {
"order": "desc"
}
}
}
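If, on the other hand, status is mapped as a keyword (an exact, non-analyzed value), a term query is the usual choice instead; a minimal sketch under that assumption:
{
  "query": {
    "bool": {
      "must": [
        {
          "term": {
            "status": "IN_PROGRESS"
          }
        },
        {
          "range": {
            "requestDate": {
              "gte": "2018-10-01T08:00:00.000Z"
            }
          }
        }
      ]
    }
  }
}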
Related
I have the following query:
{
"aggs": {
"groupby": {
"terms": {
"field": "AMAZING LONG NAME THAT MAKES NO SENSE",
"missing": "",
"order": [
{
"_term": "asc"
}
],
"size": 10038
}
}
},
"query": {
"bool": {
"filter": [
{
"bool": {
"must": [
{
"term": {
"match": {
"AMAZING LONG NAME THAT MAKES NO SENSE": "Term1"
}
}
}
]
}
}
]
}
},
"size": 10
}
And it raises a parsing_exception:
{
"error": {
"root_cause": [
{
"type": "parsing_exception",
"reason": "[term] query does not support [AMAZING LONG NAME THAT MAKES NO SENSE]",
"line": 1,
"col": 235
}
],
"type": "x_content_parse_exception",
"reason": "[1:235] [bool] failed to parse field [filter]",
"caused_by": {
"type": "x_content_parse_exception",
"reason": "[1:235] [bool] failed to parse field [must]",
"caused_by": {
"type": "parsing_exception",
"reason": "[term] query does not support [AMAZING LONG NAME THAT MAKES NO SENSE]",
"line": 1,
"col": 235
}
}
},
"status": 400
}
My question is: should it be the field name that is entered in match?
The term query syntax can be corrected as below:
POST demoindex/_search
{
"aggs": {
"groupby": {
"terms": {
"field": "AMAZING LONG NAME THAT MAKES NO SENSE",
"missing": "",
"order": [
{
"_term": "asc"
}
],
"size": 10038
}
}
},
"query": {
"bool": {
"filter": [
{
"bool": {
"must": [
{
"term": {
"AMAZING LONG NAME THAT MAKES NO SENSE": {
"value": "Term1"
}
}
}
]
}
}
]
}
},
"size": 10
}
The term query syntax is as below:
query -> term -> field name (to perform the exact match on) -> value
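Expressed as a request body, a generic skeleton of that shape looks like this (fieldname and the value string are placeholders to replace with your own):
{
  "query": {
    "term": {
      "fieldname": {
        "value": "exact value to match on"
      }
    }
  }
}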
I have a request on Elasticsearch and I'm trying to add a list of acronyms (or synonyms) inside the request, but I can't figure out where to place it.
Let's say the synonyms list is {"HR": "Human Ressources", "AWS": "Amazon Web Service"}
The request is the following:
{
"query": {
"bool": {
"filter": [
{
"terms": {
"observatory": [
"rome",
"meban",
"emass",
"cigref",
"opiiec",
"null"
]
}
},
{
"terms": {
"referentiel_id": [
"null",
42,
48,
52
]
}
}
],
"must": {
"match": {
"skill": {
"query": "*dactif*",
"fuzziness": "AUTO"
}
}
}
}
}
}
You can use a synonym token filter to handle synonyms in your search query.
Below is a working example with index mapping, index data, search query, and search results.
Index Mapping:
{
"settings": {
"index": {
"analysis": {
"analyzer": {
"synonym": {
"tokenizer": "whitespace",
"filter": [
"synonym"
]
}
},
"filter": {
"synonym": {
"type": "synonym",
"synonyms": [
"HR, Human Ressources",
"AWS, Amazon Web Service"
]
}
}
}
}
},
"mappings": {
"properties": {
"observatory": {
"type": "text",
"analyzer": "synonym"
}
}
}
}
Index Data:
{
"observatory":"HR"
}
{
"observatory":"Human Ressources"
}
Search Query:
{
"query": {
"bool": {
"should": [
{
"match": {
"observatory": {
"query": "HR"
}
}
}
]
}
}
}
Search Result:
"hits": [
{
"_index": "67707925",
"_type": "_doc",
"_id": "1",
"_score": 0.487735,
"_source": {
"observatory": "Human Ressources"
}
},
{
"_index": "67707925",
"_type": "_doc",
"_id": "2",
"_score": 0.487735,
"_source": {
"observatory": "HR"
}
}
]
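As a follow-up note: if the synonym list changes often, a common variant is to apply the synonym analyzer only at search time, so existing documents don't need reindexing when synonyms are updated. A minimal sketch of the mapping change, reusing the synonym analyzer defined in the settings above:
{
  "mappings": {
    "properties": {
      "observatory": {
        "type": "text",
        "analyzer": "standard",
        "search_analyzer": "synonym"
      }
    }
  }
}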
I am trying to query my Elasticsearch server in Python. If I hard-code the URL and query string in my Python script (the search() function below), everything works fine with no issues. But if I store the URL and query in a separate XML property file, I don't get the results; instead I get the error below:
text '{"error":{"root_cause":[{"type":"parsing_exception","reason":"Expected [START_OBJECT] but found [VALUE_STRING]","line":1,"col":1}],"type":"parsing_exception","reason":"Expected [START_OBJECT] but found [VALUE_STRING]","line":1,"col":1},"status":400}' str
Here is the code I am using:
def search():
url="http://0.0.0.0/logstash-pse*/_search/"
query={ "size": 0, "aggs": { "2": { "date_histogram": { "field": "#timestamp", "interval": "30m", "time_zone": "America/Chicago", "min_doc_count": 1 }, "aggs": { "3": { "terms": { "field": "queryname.keyword", "size": 100, "order": { "1.90": "desc" } }, "aggs": { "1": { "percentiles": { "field": "queryResponseTime", "percents": [ 90 ], "keyed": "false" } } } } } } }, "query": { "bool": { "must": [ { "query_string": { "query": "path: \"/store_locator/\"", "analyze_wildcard": "true" } }, { "query_string": { "analyze_wildcard": "true", "query": "*" } }, { "range": { "#timestamp": { "gte": 1527181463371, "lte": 1527267863371, "format": "epoch_millis" } } } ], "must_not": [] } }, "highlight": { "pre_tags": [ "#kibana-highlighted-field#" ], "post_tags": [ "#/kibana-highlighted-field#" ], "fields": { "*": { "highlight_query": { "bool": { "must": [ { "query_string": { "query": "path: \"/store_locator/\"", "analyze_wildcard": "true", "all_fields": "true" } }, { "query_string": { "analyze_wildcard": "true", "query": "*", "all_fields": "true" } }, { "range": { "#timestamp": { "gte": 1527181463371, "lte": 1527267863371, "format": "epoch_millis" } } } ], "must_not": [] } } } }, "fragment_size": 2147483647 }, "_source": { "excludes": [] }, "version": "true"}
response = requests.post(url, auth=(user, password), verify=False, json=query)
The XML property file I am using looks like the below:
<custom>
<url>the above url goes here</url>
<query> above query </query>
</custom>
Any idea what I am missing? Much appreciated.
I was able to figure it out on my own after a bit of experimenting. In case someone else runs into this: the query read from the XML file comes back as a plain string, not a Python dict, so it has to be parsed with json.loads() before being passed to requests. I just used the below:
import json  # parse the query string read from the XML file into a dict
response = requests.post(url, auth=(user, password), verify=False, json=json.loads(query))
I'm using the ELK stack and I'm trying to find out how to visualize all logs except those from specific IP ranges (for example 10.0.0.0/8). Is there any way to negate this filter query:
{"wildcard":{"src_address":"10.*"}}
I put it under Buckets -> Split Bars -> Aggregation -> Filters, and I would like to negate this query so that I get all logs except those from 10.0.0.0/8.
This is the whole JSON request:
{
"query": {
"filtered": {
"query": {
"query_string": {
"query": "low_level_category:\"user_authentication_failure\" AND NOT src_address:\"10.*\"",
"analyze_wildcard": true
}
},
"filter": {
"bool": {
"must": [
{
"range": {
"#timestamp": {
"gte": 1474384885044,
"lte": 1474989685044,
"format": "epoch_millis"
}
}
}
],
"must_not": []
}
}
}
},
"size": 0,
"aggs": {
"2": {
"date_histogram": {
"field": "#timestamp",
"interval": "3h",
"time_zone": "Europe/Berlin",
"min_doc_count": 200,
"extended_bounds": {
"min": 1474384885043,
"max": 1474989685043
}
},
"aggs": {
"3": {
"terms": {
"field": "src_address.raw",
"size": 5,
"order": {
"_count": "desc"
}
}
}
}
}
}
}
Thanks
You can input this in the Kibana search box and it should get you what you need:
NOT src_address:10.*
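If you need the same negation as raw query DSL instead of a search-box expression, wrapping the wildcard in a bool query's must_not should achieve it; a sketch:
{
  "query": {
    "bool": {
      "must_not": [
        {
          "wildcard": {
            "src_address": "10.*"
          }
        }
      ]
    }
  }
}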
I have two indexes, index1 and index2, and both have two types, type1 and type2, with the same names, in Elasticsearch (please assume that we have a valid business reason behind this).
I would like to search index1/type1 and index2/type2.
Here is my query:
POST _search
{
"query": {
"indices": {
"indices": ["index1","index2"],
"query": {
"filtered":{
"query":{
"multi_match": {
"query": "test",
"type": "cross_fields",
"fields": ["_all"]
}
},
"filter":{
"or":{
"filters":[
{
"terms":{
"_index":["index1"], // how can i make this work?
"_type": ["type1"]
}
},
{
"terms":{
"_index":["index2"], // how can i make this work?
"_type": ["type2"]
}
}
]
}
}
}
},
"no_match_query":"none"
}
}
}
You can use the indices and type filters inside a bool filter to filter on index and type.
The query would look something along these lines:
POST index1,index2/_search
{
"query": {
"filtered": {
"query": {
"multi_match": {
"query": "test",
"type": "cross_fields",
"fields": [
"_all"
]
}
},
"filter": {
"bool": {
"should": [
{
"indices": {
"index": "index1",
"filter": {
"type": {
"value": "type1"
}
},
"no_match_filter": "none"
}
},
{
"indices": {
"index": "index2",
"filter": {
"type": {
"value": "type2"
}
},
"no_match_filter": "none"
}
}
]
}
}
}
}
}
Passing the index names in the URL (e.g. index1,index2/_search) is good practice; otherwise you risk executing the query across all indices in the cluster.
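Note that the filtered query and the indices filter used above were later deprecated and removed (as of Elasticsearch 5.x). On newer versions where that syntax no longer parses, a similar effect can be sketched with term filters on the _index (and, on 5.x/6.x, _type) metadata fields inside a bool query; the index and type names here are the ones from the question, and the exact version behavior is an assumption to verify against your cluster:
POST index1,index2/_search
{
  "query": {
    "bool": {
      "must": {
        "multi_match": {
          "query": "test",
          "type": "cross_fields",
          "fields": ["*"]
        }
      },
      "filter": {
        "bool": {
          "should": [
            {
              "bool": {
                "filter": [
                  { "term": { "_index": "index1" } },
                  { "term": { "_type": "type1" } }
                ]
              }
            },
            {
              "bool": {
                "filter": [
                  { "term": { "_index": "index2" } },
                  { "term": { "_type": "type2" } }
                ]
              }
            }
          ]
        }
      }
    }
  }
}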