Configure an Elasticsearch index with JSON: configuration not taking effect

I'm using the following JSON to configure Elasticsearch. The goal is to set up the index and the type in one go (this is a requirement, since I'm building Docker images). This is as far as I've gotten while still allowing Elasticsearch to start successfully. The problem is that the index isn't created, yet no error is raised. Other forms I've tried prevent the service from starting.
{
"cluster": {
"name": "MyClusterName"
},
"node": {
"name": "MyNodeName"
},
"indices": {
"number_of_shards": 4,
"index.number_of_replicas": 4
},
"index": {
"analysis": {
"analyzer": {
"my_ngram_analyzer": {
"tokenizer": "my_ngram_tokenizer",
"filter": "lowercase"
},
"my_lowercase_whitespace_analyzer": {
"tokenizer": "whitespace",
"filter": "lowercase"
}
},
"tokenizer": {
"my_ngram_tokenizer": {
"type": "nGram",
"min_gram": "2",
"max_gram": "20"
}
}
},
"index": {
"settings": {
"_id": "indexindexer"
},
"mappings": {
"inventoryIndex": {
"_id": {
"path": "indexName"
},
"_routing": {
"required": true,
"path": "indexName"
},
"properties": {
"indexName": {
"type": "string",
"index": "not_analyzed"
},
"startedOn": {
"type": "date",
"index": "not_analyzed"
},
"deleted": {
"type": "boolean",
"index": "not_analyzed"
},
"deletedOn": {
"type": "date",
"index": "not_analyzed"
},
"archived": {
"type": "boolean",
"index": "not_analyzed"
},
"archivedOn": {
"type": "date",
"index": "not_analyzed"
},
"failure": {
"type": "boolean",
"index": "not_analyzed"
},
"failureOn": {
"type": "date",
"index": "not_analyzed"
}
}
}
}
}
}
}
I may have a workaround using curl in a post-boot script, but I would prefer to have the configuration handled in the config file.
Thanks!

It appears that Elasticsearch will not allow all of this configuration to be done in a single YAML file. The workaround I've found is to create an index template and place it in the <es-config>/templates/ directory; then, after spinning up the service, I use curl to create the index. The template's index-name matching will catch it and provision it according to the template.
http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-templates.html
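For reference, here is a hedged sketch of what that workaround can look like on an ES 1.x-style setup; the file name, index name, and the trimmed-down mappings are illustrative placeholders rather than my exact files.
<es-config>/templates/inventory_template.json:
{
"inventory_template": {
"template": "inventoryindex*",
"settings": {
"number_of_shards": 4,
"number_of_replicas": 4
},
"mappings": {
"inventoryIndex": {
"properties": {
"indexName": { "type": "string", "index": "not_analyzed" },
"startedOn": { "type": "date" },
"deleted": { "type": "boolean" }
}
}
}
}
}
Then the post-boot script only has to create an index whose name matches the pattern, e.g.:
curl -XPUT 'http://localhost:9200/inventoryindex1'
Elasticsearch matches the new index against the template and provisions the settings and mappings automatically.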

Related

ARM Templates - Values and parameters for Adding Dynamic Data disks to VMs?

I'm new to ARM Templates.
I've downloaded an ARM Template from the Portal after building a VM with 1 managed Data Disk.
My objective is to use ARM Templates to build several VMs in a row.
For now, the parameters are identical except for the VM name and, of course, the NIC and disk names.
I noticed the parameters.json file had hardcoded values, and that wouldn't work as a reusable template, so I started modifying it to see how I could make it more dynamic.
However, I don't understand the data disk structure, which in this template is split across different components, and that's making me struggle with dynamic naming for the disks.
Data disks appear in the template as a resource and then again as a property of the VM, inside a copy loop.
However, in the parameters file there are two objects, dataDisks and dataDiskResources.
I don't understand why the parameters use two different objects instead of one (for example, everything inside dataDisks instead of also having a dataDiskResources), and I also don't get why the VM's disk property takes different, and more, parameters than the disk resource itself.
This is the template.json
{
"$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"location": {
"type": "string"
},
"subnetName": {
"type": "string"
},
"virtualNetworkId": {
"type": "string"
},
"virtualMachineName": {
"type": "string"
},
"virtualMachineRG": {
"type": "string"
},
"osDiskType": {
"type": "string"
},
"dataDisks": {
"type": "array"
},
"dataDiskResources": {
"type": "array"
},
"virtualMachineSize": {
"type": "string"
},
"adminUsername": {
"type": "string"
},
"adminPassword": {
"type": "secureString"
},
"diagnosticsStorageAccountName": {
"type": "string"
},
"diagnosticsStorageAccountId": {
"type": "string"
},
"diagnosticsStorageAccountType": {
"type": "string"
},
"diagnosticsStorageAccountKind": {
"type": "string"
}
},
"variables": {
"vnetId": "[parameters('virtualNetworkId')]",
"subnetRef": "[concat(variables('vnetId'), '/subnets/', parameters('subnetName'))]",
"nicName": "[concat(parameters('virtualMachineName'), substring(uniqueString(resourceGroup().id),0,4))]"
},
"resources": [
{
"name": "[variables('nicName')]",
"type": "Microsoft.Network/networkInterfaces",
"apiVersion": "2019-07-01",
"location": "[parameters('location')]",
"dependsOn": [],
"properties": {
"ipConfigurations": [
{
"name": "ipconfig1",
"properties": {
"subnet": {
"id": "[variables('subnetRef')]"
},
"privateIPAllocationMethod": "Dynamic"
}
}
]
},
"tags": {
}
},
{
"name": "[concat(parameters('virtualMachineName'),'_DataDisk_0')]",
"type": "Microsoft.Compute/disks",
"apiVersion": "2019-07-01",
"location": "[parameters('location')]",
"properties": "[parameters('dataDiskResources')[copyIndex()].properties]",
"sku": {
"name": "[parameters('dataDiskResources')[copyIndex()].sku]"
},
"copy": {
"name": "managedDiskResources",
"count": "[length(parameters('dataDiskResources'))]"
},
"tags": {
}
},
{
"name": "[parameters('virtualMachineName')]",
"type": "Microsoft.Compute/virtualMachines",
"apiVersion": "2019-07-01",
"location": "[parameters('location')]",
"dependsOn": [
"managedDiskResources",
"[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]",
"[concat('Microsoft.Storage/storageAccounts/', parameters('diagnosticsStorageAccountName'))]"
],
"properties": {
"hardwareProfile": {
"vmSize": "[parameters('virtualMachineSize')]"
},
"storageProfile": {
"osDisk": {
"createOption": "fromImage",
"managedDisk": {
"storageAccountType": "[parameters('osDiskType')]"
}
},
"imageReference": {
"publisher": "MicrosoftVisualStudio",
"offer": "VisualStudio",
"sku": "VS-2017-Ent-Latest-Win10-N",
"version": "latest"
},
"copy": [
{
"name": "dataDisks",
"count": "[length(parameters('dataDisks'))]",
"input": {
"lun": "[parameters('dataDisks')[copyIndex('dataDisks')].lun]",
"createOption": "[parameters('dataDisks')[copyIndex('dataDisks')].createOption]",
"caching": "[parameters('dataDisks')[copyIndex('dataDisks')].caching]",
"writeAcceleratorEnabled": "[parameters('dataDisks')[copyIndex('dataDisks')].writeAcceleratorEnabled]",
"diskSizeGB": "[parameters('dataDisks')[copyIndex('dataDisks')].diskSizeGB]",
"managedDisk": {
"id": "[coalesce(parameters('dataDisks')[copyIndex('dataDisks')].id, if(equals(parameters('dataDisks')[copyIndex('dataDisks')].name, json('null')), json('null'), resourceId('Microsoft.Compute/disks', parameters('dataDisks')[copyIndex('dataDisks')].name)))]",
"storageAccountType": "[parameters('dataDisks')[copyIndex('dataDisks')].storageAccountType]"
}
}
}
]
},
"networkProfile": {
"networkInterfaces": [
{
"id": "[resourceId('Microsoft.Network/networkInterfaces', variables('nicName'))]"
}
]
},
"osProfile": {
"computerName": "[parameters('virtualMachineName')]",
"adminUsername": "[parameters('adminUsername')]",
"adminPassword": "[parameters('adminPassword')]",
"windowsConfiguration": {
"enableAutomaticUpdates": true,
"provisionVmAgent": true
}
},
"licenseType": "Windows_Server",
"diagnosticsProfile": {
"bootDiagnostics": {
"enabled": true,
"storageUri": "[concat('https://', parameters('diagnosticsStorageAccountName'), '.blob.core.windows.net/')]"
}
}
},
"tags": {
}
},
{
"name": "[parameters('diagnosticsStorageAccountName')]",
"type": "Microsoft.Storage/storageAccounts",
"apiVersion": "2019-06-01",
"location": "[parameters('location')]",
"properties": {},
"kind": "[parameters('diagnosticsStorageAccountKind')]",
"sku": {
"name": "[parameters('diagnosticsStorageAccountType')]"
},
"tags": {
}
}
],
"outputs": {
"adminUsername": {
"type": "string",
"value": "[parameters('adminUsername')]"
}
}
}
And this is the parameters.json
{
"location": {
"value": "location"
},
"subnetName": {
"value": "subnetname"
},
"virtualNetworkId": {
"value": "networkid"
},
"virtualMachineRG": {
"value": "vmRG"
},
"osDiskType": {
"value": "Standard_LRS"
},
"dataDisks": {
"value": [
{
"lun": 0,
"createOption": "attach",
"caching": "None",
"writeAcceleratorEnabled": false,
"id": null,
"storageAccountType": null,
"name": null,
"diskSizeGB": null,
"diskEncryptionSet": {
"id": null
}
}
]
},
"dataDiskResources": {
"value": [
{
"sku": "Standard_LRS",
"properties": {
"diskSizeGB": 128,
"creationData": {
"createOption": "empty"
}
}
}
]
},
"virtualMachineSize": {
"value": "Standard_B4ms"
},
"adminUsername": {
"value": "admin"
},
"diagnosticsStorageAccountName": {
"value": "rg01diag"
},
"diagnosticsStorageAccountId": {
"value": "Microsoft.Storage/storageAccounts/rg01diag"
},
"diagnosticsStorageAccountType": {
"value": "Standard_LRS"
},
"diagnosticsStorageAccountKind": {
"value": "Storage"
} }
I also can't find any documentation for this kind of template. All the quickstart templates I find are simpler versions of this: for example, they declare all the disk properties inside the same template file, the parameters and properties are fewer, and there isn't any dataDiskResources object anywhere.
I want to understand how I would need to modify this disk structure to add dynamic naming that names the disks, for example, the way the Azure portal does (VMName_DataDisk_LunNumber).
That's because you have to specify different input when you create a data disk than when you attach one. But you don't have to create the disks yourself; you can just tell the VM to create them. This would be one way of doing that:
"dataDisks": [
{
"diskSizeGB": "[parameters('sizeOfEachDataDiskInGB')]",
"lun": 0,
"createOption": "Empty"
},
{
"diskSizeGB": "[parameters('sizeOfEachDataDiskInGB')]",
"lun": 1,
"createOption": "Empty"
},
{
"diskSizeGB": "[parameters('sizeOfEachDataDiskInGB')]",
"lun": 2,
"createOption": "Empty"
},
{
"diskSizeGB": "[parameters('sizeOfEachDataDiskInGB')]",
"lun": 3,
"createOption": "Empty"
}
],
With this you don't need a separate disk resource; the disks are created automatically. You can also add a property called name to each entry to specify a name for those disks (see the sketch after the link below).
https://github.com/Azure/azure-quickstart-templates/blob/master/101-vm-multiple-data-disk/azuredeploy.json
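To get portal-style disk names without a separate disk resource, here is a hedged sketch of adding names inside the storageProfile copy loop from your template; the count, the size parameter, and the storage account type are assumptions for illustration:
"copy": [
{
"name": "dataDisks",
"count": 4,
"input": {
"lun": "[copyIndex('dataDisks')]",
"name": "[concat(parameters('virtualMachineName'), '_DataDisk_', copyIndex('dataDisks'))]",
"createOption": "Empty",
"caching": "None",
"diskSizeGB": "[parameters('sizeOfEachDataDiskInGB')]",
"managedDisk": {
"storageAccountType": "Standard_LRS"
}
}
}
]
With createOption set to Empty the platform creates the implicit managed disks itself, so you can drop the Microsoft.Compute/disks resource and the dataDiskResources parameter, and each VM ends up with disks named VMName_DataDisk_0, VMName_DataDisk_1, and so on.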

Azure Data Factory ForEach Copy activity is not iterating through the files but instead pulls all files in the blob container. Why?

I have a pipeline in Data Factory v2 that has to look at a folder in blob storage and process each of its 145 files sequentially into a database table. After each file has been loaded into the table, a stored procedure should be triggered that checks each record and either inserts it or updates an existing record in a master table.
Looking online, I feel as though I have tried every combination of "Get Metadata", "ForEach", "Lookup" and "Append Variable" activities that has been suggested, but for some reason my Copy Data activity STILL picks up all the files at the same time and runs 145 times.
I recently found a blog post that I followed to use "Append Variable", since it will be useful for multiple file locations, but it does not work for me. I need to read the files as CSVs into tables rather than as binary objects, so I think that is part of my issue.
{
"name": "BulkLoadPipeline",
"properties": {
"activities": [
{
"name": "GetFileNames",
"type": "GetMetadata",
"policy": {
"timeout": "7.00:00:00",
"retry": 0,
"retryIntervalInSeconds": 30,
"secureOutput": false,
"secureInput": false
},
"typeProperties": {
"dataset": {
"referenceName": "DelimitedText1",
"type": "DatasetReference",
"parameters": {
"fileName": "#item()"
}
},
"fieldList": [
"childItems"
],
"storeSettings": {
"type": "AzureBlobStorageReadSetting"
},
"formatSettings": {
"type": "DelimitedTextReadSetting"
}
}
},
{
"name": "CopyDataRunDeltaCheck",
"type": "ForEach",
"dependsOn": [
{
"activity": "BuildList",
"dependencyConditions": [
"Succeeded"
]
}
],
"typeProperties": {
"items": {
"value": "#variables('fileList')",
"type": "Expression"
},
"isSequential": true,
"activities": [
{
"name": "WriteToTables",
"type": "Copy",
"policy": {
"timeout": "7.00:00:00",
"retry": 0,
"retryIntervalInSeconds": 30,
"secureOutput": false,
"secureInput": false
},
"typeProperties": {
"source": {
"type": "DelimitedTextSource",
"storeSettings": {
"type": "AzureBlobStorageReadSetting",
"wildcardFileName": "*.*"
},
"formatSettings": {
"type": "DelimitedTextReadSetting"
}
},
"sink": {
"type": "AzureSqlSink"
},
"enableStaging": false,
"translator": {
"type": "TabularTranslator",
"mappings": [
{
"source": {
"name": "myID",
"type": "String"
},
"sink": {
"name": "myID",
"type": "String"
}
},
{
"source": {
"name": "Col1",
"type": "String"
},
"sink": {
"name": "Col1",
"type": "String"
}
},
{
"source": {
"name": "Col2",
"type": "String"
},
"sink": {
"name": "Col2",
"type": "String"
}
},
{
"source": {
"name": "Col3",
"type": "String"
},
"sink": {
"name": "Col3",
"type": "String"
}
},
{
"source": {
"name": "Col4",
"type": "String"
},
"sink": {
"name": "Col4",
"type": "String"
}
},
{
"source": {
"name": "DW Date Created",
"type": "String"
},
"sink": {
"name": "DW_Date_Created",
"type": "String"
}
},
{
"source": {
"name": "DW Date Updated",
"type": "String"
},
"sink": {
"name": "DW_Date_Updated",
"type": "String"
}
}
]
}
},
"inputs": [
{
"referenceName": "DelimitedText1",
"type": "DatasetReference",
"parameters": {
"fileName": "#item()"
}
}
],
"outputs": [
{
"referenceName": "myTable",
"type": "DatasetReference"
}
]
},
{
"name": "CheckDeltas",
"type": "SqlServerStoredProcedure",
"dependsOn": [
{
"activity": "WriteToTables",
"dependencyConditions": [
"Succeeded"
]
}
],
"policy": {
"timeout": "7.00:00:00",
"retry": 0,
"retryIntervalInSeconds": 30,
"secureOutput": false,
"secureInput": false
},
"typeProperties": {
"storedProcedureName": "[TL].[uspMyCheck]"
},
"linkedServiceName": {
"referenceName": "myService",
"type": "LinkedServiceReference"
}
}
]
}
},
{
"name": "BuildList",
"type": "ForEach",
"dependsOn": [
{
"activity": "GetFileNames",
"dependencyConditions": [
"Succeeded"
]
}
],
"typeProperties": {
"items": {
"value": "#activity('GetFileNames').output.childItems",
"type": "Expression"
},
"isSequential": true,
"activities": [
{
"name": "Create list from variables",
"type": "AppendVariable",
"typeProperties": {
"variableName": "fileList",
"value": "#item().name"
}
}
]
}
}
],
"variables": {
"fileList": {
"type": "Array"
}
}
}
}
The Details screen of the pipeline output shows the pipeline looping for the number of items in the blob, but each time the Copy Data and Stored Procedure activities run against every file in the list at once instead of one at a time.
I feel like I am close to the answer but missing one vital part. Any help or suggestions are GREATLY appreciated.
Your payload is not correct.
The GetMetadata activity should not use the same dataset as the Copy activity.
The GetMetadata activity should reference a dataset that points at a folder, where the folder contains all the files you want to deal with; but your dataset has a 'fileName' parameter, and the wildcardFileName in the Copy source makes every iteration read every file.
Use the output of the GetMetadata activity as the items of the ForEach activity, and inside the loop pass each item's name to the parameterized dataset used by the Copy activity.
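As a hedged sketch of how the corrected pieces could fit together (SourceFolderDataset is an assumed folder-level dataset, not one from your pipeline): GetMetadata lists the folder, ForEach iterates childItems directly, and the Copy input passes each item's name to the parameterized dataset with no wildcard in the source:
{
"name": "GetFileNames",
"type": "GetMetadata",
"typeProperties": {
"dataset": {
"referenceName": "SourceFolderDataset",
"type": "DatasetReference"
},
"fieldList": [
"childItems"
]
}
},
{
"name": "CopyDataRunDeltaCheck",
"type": "ForEach",
"dependsOn": [
{
"activity": "GetFileNames",
"dependencyConditions": [
"Succeeded"
]
}
],
"typeProperties": {
"items": {
"value": "@activity('GetFileNames').output.childItems",
"type": "Expression"
},
"isSequential": true,
"activities": [
{
"name": "WriteToTables",
"type": "Copy",
"typeProperties": {
"source": {
"type": "DelimitedTextSource"
},
"sink": {
"type": "AzureSqlSink"
}
},
"inputs": [
{
"referenceName": "DelimitedText1",
"type": "DatasetReference",
"parameters": {
"fileName": "@item().name"
}
}
],
"outputs": [
{
"referenceName": "myTable",
"type": "DatasetReference"
}
]
}
]
}
}
This also removes the need for the BuildList loop and the fileList variable, so each iteration copies exactly one file before CheckDeltas runs.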

Can't understand this JSON schema from the Swish QR Code API

I'm trying to use an API but the documentation is really bad. I got this JSON schema but I don't understand it. What am I supposed to include in the request?
url: https://mpc.getswish.net/qrg-swish/api/v1/prefilled
I have tried this but it doesn't work:
{
"payee":{
"editable":{
"editable":"false"
},
"swishString":{
"value":"0721876507"
}
},
"size":600,
"border":20,
"transparent":false,
"format":"png"
}
Here's the JSON schema
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Swish pre-filled qr code generator",
"description": "REST interface to get a QR code that the Swish app will interpret as a pre filled code",
"definitions": {
"editable": {
"description ": "Controls if user can modify this value in Swish app or not",
"type": "object",
"properties": {
"editable": {
"type": "boolean",
"default": false
}
}
},
"swishString": {
"type": "object",
"properties": {
"value": {
"type": "string",
"maxLength": 70
}
},
"required": [
"value"
]
},
"swishNumber": {
"type": "object",
"properties": {
"value": {
"type": "number"
}
},
"required": [
"value"
]
}
},
"type": "object",
"properties": {
"format": {
"enum": [
"jpg",
"png",
"svg"
]
},
"payee": {
"description": "Payment receiver",
"allOf": [
{
"$ref": "#/definitions/editable"
},
{
"$ref": "#/definitions/swishString"
}
]
},
"amount": {
"description": "Payment amount",
"allOf": [
{
"$ref": "#/definitions/editable"
},
{
"$ref": "#/definitions/swishNumber"
}
]
},
"message": {
"description": "Message for payment",
"allOf": [
{
"$ref": "#/definitions/editable"
},
{
"$ref": "#/definitions/swishString"
}
]
},
"size": {
"description": "Size of the QR code. The code is a square, so width and height are the same. Not required is the format is svg",
"value": "number",
"minimum": 300
},
"border": {
"description": "Width of the border.",
"type": "number"
},
"transparent": {
"description": "Select background color to be transparent. Do not work with jpg format.",
"type": "boolean"
}
},
"required": [
"format"
],
"anyOf": [
{
"required": [
"payee"
]
},
{
"required": [
"amount"
]
},
{
"required": [
"message"
]
}
],
"additionalProperties": false,
"maxProperties": 5
}
The API should return a QR code.
To be honest, I have not taken the time to learn JSON schema, but your example should probably look something like this:
{
"payee": {
"value": "0721876507",
"editable": false
},
"size": 600,
"border": 20,
"transparent": false,
"format": "png"
}
There are other parameters you may choose to utilize:
{
"payee": {
"value": "1239006032",
"editable": false
},
"message": {
"value": "LIV",
"editable": true
},
"amount": {
"value": 100,
"editable": true
},
"format": "png",
"size": 300,
"border": 0,
"transparent": true
}
Honestly, I think the developers behind the Swish APIs are trying to look smart by complicating things. They should, of course, have provided example JSON data instead of forcing consumers to understand their JSON schema. Also, I believe their published schema is wrong. The second example I provided works even though it doesn't validate according to the JSON schema ("Object property count 7 exceeds maximum count of 5").
Here is a minimal, and pretty useless, request that returns a valid QR code:
{
"format": "png",
"size": 300
}
And here is a more usable example that works:
{
"format": "png",
"size": 300,
"transparent": false,
"amount": {
"value": 999.99,
"editable": true
},
"payee": {
"value": "0701000000",
"editable": false
},
"message": {
"value": "Hello",
"editable": false
}
}
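If it helps, here is a hedged sketch of posting such a body with curl; the Content-Type header and the output file name are my own assumptions, not something the Swish documentation spells out:
curl -X POST 'https://mpc.getswish.net/qrg-swish/api/v1/prefilled' \
-H 'Content-Type: application/json' \
-d '{"format": "png", "size": 300, "payee": {"value": "0701000000", "editable": false}}' \
--output swish-qr.png
If the API behaves as the schema suggests, the response body is the image itself, so writing it straight to a file is an easy way to inspect the QR code.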

Elasticsearch: Default template does not detect dates

I have a default template in place which looks like
PUT /_template/abtemp
{
"template": "abt*",
"settings": {
"index.refresh_interval": "5s",
"number_of_shards": 5,
"number_of_replicas": 1,
"index.codec": "best_compression"
},
"mappings": {
"_default_": {
"_all": {
"enabled": false
},
"_source": {
"enabled": true
},
"dynamic_templates": [
{
"message_field": {
"match": "message",
"match_mapping_type": "string",
"mapping": {
"type": "string",
"index": "analyzed",
"omit_norms": true,
"fielddata": {
"format": "disabled"
}
}
}
},
{
"string_fields": {
"match": "*",
"match_mapping_type": "string",
"mapping": {
"type": "string",
"index": "analyzed",
"omit_norms": true,
"fielddata": {
"format": "disabled"
},
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed",
"ignore_above": 256
}
}
}
}
}
]
}
}
}
The idea here is this:
apply the template to all indices whose name matches abt*
only analyze a string field if it is named message; all other string fields will be not_analyzed and will have a corresponding .raw field
Now I try to index some data into it with:
curl -s -XPOST hostName:port/indexName/_bulk --data-binary @myFile.json
and here is the file
{ "index" : { "_index" : "abtclm3","_type" : "test"} }
{ "FIELD1":1, "FIELD2":"2015-11-18 15:32:18"", "FIELD3":"MATTHEWS", "FIELD4":"GARY", "FIELD5":"", "FIELD6":"STARMX", "FIELD7":"AL", "FIELD8":"05/15/2010 11:30", "FIELD9":"05/19/2010 7:00", "FIELD10":"05/19/2010 23:00", "FIELD11":3275, "FIELD12":"LC", "FIELD13":"WIN", "FIELD14":"05/15/2010 11:30", "FIELD15":"LC", "FIELD16":"POTUS", "FIELD17":"WH", "FIELD18":"S GROUNDS", "FIELD19":"OFFICE", "FIELD20":"VISITORS", "FIELD21":"STATE ARRIVAL - MEXICO**", "FIELD22":"08/27/2010 07:00:00 AM +0000", "FIELD23":"MATTHEWS", "FIELD24":"GARY", "FIELD25":"", "FIELD26":"STARMX", "FIELD27":"AL", "FIELD28":"05/15/2010 11:30", "FIELD29":"05/19/2010 7:00", "FIELD30":"05/19/2010 23:00", "FIELD31":3275, "FIELD32":"LC", "FIELD33":"WIN", "FIELD34":"05/15/2010 11:30", "FIELD35":"LC", "FIELD36":"POTUS", "FIELD37":"WH", "FIELD38":"S GROUNDS", "FIELD39":"OFFICE", "FIELD40":"VISITORS", "FIELD41":"STATE ARRIVAL - MEXICO**", "FIELD42":"08/27/2010 07:00:00 AM +0000" }
Note that there are a few fields, such as FIELD2, that should be classified as dates. Also, FIELD31 should be classified as long. The indexing happens, and when I look at the data I see that the numbers have been correctly classified, but everything else has been put under string. How do I make sure that the fields containing timestamps get classified as dates?
You have a lot of date formats there. You need a template like this one:
{
"template": "abt*",
"settings": {
"index.refresh_interval": "5s",
"number_of_shards": 5,
"number_of_replicas": 1,
"index.codec": "best_compression"
},
"mappings": {
"_default_": {
"dynamic_date_formats":["dateOptionalTime||yyyy-mm-dd HH:mm:ss||mm/dd/yyyy HH:mm||mm/dd/yyyy HH:mm:ss aa ZZ"],
"_all": {
"enabled": false
},
"_source": {
"enabled": true
},
"dynamic_templates": [
{
"message_field": {
"match": "message",
"match_mapping_type": "string",
"mapping": {
"type": "string",
"index": "analyzed",
"omit_norms": true,
"fielddata": {
"format": "disabled"
}
}
}
},
{
"dates": {
"match": "*",
"match_mapping_type": "date",
"mapping": {
"type": "date",
"format": "dateOptionalTime||yyyy-mm-dd HH:mm:ss||mm/dd/yyyy HH:mm||mm/dd/yyyy HH:mm:ss aa ZZ"
}
}
},
{
"string_fields": {
"match": "*",
"match_mapping_type": "string",
"mapping": {
"type": "string",
"index": "analyzed",
"omit_norms": true,
"fielddata": {
"format": "disabled"
},
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed",
"ignore_above": 256
}
}
}
}
}
]
}
}
}
This probably doesn't cover all the formats you have in there; you will need to add the remaining ones. The idea is to specify them under dynamic_date_formats, separated by ||, and then to specify them again under the format field of the date mapping itself.
To get an idea of how to define them, please see this section of the documentation for built-in formats and this piece of documentation for any custom formats you plan on using.
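As a quick sanity check (a hedged example; hostName:port and the index name are the same placeholders as in the bulk request above), delete and re-create the index, re-run the bulk load, and then inspect the mapping that dynamic detection produced:
curl -XGET 'hostName:port/abtclm3/_mapping?pretty'
FIELD2 and the other timestamp fields should now come back as "type": "date" rather than "type": "string".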

Logstash + Elasticsearch template mapping fails to add to Elasticsearch

I'm trying to add a custom template for all Logstash indices in Elasticsearch. However, whenever I add one, Logstash raises a 400 error on all the logs and fails to add anything to Elasticsearch.
I'm adding the template using the Elasticsearch REST API:
POST _template/logstash
{
"order": 0,
"template" : "logstash*",
"settings": {
"index.refresh_interval": "5s"
},
"mappings": {
"_default_": {
"_all" : {
"enabled" : true,
"omit_norms": true
},
"dynamic_templates": [
{
"message_field": {
"mapping": {
"index": "analyzed",
"omit_norms": true,
"type": "string"
},
"match_mapping_type": "string",
"match": "message"
}
},
{
"string_fields": {
"mapping": {
"index": "analyzed",
"omit_norms": true,
"type": "string",
"fields": {
"raw": {
"ignore_above": 256,
"index": "not_analyzed",
"type": "string"
}
}
},
"match_mapping_type": "string",
"match": "*"
}
}
],
"properties": {
"geoip": {
"dynamic": true,
"type": "object",
"properties": {
"location": {
"type": "geo_point"
}
}
},
"#version": {
"index": "not_analyzed",
"type": "string"
},
"#fields": {
"type": "object",
"dynamic": true,
"path": "full"
},
"#message": {
"type": "string",
"index": "analyzed"
},
"#source": {
"type": "string",
"index": "not_analyzed"
},
"method": {
"type": "string",
"index": "not_analyzed"
},
"requested": {
"type": "date",
"format": "dateOptionalTime",
"index": "not_analyzed"
},
"response_time": {
"type": "float",
"index": "not_analyzed"
},
"hostname": {
"type": "string",
"index": "not_analyzed"
},
"ip": {
"type": "string",
"index": "not_analyzed"
},
"error": {
"type": "string",
"index": "not_analyzed"
}
}
}
}
}
You should try to add the template through Logstash instead of using the REST API directly.
In your Logstash configuration:
output {
elasticsearch {
# add host and other settings as appropriate
template => "/path/to/your/template.json" # path to the template file you want to use
template_name => "logstash"
template_overwrite => true
}
}
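After restarting Logstash with that output block it will (re)install the template at startup; a hedged way to verify, assuming Elasticsearch listens on localhost:9200:
curl -XGET 'localhost:9200/_template/logstash?pretty'
If the template shows up there, newly created logstash-* indices will pick up your mappings and you no longer need the manual POST.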