Send Cisco ASA Syslogs to Elasticsearch Using Logstash

Blog, ElasticSearch, Information Technology, Kibana, Logstash, Networking, Software

This guide is a continuation of this blog post here. The following assumes that you already have an Elasticsearch instance set up and ready to go. This post will walk you through installing and setting up Logstash to send Cisco ASA syslog messages to an Elasticsearch index.

As of today (6/16/2015), version 1.5.1 is the latest stable release of Logstash so I will be using 1.5.1 in my setup guide.


OPTIONAL

Before we begin sending data to Elasticsearch, it is probably a good idea to set up a custom template in Elasticsearch. This ensures that specific fields are mapped to the correct data types.

For example: by default, all data passed to a new index in Elasticsearch is treated as a string type. You wouldn't want that for something like the bytes field if you ever want to add up all the bytes over a specific time window. So you create a custom template so that the bytes field is stored as type long. This is totally optional, but it is probably to your benefit to do this before you start collecting your ASA syslogs. You can read more about custom Elasticsearch templates here, or you can just copy mine by doing this…

Copy the text below, change the IP address at the top to the IP of your elasticsearch server and save it as template.sh on your desktop.

#!/bin/sh
curl -XPUT http://10.0.0.112:9200/_template/logstash_per_index -d '
{
    "template" : "logstash*",
    "mappings" : {
      "cisco-fw" : {
         "properties": {
            "@timestamp":{"type":"date","format":"dateOptionalTime"},
            "@version":{"type":"string", "index" : "not_analyzed"},
	    "action":{"type":"string"},
	    "bytes":{"type":"long"},
	    "cisco_message":{"type":"string"},
	    "ciscotag":{"type":"string", "index" : "not_analyzed"},
	    "connection_count":{"type":"long"},
            "connection_count_max":{"type":"long"},
	    "connection_id":{"type":"string"},
            "direction":{"type":"string"},
            "dst_interface":{"type":"string"},
	    "dst_ip":{"type":"string"},
            "dst_mapped_ip":{"type":"ip"},
	    "dst_mapped_port":{"type":"long"},
            "dst_port":{"type":"long"},
            "duration":{"type":"string"},
	    "err_dst_interface":{"type":"string"},
	    "err_dst_ip":{"type":"ip"},
	    "err_icmp_code":{"type":"string"},
	    "err_icmp_type":{"type":"string"},
	    "err_protocol":{"type":"string"},
            "err_src_interface":{"type":"string"},
            "err_src_ip":{"type":"ip"},
            "geoip":{
               "properties":{
                  "area_code":{"type":"long"},
                  "asn":{"type":"string", "index":"not_analyzed"},
                  "city_name":{"type":"string", "index":"not_analyzed"},
                  "continent_code":{"type":"string"},
                  "country_code2":{"type":"string"},
                  "country_code3":{"type":"string"},
                  "country_name":{"type":"string", "index":"not_analyzed"},
                  "dma_code":{"type":"long"},
                  "ip":{"type":"ip"},
                  "latitude":{"type":"double"},
                  "location":{"type":"geo_point"},
                  "longitude":{"type":"double"},
                  "number":{"type":"string"},
                  "postal_code":{"type":"string"},
                  "real_region_name":{"type":"string", "index":"not_analyzed"},
                  "region_name":{"type":"string", "index":"not_analyzed"},
                  "timezone":{"type":"string"}
               }
            },
            "group":{"type":"string"},
 	    "hashcode1": {"type": "string"}, 
 	    "hashcode2": {"type": "string"}, 
            "host":{"type":"string"},
            "icmp_code":{"type":"string"},
            "icmp_code_xlated":{"type":"string"},
            "icmp_seq_num":{"type":"string"},
            "icmp_type":{"type":"string"},
            "interface":{"type":"string"},
            "is_local_natted":{"type":"string"},
            "is_remote_natted":{"type":"string"},
            "message":{"type":"string"},
            "orig_dst_ip":{"type":"ip"},
            "orig_dst_port":{"type":"long"},
            "orig_protocol":{"type":"string"},
            "orig_src_ip":{"type":"ip"},
            "orig_src_port":{"type":"long"},
            "policy_id":{"type":"string"},
            "protocol":{"type":"string"},
            "reason":{"type":"string"},
            "seq_num":{"type":"long"},
            "spi":{"type":"string"},
            "src_interface":{"type":"string"},
            "src_ip":{"type":"ip"},
            "src_mapped_ip":{"type":"ip"},
            "src_mapped_port":{"type":"long"},
            "src_port":{"type":"long"},
            "src_xlated_interface":{"type":"string"},
            "src_xlated_ip":{"type":"ip"},
            "syslog_facility":{"type":"string"},
            "syslog_facility_code":{"type":"long"},
            "syslog_pri":{"type":"string"},
            "syslog_severity":{"type":"string"},
            "syslog_severity_code":{"type":"long"},
            "tags":{"type":"string"},
            "tcp_flags":{"type":"string"},
            "timestamp":{"type":"string"},
            "tunnel_type":{"type":"string"},
            "type":{"type":"string"},
            "user":{"type":"string"},
            "xlate_type":{"type":"string"}
      }
    }
  }
}'

Save it and close. Now open up a terminal window, change to the directory where the template.sh file is located, make the script executable and run it with the following commands…

cd Desktop
chmod +x template.sh
./template.sh

You should get back {"acknowledged":true}
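If you want to double-check that Elasticsearch actually stored the template, you can ask for it back (this assumes the same IP and port used in the script above):

curl -XGET http://10.0.0.112:9200/_template/logstash_per_index?pretty

It should echo back the mappings you just uploaded.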


Now back to the tutorial…


Download and Install Java

If you are doing this on a fresh install of Ubuntu (like me), the first thing you're going to need to do is install Java. Logstash requires at least Java 7 to function, so let's set that up. If you already have Java on your machine, you can skip to the next section. I will be using Oracle Java 8 in this example, but you can run Java 7 or OpenJDK if you wish.

Open a terminal window (ctrl+alt+t) and type…

sudo apt-add-repository ppa:webupd8team/java
sudo apt-get update
sudo apt-get install oracle-java8-installer

Once you have accepted the license agreement, Java is ready to go.
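If you want to verify the install, a quick check is (the exact version string will vary):

java -version

which should report something along the lines of java version "1.8.0_45".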

 


Download and Install Logstash

Open a terminal window (ctrl+alt+t) and run these commands…

wget -O - http://packages.elasticsearch.org/GPG-KEY-elasticsearch | sudo apt-key add -
echo 'deb http://packages.elasticsearch.org/logstash/1.5/debian stable main' | sudo tee /etc/apt/sources.list.d/logstash.list
sudo apt-get update
sudo apt-get install logstash

 


Logstash Configuration

Logstash is now installed, so next we need to write a configuration file where we can do things like specify the listening port, grok patterns, the IP of the Elasticsearch server, etc.

A Logstash configuration file is basically built of 3 parts: the input (network protocol, listening port, data type, etc.), the filter (patterns, grok filters, syslog severity, etc.) and the output (the IP address of the Elasticsearch server that Logstash is shipping the modified data to, etc.).

First and foremost, let’s create a blank logstash configuration file. Open up your terminal window and type…

sudo vi /etc/logstash/conf.d/logstash.conf

or if you don’t know how to use VI

sudo gedit /etc/logstash/conf.d/logstash.conf

 

I am going to post my logstash configuration file below and then I will explain each part of the file and what it is doing. So, copy my code, paste it into your logstash.conf file and follow along below making changes to your file as you see fit.

logstash.conf

input {
  	udp { 
    		port => 5544
    		type => "cisco-fw"
  	}
}

filter {
	grok {
    		match => ["message", "%{CISCO_TAGGED_SYSLOG} %{GREEDYDATA:cisco_message}"]
  	}

  	# Extract fields from each of the detailed message types
  	# The patterns provided below are included in the core of LogStash 1.4.2.
	grok {
		match => [
  			"cisco_message", "%{CISCOFW106001}",
  			"cisco_message", "%{CISCOFW106006_106007_106010}",
  			"cisco_message", "%{CISCOFW106014}",
  			"cisco_message", "%{CISCOFW106015}",
  			"cisco_message", "%{CISCOFW106021}",
  			"cisco_message", "%{CISCOFW106023}",
  			"cisco_message", "%{CISCOFW106100}",
  			"cisco_message", "%{CISCOFW110002}",
  			"cisco_message", "%{CISCOFW302010}",
  			"cisco_message", "%{CISCOFW302013_302014_302015_302016}",
  			"cisco_message", "%{CISCOFW302020_302021}",
  			"cisco_message", "%{CISCOFW305011}",
  			"cisco_message", "%{CISCOFW313001_313004_313008}",
  			"cisco_message", "%{CISCOFW313005}",
  			"cisco_message", "%{CISCOFW402117}",
  			"cisco_message", "%{CISCOFW402119}",
  			"cisco_message", "%{CISCOFW419001}",
  			"cisco_message", "%{CISCOFW419002}",
  			"cisco_message", "%{CISCOFW500004}",
  			"cisco_message", "%{CISCOFW602303_602304}",
  			"cisco_message", "%{CISCOFW710001_710002_710003_710005_710006}",
  			"cisco_message", "%{CISCOFW713172}",
  			"cisco_message", "%{CISCOFW733100}"
		]
	}

  	# Parse the syslog severity and facility
  	syslog_pri { }

	geoip {
      		add_tag => [ "GeoIP" ]
      		database => "/opt/logstash/databases/GeoLiteCity.dat"
      		source => "src_ip"
    	}

	if [geoip][city_name]      == "" { mutate { remove_field => "[geoip][city_name]" } }
    	if [geoip][continent_code] == "" { mutate { remove_field => "[geoip][continent_code]" } }
    	if [geoip][country_code2]  == "" { mutate { remove_field => "[geoip][country_code2]" } }
    	if [geoip][country_code3]  == "" { mutate { remove_field => "[geoip][country_code3]" } }
    	if [geoip][country_name]   == "" { mutate { remove_field => "[geoip][country_name]" } }
    	if [geoip][latitude]       == "" { mutate { remove_field => "[geoip][latitude]" } }
    	if [geoip][longitude]      == "" { mutate { remove_field => "[geoip][longitude]" } }
    	if [geoip][postal_code]    == "" { mutate { remove_field => "[geoip][postal_code]" } }
    	if [geoip][region_name]    == "" { mutate { remove_field => "[geoip][region_name]" } }
    	if [geoip][time_zone]      == "" { mutate { remove_field => "[geoip][time_zone]" } }

	# Gets the source IP whois information from the GeoIPASNum.dat flat file database
	geoip {
      		add_tag => [ "Whois" ]
      		database => "/opt/logstash/databases/GeoIPASNum.dat"
      		source => "src_ip"
    	}

 	# Parse the date
  	date {
    		match => ["timestamp",
      			"MMM dd HH:mm:ss",
      			"MMM  d HH:mm:ss",
      			"MMM dd yyyy HH:mm:ss",
      			"MMM  d yyyy HH:mm:ss"
    		]
  	}
}

output {
	stdout { 
		codec => json
	}

	elasticsearch {
    		host => "10.0.0.133"
    		flush_size => 1
	}
}

End of logstash.conf

Don’t freak out! I will walk you through my configuration file and explain what each section is doing. Like I said before, a logstash configuration file is made up of 3 parts: the input, the filter and the output. So let’s walk through it, shall we?

Note: All of my examples will be using this as an example ASA syslog message.

<182>May 07 2015 13:26:42: %ASA-6-302014: Teardown TCP connection 48809467 for outside:124.35.68.19/46505 to inside:10.10.10.32/443 duration 0:00:00 bytes 300 TCP FINs

 

The Input

input {
  udp { 
    port => 5544
    type => "cisco-fw"
  }
}

This tells Logstash the protocol (UDP) and which port to listen on (5544). You can make it any port you want; you just need to configure the same port on your ASA firewall, either through ASDM or, for you CLI people, like so…

logging enable
logging timestamp
logging buffer-size 40960
logging trap informational
logging facility 22
logging host inside 10.0.0.133 17/5544

You can learn more about how to set this up by checking out my ASA syslog tutorial here.
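Once Logstash is running with this config (covered at the end of this post), you can also check that it is really listening on UDP/5544 before pointing the ASA at it by replaying the example message from above with netcat. This is just a sanity-check sketch; it assumes the netcat-openbsd package is installed and that Logstash is running on 10.0.0.133:

echo '<182>May 07 2015 13:26:42: %ASA-6-302014: Teardown TCP connection 48809467 for outside:124.35.68.19/46505 to inside:10.10.10.32/443 duration 0:00:00 bytes 300 TCP FINs' | nc -u -w1 10.0.0.133 5544

If everything is wired up, the parsed event will show up in the terminal thanks to the stdout output described below.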

The Filter

This is the filter section, where most of the work gets done in Logstash. This is where you can do things like use grok patterns to split the message into fields, along with other neat little features for manipulating your data. My filter section has 5 different parts: grok, syslog_pri, geoip, mutate and date. Let's look at them individually and see what exactly they do.

Grok

filter {
	grok {
    		match => ["message", "%{CISCO_TAGGED_SYSLOG} %{GREEDYDATA:cisco_message}"]
  	...
}

This section is where you use patterns to match various elements of your log messages and extract them into fields. This specific pattern uses the CISCO_TAGGED_SYSLOG definition from the firewall patterns file built into Logstash and splits the message into sections.

The CISCO_TAGGED_SYSLOG looks like this…

CISCO_TAGGED_SYSLOG ^<%{POSINT:syslog_pri}>%{CISCOTIMESTAMP:timestamp}( %{SYSLOGHOST:sysloghost})?: %%{CISCOTAG:ciscotag}:

and, based off our example ASA syslog <182>May 07 2015 13:26:42: %ASA-6-302014: Teardown TCP connection 48809467 for outside:124.35.68.19/46505 to inside:10.10.10.32/443 duration 0:00:00 bytes 300 TCP FINs, it extracts these fields…

syslog_pri = 182
timestamp = May 07 2015 13:26:42
sysloghost = (empty; our example message has no hostname)
ciscotag = ASA-6-302014

and the rest of the message, Teardown TCP connection 48809467 for outside:124.35.68.19/46505 to inside:10.10.10.32/443 duration 0:00:00 bytes 300 TCP FINs, is captured as GREEDYDATA:cisco_message, which will be used in the next grok filter below.

grok {
    match => [
    	"cisco_message", "%{CISCOFW106001}",
    	....
    	"cisco_message", "%{CISCOFW733100}"
    ]
}

Now, using the GREEDYDATA:cisco_message from the previous grok filter, we use the same firewall patterns file built into Logstash to match the message against the known message types. The filter goes through the patterns until it finds a match and then splits the contents of the message into fields. So our GREEDYDATA:cisco_message of Teardown TCP connection 48809467 for outside:124.35.68.19/46505 to inside:10.10.10.32/443 duration 0:00:00 bytes 300 TCP FINs gets matched against the patterns file. Our specific example matches the CISCOFW302013_302014_302015_302016 line in the patterns file, which reads…

CISCOFW302013_302014_302015_302016 %{CISCO_ACTION:action}(?: %{CISCO_DIRECTION:direction})? %{WORD:protocol} connection %{INT:connection_id} for %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port}( \(%{IP:src_mapped_ip}/%{INT:src_mapped_port}\))?(\(%{DATA:src_fwuser}\))? to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port}( \(%{IP:dst_mapped_ip}/%{INT:dst_mapped_port}\))?(\(%{DATA:dst_fwuser}\))?( duration %{TIME:duration} bytes %{INT:bytes})?(?: %{CISCO_REASON:reason})?( \(%{DATA:user}\))?

…and puts the data into these fields…

action = Teardown
protocol = TCP
connection_id = 48809467
src_interface = outside
src_ip = 124.35.68.19
src_port = 46505
dst_interface = inside
dst_ip = 10.10.10.32
dst_port = 443
duration = 0:00:00
bytes = 300
reason = TCP FINs

 

Syslog_pri

syslog_pri { }

This section takes the POSINT syslog_pri from the first grok filter and works out the facility and severity level of the syslog message. Our example had a syslog_pri number of 182, and Logstash can determine that the message is an informational-level message from the local6 facility. I was able to get that by referencing this chart and finding which column and row 182 falls under.
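If you would rather not keep the chart handy, the mapping is just integer math on the priority value: the facility code is the priority divided by 8 and the severity code is the remainder.

echo $((182 / 8)) $((182 % 8))    # prints "22 6", i.e. facility 22 (local6), severity 6 (informational)

Those are the same numbers the syslog_pri filter stores in syslog_facility_code and syslog_severity_code.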

             emergency   alert   critical   error   warning   notice   info   debug
 kernel              0       1          2       3         4        5      6       7
 user                8       9         10      11        12       13     14      15
 mail               16      17         18      19        20       21     22      23
 system             24      25         26      27        28       29     30      31
 security           32      33         34      35        36       37     38      39
 syslog             40      41         42      43        44       45     46      47
 lpd                48      49         50      51        52       53     54      55
 nntp               56      57         58      59        60       61     62      63
 uucp               64      65         66      67        68       69     70      71
 time               72      73         74      75        76       77     78      79
 security           80      81         82      83        84       85     86      87
 ftpd               88      89         90      91        92       93     94      95
 ntpd               96      97         98      99       100      101    102     103
 logaudit          104     105        106     107       108      109    110     111
 logalert          112     113        114     115       116      117    118     119
 clock             120     121        122     123       124      125    126     127
 local0            128     129        130     131       132      133    134     135
 local1            136     137        138     139       140      141    142     143
 local2            144     145        146     147       148      149    150     151
 local3            152     153        154     155       156      157    158     159
 local4            160     161        162     163       164      165    166     167
 local5            168     169        170     171       172      173    174     175
 local6            176     177        178     179       180      181    182     183
 local7            184     185        186     187       188      189    190     191

Pretty cool stuff.


OPTIONAL: GeoIP

geoip {
    	add_tag => [ "GeoIP" ]
    	database => "/opt/logstash/databases/GeoLiteCity.dat"
    	source => "src_ip"
}

This part is completely optional and up to you if you want to set it up. It takes the source IP address from the syslog and looks up location data in a flat file database. Logstash uses this database to get the location of the IP addresses hitting the firewall, so you can turn the source IP address of 124.35.68.19 into this…

"geoip": {
      "ip": "124.35.68.19",
      "country_code2": "US",
      "country_code3": "USA",
      "country_name": "United States",
      "continent_code": "NA",
      "region_name": "NJ",
      "city_name": "Edison",
      "postal_code": "08820",
      "latitude": 40.57669999999999,
      "longitude": -74.3674,
      "dma_code": 501,
      "area_code": 732,
      "timezone": "America/New_York",
      "real_region_name": "New Jersey",
      "location": [
        -74.3674,
        40.57669999999999
      ]
}

If this doesn’t interest you, remove the geoip section from the config file and skip to the next section. If this does interest you, follow the steps below.

Open a terminal window and type…

cd ~
wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
sudo mkdir /opt/logstash/databases
gunzip GeoLiteCity.dat.gz
sudo mv ~/GeoLiteCity.dat /opt/logstash/databases/

and that is it for the first GeoIP tag for location data.

This is another optional GeoIP filter…

geoip {
    	add_tag => [ "Whois" ]
    	database => "/opt/logstash/databases/GeoIPASNum.dat"
    	source => "src_ip"
}

Just like the GeoIP location database, the GeoIP ASN database uses the source IP address, but instead of returning location information, it returns the ASN information. So essentially it turns 124.35.68.19 into…

"number": "AS6128",
"asn": "Cablevision Systems Corp."

So you can see ISP names in your Elasticsearch searches. Also very cool and highly recommended. If this does not interest you, remove this second geoip section from your configuration file and skip to the next section. If it does interest you, follow the steps below…

Open a terminal window and type…

cd ~
wget http://download.maxmind.com/download/geoip/database/asnum/GeoIPASNum.dat.gz
gunzip GeoIPASNum.dat.gz
sudo mv ~/GeoIPASNum.dat /opt/logstash/databases/

and that's it with the GeoIP section.

Mutate

if [geoip][city_name]      == "" { mutate { remove_field => "[geoip][city_name]" } }

This part is fairly straightforward. If the GeoLiteCity.dat lookup for a particular IP address returned a country but no city (or left any of the other checked fields empty), the empty field is removed before the event is inserted into Elasticsearch. That is basically it for the mutate section of my config file.

Date

date {
    match => ["timestamp",
    	"MMM dd HH:mm:ss",
    	"MMM  d HH:mm:ss",
    	"MMM dd yyyy HH:mm:ss",
    	"MMM  d yyyy HH:mm:ss"
    ]
}

This part is also pretty straightforward. It takes the timestamp value from the first grok filter and uses it as the event timestamp when putting the event into Elasticsearch. So instead of @timestamp being set to the time when Logstash received the message, it is set to when the event was triggered on the firewall, based off the firewall's clock. Hopefully the firewall is configured to use NTP so that all of your devices' clocks are synchronized.
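Since the event time now comes from the firewall, it is worth making sure the ASA clock is actually right. If your ASA is not already syncing time, something along these lines on the ASA CLI should do it (the NTP server IP and timezone are just examples, so adjust them for your network):

clock timezone EST -5
ntp server 10.0.0.5 source inside prefer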

Output

output {
	stdout { 
		codec => json
	}

	elasticsearch {
		host => "10.0.0.133"
		flush_size => 1
	}
}

This section controls where the final result is displayed or sent. My output section has 2 parts: stdout and elasticsearch.

Stdout

stdout { 
	codec => json
}

Stdout is optional, but I have it in there so I can watch the terminal window and see whether everything is working properly.

Obviously you do not need this part to run but I like to have it in there for debugging purposes.

Elasticsearch

elasticsearch {
    	host => "10.0.0.133"
    	flush_size => 1
}

The elasticsearch output is where you specify the IP address of your Elasticsearch server, and that is pretty much it.


Now that you have your logstash.conf file set up, you can run Logstash for your ASA firewall. Save your config file and type this into a terminal window…

/opt/logstash/bin/logstash -f /etc/logstash/conf.d/logstash.conf
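Tip: if Logstash complains on startup, or you just want to sanity-check the file first, the 1.5.x agent accepts a --configtest flag that validates the syntax without actually starting the pipeline:

/opt/logstash/bin/logstash --configtest -f /etc/logstash/conf.d/logstash.conf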

That’s it, you’re finished.
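To confirm events are actually landing in Elasticsearch (and not just scrolling by on stdout), you can count the documents in your Logstash indices; this assumes Elasticsearch is reachable at the IP from the output section:

curl 'http://10.0.0.133:9200/logstash-*/_count?pretty'

The count should keep climbing as the ASA sends traffic.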

Note: If you have not turned on syslog messages on your ASA firewall, read my other blog post here.

Create a Custom Elasticsearch Template

Blog, ElasticSearch, Information Technology, Networking, Software

This post will show you how to create a custom index template for Elasticsearch.

Why would you need to create a custom template? Let's say you are storing ASN data in your Elasticsearch index.
For example…

  • Google Inc. – 15 messages
  • Facebook Inc. – 25 messages
  • Linkedin Inc. – 33 messages

When you query the index for ASN fields, you are going to get 15 hits for Google, 25 hits for Facebook, 33 hits for Linkedin and 73 hits for Inc. This is because, by default, ElasticSearch creates the index mapping automatically and analyzes each string field, splitting it into tokens at spaces when indexing. So what if you want the whole field value returned as a single result, so that something like "Google Inc" shows up as 15 hits? Well, you have to create an ElasticSearch index template. Note: creating a template will not magically modify old indices; that data has already been indexed. The template will only apply to indices created in ElasticSearch after you add the template.

First thing you need to do is figure out the naming scheme for your indices. Knowing the name pattern for new indices will make it so that the template you are about to create only applies to those indices and not other indices in ElasticSearch. I use logstash to ship everything to ElasticSearch and the default index naming pattern is logstash-YYYY.MM.DD so, in my template, I will have logstash* with the asterisk acting as a wildcard. If you're not using logstash and are unsure of the naming, go to /var/lib/elasticsearch and look in the indices folder to see the names of your current indices. Remember this for when we create the template.
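If you would rather not dig around the filesystem, Elasticsearch will also list your indices for you (assuming it is listening on localhost:9200):

curl 'http://localhost:9200/_cat/indices?v'

The index column shows the names your template pattern needs to match.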

Next you want to find the type name inside a current index so that the template only matches the types you want it to match. Open your browser, type http://localhost:9200/_all/_mapping?pretty=1 in the URL bar and hit enter.

For example: I see

"logstash-2014.09.30": {
        "cisco-fw": {
            "properties": {

Because I have my logstash config file set to tag anything coming in on a certain port as type cisco-fw, the type in ElasticSearch is cisco-fw, but yours might be default or something else. Remember this for when we create the template.

Next, open up a notepad so we can start creating the template.

Here is the template for your template

curl -XPUT http://localhost:9200/_template/logstash_per_index -d '
{
    "template" : "logstash*",
    "mappings" : {
      "cisco-fw" : {
      }
    }
}'

Where logstash_per_index is the name you want to give the template, logstash* is the index naming scheme and cisco-fw is the type.

Now, under properties, you are going to set the field type and options for each field name. For my example, I am doing ASN values, so I would write

"asn":{"type":"string", "index":"not_analyzed"}

The ASN type is a string (obviously) and index is set to not_analyzed. Not_analyzed means that it is still searchable with a query, but it does not go through any analysis process and is not broken down into tokens. This will allow us to see Google Inc as one result when querying ElasticSearch. Do this for all the fields in your index. For example, here is my completed template…

#!/bin/sh
curl -XPUT http://localhost:9200/_template/logstash_per_index -d '
{
    "template" : "logstash*",
    "mappings" : {
      "cisco-fw" : {
         "properties": {
            "@timestamp":{"type":"date","format":"dateOptionalTime"},
            "@version":{"type":"string", "index" : "not_analyzed"},
            "action":{"type":"string"},
            "bytes":{"type":"long"},
            "cisco_message":{"type":"string"},
            "ciscotag":{"type":"string", "index" : "not_analyzed"},
            "connection_count":{"type":"long"},
            "connection_count_max":{"type":"long"},
            "connection_id":{"type":"string"},
            "direction":{"type":"string"},
            "dst_interface":{"type":"string"},
            "dst_ip":{"type":"string"},
            "dst_mapped_ip":{"type":"ip"},
            "dst_mapped_port":{"type":"long"},
            "dst_port":{"type":"long"},
            "duration":{"type":"string"},
            "err_dst_interface":{"type":"string"},
            "err_dst_ip":{"type":"ip"},
            "err_icmp_code":{"type":"string"},
            "err_icmp_type":{"type":"string"},
            "err_protocol":{"type":"string"},
            "err_src_interface":{"type":"string"},
            "err_src_ip":{"type":"ip"},
            "geoip":{
               "properties":{
                  "area_code":{"type":"long"},
                  "asn":{"type":"string", "index":"not_analyzed"},
                  "city_name":{"type":"string", "index":"not_analyzed"},
                  "continent_code":{"type":"string"},
                  "country_code2":{"type":"string"},
                  "country_code3":{"type":"string"},
                  "country_name":{"type":"string", "index":"not_analyzed"},
                  "dma_code":{"type":"long"},
                  "ip":{"type":"ip"},
                  "latitude":{"type":"double"},
                  "location":{"type":"geo_point"},
                  "longitude":{"type":"double"},
                  "number":{"type":"string"},
                  "postal_code":{"type":"string"},
                  "real_region_name":{"type":"string", "index":"not_analyzed"},
                  "region_name":{"type":"string", "index":"not_analyzed"},
                  "timezone":{"type":"string"}
               }
            },
            "group":{"type":"string"},
            "hashcode1": {"type": "string"},
            "hashcode2": {"type": "string"},
            "host":{"type":"string"},
            "icmp_code":{"type":"string"},
            "icmp_code_xlated":{"type":"string"},
            "icmp_seq_num":{"type":"string"},
            "icmp_type":{"type":"string"},
            "interface":{"type":"string"},
            "is_local_natted":{"type":"string"},
            "is_remote_natted":{"type":"string"},
            "message":{"type":"string"},
            "orig_dst_ip":{"type":"ip"},
            "orig_dst_port":{"type":"long"},
            "orig_protocol":{"type":"string"},
            "orig_src_ip":{"type":"ip"},
            "orig_src_port":{"type":"long"},
            "policy_id":{"type":"string"},
            "protocol":{"type":"string"},
            "reason":{"type":"string"},
            "seq_num":{"type":"long"},
            "spi":{"type":"string"},
            "src_interface":{"type":"string"},
            "src_ip":{"type":"string"},
            "src_mapped_ip":{"type":"ip"},
            "src_mapped_port":{"type":"long"},
            "src_port":{"type":"long"},
            "src_xlated_interface":{"type":"string"},
            "src_xlated_ip":{"type":"ip"},
            "syslog_facility":{"type":"string"},
            "syslog_facility_code":{"type":"long"},
            "syslog_pri":{"type":"string"},
            "syslog_severity":{"type":"string"},
            "syslog_severity_code":{"type":"long"},
            "tags":{"type":"string"},
            "tcp_flags":{"type":"string"},
            "timestamp":{"type":"string"},
            "tunnel_type":{"type":"string"},
            "type":{"type":"string"},
            "user":{"type":"string"},
            "xlate_type":{"type":"string"}
      }
    }
  }
}'

Once you change all the types you need, it is time to add the template to ElasticSearch. You can either save your file, make it a script and run it through a terminal window, or copy the text and paste it straight into a terminal window. You should see a {"ok":true,"acknowledged":true} response if everything was formatted properly.

And that is it. You will only see the fruits of your labor when a new index is created that matches the pattern set in your template file (i.e. logstash*). Because of my naming scheme with Logstash, new indices are only created at the start of the day (logstash-YYYY.MM.DD), so I had to wait until the next day to see if my template was working properly. If you are impatient and don't care about losing the data in your current index, you can delete it from ElasticSearch by issuing the following curl command in a terminal window…

curl -XDELETE localhost:9200/index_name

where index_name is the name of your index (ex. logstash-2014.12.11)

Helpful tip: if data stops showing up in the index, it is very possible that you messed up one of the field types in your template file. I am writing this because I ran into this issue and could not figure out why there was no data in my index. To troubleshoot, go to /var/log/elasticsearch and look at the log file for the date when data was not properly going into the ElasticSearch index (it should be a lot bigger in size compared to the other log files). In my log file, I was seeing this error multiple times…

org.elasticsearch.index.mapper.MapperParsingException: failed to parse [protocol]

What happened was that, thinking protocol meant the port number (ex. 25, 80, 443, etc.), I set the protocol field as type long. To my surprise, protocol was either TCP or UDP, so it should have been set as type string. ElasticSearch was expecting a long based off my template but was getting strings instead, so it rejected the documents. Instead of modifying the template file on the server, I decided to delete the template from ElasticSearch, fix the protocol field and then re-upload the template. To do that, I opened a terminal and typed…

curl -XDELETE http://localhost:9200/_template/logstash_per_index

where logstash_per_index is the name of the template. That command will delete the template off of your server. Make your changes to your template in notepad and then add the template back to ElasticSearch.

Since the template only applies to newly created indices and your index did not have any data inside of it because of the incorrect template, you can go ahead and just delete that index and create a new one that will work with the newly modified template.

curl -XDELETE localhost:9200/index_name

where index_name is the name of your index (ex. logstash-2014.12.11).

And that is it. Leave a comment down below if you found this information helpful or if you have any questions for me. Good luck!

Install Kibana 4 Beta on Ubuntu

Blog, ElasticSearch, Information Technology, Kibana, Networking, Software

NOTE: This post is out of date as Kibana 4 is no longer in beta. Check out this blog post on how to install a stable release of Kibana 4.


Good news, everyone! The Kibana 4 beta has been released.

If you like to live life on the bleeding edge like me and want to mess around with the new features then follow this guide below. This guide will show you how to install the beta on Ubuntu so you can play around with it before the final version is released to the masses. This guide assumes you have a fresh install of Ubuntu to run this on.

Download and Install Elasticsearch 

First thing you will need to do is download and install the latest version of Elasticsearch. You need to have Elasticsearch 1.4.0 for the beta to work; running anything earlier than 1.4.0 will result in an error in Kibana.

You need to have Java on your machine for this to work. If you are not sure whether or not you have Java on your machine, open up a terminal window (ctrl + alt + t) and type java -version.

If you get java version "1.7.0_65" or similar, you can skip this next step; otherwise type this into your terminal window…

sudo apt-get update
sudo apt-get install openjdk-7-jre-headless -y

Once java is installed, open a terminal session (ctrl + alt + t) and enter these commands.

 cd ~
 sudo apt-get update
 wget https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.4.0.deb
 sudo dpkg -i elasticsearch-1.4.0.deb

This will install Elasticsearch 1.4.0 stable on your machine.

Next you need to configure Elasticsearch on your machine.

sudo sed -i -e 's|# cluster.name: elasticsearch|cluster.name: kibana|' /etc/elasticsearch/elasticsearch.yml

Now you need to tell your machine to run elasticsearch on boot.

sudo update-rc.d elasticsearch defaults 95 10
sudo service elasticsearch restart

Download and Install Kibana 4 Beta 

Unlike previous versions of Kibana, this version does not require Apache on your machine, which makes the install much easier on a fresh machine.

First download the Kibana 4 beta from here

  1. Extract the archive
  2. In the root dir, open config/kibana.yml in an editor
  3. Set the elasticsearch parameter to the fully qualified hostname of your Elasticsearch server. For us it's going to be on the same machine, so you can keep the file as is.
  4. Open a terminal window (ctrl + alt + t), cd to your Kibana root directory and run ./bin/kibana (a quick sketch follows this list)
  5. Open your browser to Kibana. (ex: http://127.0.0.1:5601)
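Putting steps 1 and 4 together, the whole thing is only a couple of commands. This is just a sketch and the archive name below is a placeholder, so substitute whatever file name you actually downloaded:

# adjust the file and folder names below to match the archive you downloaded
cd ~/Downloads
tar -xzf kibana-4.0.0-BETA1.tar.gz
cd kibana-4.0.0-BETA1
./bin/kibana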

And that is it. Start inserting data into your Elasticsearch index and start playing around with the beta.

Set up Syslogging on Cisco ASA Firewall

Blog, Information Technology, Networking

The purpose of this post is to guide you on how to set up syslogging on a Cisco ASA firewall so you can ship your logs to a centralized log server like the ELK stack (Elasticsearch, Logstash and Kibana). This configuration guide is done using the ASDM (GUI).

Here is an example of what a Cisco syslog looks like…

%ASA-4-106023: Deny tcp src outside:109.230.217.95/24069 dst inside:164.32.112.125/25 by access-group "PERMIT_IN" [0x0, 0x0]

The Cisco message above shows that the IP 109.230.217.95 was denied access to the (blocked) SMTP port 25 on the public IP address 164.32.112.125 by the access group "PERMIT_IN".

Here are the settings to tell our Firewall to send syslogs to our centralized log service. Open up your ASDM and log into your firewall.

Click the configuration button on the top

Click “Device Management”

Expand “Logging” and click “Syslog Servers”

On the right side click “Add”, choose the inside interface, type in the IP of the machine you want to ship logs to, select UDP, type in your port and click OK.

Click “Logging Setup”, select “Enable logging” and click apply.

Click “Logging Filters”, double click “Syslog Servers”, check “Filter on severity”, select the type of logs you want to receive and click OK.

Here is an explanation of the different logging levels on Cisco products: 0 emergencies, 1 alerts, 2 critical, 3 errors, 4 warnings, 5 notifications, 6 informational, 7 debugging.

Now just save your running config to the startup config and you will now start seeing your logs on your centralized log server.
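If you prefer the CLI over ASDM, the equivalent configuration looks roughly like this. It assumes the log collector is 10.0.0.133, reachable via the inside interface and listening on UDP port 5544, and that you want informational-level logging, so adjust those values to match your environment:

logging enable
logging timestamp
logging trap informational
logging host inside 10.0.0.133 17/5544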

Using Logstash, Elasticsearch and Kibana for Cisco ASA Syslog Message Analysis.

Blog, Information Technology, Networking, Servers, Software

I originally wrote this as a comment on the Networking subreddit, but I thought I would post this here in case anyone was curious about using open source tools for centralized logging.

Originally we were using Graylog2 for message analysis, but I recently found out about Kibana and it is substantially better. What this guide will do is help you transform raw syslog messages from your Cisco ASA like this…

<182>Apr 21 2014 11:51:03: %ASA-6-302014: Teardown TCP connection 9443865 for outside:123.456.789.10/9058 to inside:10.20.30.40/443 duration 0:00:20 bytes 3925 TCP FINs\n

and filter them into this…

{
 "_index": "logstash-2014.04.21",
 "_type": "cisco-fw",
 "_id": "I7yBKbUITHWxuMrAhaoNWQ",
 "_source": {
 "message": "<182>Apr 21 2014 11:51:03: %ASA-6-302014: Teardown TCP connection 9443865 for outside:123.456.789.10/9058 to inside:10.20.30.40/443 duration 0:00:20 bytes 3925 TCP FINs\n",
 "@version": "1",
 "@timestamp": "2014-04-21T15:51:03.000Z",
 "type": "cisco-fw",
 "host": "10.20.30.1",
 "syslog_pri": "182",
 "timestamp": "Apr 21 2014 11:51:03",
 "ciscotag": "ASA-6-302014",
 "cisco_message": "Teardown TCP connection 9443865 for outside:123.456.789.10/9058 to inside:10.20.30.40/443 duration 0:00:20 bytes 3925 TCP FINs",
 "syslog_severity_code": 6,
 "syslog_facility_code": 22,
 "syslog_facility": "local6",
 "syslog_severity": "informational",
 "action": "Teardown",
 "protocol": "TCP",
 "connection_id": "9443865",
 "src_interface": "outside",
 "src_ip": "123.456.789.10",
 "src_port": "9058",
 "dst_interface": "inside",
 "dst_ip": "10.20.30.40",
 "dst_port": "443",
 "duration": "0:00:20",
 "bytes": "3925",
 "reason": "TCP FINs",
 "tags": [
 "GeoIP"
 ]
 },
 "sort": [
 "apr",
 1398095463000
 ]
}

Using this newly created data, you can do things like see which IP is hitting your web servers the most, see which country is giving you the most traffic, see a graph of when your site is being accessed the most throughout the day, etc.
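As a taste of what you can do once these fields exist, here is one way to pull the top source IPs straight out of Elasticsearch with a terms aggregation (a sketch against the 1.x API; it assumes Elasticsearch is on localhost and the field names produced by the filter config below):

curl -s 'http://localhost:9200/logstash-*/cisco-fw/_search?pretty' -d '{
  "size": 0,
  "aggs": { "top_sources": { "terms": { "field": "src_ip", "size": 10 } } }
}'

Kibana's terms panels are built on the same idea.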

Here's my setup: Logstash running on a Windows machine, with Elasticsearch and Kibana running on an Ubuntu 12.04 LTS machine.

All three of these programs can be run on the same machine and on either OS (Logstash and Elasticsearch are Java based, and Kibana is just HTML/JS/CSS so all you need is Apache), but my setup (and this guide) uses what is listed above.


Download GeoIP database

First thing I did was download an IP-to-lat/long database here. This flat file database will be used by Logstash to get the location of the IP addresses hitting the firewall so you can map hits on Kibana's bettermap panel.

Unzip the file and remember where you put the GeoLiteCity.dat


Download Logstash 

Next I downloaded the latest version of Logstash. Unzip it and put it somewhere on your computer.


Download and Install Elasticsearch 

Switch to your Ubuntu 12.04 LTS machine, open a terminal session (ctrl + alt + t) and enter these commands.

 cd ~
 sudo apt-get update
 sudo apt-get install openjdk-7-jre-headless -y
 wget https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.3.1.deb
 sudo dpkg -i elasticsearch-1.3.1.deb

This will install java and Elasticsearch 1.3.1 on your machine.

Next you need to configure Elasticsearch on your machine.

sudo sed -i -e 's|# cluster.name: elasticsearch|cluster.name: kibana|' /etc/elasticsearch/elasticsearch.yml

Now you need to tell your machine to run elasticsearch on boot.

sudo update-rc.d elasticsearch defaults 95 10
sudo service elasticsearch restart

Installing Kibana

First you have to have Apache on your machine. There are plenty of guides on getting that set up if you are not familiar. Here is a good post about setting up Apache on Ubuntu. http://aarvik.dk/initial-web-server-setup-with-apache-mod_rewrite-and-virtual-host/

Next, download Kibana here.

  1. Extract your archive
  2. Open config.js in an editor
  3. Set the elasticsearch parameter to the fully qualified hostname of your Elasticsearch server (see the example after this list)
  4. Copy the contents of the extracted directory to your webserver
  5. Open your browser to kibana. (ex: http://127.0.0.1/kibana3)
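For step 3, the change is a one-liner in config.js. Assuming Kibana was copied to /var/www/kibana3 and your config.js still has the stock default value (both of which are assumptions, so adjust the path, the matched string and the IP to suit your setup), a sed in the same spirit as the elasticsearch.yml edit above works:

# swap the default "use the browser's hostname" setting for a fixed Elasticsearch address
sed -i -e 's|"http://"+window.location.hostname+":9200"|"http://10.0.0.123:9200"|' /var/www/kibana3/config.js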

Logstash Configuration
(Switch back to your Windows machine)

Next I created the logstash config file (logstash needs to know how to filter the syslog messages for parsing). Here is my config file…

input {
 udp { 
 port => 5544 ## change me to whatever you set your ASA syslog port to
 type => "cisco-fw"
 }
}

filter {
 ####### Cisco FW ####
 if [type] == "cisco-fw" {
 grok {
 match => ["message", "%{CISCO_TAGGED_SYSLOG} %{GREEDYDATA:cisco_message}"]
 }
 # Parse the syslog severity and facility
 syslog_pri { }

 # Extract fields from each of the detailed message types
 # The patterns provided below are included in the core of LogStash 1.2.0.
 grok {
 match => [
 "cisco_message", "%{CISCOFW106001}",
 "cisco_message", "%{CISCOFW106006_106007_106010}",
 "cisco_message", "%{CISCOFW106014}",
 "cisco_message", "%{CISCOFW106015}",
 "cisco_message", "%{CISCOFW106021}",
 "cisco_message", "%{CISCOFW106023}",
 "cisco_message", "%{CISCOFW106100}",
 "cisco_message", "%{CISCOFW110002}",
 "cisco_message", "%{CISCOFW302010}",
 "cisco_message", "%{CISCOFW302013_302014_302015_302016}",
 "cisco_message", "%{CISCOFW302020_302021}",
 "cisco_message", "%{CISCOFW305011}",
 "cisco_message", "%{CISCOFW313001_313004_313008}",
 "cisco_message", "%{CISCOFW313005}",
 "cisco_message", "%{CISCOFW402117}",
 "cisco_message", "%{CISCOFW402119}",
 "cisco_message", "%{CISCOFW419001}",
 "cisco_message", "%{CISCOFW419002}",
 "cisco_message", "%{CISCOFW500004}",
 "cisco_message", "%{CISCOFW602303_602304}",
 "cisco_message", "%{CISCOFW710001_710002_710003_710005_710006}",
 "cisco_message", "%{CISCOFW713172}",
 "cisco_message", "%{CISCOFW733100}"
 ]
 }

 geoip {
 add_tag => [ "GeoIP" ]
 database => "C:\GeoLiteCity.dat" ### Change me to location of GeoLiteCity.dat file
 source => "src_ip"
 }

 if [geoip][city_name] == "" { mutate { remove_field => "[geoip][city_name]" } }
 if [geoip][continent_code] == "" { mutate { remove_field => "[geoip][continent_code]" } }
 if [geoip][country_code2] == "" { mutate { remove_field => "[geoip][country_code2]" } }
 if [geoip][country_code3] == "" { mutate { remove_field => "[geoip][country_code3]" } }
 if [geoip][country_name] == "" { mutate { remove_field => "[geoip][country_name]" } }
 if [geoip][latitude] == "" { mutate { remove_field => "[geoip][latitude]" } }
 if [geoip][longitude] == "" { mutate { remove_field => "[geoip][longitude]" } }
 if [geoip][postal_code] == "" { mutate { remove_field => "[geoip][postal_code]" } }
 if [geoip][region_name] == "" { mutate { remove_field => "[geoip][region_name]" } }
 if [geoip][time_zone] == "" { mutate { remove_field => "[geoip][time_zone]" } }


 # Parse the date
 date {
 match => ["timestamp",
 "MMM dd HH:mm:ss",
 "MMM d HH:mm:ss",
 "MMM dd yyyy HH:mm:ss",
 "MMM d yyyy HH:mm:ss"
 ]
 }
 }
 ###### End of Cisco FW #######
}

output {
 stdout { 
 codec => json
 }

 elasticsearch_http {
 host => "10.0.0.123" # change me to the IP of your elasticsearch server
 }
}

Change the geoip location and elasticsearch IP address in your config file

geoip {
        add_tag => [ "GeoIP" ]
         database => "location of geolitecity.dat file. ex c:\geolitecity.dat"

and

elasticsearch_http { 
        host => "IP of elasticsearch server"

Save the config file in your Logstash folder as config.conf.

Next run Logstash with your newly created config file.

bin\logstash.bat agent -f config.conf


You might see warnings but you should not be seeing any errors


Cisco ASA configuration via ASDM

Follow my guide here to turn on syslogging on your ASA firewall. Set the IP to the IP address of the server running Logstash and set the port to 5544, like in the logstash config file. I set my logging level to informational, but you can set it to whatever level you want to log. (As a reminder, the Cisco logging levels are: 0 emergencies, 1 alerts, 2 critical, 3 errors, 4 warnings, 5 notifications, 6 informational, 7 debugging.)

As soon as you apply the config in the ASA, you should immediately start seeing results in your logstash window because one of the outputs was set to stdout.


The messages are also going to your Elasticsearch server. Open your browser to Kibana and read the getting started guides. Once you configure your dashboard, you should start seeing graphs and maps built from your firewall traffic.

 

Here is my dashboard code in case you are curious. It shows a graph of message types, a map of hits, a graph of source and destination IPs, a histogram of messages per second and a list of the messages at the bottom.

To get this to work in Kibana…
1. Copy all of the code below
2. Save it as a text file
3. Go to Kibana
4. Click the Load icon at the top right of the page
5. Click Advanced
6. Click choose file and pick the file you just saved.

{
  "title": "Logstash ASA",
  "services": {
    "query": {
      "list": {
        "0": {
          "query": "cisco-fw",
          "alias": "",
          "color": "#7EB26D",
          "id": 0,
          "pin": false,
          "type": "lucene",
          "enable": true
        },
        "1": {
          "id": 1,
          "color": "#7EB26D",
          "alias": "",
          "pin": false,
          "type": "lucene",
          "enable": true,
          "query": "severity informational"
        },
        "2": {
          "id": 2,
          "color": "#6ED0E0",
          "alias": "",
          "pin": false,
          "type": "lucene",
          "enable": true,
          "query": "severity warning"
        },
        "3": {
          "id": 3,
          "color": "#EF843C",
          "alias": "",
          "pin": false,
          "type": "lucene",
          "enable": true,
          "query": "severity error"
        },
        "4": {
          "id": 4,
          "color": "#E24D42",
          "alias": "",
          "pin": false,
          "type": "lucene",
          "enable": true,
          "query": "severity critical"
        },
        "5": {
          "id": 5,
          "color": "#1F78C1",
          "alias": "",
          "pin": false,
          "type": "lucene",
          "enable": true,
          "query": "severity alert"
        },
        "6": {
          "id": 6,
          "color": "#BA43A9",
          "alias": "",
          "pin": false,
          "type": "lucene",
          "enable": true,
          "query": "_grok"
        }
      },
      "ids": [
        0,
        1,
        2,
        3,
        4,
        5,
        6
      ]
    },
    "filter": {
      "list": {
        "0": {
          "type": "time",
          "field": "@timestamp",
          "from": "now-6h",
          "to": "now",
          "mandate": "must",
          "active": true,
          "alias": "",
          "id": 0
        }
      },
      "ids": [
        0
      ]
    }
  },
  "rows": [
    {
      "title": "Graph",
      "height": "350px",
      "editable": true,
      "collapse": false,
      "collapsable": true,
      "panels": [
        {
          "span": 12,
          "editable": true,
          "group": [
            "default"
          ],
          "type": "histogram",
          "mode": "count",
          "time_field": "@timestamp",
          "value_field": null,
          "auto_int": true,
          "resolution": 100,
          "interval": "5m",
          "fill": 3,
          "linewidth": 3,
          "timezone": "browser",
          "spyable": true,
          "zoomlinks": true,
          "bars": false,
          "stack": false,
          "points": false,
          "lines": true,
          "legend": true,
          "x-axis": true,
          "y-axis": true,
          "percentage": false,
          "interactive": true,
          "queries": {
            "mode": "selected",
            "ids": [
              1,
              2,
              3,
              4,
              5
            ]
          },
          "title": "Events over time",
          "intervals": [
            "auto",
            "1s",
            "1m",
            "5m",
            "10m",
            "30m",
            "1h",
            "3h",
            "12h",
            "1d",
            "1w",
            "1M",
            "1y"
          ],
          "options": true,
          "tooltip": {
            "value_type": "cumulative",
            "query_as_alias": true
          },
          "scale": 1,
          "y_format": "none",
          "grid": {
            "max": null,
            "min": 0
          },
          "annotate": {
            "enable": false,
            "query": "*",
            "size": 20,
            "field": "_type",
            "sort": [
              "_score",
              "desc"
            ]
          },
          "pointradius": 5,
          "show_query": true,
          "legend_counts": true,
          "zerofill": true,
          "derivative": false,
          "scaleSeconds": true
        },
        {
          "error": false,
          "span": 6,
          "editable": true,
          "type": "bettermap",
          "loadingEditor": false,
          "field": "geoip.location",
          "size": 2000,
          "spyable": true,
          "tooltip": "_id",
          "queries": {
            "mode": "all",
            "ids": [
              0,
              1,
              2,
              3,
              4,
              5,
              6
            ]
          },
          "title": "Firewall Hits"
        },
        {
          "error": false,
          "span": 4,
          "editable": true,
          "type": "terms",
          "loadingEditor": false,
          "field": "syslog_severity",
          "exclude": [],
          "missing": true,
          "other": true,
          "size": 7,
          "order": "count",
          "style": {
            "font-size": "10pt"
          },
          "donut": false,
          "tilt": false,
          "labels": true,
          "arrangement": "horizontal",
          "chart": "bar",
          "counter_pos": "above",
          "spyable": true,
          "queries": {
            "mode": "all",
            "ids": [
              0,
              1,
              2,
              3,
              4,
              5,
              6
            ]
          },
          "tmode": "terms",
          "tstat": "total",
          "valuefield": "",
          "title": "Message Types"
        },
        {
          "error": false,
          "span": 4,
          "editable": true,
          "type": "terms",
          "loadingEditor": false,
          "field": "src_ip",
          "exclude": [],
          "missing": false,
          "other": false,
          "size": 10,
          "order": "count",
          "style": {
            "font-size": "10pt"
          },
          "donut": false,
          "tilt": false,
          "labels": true,
          "arrangement": "horizontal",
          "chart": "bar",
          "counter_pos": "above",
          "spyable": false,
          "queries": {
            "mode": "all",
            "ids": [
              0,
              1,
              2,
              3,
              4,
              5,
              6
            ]
          },
          "tmode": "terms",
          "tstat": "total",
          "valuefield": "",
          "title": "Source IP"
        },
        {
          "error": false,
          "span": 4,
          "editable": true,
          "type": "terms",
          "loadingEditor": false,
          "field": "dst_ip",
          "exclude": [],
          "missing": true,
          "other": true,
          "size": 10,
          "order": "count",
          "style": {
            "font-size": "10pt"
          },
          "donut": false,
          "tilt": false,
          "labels": true,
          "arrangement": "horizontal",
          "chart": "bar",
          "counter_pos": "above",
          "spyable": true,
          "queries": {
            "mode": "all",
            "ids": [
              0,
              1,
              2,
              3,
              4,
              5,
              6
            ]
          },
          "tmode": "terms",
          "tstat": "total",
          "valuefield": "",
          "title": "DESTINATION IP"
        }
      ],
      "notice": false
    },
    {
      "title": "Events",
      "height": "350px",
      "editable": true,
      "collapse": false,
      "collapsable": true,
      "panels": [
        {
          "title": "All events",
          "error": false,
          "span": 12,
          "editable": true,
          "group": [
            "default"
          ],
          "type": "table",
          "size": 100,
          "pages": 5,
          "offset": 0,
          "sort": [
            "timestamp",
            "desc"
          ],
          "style": {
            "font-size": "9pt"
          },
          "overflow": "min-height",
          "fields": [
            "duration",
            "cisco_message",
            "geoip.location",
            "direction",
            "src_ip",
            "dst_ip",
            "timestamp",
            "dst_port",
            "syslog_severity",
            "src_xlated_ip",
            "src_mapped_ip"
          ],
          "localTime": true,
          "timeField": "@timestamp",
          "highlight": [],
          "sortable": true,
          "header": true,
          "paging": true,
          "spyable": true,
          "queries": {
            "mode": "all",
            "ids": [
              0,
              1,
              2,
              3,
              4,
              5,
              6
            ]
          },
          "field_list": true,
          "status": "Stable",
          "trimFactor": 300,
          "normTimes": true,
          "all_fields": false
        }
      ],
      "notice": false
    }
  ],
  "editable": true,
  "failover": false,
  "index": {
    "interval": "day",
    "pattern": "[logstash-]YYYY.MM.DD",
    "default": "NO_TIME_FILTER_OR_INDEX_PATTERN_NOT_MATCHED",
    "warm_fields": true
  },
  "style": "dark",
  "panel_hints": true,
  "pulldowns": [
    {
      "type": "query",
      "collapse": false,
      "notice": false,
      "query": "*",
      "pinned": true,
      "history": [
        "_grok",
        "severity alert",
        "severity critical",
        "severity error",
        "severity warning",
        "severity informational",
        "cisco-fw",
        "severity emergencie",
        "severity emergencies",
        "severity errors"
      ],
      "remember": 10,
      "enable": true
    },
    {
      "type": "filtering",
      "collapse": false,
      "notice": true,
      "enable": true
    }
  ],
  "nav": [
    {
      "type": "timepicker",
      "collapse": false,
      "notice": false,
      "status": "Stable",
      "time_options": [
        "5m",
        "15m",
        "1h",
        "6h",
        "12h",
        "24h",
        "2d",
        "7d",
        "30d"
      ],
      "refresh_intervals": [
        "5s",
        "10s",
        "30s",
        "1m",
        "5m",
        "15m",
        "30m",
        "1h",
        "2h",
        "1d"
      ],
      "timefield": "@timestamp",
      "now": true,
      "filter_id": 0,
      "enable": true
    }
  ],
  "loader": {
    "save_gist": false,
    "save_elasticsearch": true,
    "save_local": true,
    "save_default": true,
    "save_temp": true,
    "save_temp_ttl_enable": true,
    "save_temp_ttl": "30d",
    "load_gist": true,
    "load_elasticsearch": true,
    "load_elasticsearch_size": 20,
    "load_local": true,
    "hide": false
  },
  "refresh": false
}