Merge pull request #188 from wazuh/wazuh-391-elastic-711-workaround

Wazuh Ansible adaptation to Elastic 7
This commit is contained in:
Manuel J. Bernal 2019-06-13 18:02:02 +02:00 committed by GitHub
commit 0e7801945e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
45 changed files with 3044 additions and 697 deletions

View File

@ -1,25 +0,0 @@
version: 2
jobs:
test:
machine:
python:
version: 2.7
services:
- docker
working_directory: ~/wazuh-ansible
steps:
- checkout
- run:
name: Install pipenv
command: pip install pipenv
- run:
name: Install molecule
command: pipenv install --dev --system
- run:
name: Run molecule
command: pipenv run test
workflows:
version: 2
test_molecule:
jobs:
- test

1
.gitignore vendored
View File

@ -4,6 +4,5 @@ wazuh-elastic_stack-distributed.yml
wazuh-elastic_stack-single.yml
wazuh-elastic.yml
wazuh-kibana.yml
wazuh-logstash.yml
wazuh-manager.yml
*.pyc

View File

@ -1,8 +0,0 @@
language: python
services: docker
before_script:
- pip install pipenv
- pipenv install --dev --system
script:
- pipenv run test
- pipenv run agent

View File

@ -126,7 +126,6 @@ Ansible starting point.
Roles:
- Elastic Stack:
- ansible-elasticsearch: This role is prepared to install elasticsearch on the host that runs it.
- ansible-logstash: This role involves the installation of logstash on the host that runs it.
- ansible-kibana: Using this role we will install Kibana on the host that runs it.
- Wazuh:
- ansible-filebeat: This role is prepared to install filebeat on the host that runs it.

View File

@ -18,7 +18,6 @@ These playbooks install and configure Wazuh agent, manager and Elastic Stack.
│ ├── roles
│ │ ├── elastic-stack
│ │ │ ├── ansible-elasticsearch
│ │ │ ├── ansible-logstash
│ │ │ ├── ansible-kibana
│ │
│ │ ├── wazuh
@ -35,7 +34,6 @@ These playbooks install and configure Wazuh agent, manager and Elastic Stack.
│ │ ├── wazuh-elastic_stack-distributed.yml
│ │ ├── wazuh-elastic_stack-single.yml
│ │ ├── wazuh-kibana.yml
│ │ ├── wazuh-logstash.yml
│ │ ├── wazuh-manager.yml
│ ├── README.md

View File

@ -4,8 +4,7 @@
roles:
- role: wazuh/ansible-wazuh-manager
# - {role: wazuh/ansible-filebeat} #, filebeat_output_logstash_hosts: 'your elastic stack server IP'
# - {role: wazuh/ansible-filebeat} #, filebeat_output_elasticsearch_hosts: 'your elastic stack server IP'
# Elasticsearch requires too much memory to test multiple containers concurrently - To Fix
# - {role: elastic-stack/ansible-elasticsearch, elasticsearch_network_host: 'localhost'}
# - {role: elastic-stack/ansible-logstash, logstash_input_beats: true, elasticsearch_network_host: 'localhost'}
# - {role: elastic-stack/ansible-kibana, elasticsearch_network_host: 'localhost'}

View File

@ -6,5 +6,4 @@
- hosts: <your elastic stack server host>
roles:
- {role: /etc/ansible/roles/wazuh-ansible/roles/elastic-stack/ansible-elasticsearch, elasticsearch_network_host: 'localhost'}
- {role: /etc/ansible/roles/wazuh-ansible/roles/elastic-stack/ansible-logstash, logstash_input_beats: true, elasticsearch_network_host: 'localhost'}
- {role: /etc/ansible/roles/wazuh-ansible/roles/elastic-stack/ansible-kibana, elasticsearch_network_host: 'localhost'}

View File

@ -1,7 +1,6 @@
---
- hosts: <your single server host>
- hosts: <your server host>
roles:
- {role: /etc/ansible/roles/wazuh-ansible/roles/wazuh/ansible-wazuh-manager}
- {role: /etc/ansible/roles/wazuh-ansible/roles/elastic-stack/ansible-elasticsearch, elasticsearch_network_host: 'localhost'}
- { role: /etc/ansible/roles/wazuh-ansible/roles/elastic-stack/ansible-logstash, elasticsearch_network_host: 'localhost' }
- { role: /etc/ansible/roles/wazuh-ansible/roles/elastic-stack/ansible-kibana, elasticsearch_network_host: 'localhost' }
- {role: ../roles/wazuh/ansible-wazuh-manager}
- {role: ../roles/elastic-stack/ansible-elasticsearch, elasticsearch_network_host: '0.0.0.0', single_node: true}
- { role: ../roles/elastic-stack/ansible-kibana, elasticsearch_network_host: 'localhost' }

View File

@ -1,4 +0,0 @@
---
- hosts: <your logstash host>
roles:
- {role: /etc/ansible/roles/wazuh-ansible/roles/elastic-stack/ansible-logstash, elasticsearch_network_host: ["localhost"]}

View File

@ -2,4 +2,4 @@
- hosts: <your wazuh server host>
roles:
- role: /etc/ansible/roles/wazuh-ansible/roles/wazuh/ansible-wazuh-manager
- {role: /etc/ansible/roles/wazuh-ansible/roles/wazuh/ansible-filebeat, filebeat_output_logstash_hosts: 'your logstash IP'}
- {role: /etc/ansible/roles/wazuh-ansible/roles/wazuh/ansible-filebeat, filebeat_output_elasticsearch_hosts: 'your elasticsearch IP'}

View File

@ -30,10 +30,27 @@ Defaults variables are listed below, along with its values (see `defaults/main.y
Example Playbook
----------------
- Single-node
```
- hosts: elasticsearch
roles:
- { role: ansible-role-elasticsearch, elasticsearch_network_host: '192.168.33.182' }
- { role: ansible-role-elasticsearch, elasticsearch_network_host: '192.168.33.182', single_node: true }
```
- Three nodes Elasticsearch cluster
```
---
- hosts: 172.16.0.161
roles:
- {role: ../roles/elastic-stack/ansible-elasticsearch, elasticsearch_network_host: '172.16.0.161', elasticsearch_bootstrap_node: true, elasticsearch_cluster_nodes: ['172.16.0.162','172.16.0.163','172.16.0.161']}
- hosts: 172.16.0.162
roles:
- {role: ../roles/elastic-stack/ansible-elasticsearch, elasticsearch_network_host: '172.16.0.162', elasticsearch_master_candidate: true, elasticsearch_cluster_nodes: ['172.16.0.162','172.16.0.163','172.16.0.161']}
- hosts: 172.16.0.163
roles:
- {role: ../roles/elastic-stack/ansible-elasticsearch, elasticsearch_network_host: '172.16.0.163', elasticsearch_master_candidate: true, elasticsearch_cluster_nodes: ['172.16.0.162','172.16.0.163','172.16.0.161']}
```
License and copyright

View File

@ -4,7 +4,9 @@ elasticsearch_node_name: node-1
elasticsearch_http_port: 9200
elasticsearch_network_host: 127.0.0.1
elasticsearch_jvm_xms: null
elastic_stack_version: 6.8.0
elasticsearch_shards: 5
elasticsearch_replicas: 1
elasticsearch_install_java: true
elastic_stack_version: 7.1.1
single_node: false
elasticsearch_bootstrap_node: false
elasticsearch_master_candidate: false
elasticsearch_cluster_nodes:
- 127.0.0.1

View File

@ -4,12 +4,6 @@
name: ['apt-transport-https', 'ca-certificates']
state: present
- when: elasticsearch_install_java
block:
- name: Debian/Ubuntu | Install OpenJDK 1.8
apt: name=openjdk-8-jre state=present cache_valid_time=3600
tags: install
- name: Debian/Ubuntu | Add Elasticsearch GPG key.
apt_key:
url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
@ -17,7 +11,7 @@
- name: Debian/Ubuntu | Install Elastic repo
apt_repository:
repo: 'deb https://artifacts.elastic.co/packages/6.x/apt stable main'
repo: 'deb https://artifacts.elastic.co/packages/7.x/apt stable main'
state: present
filename: 'elastic_repo'
update_cache: true

View File

@ -1,5 +1,5 @@
---
- name: Debian/Ubuntu | Removing Elasticsearch repository
apt_repository:
repo: deb https://artifacts.elastic.co/packages/5.x/apt stable main
repo: deb https://artifacts.elastic.co/packages/7.x/apt stable main
state: absent

View File

@ -1,20 +1,13 @@
---
- when: elasticsearch_install_java
block:
- name: RedHat/CentOS/Fedora | Install OpenJDK 1.8
yum: name=java-1.8.0-openjdk state=present
register: oracle_java_task_rpm_installed
tags: install
- name: RedHat/CentOS/Fedora | Install Elastic repo
yum_repository:
name: elastic_repo
description: Elastic repository for 6.x packages
baseurl: https://artifacts.elastic.co/packages/6.x/yum
description: Elastic repository for 7.x packages
baseurl: https://artifacts.elastic.co/packages/7.x/yum
gpgkey: https://artifacts.elastic.co/GPG-KEY-elasticsearch
gpgcheck: true
- name: RedHat/CentOS/Fedora | Install Elasticsearch
package: name=elasticsearch-{{ elastic_stack_version }} state=present
when: not elasticsearch_install_java or oracle_java_task_rpm_installed is defined
tags: install

View File

@ -95,6 +95,8 @@
url: "http://{{elasticsearch_network_host}}:{{elasticsearch_http_port}}/_template/wazuh"
method: GET
status_code: 200, 404
when: not elasticsearch_bootstrap_node or single_node
poll: 30
register: wazuh_alerts_template_exits
tags: init
@ -104,8 +106,10 @@
method: PUT
status_code: 200
body_format: json
body: "{{ lookup('template','wazuh-elastic6-template-alerts.json.j2') }}"
when: wazuh_alerts_template_exits.status != 200
body: "{{ lookup('template','wazuh-elastic7-template-alerts.json.j2') }}"
when:
- wazuh_alerts_template_exits.status is defined
- wazuh_alerts_template_exits.status != 200
tags: init
- import_tasks: "RMRedHat.yml"

View File

@ -1,89 +1,24 @@
# {{ ansible_managed }}
# ======================== Elasticsearch Configuration =========================
#
# NOTE: Elasticsearch comes with reasonable defaults for most settings.
# Before you set out to tweak and tune the configuration, make sure you
# understand what are you trying to accomplish and the consequences.
#
# The primary way of configuring a node is via this file. This template lists
# the most important settings you may want to configure for a production cluster.
#
# Please consult the documentation for further information on configuration options:
# https://www.elastic.co/guide/en/elasticsearch/reference/index.html
#
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
cluster.name: {{ elasticsearch_cluster_name }}
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
node.name: {{ elasticsearch_node_name }}
#
# Add custom attributes to the node:
#
#node.attr.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
path.data: /var/lib/elasticsearch
#
# Path to log files:
#
path.logs: /var/log/elasticsearch
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
bootstrap.memory_lock: true
#
# Make sure that the heap size is set to about half the memory available
# on the system and that the owner of the process is allowed to use this
# limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
network.host: {{ elasticsearch_network_host }}
#
# Set a custom port for HTTP:
#
#http.port: 9200
#
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when new node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
#discovery.zen.ping.unicast.hosts: ["host1", "host2"]
#
# Prevent the "split brain" by configuring the majority of nodes (total number of master-eligible nodes / 2 + 1):
#
#discovery.zen.minimum_master_nodes: 3
#
# For more information, consult the zen discovery module documentation.
#
# ---------------------------------- Gateway -----------------------------------
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
#gateway.recover_after_nodes: 3
#
# For more information, consult the gateway module documentation.
#
# ---------------------------------- Various -----------------------------------
#
# Require explicit names when deleting indices:
#
#action.destructive_requires_name: true
{% if single_node %}
discovery.type: single-node
{% elif elasticsearch_bootstrap_node %}
node.master: true
cluster.initial_master_nodes:
{% for item in elasticsearch_cluster_nodes %}
- {{ item }}
{% endfor %}
{% elif elasticsearch_master_candidate %}
node.master: true
discovery.seed_hosts:
{% for item in elasticsearch_cluster_nodes %}
- {{ item }}
{% endfor %}
{% endif %}

View File

@ -3,5 +3,5 @@ elasticsearch_http_port: "9200"
elasticsearch_network_host: "127.0.0.1"
kibana_server_host: "0.0.0.0"
kibana_server_port: "5601"
elastic_stack_version: 6.8.0
wazuh_version: 3.9.1
elastic_stack_version: 7.1.1
wazuh_version: 3.9.1

View File

@ -11,7 +11,7 @@
- name: Debian/Ubuntu | Install Elastic repo
apt_repository:
repo: 'deb https://artifacts.elastic.co/packages/6.x/apt stable main'
repo: 'deb https://artifacts.elastic.co/packages/7.x/apt stable main'
state: present
filename: 'elastic_repo'
update_cache: true

View File

@ -1,5 +1,5 @@
---
- name: Debian/Ubuntu | Removing Elasticsearch repository
apt_repository:
repo: deb https://artifacts.elastic.co/packages/5.x/apt stable main
repo: deb https://artifacts.elastic.co/packages/7.x/apt stable main
state: absent

View File

@ -2,8 +2,8 @@
- name: RedHat/CentOS/Fedora | Install Elastic repo
yum_repository:
name: elastic_repo
description: Elastic repository for 6.x packages
baseurl: https://artifacts.elastic.co/packages/6.x/yum
description: Elastic repository for 7.x packages
baseurl: https://artifacts.elastic.co/packages/7.x/yum
gpgkey: https://artifacts.elastic.co/GPG-KEY-elasticsearch
gpgcheck: true

View File

@ -19,7 +19,7 @@ server.host: {{ kibana_server_host }}
#server.name: "your-hostname"
# The URL of the Elasticsearch instance to use for all your queries.
elasticsearch.url: "http://{{ elasticsearch_network_host }}:{{ elasticsearch_http_port }}"
elasticsearch.hosts: "http://{{ elasticsearch_network_host }}:{{ elasticsearch_http_port }}"
# When this setting's value is true Kibana uses the hostname specified in the server.host
# setting. When the value of this setting is false, Kibana uses the hostname of the host

View File

@ -1,53 +0,0 @@
Ansible Role: Logstash
----------------------
An Ansible Role that installs [Logstash](https://www.elastic.co/products/logstash)
Requirements
------------
This role will work on:
* Red Hat
* CentOS
* Fedora
* Debian
* Ubuntu
Role Variables
--------------
```
---
logstash_create_config: true
logstash_input_beats: false
elasticsearch_network_host: "127.0.0.1"
elasticsearch_http_port: "9200"
elastic_stack_version: 5.5.0
logstash_ssl: false
logstash_ssl_dir: /etc/pki/logstash
logstash_ssl_certificate_file: ""
logstash_ssl_key_file: ""
```
Example Playbook
----------------
```
- hosts: logstash
roles:
- { role: ansible-role-logstash, elasticsearch_network_host: '192.168.33.182' }
```
License and copyright
---------------------
WAZUH Copyright (C) 2017 Wazuh Inc. (License GPLv3)
### Based on previous work from geerlingguy
- https://github.com/geerlingguy/ansible-role-elasticsearch
### Modified by Wazuh
The playbooks have been modified by Wazuh, including some specific requirements, templates and configuration to improve integration with Wazuh ecosystem.

View File

@ -1,19 +0,0 @@
---
logstash_create_config: true
logstash_input_beats: false
# You can introduce Multiples IPs
# elasticseacrh_network_host: ["Localhost1", "Localhost2", "Localhost3", ...]
elasticsearch_network_host: ["Localhost"]
elasticsearch_http_port: "9200"
elasticsearch_shards: 5
elasticsearch_replicas: 1
elastic_stack_version: 6.8.0
logstash_ssl: false
logstash_ssl_dir: /etc/pki/logstash
logstash_ssl_certificate_file: ""
logstash_ssl_key_file: ""
logstash_install_java: true

View File

@ -1,3 +0,0 @@
---
- name: restart logstash
service: name=logstash state=restarted

View File

@ -1,24 +0,0 @@
---
galaxy_info:
author: Wazuh
description: Installing and maintaining Elasticsearch server.
company: wazuh.com
license: license (GPLv3)
min_ansible_version: 2.0
platforms:
- name: EL
versions:
- all
- name: Fedora
versions:
- all
- name: Debian
versions:
- all
- name: Ubuntu
versions:
- all
galaxy_tags:
- web
- system
- monitoring

View File

@ -1,45 +0,0 @@
---
- name: Debian/Ubuntu | Install apt-transport-https and ca-certificates
apt:
name: ['apt-transport-https', 'ca-certificates']
state: present
- when: logstash_install_java
block:
- name: Debian/Ubuntu | Install OpenJDK 1.8
apt: name=openjdk-8-jre state=present cache_valid_time=3600
tags: install
- name: Debian/Ubuntu | Add Elasticsearch GPG key
apt_key:
url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
state: present
- name: Debian/Ubuntu | Install Elasticsearch repo
apt_repository:
repo: 'deb https://artifacts.elastic.co/packages/6.x/apt stable main'
state: present
filename: 'elastic_repo'
- name: Debian/Ubuntu | Install Logstash
apt:
name: "logstash=1:{{ elastic_stack_version }}-1"
state: present
update_cache: true
tags: install
- name: Debian/Ubuntu | Checking if wazuh-manager is installed
command: dpkg -l wazuh-manager
register: wazuh_manager_check_deb
when: logstash_input_beats == false
args:
warn: false
- name: Debian/Ubuntu | Add user logstash to group ossec
user:
name: logstash
groups: ossec
append: true
when:
- logstash_input_beats == false
- wazuh_manager_check_deb.rc == 0

View File

@ -1,5 +0,0 @@
---
- name: Debian/Ubuntu | Removing Elasticsearch repository
apt_repository:
repo: deb https://artifacts.elastic.co/packages/5.x/apt stable main
state: absent

View File

@ -1,5 +0,0 @@
---
- name: RedHat/CentOS/Fedora | Remove logstash repository (and clean up left-over metadata)
yum_repository:
name: elastic_repo
state: absent

View File

@ -1,43 +0,0 @@
---
- when: logstash_install_java
block:
- name: RedHat/CentOS/Fedora | Install OpenJDK 1.8
yum: name=java-1.8.0-openjdk state=present
register: oracle_java_task_rpm_installed
tags: install
- name: RedHat/CentOS/Fedora | Install Logstash repo
yum_repository:
name: elastic_repo
description: Elastic repository for 6.x packages
baseurl: https://artifacts.elastic.co/packages/6.x/yum
gpgkey: https://artifacts.elastic.co/GPG-KEY-elasticsearch
gpgcheck: true
- name: RedHat/CentOS/Fedora | Install Logstash
package: name=logstash-{{ elastic_stack_version }} state=present
when: not logstash_install_java or oracle_java_task_rpm_installed is defined
tags: install
- name: RedHat/CentOS/Fedora | Checking if wazuh-manager is installed
command: rpm -q wazuh-manager
register: wazuh_manager_check_rpm
when: logstash_input_beats == false
args:
warn: false
- name: RedHat/CentOS/Fedora | Add user logstash to group ossec
user:
name: logstash
groups: ossec
append: true
when:
- logstash_input_beats == false
- wazuh_manager_check_rpm.rc == 0
- name: Amazon Linux change startup group
shell: sed -i 's/.*LS_GROUP=logstash.*/LS_GROUP=ossec/' /etc/logstash/startup.options
when:
- logstash_input_beats == false
- wazuh_manager_check_rpm.rc == 0
- ansible_distribution == "Amazon" and ansible_distribution_major_version == "NA"

View File

@ -1,27 +0,0 @@
---
- name: Ensure Logstash SSL key pair directory exists.
file:
path: "{{ logstash_ssl_dir }}"
state: directory
when: logstash_ssl
tags: configure
- name: Copy SSL key and cert for logstash.
copy:
src: "{{ item }}"
dest: "{{ logstash_ssl_dir }}/{{ item | basename }}"
mode: 0644
with_items:
- "{{ logstash_ssl_key_file }}"
- "{{ logstash_ssl_certificate_file }}"
when: logstash_ssl
tags: configure
- name: Logstash configuration
template:
src: 01-wazuh.conf.j2
dest: /etc/logstash/conf.d/01-wazuh.conf
owner: root
group: root
notify: restart logstash
tags: configure

View File

@ -1,40 +0,0 @@
---
- import_tasks: RedHat.yml
when: ansible_os_family == 'RedHat'
- import_tasks: Debian.yml
when: ansible_os_family == "Debian"
- import_tasks: config.yml
when: logstash_create_config
- name: Reload systemd
systemd: daemon_reload=yes
ignore_errors: true
when:
- not (ansible_distribution == "Amazon" and ansible_distribution_major_version == "NA")
- not (ansible_distribution == "Ubuntu" and ansible_distribution_version is version('15.04', '<'))
- not (ansible_distribution == "Debian" and ansible_distribution_version is version('8', '<'))
- name: Amazon Linux create service
shell: /usr/share/logstash/bin/system-install /etc/logstash/startup.options
when: ansible_distribution == "Amazon" and ansible_distribution_major_version == "NA"
- name: Ensure Logstash started and enabled
service:
name: logstash
enabled: true
state: started
- name: Amazon Linux start Logstash
service:
name: logstash
enabled: true
state: started
when: ansible_distribution == "Amazon" and ansible_distribution_major_version == "NA"
- import_tasks: "RMRedHat.yml"
when: ansible_os_family == "RedHat"
- import_tasks: "RMDebian.yml"
when: ansible_os_family == "Debian"

View File

@ -1,73 +0,0 @@
#jinja2: trim_blocks:False
# {{ ansible_managed }}
# Wazuh - Logstash configuration file
{% if logstash_input_beats == true %}
## Remote Wazuh Manager - Filebeat input
input {
beats {
port => 5000
codec => "json_lines"
{% if logstash_ssl == true %}
ssl => true
ssl_certificate => "{{ logstash_ssl_dir }}/{{ logstash_ssl_certificate_file | basename }}"
ssl_key => "{{ logstash_ssl_dir }}/{{ logstash_ssl_key_file | basename }}"
{% endif %}
}
}
{% else %}
## Local Wazuh Manager - JSON file input
input {
file {
type => "wazuh-alerts"
path => "/var/ossec/logs/alerts/alerts.json"
codec => "json"
}
}
{% endif %}
filter {
if [data][srcip] {
mutate {
add_field => [ "@src_ip", "%{[data][srcip]}" ]
}
}
if [data][aws][sourceIPAddress] {
mutate {
add_field => [ "@src_ip", "%{[data][aws][sourceIPAddress]}" ]
}
}
}
filter {
if [data][srcip] {
mutate {
add_field => [ "@src_ip", "%{[data][srcip]}" ]
}
}
if [data][aws][sourceIPAddress] {
mutate {
add_field => [ "@src_ip", "%{[data][aws][sourceIPAddress]}" ]
}
}
}
filter {
geoip {
source => "@src_ip"
target => "GeoLocation"
fields => ["city_name", "country_name", "region_name", "location"]
}
date {
match => ["timestamp", "ISO8601"]
target => "@timestamp"
}
mutate {
remove_field => [ "timestamp", "beat", "input_type", "tags", "count", "@version", "log", "offset", "type", "@src_ip", "host"]
}
}
output {
#stdout { codec => rubydebug }
elasticsearch {
hosts => {{ elasticsearch_network_host | to_json}}
index => "wazuh-alerts-3.x-%{+YYYY.MM.dd}"
document_type => "wazuh"
}
}

View File

@ -19,34 +19,10 @@ Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
```
filebeat_create_config: true
filebeat_prospectors:
- input_type: log
paths:
- "/var/ossec/logs/alerts/alerts.json"
document_type: json
json.message_key: log
json.keys_under_root: true
json.overwrite_keys: true
filebeat_output_elasticsearch_enabled: false
filebeat_output_elasticsearch_hosts:
- "localhost:9200"
filebeat_output_logstash_enabled: true
filebeat_output_logstash_hosts:
- "192.168.212.158:5000"
filebeat_enable_logging: true
filebeat_log_level: debug
filebeat_log_dir: /var/log/mybeat
filebeat_log_filename: mybeat.log
filebeat_ssl_dir: /etc/pki/logstash
filebeat_ssl_certificate_file: ""
filebeat_ssl_key_file: ""
filebeat_ssl_insecure: "false"
```
License and copyright

View File

@ -14,16 +14,12 @@ filebeat_output_elasticsearch_enabled: false
filebeat_output_elasticsearch_hosts:
- "localhost:9200"
filebeat_output_logstash_enabled: true
filebeat_output_logstash_hosts:
- "192.168.212.158:5000"
filebeat_enable_logging: true
filebeat_log_level: debug
filebeat_log_dir: /var/log/mybeat
filebeat_log_filename: mybeat.log
filebeat_ssl_dir: /etc/pki/logstash
filebeat_ssl_dir: /etc/pki/filebeat
filebeat_ssl_certificate_file: ""
filebeat_ssl_key_file: ""
filebeat_ssl_insecure: "false"

View File

@ -12,6 +12,6 @@
- name: Debian/Ubuntu | Add Filebeat repository.
apt_repository:
repo: 'deb https://artifacts.elastic.co/packages/6.x/apt stable main'
repo: 'deb https://artifacts.elastic.co/packages/7.x/apt stable main'
state: present
update_cache: true

View File

@ -1,5 +1,5 @@
---
- name: Debian/Ubuntu | Remove Filebeat repository (and clean up left-over metadata)
apt_repository:
repo: deb https://artifacts.elastic.co/packages/5.x/apt stable main
repo: deb https://artifacts.elastic.co/packages/7.x/apt stable main
state: absent

View File

@ -3,6 +3,6 @@
yum_repository:
name: elastic_repo
description: Elastic repository for 6.x packages
baseurl: https://artifacts.elastic.co/packages/6.x/yum
baseurl: https://artifacts.elastic.co/packages/7.x/yum
gpgkey: https://artifacts.elastic.co/GPG-KEY-elasticsearch
gpgcheck: true

View File

@ -9,6 +9,16 @@
notify: restart filebeat
tags: configure
- name: Copy Elasticsearch template.
template:
src: elasticsearch.yml.j2
dest: "/etc/filebeat/wazuh-template.json"
owner: root
group: root
mode: 0644
notify: restart filebeat
tags: configure
- name: Ensure Filebeat SSL key pair directory exists.
file:
path: "{{ filebeat_ssl_dir }}"

File diff suppressed because it is too large Load Diff

View File

@ -1,150 +1,58 @@
filebeat:
# List of prospectors to fetch data.
prospectors:
{{ filebeat_prospectors | to_json }}
# Wazuh - Filebeat configuration file
# Configure what outputs to use when sending the data collected by the beat.
# Multiple outputs may be used.
output:
filebeat.inputs:
- type: log
paths:
- '/var/ossec/logs/alerts/alerts.json'
{% if filebeat_output_elasticsearch_enabled %}
### Elasticsearch as output
elasticsearch:
# Array of hosts to connect to.
hosts: {{ filebeat_output_elasticsearch_hosts | to_json }}
setup.template.json.enabled: true
setup.template.json.path: "/etc/filebeat/wazuh-template.json"
setup.template.json.name: "wazuh"
setup.template.overwrite: true
# Optional protocol and basic auth credentials. These are deprecated.
#protocol: "https"
#username: "admin"
#password: "s3cr3t"
processors:
- decode_json_fields:
fields: ['message']
process_array: true
max_depth: 200
target: ''
overwrite_keys: true
- drop_fields:
fields: ['message', 'ecs', 'beat', 'input_type', 'tags', 'count', '@version', 'log', 'offset', 'type', 'host']
- rename:
fields:
- from: "data.aws.sourceIPAddress"
to: "@src_ip"
ignore_missing: true
fail_on_error: false
when:
regexp:
data.aws.sourceIPAddress: \b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b
- rename:
fields:
- from: "data.srcip"
to: "@src_ip"
ignore_missing: true
fail_on_error: false
when:
regexp:
data.srcip: \b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b
- rename:
fields:
- from: "data.win.eventdata.ipAddress"
to: "@src_ip"
ignore_missing: true
fail_on_error: false
when:
regexp:
data.win.eventdata.ipAddress: \b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b
# Number of workers per Elasticsearch host.
#worker: 1
# Send events directly to Elasticsearch
output.elasticsearch:
hosts: {{ filebeat_output_elasticsearch_hosts | to_json }}
#pipeline: geoip
indices:
- index: 'wazuh-alerts-3.x-%{+yyyy.MM.dd}'
# Optional index name. The default is "filebeat" and generates
# [filebeat-]YYYY.MM.DD keys.
#index: "filebeat"
# Optional HTTP Path
#path: "/elasticsearch"
# Proxy server URL
# proxy_url: http://proxy:3128
# The number of times a particular Elasticsearch index operation is attempted. If
# the indexing operation doesn't succeed after this many retries, the events are
# dropped. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
# The default is 50.
#bulk_max_size: 50
# Configure http request timeout before failing an request to Elasticsearch.
#timeout: 90
# The number of seconds to wait for new events between two bulk API index requests.
# If `bulk_max_size` is reached before this interval expires, addition bulk index
# requests are made.
#flush_interval: 1
# Boolean that sets if the topology is kept in Elasticsearch. The default is
# false. This option makes sense only for Packetbeat.
#save_topology: false
# The time to live in seconds for the topology information that is stored in
# Elasticsearch. The default is 15 seconds.
#topology_expire: 15
{% if filebeat_ssl_certificate_file and filebeat_ssl_key_file %}
# tls configuration. By default is off.
tls:
# List of root certificates for HTTPS server verifications
#certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for TLS client authentication
certificate: "{{ filebeat_ssl_dir }}/{{ filebeat_ssl_certificate_file | basename }}"
# Client Certificate Key
certificate_key: "{{ filebeat_ssl_dir }}/{{ filebeat_ssl_key_file | basename}}"
# Controls whether the client verifies server certificates and host name.
# If insecure is set to true, all server host names and certificates will be
# accepted. In this mode TLS based connections are susceptible to
# man-in-the-middle attacks. Use only for testing.
insecure: {{ filebeat_ssl_insecure }}
# Configure cipher suites to be used for TLS connections
#cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#curve_types: []
# Configure minimum TLS version allowed for connection to logstash
#min_version: 1.0
# Configure maximum TLS version allowed for connection to logstash
#max_version: 1.2
{% endif %}
{% endif %}
{% if filebeat_output_logstash_enabled %}
### Logstash as output
logstash:
# The Logstash hosts
hosts: {{ filebeat_output_logstash_hosts | to_json }}
# Number of workers per Logstash host.
#worker: 1
# Optional load balance the events between the Logstash hosts
#loadbalance: true
# Optional index name. The default index name depends on the each beat.
# For Packetbeat, the default is set to packetbeat, for Topbeat
# top topbeat and for Filebeat to filebeat.
#index: filebeat
{% if filebeat_ssl_certificate_file and filebeat_ssl_key_file %}
# Optional TLS. By default is off.
tls:
# List of root certificates for HTTPS server verifications
#certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for TLS client authentication
certificate: "{{ filebeat_ssl_dir }}/{{ filebeat_ssl_certificate_file | basename }}"
# Client Certificate Key
certificate_key: "{{ filebeat_ssl_dir }}/{{ filebeat_ssl_key_file | basename}}"
# Controls whether the client verifies server certificates and host name.
# If insecure is set to true, all server host names and certificates will be
# accepted. In this mode TLS based connections are susceptible to
# man-in-the-middle attacks. Use only for testing.
#insecure: true
insecure: {{ filebeat_ssl_insecure }}
# Configure cipher suites to be used for TLS connections
#cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#curve_types: []
{% endif %}
{% if filebeat_enable_logging %}
logging:
### Filebeat log
level: {{ filebeat_log_level }}
# Enable file rotation with default configuration
to_files: true
# Do not log to syslog
to_syslog: false
files:
path: {{ filebeat_log_dir }}
name: {{ filebeat_log_filename }}
keepfiles: 7
{% endif %}
{% endif %}
# Optional. Send events to Logstash instead of Elasticsearch
#output.logstash.hosts: ["YOUR_LOGSTASH_SERVER_IP:5000"]

View File

@ -1,4 +1,3 @@
---
- src: geerlingguy.java
- src: geerlingguy.elasticsearch
- src: geerlingguy.logstash

View File

@ -17,5 +17,4 @@
roles:
- geerlingguy.java
- geerlingguy.elasticsearch
- geerlingguy.logstash
- role_under_test

View File

@ -1,6 +1,7 @@
<!-- Local rules -->
<!-- Modify it at your will. -->
<!-- Copyright (C) 2015-2019, Wazuh Inc. -->
<!-- Example -->
<group name="local,syslog,sshd,">
@ -15,21 +16,4 @@
<group>authentication_failed,pci_dss_10.2.4,pci_dss_10.2.5,</group>
</rule>
</group>
<!--
Used with active-response to restart an agent when agent.conf file
is successfully retrieved.
-->
<group name="local,ossec,">
<rule id="100002" level="1">
<if_group>syscheck</if_group>
<match>/var/ossec/etc/shared/agent.conf</match>
<description>Linux | agent.conf was modified</description>
</rule>
<rule id="100003" level="1">
<if_group>syscheck</if_group>
<match>C:\wazuh-agent/shared/agent.conf</match>
<description>Windows | agent.conf was modified</description>
</rule>
</group>
</group>