ELK roles and playbooks deleted

This commit is contained in:
Gonzalo Acuña 2022-03-16 09:51:30 -03:00
parent 403b1b6468
commit a7ce3cdaa9
No known key found for this signature in database
GPG Key ID: 646BA79A313A2270
44 changed files with 0 additions and 2130 deletions

View File

@ -1,5 +0,0 @@
---
- hosts: <YOUR_ELASTICSEARCH_IP>
roles:
- role: ../roles/elastic-stack/ansible-elasticsearch
elasticsearch_network_host: '<YOUR_ELASTICSEARCH_IP>'

View File

@ -1,91 +0,0 @@
---
- hosts: <node-1 IP>
roles:
- role: ../roles/elastic-stack/ansible-elasticsearch
elasticsearch_network_host: <node-1 IP>
elasticsearch_node_name: node-1
elasticsearch_bootstrap_node: true
elasticsearch_cluster_nodes:
- <node-1 IP>
- <node-2 IP>
- <node-3 IP>
elasticsearch_discovery_nodes:
- <node-1 IP>
- <node-2 IP>
- <node-3 IP>
elasticsearch_xpack_security: true
node_certs_generator: true
elasticsearch_xpack_security_password: elastic_pass
single_node: false
vars:
instances:
node1:
name: node-1 # Important: must be equal to elasticsearch_node_name.
ip: <node-1 IP> # When unzipping, the node will search for its node name folder to get the cert.
node2:
name: node-2
ip: <node-2 IP>
node3:
name: node-3
ip: <node-3 IP>
- hosts: <node-2 IP>
roles:
- role: ../roles/elastic-stack/ansible-elasticsearch
elasticsearch_network_host: <node-2 IP>
elasticsearch_node_name: node-2
single_node: false
elasticsearch_xpack_security: true
elasticsearch_master_candidate: true
elasticsearch_discovery_nodes:
- <node-1 IP>
- <node-2 IP>
- <node-3 IP>
- hosts: <node-3 IP>
roles:
- role: ../roles/elastic-stack/ansible-elasticsearch
elasticsearch_network_host: <node-3 IP>
elasticsearch_node_name: node-3
single_node: false
elasticsearch_xpack_security: true
elasticsearch_master_candidate: true
elasticsearch_discovery_nodes:
- <node-1 IP>
- <node-2 IP>
- <node-3 IP>
# - hosts: 172.16.0.162
# roles:
# - role: ../roles/wazuh/ansible-wazuh-manager
# - role: ../roles/wazuh/ansible-filebeat
# filebeat_output_elasticsearch_hosts: 172.16.0.161:9200
# filebeat_xpack_security: true
# filebeat_node_name: node-2
# node_certs_generator: false
# elasticsearch_xpack_security_password: elastic_pass
# - role: ../roles/elastic-stack/ansible-elasticsearch
# elasticsearch_network_host: 172.16.0.162
# node_name: node-2
# elasticsearch_bootstrap_node: false
# elasticsearch_master_candidate: true
# elasticsearch_discovery_nodes:
# - 172.16.0.161
# - 172.16.0.162
# elasticsearch_xpack_security: true
# node_certs_generator: false
# - hosts: 172.16.0.163
# roles:
# - role: ../roles/elastic-stack/ansible-kibana
# kibana_xpack_security: true
# kibana_node_name: node-3
# elasticsearch_network_host: 172.16.0.161
# node_certs_generator: false
# elasticsearch_xpack_security_password: elastic_pass

View File

@ -1,8 +0,0 @@
---
- hosts: <your server host>
roles:
- {role: ../roles/wazuh/ansible-wazuh-manager}
- role: ../roles/wazuh/ansible-filebeat
filebeat_output_elasticsearch_hosts: localhost:9200
- {role: ../roles/elastic-stack/ansible-elasticsearch, elasticsearch_network_host: '0.0.0.0', single_node: true}
- { role: ../roles/elastic-stack/ansible-kibana, elasticsearch_network_host: '0.0.0.0', elasticsearch_reachable_host: 'localhost' }

View File

@ -1,7 +0,0 @@
---
- hosts: <KIBANA_HOST>
roles:
- role: ../roles/elastic-stack/ansible-kibana
elasticsearch_network_host: <YOUR_ELASTICSEARCH_IP>
vars:
ansible_shell_allow_world_readable_temp: true

View File

@ -1,8 +0,0 @@
---
- hosts: <WAZUH_MANAGER_HOST>
roles:
- role: ../roles/wazuh/ansible-wazuh-manager
- role: ../roles/wazuh/ansible-filebeat
filebeat_output_elasticsearch_hosts: <YOUR_ELASTICSEARCH_IP>:9200

View File

@ -1,145 +0,0 @@
Ansible Role: Elasticsearch
===========================
An Ansible Role that installs [Elasticsearch](https://www.elastic.co/products/elasticsearch).
Requirements
------------
This role will work on:
* Red Hat
* CentOS
* Fedora
* Debian
* Ubuntu
For the Elasticsearch role with X-Pack security, the `unzip` command must be available on the Ansible master.
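A minimal sketch of how this requirement could be met from the control node itself; this play is illustrative only and is not part of the role:
```
---
- hosts: localhost
  connection: local
  become: true
  tasks:
    - name: Ensure unzip is present on the Ansible master # needed to unpack certs.zip fetched to the control node
      package:
        name: unzip
        state: present
```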
Role Variables
--------------
Default variables are listed below, along with their values (see `defaults/main.yml`):
```
elasticsearch_cluster_name: wazuh
elasticsearch_node_name: node-1
elasticsearch_http_port: 9200
elasticsearch_network_host: 127.0.0.1
elasticsearch_jvm_xms: 1024
elastic_stack_version: 7.10.2
```
Example Playbook
----------------
- Single-node
```
- hosts: elasticsearch
roles:
- { role: ansible-role-elasticsearch, elasticsearch_network_host: '192.168.33.182', single_node: true }
```
- Three-node Elasticsearch cluster
```
---
- hosts: 172.16.0.161
roles:
- {role: ../roles/elastic-stack/ansible-elasticsearch, elasticsearch_network_host: '172.16.0.161', elasticsearch_bootstrap_node: true, elasticsearch_cluster_nodes: ['172.16.0.162','172.16.0.163','172.16.0.161']}
- hosts: 172.16.0.162
roles:
- {role: ../roles/elastic-stack/ansible-elasticsearch, elasticsearch_network_host: '172.16.0.162', elasticsearch_node_master: true, elasticsearch_cluster_nodes: ['172.16.0.162','172.16.0.163','172.16.0.161']}
- hosts: 172.16.0.163
roles:
- {role: ../roles/elastic-stack/ansible-elasticsearch, elasticsearch_network_host: '172.16.0.163', elasticsearch_node_master: true, elasticsearch_cluster_nodes: ['172.16.0.162','172.16.0.163','172.16.0.161']}
```
- Three-node Elasticsearch cluster with X-Pack security
```
---
- hosts: elastic-1
roles:
- role: ../roles/elastic-stack/ansible-elasticsearch
elasticsearch_network_host: 172.16.0.111
elasticsearch_node_name: node-1
single_node: false
elasticsearch_node_master: true
elasticsearch_bootstrap_node: true
elasticsearch_cluster_nodes:
- 172.16.0.111
- 172.16.0.112
- 172.16.0.113
elasticsearch_discovery_nodes:
- 172.16.0.111
- 172.16.0.112
- 172.16.0.113
elasticsearch_xpack_security: true
node_certs_generator: true
node_certs_generator_ip: 172.16.0.111
vars:
instances:
node-1:
name: node-1
ip: 172.16.0.111
node-2:
name: node-2
ip: 172.16.0.112
node-3:
name: node-3
ip: 172.16.0.113
- hosts: elastic-2
roles:
- role: ../roles/elastic-stack/ansible-elasticsearch
elasticsearch_network_host: 172.16.0.112
elasticsearch_node_name: node-2
single_node: false
elasticsearch_xpack_security: true
elasticsearch_node_master: true
node_certs_generator_ip: 172.16.0.111
elasticsearch_discovery_nodes:
- 172.16.0.111
- 172.16.0.112
- 172.16.0.113
- hosts: elastic-3
roles:
- role: ../roles/elastic-stack/ansible-elasticsearch
elasticsearch_network_host: 172.16.0.113
elasticsearch_node_name: node-3
single_node: false
elasticsearch_xpack_security: true
elasticsearch_node_master: true
node_certs_generator_ip: 172.16.0.111
elasticsearch_discovery_nodes:
- 172.16.0.111
- 172.16.0.112
- 172.16.0.113
vars:
elasticsearch_xpack_users:
anne:
password: 'PasswordHere'
roles: '["kibana_user", "monitoring_user"]'
jack:
password: 'PasswordHere'
roles: '["superuser"]'
```
It is possible to define users directly in the playbook; they must be declared in the `elasticsearch_xpack_users` variable on the last node of the cluster, as in the example above.
License and copyright
---------------------
WAZUH Copyright (C) 2021 Wazuh Inc. (License GPLv3)
### Based on previous work from geerlingguy
- https://github.com/geerlingguy/ansible-role-elasticsearch
### Modified by Wazuh
The playbooks have been modified by Wazuh, including some specific requirements, templates and configuration to improve integration with the Wazuh ecosystem.

View File

@ -1,44 +0,0 @@
---
elasticsearch_http_port: 9200
elasticsearch_network_host: 127.0.0.1
elasticsearch_reachable_host: 127.0.0.1
elasticsearch_jvm_xms: null
elastic_stack_version: 7.10.2
elasticsearch_lower_disk_requirements: false
elasticsearch_path_repo: []
elasticsearch_start_timeout: 90
elasticrepo:
apt: 'https://artifacts.elastic.co/packages/7.x/apt'
yum: 'https://artifacts.elastic.co/packages/7.x/yum'
gpg: 'https://artifacts.elastic.co/GPG-KEY-elasticsearch'
key_id: '46095ACC8548582C1A2699A9D27D666CD88E42B4'
# Cluster Settings
single_node: true
elasticsearch_cluster_name: wazuh
elasticsearch_node_name: node-1
elasticsearch_bootstrap_node: false
elasticsearch_node_master: false
elasticsearch_cluster_nodes:
- 127.0.0.1
elasticsearch_discovery_nodes:
- 127.0.0.1
elasticsearch_node_data: true
elasticsearch_node_ingest: true
# X-Pack Security
elasticsearch_xpack_security: false
elasticsearch_xpack_security_password: elastic_pass
node_certs_generator: false
node_certs_source: /usr/share/elasticsearch
node_certs_destination: /etc/elasticsearch/certs
# CA generation
master_certs_path: "{{ playbook_dir }}/es_certs"
generate_CA: true
ca_key_name: ""
ca_cert_name: ""
ca_password: ""

View File

@ -1,3 +0,0 @@
---
- name: restart elasticsearch
service: name=elasticsearch state=restarted

View File

@ -1,24 +0,0 @@
---
galaxy_info:
author: Wazuh
description: Installing and maintaining Elasticsearch server.
company: wazuh.com
license: license (GPLv3)
min_ansible_version: 2.0
platforms:
- name: EL
versions:
- all
- name: Ubuntu
versions:
- all
- name: Debian
versions:
- all
- name: Fedora
versions:
- all
galaxy_tags:
- web
- system
- monitoring

View File

@ -1,42 +0,0 @@
---
- name: Debian/Ubuntu | Install apt-transport-https and ca-certificates
apt:
name:
- apt-transport-https
- ca-certificates
state: present
register: elasticsearch_ca_packages_installed
until: elasticsearch_ca_packages_installed is succeeded
- name: Update and upgrade apt packages
become: true
apt:
upgrade: yes
update_cache: yes
cache_valid_time: 86400 # One day
when:
- ansible_distribution == "Ubuntu"
- ansible_distribution_major_version | int == 14
- name: Debian/Ubuntu | Add Elasticsearch GPG key.
apt_key:
url: "{{ elasticrepo.gpg }}"
id: "{{ elasticrepo.key_id }}"
state: present
- name: Debian/Ubuntu | Install Elastic repo
apt_repository:
repo: "deb {{ elasticrepo.apt }} stable main"
state: present
filename: 'elastic_repo_7'
update_cache: true
changed_when: false
- name: Debian/Ubuntu | Install Elasticsearch
apt:
name: "elasticsearch={{ elastic_stack_version }}"
state: present
cache_valid_time: 3600
register: elasticsearch_main_packages_installed
until: elasticsearch_main_packages_installed is succeeded
tags: install

View File

@ -1,6 +0,0 @@
---
- name: Debian/Ubuntu | Removing Elasticsearch repository
apt_repository:
repo: "deb {{ elasticrepo.apt }} stable main"
state: absent
changed_when: false

View File

@ -1,6 +0,0 @@
---
- name: RedHat/CentOS/Fedora | Remove Elasticsearch repository (and clean up left-over metadata)
yum_repository:
name: elastic_repo_7
state: absent
changed_when: false

View File

@ -1,14 +0,0 @@
---
- name: RedHat/CentOS/Fedora | Install Elastic repo
yum_repository:
name: elastic_repo_7
description: Elastic repository for 7.x packages
baseurl: "{{ elasticrepo.yum }}"
gpgkey: "{{ elasticrepo.gpg }}"
gpgcheck: true
changed_when: false
- name: RedHat/CentOS/Fedora | Install Elasticsearch
package: name=elasticsearch-{{ elastic_stack_version }} state=present
tags: install

View File

@ -1,187 +0,0 @@
---
- import_tasks: RedHat.yml
when: ansible_os_family == 'RedHat'
- import_tasks: Debian.yml
when: ansible_os_family == "Debian"
- name: Create elasticsearch.service.d folder.
file:
path: /etc/systemd/system/elasticsearch.service.d
state: directory
owner: root
group: root
mode: 0755
when:
- ansible_service_mgr == "systemd"
- name: Configure Elasticsearch System Resources.
template:
src: elasticsearch_systemd.conf.j2
dest: /etc/systemd/system/elasticsearch.service.d/elasticsearch.conf
owner: root
group: elasticsearch
mode: 0660
notify: restart elasticsearch
tags: configure
when:
- ansible_service_mgr == "systemd"
- name: Debian/Ubuntu | Configure Elasticsearch System Resources.
template:
src: elasticsearch_nonsystemd.j2
dest: /etc/default/elasticsearch
owner: root
group: elasticsearch
mode: 0660
notify: restart elasticsearch
tags: configure
when:
- ansible_service_mgr != "systemd"
- ansible_os_family == "Debian"
- name: RedHat/CentOS/Fedora | Configure Elasticsearch System Resources.
template:
src: elasticsearch_nonsystemd.j2
dest: /etc/sysconfig/elasticsearch
owner: root
group: elasticsearch
mode: 0660
notify: restart elasticsearch
tags: configure
when:
- ansible_service_mgr != "systemd"
- ansible_os_family == "RedHat"
- name: Configure Elasticsearch JVM memory.
template:
src: jvm.options.j2
dest: /etc/elasticsearch/jvm.options
owner: root
group: elasticsearch
mode: 0660
notify: restart elasticsearch
tags: configure
- name: Configure disabled log4j.
template:
src: "templates/disabledlog4j.options.j2"
dest: /etc/elasticsearch/jvm.options.d/disabledlog4j.options
owner: root
group: elasticsearch
mode: 2750
force: yes
notify: restart elasticsearch
tags: install
# fix in new PR (ignore_errors)
- import_tasks: "RMRedHat.yml"
when: ansible_os_family == "RedHat"
- import_tasks: "xpack_security.yml"
when:
- elasticsearch_xpack_security
- name: Configure Elasticsearch.
template:
src: elasticsearch.yml.j2
dest: /etc/elasticsearch/elasticsearch.yml
owner: root
group: elasticsearch
mode: 0660
notify: restart elasticsearch
tags: configure
- name: Trusty | set MAX_LOCKED_MEMORY=unlimited in Elasticsearch in /etc/security/limits.conf
lineinfile: # noqa 208
path: /etc/security/limits.conf
line: elasticsearch - memlock unlimited
create: yes
become: true
when:
- ansible_distribution == "Ubuntu"
- ansible_distribution_major_version | int == 14
changed_when: false
- name: Trusty | set MAX_LOCKED_MEMORY=unlimited in Elasticsearch in /etc/security/limits.d/elasticsearch.conf
lineinfile:
path: /etc/security/limits.d/elasticsearch.conf
line: elasticsearch - memlock unlimited
owner: root
group: root
mode: 0644
create: yes
become: true
changed_when: false
when:
- ansible_distribution == "Ubuntu"
- ansible_distribution_major_version | int == 14
- name: Ensure extra time for Elasticsearch to start on reboots
lineinfile:
path: /usr/lib/systemd/system/elasticsearch.service
regexp: '^TimeoutStartSec='
line: "TimeoutStartSec={{ elasticsearch_start_timeout }}"
become: yes
tags: configure
- name: Ensure Elasticsearch started and enabled
service:
name: elasticsearch
enabled: true
state: started
tags:
- configure
- init
- name: Make sure Elasticsearch is running before proceeding
wait_for: host={{ elasticsearch_reachable_host }} port={{ elasticsearch_http_port }} delay=3 timeout=400
tags:
- configure
- init
- import_tasks: "RMRedHat.yml"
when: ansible_os_family == "RedHat"
- import_tasks: "RMDebian.yml"
when: ansible_os_family == "Debian"
- name: Wait for Elasticsearch API
uri:
url: "https://{{ node_certs_generator_ip }}:{{ elasticsearch_http_port }}/_cluster/health/"
user: "elastic" # Default Elasticsearch user is always "elastic"
password: "{{ elasticsearch_xpack_security_password }}"
validate_certs: no
status_code: 200,401
return_content: yes
force_basic_auth: yes
timeout: 4
register: _result
until: ( _result.json is defined) and (_result.json.status == "green")
retries: 24
delay: 5
when:
- elasticsearch_xpack_users is defined
- name: Create elasticsearch users
uri:
url: "https://{{ node_certs_generator_ip }}:{{ elasticsearch_http_port }}/_security/user/{{ item.key }}"
method: POST
body_format: json
user: "elastic"
password: "{{ elasticsearch_xpack_security_password }}"
body: '{ "password" : "{{ item.value["password"] }}", "roles" : {{ item.value["roles"] }} }'
validate_certs: no
force_basic_auth: yes
loop: "{{ elasticsearch_xpack_users|default({})|dict2items }}"
register: http_response
failed_when: http_response.status != 200
when:
- elasticsearch_xpack_users is defined
- name: Reload systemd configuration
systemd:
daemon_reload: true
become: yes
notify: restart elasticsearch

View File

@ -1,209 +0,0 @@
- name: Check if certificate exists locally
stat:
path: "{{ node_certs_destination }}/{{ elasticsearch_node_name }}.crt"
register: certificate_file_exists
- name: Write the instances.yml file in the selected node (force = no)
template:
src: instances.yml.j2
dest: "{{ node_certs_source }}/instances.yml"
owner: root
group: root
mode: 0644
force: no
register: instances_file_exists
tags:
- config
- xpack-security
when:
- node_certs_generator
- not certificate_file_exists.stat.exists
- name: Update instances.yml status after generation
stat:
path: "{{ node_certs_source }}/instances.yml"
register: instances_file_exists
when:
- node_certs_generator
- name: Check if the certificates ZIP file exists
stat:
path: "{{ node_certs_source }}/certs.zip"
register: xpack_certs_zip
when:
- node_certs_generator
- name: Importing custom CA key
copy:
src: "{{ master_certs_path }}/ca/{{ ca_key_name }}"
dest: "{{ node_certs_source }}/{{ ca_key_name }}"
mode: 0440
when:
- not generate_CA
- node_certs_generator
tags: xpack-security
- name: Importing custom CA cert
copy:
src: "{{ master_certs_path }}/ca/{{ ca_cert_name }}"
dest: "{{ node_certs_source }}/{{ ca_cert_name }}"
mode: 0440
when:
- not generate_CA
- node_certs_generator
tags: xpack-security
- name: Generating certificates for Elasticsearch security (generating CA)
command: >-
/usr/share/elasticsearch/bin/elasticsearch-certutil cert ca --pem
--in {{ node_certs_source }}/instances.yml
--out {{ node_certs_source }}/certs.zip
when:
- node_certs_generator
- not xpack_certs_zip.stat.exists
- generate_CA
tags:
- xpack-security
- molecule-idempotence-notest
- name: Generating certificates for Elasticsearch security (using provided CA | Without CA Password)
command: >-
/usr/share/elasticsearch/bin/elasticsearch-certutil cert
--ca-key {{ node_certs_source }}/{{ ca_key_name }}
--ca-cert {{ node_certs_source }}/{{ ca_cert_name }}
--pem --in {{ node_certs_source }}/instances.yml
--out {{ node_certs_source }}/certs.zip
when:
- node_certs_generator
- not xpack_certs_zip.stat.exists
- not generate_CA
- ca_password | length == 0
tags:
- xpack-security
- molecule-idempotence-notest
- name: Generating certificates for Elasticsearch security (using provided CA | Using CA Password)
command: >-
/usr/share/elasticsearch/bin/elasticsearch-certutil cert
--ca-key {{ node_certs_source }}/{{ ca_key_name }}
--ca-cert {{ node_certs_source }}/{{ ca_cert_name }}
--pem --in {{ node_certs_source }}/instances.yml --out {{ node_certs_source }}/certs.zip
--ca-pass {{ ca_password }}
when:
- node_certs_generator
- not xpack_certs_zip.stat.exists
- not generate_CA
- ca_password | length > 0
tags:
- xpack-security
- molecule-idempotence-notest
- name: Verify the Elastic certificates directory
file:
path: "{{ master_certs_path }}"
state: directory
mode: 0700
delegate_to: "127.0.0.1"
become: no
when:
- node_certs_generator
- name: Verify the Certificates Authority directory
file:
path: "{{ master_certs_path }}/ca/"
state: directory
mode: 0700
delegate_to: "127.0.0.1"
become: no
when:
- node_certs_generator
- name: Copying certificates to Ansible master
fetch:
src: "{{ node_certs_source }}/certs.zip"
dest: "{{ master_certs_path }}/"
flat: yes
mode: 0700
when:
- node_certs_generator
tags:
- xpack-security
- molecule-idempotence-notest
- name: Delete certs.zip in Generator node
file:
state: absent
path: "{{ node_certs_source }}/certs.zip"
when:
- node_certs_generator
tags: molecule-idempotence-notest
- name: Unzip generated certs.zip
unarchive:
src: "{{ master_certs_path }}/certs.zip"
dest: "{{ master_certs_path }}/"
delegate_to: "127.0.0.1"
become: no
when:
- node_certs_generator
tags:
- xpack-security
- molecule-idempotence-notest
- name: Copying node's certificate from master
copy:
src: "{{ item }}"
dest: "{{ node_certs_destination }}/"
owner: root
group: elasticsearch
mode: 0440
with_items:
- "{{ master_certs_path }}/{{ elasticsearch_node_name }}/{{ elasticsearch_node_name }}.key"
- "{{ master_certs_path }}/{{ elasticsearch_node_name }}/{{ elasticsearch_node_name }}.crt"
- "{{ master_certs_path }}/ca/ca.crt"
when:
- generate_CA
tags:
- xpack-security
- molecule-idempotence-notest
- name: Copying node's certificate from master (Custom CA)
copy:
src: "{{ item }}"
dest: "{{ node_certs_destination }}/"
owner: root
group: elasticsearch
mode: 0440
with_items:
- "{{ master_certs_path }}/{{ elasticsearch_node_name }}/{{ elasticsearch_node_name }}.key"
- "{{ master_certs_path }}/{{ elasticsearch_node_name }}/{{ elasticsearch_node_name }}.crt"
- "{{ master_certs_path }}/ca/{{ ca_cert_name }}"
when:
- not generate_CA
tags:
- xpack-security
- molecule-idempotence-notest
- name: Ensuring folder permissions
file:
path: "{{ node_certs_destination }}/"
owner: root
group: elasticsearch
mode: 0770
state: directory
recurse: no
when:
- elasticsearch_xpack_security
- generate_CA
tags: xpack-security
- name: Set elasticsearch bootstrap password
shell: |
set -o pipefail
echo {{ elasticsearch_xpack_security_password }} | {{ node_certs_source }}/bin/elasticsearch-keystore add -xf bootstrap.password
args:
executable: /bin/bash
when:
- node_certs_generator
tags: molecule-idempotence-notest

View File

@ -1,4 +0,0 @@
## JVM configuration
## Disable log4j
-Dlog4j2.formatMsgNoLookups=true

View File

@ -1,70 +0,0 @@
# {{ ansible_managed }}
cluster.name: {{ elasticsearch_cluster_name }}
node.name: {{ elasticsearch_node_name }}
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
bootstrap.memory_lock: true
network.host: {{ elasticsearch_network_host }}
{% if elasticsearch_path_repo | length>0 %}
path.repo:
{% for item in elasticsearch_path_repo %}
- {{ item }}
{% endfor %}
{% endif %}
{% if single_node %}
discovery.type: single-node
{% elif elasticsearch_bootstrap_node %}
node.master: true
cluster.initial_master_nodes:
{% for item in elasticsearch_cluster_nodes %}
- {{ item }}
{% endfor %}
discovery.seed_hosts:
{% for item in elasticsearch_discovery_nodes %}
- {{ item }}
{% endfor %}
{% else %}
node.master: {{ elasticsearch_node_master|lower }}
{% if elasticsearch_node_data|lower == 'false' %}
node.data: false
{% endif %}
{% if elasticsearch_node_ingest|lower == 'false' %}
node.ingest: false
{% endif %}
discovery.seed_hosts:
{% for item in elasticsearch_discovery_nodes %}
- {{ item }}
{% endfor %}
{% endif %}
{% if elasticsearch_lower_disk_requirements %}
cluster.routing.allocation.disk.threshold_enabled: true
cluster.routing.allocation.disk.watermark.flood_stage: 200mb
cluster.routing.allocation.disk.watermark.low: 500mb
cluster.routing.allocation.disk.watermark.high: 300mb
{% endif %}
{% if elasticsearch_xpack_security %}
# XPACK Security
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.key: {{node_certs_destination}}/{{ elasticsearch_node_name }}.key
xpack.security.transport.ssl.certificate: {{node_certs_destination}}/{{ elasticsearch_node_name }}.crt
{% if generate_CA == true %}
xpack.security.transport.ssl.certificate_authorities: [ "{{ node_certs_destination }}/ca.crt" ]
{% elif generate_CA == false %}
xpack.security.transport.ssl.certificate_authorities: [ "{{ node_certs_destination }}/{{ca_cert_name}}" ]
{% endif %}
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.verification_mode: certificate
xpack.security.http.ssl.key: {{node_certs_destination}}/{{ elasticsearch_node_name }}.key
xpack.security.http.ssl.certificate: {{node_certs_destination}}/{{ elasticsearch_node_name }}.crt
{% if generate_CA == true %}
xpack.security.http.ssl.certificate_authorities: [ "{{ node_certs_destination }}/ca.crt" ]
{% elif generate_CA == false %}
xpack.security.http.ssl.certificate_authorities: [ "{{ node_certs_destination }}/{{ca_cert_name}}" ]
{% endif %}
{% endif %}

View File

@ -1,52 +0,0 @@
# {{ ansible_managed }}
################################
# Elasticsearch
################################
# Elasticsearch home directory
#ES_HOME=/usr/share/elasticsearch
# Elasticsearch Java path
#JAVA_HOME=
# Elasticsearch configuration directory
ES_PATH_CONF=/etc/elasticsearch
# Elasticsearch PID directory
#PID_DIR=/var/run/elasticsearch
# Additional Java OPTS
#ES_JAVA_OPTS=
# Configure restart on package upgrade (true, every other setting will lead to not restarting)
#RESTART_ON_UPGRADE=true
################################
# Elasticsearch service
################################
# SysV init.d
#
# The number of seconds to wait before checking if Elasticsearch started successfully as a daemon process
ES_STARTUP_SLEEP_TIME=5
################################
# System properties
################################
# Specifies the maximum file descriptor number that can be opened by this process
# When using Systemd, this setting is ignored and the LimitNOFILE defined in
# /usr/lib/systemd/system/elasticsearch.service takes precedence
#MAX_OPEN_FILES=65536
# The maximum number of bytes of memory that may be locked into RAM
# Set to "unlimited" if you use the 'bootstrap.memory_lock: true' option
# in elasticsearch.yml.
# When using systemd, LimitMEMLOCK must be set in a unit file such as
# /etc/systemd/system/elasticsearch.service.d/override.conf.
MAX_LOCKED_MEMORY=unlimited
# Maximum number of VMA (Virtual Memory Areas) a process can own
# When using Systemd, this setting is ignored and the 'vm.max_map_count'
# property is set at boot time in /usr/lib/sysctl.d/elasticsearch.conf
#MAX_MAP_COUNT=262144

View File

@ -1,3 +0,0 @@
# {{ ansible_managed }}
[Service]
LimitMEMLOCK=infinity

View File

@ -1,17 +0,0 @@
# {{ ansible_managed }}
# TO-DO
{% if node_certs_generator %}
instances:
{% for (key,value) in instances.items() %}
- name: "{{ value.name }}"
{% if value.ip is defined and value.ip | length > 0 %}
ip:
- "{{ value.ip }}"
{% elif value.dns is defined and value.dns | length > 0 %}
dns:
- "{{ value.dns }}"
{% endif %}
{% endfor %}
{% endif %}
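For context, rendering this template on the certificates generator node with the three-node `instances` dictionary from the role README would produce an `instances.yml` roughly like the sketch below (the IPs are the README placeholders and the indentation is indicative only); the file is then passed to `elasticsearch-certutil` by the `xpack_security.yml` tasks:
```
instances:
  - name: "node-1"
    ip:
      - "172.16.0.111"
  - name: "node-2"
    ip:
      - "172.16.0.112"
  - name: "node-3"
    ip:
      - "172.16.0.113"
```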

View File

@ -1,140 +0,0 @@
#jinja2: trim_blocks:False
# {{ ansible_managed }}
## JVM configuration
################################################################
## IMPORTANT: JVM heap size
################################################################
##
## You should always set the min and max JVM heap
## size to the same value. For example, to set
## the heap to 4 GB, set:
##
## -Xms4g
## -Xmx4g
##
## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
## for more information
##
################################################################
# Xms represents the initial size of total heap space
# Xmx represents the maximum size of total heap space
{% if elasticsearch_jvm_xms is not none %}
{% if elasticsearch_jvm_xms < 32000 %}
-Xms{{ elasticsearch_jvm_xms }}m
-Xmx{{ elasticsearch_jvm_xms }}m
{% else %}
-Xms32000m
-Xmx32000m
{% endif %}
{% else %}
-Xms{% if ansible_memtotal_mb < 64000 %}{{ ((ansible_memtotal_mb|int)/2)|int }}m{% else %}32000m{% endif %}
-Xmx{% if ansible_memtotal_mb < 64000 %}{{ ((ansible_memtotal_mb|int)/2)|int }}m{% else %}32000m{% endif %}
{% endif %}
################################################################
## Expert settings
################################################################
##
## All settings below this section are considered
## expert settings. Don't tamper with them unless
## you understand what you are doing
##
################################################################
## GC configuration
8-13:-XX:+UseConcMarkSweepGC
8-13:-XX:CMSInitiatingOccupancyFraction=75
8-13:-XX:+UseCMSInitiatingOccupancyOnly
## G1GC Configuration
# NOTE: G1 GC is only supported on JDK version 10 or later
# to use G1GC, uncomment the next two lines and update the version on the
# following three lines to your version of the JDK
# 10-13:-XX:-UseConcMarkSweepGC
# 10-13:-XX:-UseCMSInitiatingOccupancyOnly
14-:-XX:+UseG1GC
14-:-XX:G1ReservePercent=25
14-:-XX:InitiatingHeapOccupancyPercent=30
## JVM temporary directory
-Djava.io.tmpdir=${ES_TMPDIR}
## optimizations
# pre-touch memory pages used by the JVM during initialization
-XX:+AlwaysPreTouch
## basic
# force the server VM
-server
# explicitly set the stack size
-Xss1m
# set to headless, just in case
-Djava.awt.headless=true
# ensure UTF-8 encoding by default (e.g. filenames)
-Dfile.encoding=UTF-8
# use our provided JNA always versus the system one
-Djna.nosys=true
# turn off a JDK optimization that throws away stack traces for common
# exceptions because stack traces are important for debugging
-XX:-OmitStackTraceInFastThrow
# flags to configure Netty
-Dio.netty.noUnsafe=true
-Dio.netty.noKeySetOptimization=true
-Dio.netty.recycler.maxCapacityPerThread=0
# log4j 2
-Dlog4j.shutdownHookEnabled=false
-Dlog4j2.disable.jmx=true
## heap dumps
# generate a heap dump when an allocation from the Java heap fails
# heap dumps are created in the working directory of the JVM
-XX:+HeapDumpOnOutOfMemoryError
# specify an alternative path for heap dumps
# ensure the directory exists and has sufficient space
-XX:HeapDumpPath=/var/lib/elasticsearch
# specify an alternative path for JVM fatal error logs
-XX:ErrorFile=/var/log/elasticsearch/hs_err_pid%p.log
## GC logging
## JDK 8 GC logging
# 8:-XX:+PrintGCDetails
# 8:-XX:+PrintGCDateStamps
# 8:-XX:+PrintTenuringDistribution
# 8:-XX:+PrintGCApplicationStoppedTime
# 8:-Xloggc:/var/log/elasticsearch/gc.log
# 8:-XX:+UseGCLogFileRotation
# 8:-XX:NumberOfGCLogFiles=32
# 8:-XX:GCLogFileSize=64m
# JDK 9+ GC logging
# 9-:-Xlog:gc*,gc+age=trace,safepoint:file=/var/log/elasticsearch/gc.log:utctime,pid,tags:filecount=32,filesize=64m
# log GC status to a file with time stamps
# ensure the directory exists
#-Xloggc:${loggc}
# By default, the GC log file will not rotate.
# By uncommenting the lines below, the GC log file
# will be rotated every 128MB at most 32 times.
#-XX:+UseGCLogFileRotation
#-XX:NumberOfGCLogFiles=32
#-XX:GCLogFileSize=128M

View File

@ -1,48 +0,0 @@
Ansible Role: Kibana for Elastic Stack
------------------------------------
An Ansible Role that installs [Kibana](https://www.elastic.co/products/kibana) and [Wazuh APP](https://github.com/wazuh/wazuh-kibana-app).
Requirements
------------
This role will work on:
* Red Hat
* CentOS
* Fedora
* Debian
* Ubuntu
Role Variables
--------------
```
---
elasticsearch_http_port: "9200"
elasticsearch_network_host: "127.0.0.1"
kibana_server_host: "0.0.0.0"
kibana_server_port: "5601"
elastic_stack_version: 7.10.2
```
Example Playbook
----------------
```
- hosts: kibana
roles:
- { role: ansible-role-kibana, elasticsearch_network_host: '192.168.33.182' }
```
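A sketch of an X-Pack-enabled variant, assuming the node certificates for `kibana_node_name` were already generated by the Elasticsearch role and using placeholder values:
```
---
- hosts: kibana
  roles:
    - role: ../roles/elastic-stack/ansible-kibana
      elasticsearch_network_host: <YOUR_ELASTICSEARCH_IP>
      kibana_xpack_security: true
      kibana_node_name: node-3
      elasticsearch_xpack_security_password: elastic_pass
```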
License and copyright
---------------------
WAZUH Copyright (C) 2021 Wazuh Inc. (License GPLv3)
### Based on previous work from geerlingguy
- https://github.com/geerlingguy/ansible-role-elasticsearch
### Modified by Wazuh
The playbooks have been modified by Wazuh, including some specific requirements, templates and configuration to improve integration with the Wazuh ecosystem.

View File

@ -1,53 +0,0 @@
---
kibana_node_name: node-1
elasticsearch_http_port: "9200"
elasticsearch_network_host: "127.0.0.1"
kibana_server_host: "0.0.0.0"
kibana_server_port: "5601"
kibana_conf_path: /etc/kibana
elastic_stack_version: 7.10.2
wazuh_version: 4.3.0
wazuh_app_url: https://packages.wazuh.com/4.x/ui/kibana/wazuh_kibana
elasticrepo:
apt: 'https://artifacts.elastic.co/packages/7.x/apt'
yum: 'https://artifacts.elastic.co/packages/7.x/yum'
gpg: 'https://artifacts.elastic.co/GPG-KEY-elasticsearch'
key_id: '46095ACC8548582C1A2699A9D27D666CD88E42B4'
# API credentials
wazuh_api_credentials:
- id: "default"
url: "https://localhost"
port: 55000
username: "wazuh"
password: "wazuh"
# Xpack Security
kibana_xpack_security: false
kibana_ssl_verification_mode: "full"
elasticsearch_xpack_security_user: elastic
elasticsearch_xpack_security_password: elastic_pass
node_certs_destination: /etc/kibana/certs
# CA Generation
master_certs_path: "{{ playbook_dir }}/es_certs"
generate_CA: true
ca_cert_name: ""
# Nodejs
nodejs:
repo_dict:
debian: "deb"
redhat: "rpm"
repo_url_ext: "nodesource.com/setup_10.x"
# Build from sources
build_from_sources: false
wazuh_plugin_branch: 4.3-7.10
#Nodejs NODE_OPTIONS
node_options: --no-warnings --max-old-space-size=2048 --max-http-header-size=65536

View File

@ -1,5 +0,0 @@
---
- name: restart kibana
service:
name: kibana
state: restarted

View File

@ -1,24 +0,0 @@
---
galaxy_info:
author: Wazuh
description: Installing and maintaining Kibana server.
company: wazuh.com
license: license (GPLv3)
min_ansible_version: 2.0
platforms:
- name: EL
versions:
- all
- name: Fedora
versions:
- all
- name: Debian
versions:
- all
- name: Ubuntu
versions:
- all
galaxy_tags:
- web
- system
- monitoring

View File

@ -1,32 +0,0 @@
---
- name: Debian/Ubuntu | Install apt-transport-https and ca-certificates
apt:
name:
- apt-transport-https
- ca-certificates
state: present
register: kibana_installing_ca_package
until: kibana_installing_ca_package is succeeded
- name: Debian/Ubuntu | Add Elasticsearch GPG key
apt_key:
url: "{{ elasticrepo.gpg }}"
id: "{{ elasticrepo.key_id }}"
state: present
- name: Debian/Ubuntu | Install Elastic repo
apt_repository:
repo: "deb {{ elasticrepo.apt }} stable main"
state: present
filename: 'elastic_repo_7'
update_cache: true
changed_when: false
- name: Debian/Ubuntu | Install Kibana
apt:
name: "kibana={{ elastic_stack_version }}"
state: present
cache_valid_time: 3600
register: installing_kibana_package
until: installing_kibana_package is succeeded
tags: install

View File

@ -1,6 +0,0 @@
---
- name: Debian/Ubuntu | Removing Elasticsearch repository
apt_repository:
repo: "deb {{ elasticrepo.apt }} stable main"
state: absent
changed_when: false

View File

@ -1,6 +0,0 @@
---
- name: Remove Elasticsearch repository (and clean up left-over metadata)
yum_repository:
name: elastic_repo_7
state: absent
changed_when: false

View File

@ -1,15 +0,0 @@
---
- name: RedHat/CentOS/Fedora | Install Elastic repo
yum_repository:
name: elastic_repo_7
description: Elastic repository for 7.x packages
baseurl: "{{ elasticrepo.yum }}"
gpgkey: "{{ elasticrepo.gpg }}"
gpgcheck: true
changed_when: false
- name: RedHat/CentOS/Fedora | Install Kibana
package: name=kibana-{{ elastic_stack_version }} state=present
register: installing_kibana_package
until: installing_kibana_package is succeeded
tags: install

View File

@ -1,76 +0,0 @@
---
- name: Ensure the Git package is present
package:
name: git
state: present
- name: Modify repo url if host is in Debian family
set_fact:
node_js_repo_type: deb
when:
- ansible_os_family | lower == "debian"
- name: Download script to install Nodejs repository
get_url:
url: "https://{{ nodejs['repo_dict'][ansible_os_family|lower] }}.{{ nodejs['repo_url_ext'] }}"
dest: "/tmp/setup_nodejs_repo.sh"
mode: 0700
- name: Execute downloaded script to install Nodejs repo
command: /tmp/setup_nodejs_repo.sh
register: node_repo_installation_result
changed_when: false
- name: Install Nodejs
package:
name: nodejs
state: present
- name: Install yarn dependency to build the Wazuh Kibana Plugin
# Using shell due to errors when evaluating text between @ with command
shell: "npm install -g {{ 'yarn' }}{{ '@' }}{{ '1.10.1'}}" # noqa 305
register: install_yarn_result
changed_when: install_yarn_result.rc == 0
- name: Remove old wazuh-kibana-app git directory
file:
path: /tmp/app
state: absent
changed_when: false
- name: Clone wazuh-kibana-app repository # Using command as git module doesn't cover single-branch nor depth
command: git clone https://github.com/wazuh/wazuh-kibana-app -b {{ wazuh_plugin_branch }} --single-branch --depth=1 app # noqa 303
register: clone_app_repo_result
changed_when: false
args:
chdir: "/tmp"
- name: Executing yarn to build the package
command: "{{ item }}"
with_items:
- "yarn"
- "yarn build"
register: yarn_execution_result
changed_when: false
args:
chdir: "/tmp/app/"
- name: Obtain name of generated package
shell: "find ./ -name 'wazuh-*.zip' -printf '%f\\n'"
register: wazuhapp_package_name
changed_when: false
args:
chdir: "/tmp/app/build"
- name: Install Wazuh Plugin (can take a while)
shell: NODE_OPTIONS="{{ node_options }}" /usr/share/kibana/bin/kibana-plugin install file:///tmp/app/build/{{ wazuhapp_package_name.stdout }}
args:
executable: /bin/bash
creates: /usr/share/kibana/plugins/wazuh/package.json
chdir: /usr/share/kibana
become: yes
become_user: kibana
notify: restart kibana
tags:
- install
- skip_ansible_lint

View File

@ -1,189 +0,0 @@
---
- name: Stopping early, trying to compile Wazuh Kibana Plugin on Debian 10 is not possible
fail:
msg: "It's not possible to compile the Wazuh Kibana plugin on Debian 10 due to: https://github.com/wazuh/wazuh-kibana-app/issues/1924"
when:
- build_from_sources
- ansible_distribution == "Debian"
- ansible_distribution_major_version == "10"
- import_tasks: RedHat.yml
when: ansible_os_family == 'RedHat'
- import_tasks: Debian.yml
when: ansible_os_family == 'Debian'
- name: Copying node's certificate from master
copy:
src: "{{ item }}"
dest: "{{ node_certs_destination }}/"
owner: root
group: kibana
mode: 0440
with_items:
- "{{ master_certs_path }}/{{ kibana_node_name }}/{{ kibana_node_name }}.key"
- "{{ master_certs_path }}/{{ kibana_node_name }}/{{ kibana_node_name }}.crt"
- "{{ master_certs_path }}/ca/ca.crt"
tags: xpack-security
when:
- kibana_xpack_security
- generate_CA
- name: Copying node's certificate from master (Custom CA)
copy:
src: "{{ item }}"
dest: "{{ node_certs_destination }}/"
owner: root
group: kibana
mode: 0440
with_items:
- "{{ master_certs_path }}/{{ kibana_node_name }}/{{ kibana_node_name }}.key"
- "{{ master_certs_path }}/{{ kibana_node_name }}/{{ kibana_node_name }}.crt"
- "{{ master_certs_path }}/ca/{{ ca_cert_name }}"
when:
- kibana_xpack_security
- not generate_CA
tags: xpack-security
- name: Ensuring certificates folder owner and permissions
file:
path: "{{ node_certs_destination }}/"
state: directory
recurse: no
owner: kibana
group: kibana
mode: 0770
when:
- kibana_xpack_security
notify: restart kibana
tags: xpack-security
- name: Kibana configuration
template:
src: kibana.yml.j2
dest: /etc/kibana/kibana.yml
owner: root
group: root
mode: 0644
notify: restart kibana
tags: configure
- name: Checking Wazuh-APP version
shell: >-
grep -c -E 'version.*{{ elastic_stack_version }}' /usr/share/kibana/plugins/wazuh/package.json
args:
executable: /bin/bash
removes: /usr/share/kibana/plugins/wazuh/package.json
register: wazuh_app_verify
changed_when: false
failed_when:
- wazuh_app_verify.rc != 0
- wazuh_app_verify.rc != 1
- name: Removing old Wazuh-APP
command: /usr/share/kibana/bin/kibana-plugin --allow-root remove wazuh
when: wazuh_app_verify.rc == 1
tags: install
- name: Removing bundles
file:
path: /usr/share/kibana/data/bundles
state: absent
when: wazuh_app_verify.rc == 1
tags: install
- name: Explicitly starting Kibana to generate "wazuh-"
service:
name: kibana
state: started
- name: Ensuring Kibana directory owner
file:
# noqa 208
path: "/usr/share/kibana"
state: directory
owner: kibana
group: kibana
recurse: yes
- name: Build and Install Wazuh Kibana Plugin from sources
import_tasks: build_wazuh_plugin.yml
when:
- build_from_sources is defined
- build_from_sources
- name: Install Wazuh Plugin (can take a while)
shell: >-
NODE_OPTIONS="{{ node_options }}" /usr/share/kibana/bin/kibana-plugin install
{{ wazuh_app_url }}-{{ wazuh_version }}_{{ elastic_stack_version }}-1.zip
args:
executable: /bin/bash
creates: /usr/share/kibana/plugins/wazuh/package.json
chdir: /usr/share/kibana
become: yes
become_user: kibana
notify: restart kibana
tags:
- install
- skip_ansible_lint
when:
- not build_from_sources
- name: Kibana optimization (can take a while)
shell: /usr/share/kibana/node/bin/node {{ node_options }} /usr/share/kibana/src/cli/cli.js --optimize -c {{ kibana_conf_path }}/kibana.yml
args:
executable: /bin/bash
creates: /usr/share/kibana/data/wazuh/
become: yes
become_user: kibana
tags:
- skip_ansible_lint
- name: Wait for Elasticsearch port
wait_for: host={{ elasticsearch_network_host }} port={{ elasticsearch_http_port }}
- name: Select correct API protocol
set_fact:
elastic_api_protocol: "{% if kibana_xpack_security %}https{% else %}http{% endif %}"
- name: Attempting to delete legacy Wazuh index if exists
uri:
url: "{{ elastic_api_protocol }}://{{ elasticsearch_network_host }}:{{ elasticsearch_http_port }}/.wazuh"
method: DELETE
user: "{{ elasticsearch_xpack_security_user }}"
password: "{{ elasticsearch_xpack_security_password }}"
validate_certs: no
status_code: 200, 404
force_basic_auth: yes
- name: Create wazuh plugin config directory
file:
path: /usr/share/kibana/data/wazuh/config/
state: directory
recurse: yes
owner: kibana
group: kibana
mode: 0751
changed_when: False
- name: Configure Wazuh Kibana Plugin
template:
src: wazuh.yml.j2
dest: /usr/share/kibana/data/wazuh/config/wazuh.yml
owner: kibana
group: kibana
mode: 0751
changed_when: False
- name: Ensure Kibana is started and enabled
service:
name: kibana
enabled: true
state: started
- import_tasks: RMRedHat.yml
when: ansible_os_family == 'RedHat'
- import_tasks: RMDebian.yml
when: ansible_os_family == 'Debian'

View File

@ -1,121 +0,0 @@
# {{ ansible_managed }}
# Kibana is served by a back end server. This setting specifies the port to use.
server.port: {{ kibana_server_port }}
# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
server.host: {{ kibana_server_host }}
# Enables you to specify a path to mount Kibana at if you are running behind a proxy. This only affects
# the URLs generated by Kibana, your proxy is expected to remove the basePath value before forwarding requests
# to Kibana. This setting cannot end in a slash.
#server.basePath: ""
# The maximum payload size in bytes for incoming server requests.
#server.maxPayloadBytes: 1048576
# The Kibana server's name. This is used for display purposes.
#server.name: "your-hostname"
# The URL of the Elasticsearch instance to use for all your queries.
{% if kibana_xpack_security %}
elasticsearch.hosts: "https://{{ elasticsearch_network_host }}:{{ elasticsearch_http_port }}"
{% else %}
elasticsearch.hosts: "http://{{ elasticsearch_network_host }}:{{ elasticsearch_http_port }}"
{% endif %}
# When this setting's value is true Kibana uses the hostname specified in the server.host
# setting. When the value of this setting is false, Kibana uses the hostname of the host
# that connects to this Kibana instance.
#elasticsearch.preserveHost: true
# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
# dashboards. Kibana creates a new index if the index doesn't already exist.
#kibana.index: ".kibana"
# The default application to load.
#kibana.defaultAppId: "discover"
# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
#elasticsearch.username: "user"
#elasticsearch.password: "pass"
# Paths to the PEM-format SSL certificate and SSL key files, respectively. These
# files enable SSL for outgoing requests from the Kibana server to the browser.
#server.ssl.cert: /path/to/your/server.crt
#server.ssl.key: /path/to/your/server.key
# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files validate that your Elasticsearch backend uses the same key files.
#elasticsearch.ssl.cert: /path/to/your/client.crt
#elasticsearch.ssl.key: /path/to/your/client.key
# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
#elasticsearch.ssl.ca: /path/to/your/CA.pem
# To disregard the validity of SSL certificates, change this setting's value to false.
#elasticsearch.ssl.verify: true
# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
# the elasticsearch.requestTimeout setting.
#elasticsearch.pingTimeout: 1500
# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
#elasticsearch.requestTimeout: 30000
# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
# headers, set this value to [] (an empty list).
#elasticsearch.requestHeadersWhitelist: [ authorization ]
# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
#elasticsearch.customHeaders: {}
# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
#elasticsearch.shardTimeout: 0
# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.
#elasticsearch.startupTimeout: 5000
# Specifies the path where Kibana creates the process ID file.
#pid.file: /var/run/kibana.pid
# Enables you to specify a file where Kibana stores log output.
#logging.dest: stdout
# Set the value of this setting to true to suppress all logging output.
#logging.silent: false
# Set the value of this setting to true to suppress all logging output other than error messages.
#logging.quiet: false
# Set the value of this setting to true to log all events, including system usage information
# and all requests.
#logging.verbose: false
# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 5000.
#ops.interval: 5000
# Xpack Security
{% if kibana_xpack_security %}
elasticsearch.username: "{{ elasticsearch_xpack_security_user }}"
elasticsearch.password: "{{ elasticsearch_xpack_security_password }}"
server.ssl.enabled: true
server.ssl.key: "{{node_certs_destination}}/{{ kibana_node_name }}.key"
server.ssl.certificate: "{{node_certs_destination}}/{{ kibana_node_name }}.crt"
elasticsearch.ssl.verificationMode: "{{ kibana_ssl_verification_mode }}"
{% if generate_CA == true %}
elasticsearch.ssl.certificateAuthorities: ["{{ node_certs_destination }}/ca.crt"]
{% elif generate_CA == false %}
elasticsearch.ssl.certificateAuthorities: ["{{ node_certs_destination }}/{{ca_cert_name}}"]
{% endif %}
{% endif %}
server.defaultRoute: /app/wazuh

View File

@ -1,134 +0,0 @@
---
#
# Wazuh app - App configuration file
# Copyright (C) 2015-2019 Wazuh, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Find more information about this on the LICENSE file.
#
# ======================== Wazuh app configuration file ========================
#
# Please check the documentation for more information on configuration options:
# https://documentation.wazuh.com/current/installation-guide/index.html
#
# Also, you can check our repository:
# https://github.com/wazuh/wazuh-kibana-app
#
# ------------------------------- Index patterns -------------------------------
#
# Default index pattern to use.
#pattern: wazuh-alerts-4.x-*
#
# ----------------------------------- Checks -----------------------------------
#
# Defines which checks must be considered by the healthcheck
# step once the Wazuh app starts. Values must be true or false.
#checks.pattern : true
#checks.template: true
#checks.api : true
#checks.setup : true
#
# --------------------------------- Extensions ---------------------------------
#
# Defines which extensions should be activated when you add a new API entry.
# You can change them after the Wazuh app starts.
# Values must be true or false.
#extensions.pci : true
#extensions.gdpr : true
#extensions.hipaa : true
#extensions.nist : true
#extensions.audit : true
#extensions.oscap : false
#extensions.ciscat : false
#extensions.aws : false
#extensions.virustotal: false
#extensions.osquery : false
#extensions.docker : false
#
# ---------------------------------- Time out ----------------------------------
#
# Defines the maximum timeout to be used for Wazuh app requests.
# It will be ignored if it is below 1500.
# It is the number of milliseconds before a request is considered failed.
# Default: 20000
#timeout: 20000
#
# ------------------------------ Advanced indices ------------------------------
#
# Configure .wazuh indices shards and replicas.
#wazuh.shards : 1
#wazuh.replicas : 0
#
# --------------------------- Index pattern selector ---------------------------
#
# Defines if the user is allowed to change the selected
# index pattern directly from the Wazuh app top menu.
# Default: true
#ip.selector: true
#
# List of index patterns to be ignored
#ip.ignore: []
#
# -------------------------------- X-Pack RBAC ---------------------------------
#
# Custom setting to enable/disable built-in X-Pack RBAC security capabilities.
# Default: enabled
#xpack.rbac.enabled: true
#
# ------------------------------ wazuh-monitoring ------------------------------
#
# Custom setting to enable/disable wazuh-monitoring indices.
# Values: true, false, worker
# If worker is given as value, the app will show the Agents status
# visualization but won't insert data on wazuh-monitoring indices.
# Default: true
#wazuh.monitoring.enabled: true
#
# Custom setting to set the frequency for wazuh-monitoring indices cron task.
# Default: 900 (s)
#wazuh.monitoring.frequency: 900
#
# Configure wazuh-monitoring-4.x-* indices shards and replicas.
#wazuh.monitoring.shards: 2
#wazuh.monitoring.replicas: 0
#
# Configure wazuh-monitoring-4.x-* indices custom creation interval.
# Values: h (hourly), d (daily), w (weekly), m (monthly)
# Default: d
#wazuh.monitoring.creation: d
#
# Default index pattern to use for Wazuh monitoring
#wazuh.monitoring.pattern: wazuh-monitoring-4.x-*
#
#
# ------------------------------- App privileges --------------------------------
#admin: true
#
# ------------------------------- App logging level -----------------------------
# Set the logging level for the Wazuh App log files.
# Default value: info
# Allowed values: info, debug
#logs.level: info
#
#-------------------------------- API entries -----------------------------------
# The following configuration is the default structure to define an API entry.
#
#hosts:
# - <id>:
# url: http(s)://<url>
# port: <port>
# user: <user>
# password: <password>
hosts:
{% for api in wazuh_api_credentials %}
- {{ api['id'] }}:
url: {{ api['url'] }}
port: {{ api['port'] }}
username: {{ api['username'] }}
password: {{ api['password'] }}
{% endfor %}

View File

@ -1,38 +0,0 @@
Ansible Role: Filebeat for Elastic Stack
------------------------------------
An Ansible Role that installs [Filebeat](https://www.elastic.co/products/beats/filebeat); it can be used in conjunction with [ansible-wazuh-manager](https://github.com/wazuh/wazuh-ansible/ansible-wazuh-server).
Requirements
------------
This role will work on:
* Red Hat
* CentOS
* Fedora
* Debian
* Ubuntu
Role Variables
--------------
Available variables are listed below, along with default values (see `defaults/main.yml`):
```
filebeat_output_elasticsearch_hosts:
- "localhost:9200"
```
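Example Playbook
----------------
A minimal sketch, mirroring the Wazuh manager playbook shown earlier in this commit; the host pattern and Elasticsearch address are placeholders:
```
---
- hosts: <WAZUH_MANAGER_HOST>
  roles:
    - role: ../roles/wazuh/ansible-wazuh-manager
    - role: ../roles/wazuh/ansible-filebeat
      filebeat_output_elasticsearch_hosts: <YOUR_ELASTICSEARCH_IP>:9200
```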
License and copyright
---------------------
WAZUH Copyright (C) 2021 Wazuh Inc. (License GPLv3)
### Based on previous work from geerlingguy
- https://github.com/geerlingguy/ansible-role-filebeat
### Modified by Wazuh
The playbooks have been modified by Wazuh, including some specific requirements, templates and configuration to improve integration with the Wazuh ecosystem.

View File

@ -1,37 +0,0 @@
---
filebeat_version: 7.10.2
wazuh_template_branch: v4.3.0
filebeat_create_config: true
filebeat_node_name: node-1
filebeat_output_elasticsearch_hosts:
- "localhost:9200"
filebeat_module_package_url: https://packages.wazuh.com/4.x/filebeat
filebeat_module_package_name: wazuh-filebeat-0.1.tar.gz
filebeat_module_package_path: /tmp/
filebeat_module_destination: /usr/share/filebeat/module
filebeat_module_folder: /usr/share/filebeat/module/wazuh
# Xpack Security
filebeat_xpack_security: false
elasticsearch_xpack_security_user: elastic
elasticsearch_xpack_security_password: elastic_pass
node_certs_destination: /etc/filebeat/certs
# CA Generation
master_certs_path: "{{ playbook_dir }}/es_certs"
generate_CA: true
ca_cert_name: ""
elasticrepo:
apt: 'https://artifacts.elastic.co/packages/7.x/apt'
yum: 'https://artifacts.elastic.co/packages/7.x/yum'
gpg: 'https://artifacts.elastic.co/GPG-KEY-elasticsearch'
key_id: '46095ACC8548582C1A2699A9D27D666CD88E42B4'

View File

@ -1,5 +0,0 @@
---
- name: restart filebeat
service:
name: filebeat
state: restarted

View File

@ -1,29 +0,0 @@
---
dependencies: []
galaxy_info:
author: Wazuh
description: Installing and maintaining Filebeat server.
company: wazuh.com
license: license (GPLv3)
min_ansible_version: 2.0
platforms:
- name: EL
versions:
- 6
- 7
- name: Fedora
versions:
- all
- name: Debian
versions:
- jessie
- name: Ubuntu
versions:
- precise
- trusty
- xenial
galaxy_tags:
- web
- system
- monitoring

View File

@ -1,23 +0,0 @@
---
- name: Debian/Ubuntu | Install apt-transport-https, ca-certificates and acl
apt:
name:
- apt-transport-https
- ca-certificates
- acl
state: present
register: filebeat_ca_packages_install
until: filebeat_ca_packages_install is succeeded
- name: Debian/Ubuntu | Add Elasticsearch apt key.
apt_key:
url: "{{ elasticrepo.gpg }}"
id: "{{ elasticrepo.key_id }}"
state: present
- name: Debian/Ubuntu | Add Filebeat repository.
apt_repository:
repo: "deb {{ elasticrepo.apt }} stable main"
state: present
update_cache: true
changed_when: false

View File

@ -1,6 +0,0 @@
---
- name: Debian/Ubuntu | Remove Filebeat repository (and clean up left-over metadata)
apt_repository:
repo: "deb {{ elasticrepo.apt }} stable main"
state: absent
changed_when: false

View File

@ -1,6 +0,0 @@
---
- name: RedHat/CentOS/Fedora | Remove Filebeat repository (and clean up left-over metadata)
yum_repository:
name: elastic_repo_7
state: absent
changed_when: false

View File

@ -1,9 +0,0 @@
---
- name: RedHat/CentOS/Fedora/Amazon Linux | Install Filebeat repo
yum_repository:
name: elastic_repo_7
description: Elastic repository for 7.x packages
baseurl: "{{ elasticrepo.yum }}"
gpgkey: "{{ elasticrepo.gpg }}"
gpgcheck: true
changed_when: false

View File

@ -1,20 +0,0 @@
---
- name: Copy Filebeat configuration.
template:
src: filebeat.yml.j2
dest: "/etc/filebeat/filebeat.yml"
owner: root
group: root
mode: 0400
notify: restart filebeat
tags: configure
- name: Fetch latest Wazuh alerts template
get_url:
url: https://raw.githubusercontent.com/wazuh/wazuh/{{ wazuh_template_branch }}/extensions/elasticsearch/7.x/wazuh-template.json
dest: "/etc/filebeat/wazuh-template.json"
owner: root
group: root
mode: 0400
notify: restart filebeat
tags: configure

View File

@ -1,124 +0,0 @@
---
- include_tasks: RedHat.yml
when: ansible_os_family == 'RedHat'
- include_tasks: Debian.yml
when: ansible_os_family == 'Debian'
- name: CentOS/RedHat | Install Filebeat.
package: name=filebeat-{{ filebeat_version }} state=present
register: filebeat_installing_package
until: filebeat_installing_package is succeeded
when:
- ansible_distribution in ['CentOS','RedHat', 'Amazon']
tags:
- install
- name: Debian/Ubuntu | Install Filebeat.
apt:
name: filebeat={{ filebeat_version }}
state: present
cache_valid_time: 3600
register: filebeat_installing_package_debian
until: filebeat_installing_package_debian is succeeded
when:
- not (ansible_distribution in ['CentOS','RedHat', 'Amazon'])
tags:
- init
- name: Copying node's certificate from master
copy:
src: "{{ item }}"
dest: "{{ node_certs_destination }}/"
owner: root
group: root
mode: 0440
with_items:
- "{{ master_certs_path }}/{{ filebeat_node_name }}/{{ filebeat_node_name }}.key"
- "{{ master_certs_path }}/{{ filebeat_node_name }}/{{ filebeat_node_name }}.crt"
- "{{ master_certs_path }}/ca/ca.crt"
when:
- generate_CA
- filebeat_xpack_security
tags: xpack-security
- name: Copying node's certificate from master (Custom CA)
copy:
src: "{{ item }}"
dest: "{{ node_certs_destination }}/"
owner: root
group: root
mode: 0440
with_items:
- "{{ master_certs_path }}/{{ filebeat_node_name }}/{{ filebeat_node_name }}.key"
- "{{ master_certs_path }}/{{ filebeat_node_name }}/{{ filebeat_node_name }}.crt"
- "{{ master_certs_path }}/ca/{{ ca_cert_name }}"
when:
- not generate_CA
- filebeat_xpack_security
tags: xpack-security
- name: Ensuring folder & certs permissions
file:
path: "{{ node_certs_destination }}/"
mode: 0770
state: directory
recurse: no
when:
- filebeat_xpack_security
tags: xpack-security
- name: Checking if the Filebeat module folder exists
stat:
path: "{{ filebeat_module_folder }}"
register: filebeat_module_folder_info
- name: Download, uncompress and apply permissions for Filebeat
block:
- name: Download Filebeat module package
get_url:
url: "{{ filebeat_module_package_url }}/{{ filebeat_module_package_name }}"
dest: "{{ filebeat_module_package_path }}"
- name: Unpacking Filebeat module package
unarchive:
src: "{{ filebeat_module_package_path }}/{{ filebeat_module_package_name }}"
dest: "{{ filebeat_module_destination }}"
remote_src: yes
- name: Setting 0755 permission for Filebeat module folder
file:
path: "{{ filebeat_module_folder }}"
mode: 0755
recurse: yes
when: not filebeat_module_folder_info.stat.exists
- name: Checking if Filebeat Module package file exists
stat:
path: "{{ filebeat_module_package_path }}/{{ filebeat_module_package_name }}"
register: filebeat_module_package
when: filebeat_module_package is not defined
- name: Delete Filebeat module package file
file:
state: absent
path: "{{ filebeat_module_package_path }}/{{ filebeat_module_package_name }}"
when: filebeat_module_package.stat.exists
- import_tasks: config.yml
when: filebeat_create_config
notify: restart filebeat
- name: Ensure Filebeat is started and enabled at boot.
service:
name: filebeat
state: started
enabled: true
- include_tasks: "RMRedHat.yml"
when: ansible_os_family == "RedHat"
- include_tasks: "RMDebian.yml"
when: ansible_os_family == "Debian"

View File

@ -1,39 +0,0 @@
# Wazuh - Filebeat configuration file
filebeat.modules:
- module: wazuh
alerts:
enabled: true
archives:
enabled: false
setup.template.json.enabled: true
setup.template.json.path: '/etc/filebeat/wazuh-template.json'
setup.template.json.name: 'wazuh'
setup.template.overwrite: true
setup.ilm.enabled: false
# Send events directly to Elasticsearch
output.elasticsearch:
hosts: {{ filebeat_output_elasticsearch_hosts | to_json }}
{% if filebeat_xpack_security %}
username: {{ elasticsearch_xpack_security_user }}
password: {{ elasticsearch_xpack_security_password }}
protocol: https
{% if generate_CA == true %}
ssl.certificate_authorities:
- {{node_certs_destination}}/ca.crt
{% elif generate_CA == false %}
ssl.certificate_authorities:
- {{node_certs_destination}}/{{ca_cert_name}}
{% endif %}
ssl.certificate: "{{node_certs_destination}}/{{ filebeat_node_name }}.crt"
ssl.key: "{{node_certs_destination}}/{{ filebeat_node_name }}.key"
{% endif %}
# Optional. Send events to Logstash instead of Elasticsearch
#output.logstash.hosts: ["YOUR_LOGSTASH_SERVER_IP:5000"]