Merge pull request #705 from wazuh/732-indexer-dashboard

Wazuh indexer and dashboard for 4.3
This commit is contained in:
Alberto Rodríguez 2022-03-04 16:25:38 +01:00 committed by GitHub
commit db468bf239
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
60 changed files with 1247 additions and 1261 deletions

257
README.md
View File

@ -5,7 +5,7 @@
[![Documentation](https://img.shields.io/badge/docs-view-green.svg)](https://documentation.wazuh.com) [![Documentation](https://img.shields.io/badge/docs-view-green.svg)](https://documentation.wazuh.com)
[![Documentation](https://img.shields.io/badge/web-view-green.svg)](https://wazuh.com) [![Documentation](https://img.shields.io/badge/web-view-green.svg)](https://wazuh.com)
These playbooks install and configure Wazuh agent, manager and Elastic Stack. These playbooks install and configure Wazuh agent, manager and indexer and dashboard.
## Branches ## Branches
* `master` branch contains the latest code, be aware of possible bugs on this branch. * `master` branch contains the latest code, be aware of possible bugs on this branch.
@ -15,7 +15,7 @@ These playbooks install and configure Wazuh agent, manager and Elastic Stack.
| Wazuh version | Elastic | ODFE | | Wazuh version | Elastic | ODFE |
|---------------|---------|--------| |---------------|---------|--------|
| v4.3.0 | 7.10.2 | 1.13.2 | | v4.3.0 | | |
| v4.2.5 | 7.10.2 | 1.13.2 | | v4.2.5 | 7.10.2 | 1.13.2 |
| v4.2.4 | 7.10.2 | 1.13.2 | | v4.2.4 | 7.10.2 | 1.13.2 |
| v4.2.3 | 7.10.2 | 1.13.2 | | v4.2.3 | 7.10.2 | 1.13.2 |
@ -41,9 +41,9 @@ These playbooks install and configure Wazuh agent, manager and Elastic Stack.
│ │ │ ├── ansible-elasticsearch │ │ │ ├── ansible-elasticsearch
│ │ │ ├── ansible-kibana │ │ │ ├── ansible-kibana
│ │ │ │
│ │ ├── opendistro │ │ ├── opensearch
│ │ │ ├── opendistro-elasticsearch │ │ │ ├── wazuh-dashboard
│ │ │ ├── opendistro-kibana │ │ │ ├── wazuh-indexer
│ │ │ │
│ │ ├── wazuh │ │ ├── wazuh
│ │ │ ├── ansible-filebeat │ │ │ ├── ansible-filebeat
@ -60,10 +60,12 @@ These playbooks install and configure Wazuh agent, manager and Elastic Stack.
│ │ ├── wazuh-elastic_stack-distributed.yml │ │ ├── wazuh-elastic_stack-distributed.yml
│ │ ├── wazuh-elastic_stack-single.yml │ │ ├── wazuh-elastic_stack-single.yml
│ │ ├── wazuh-kibana.yml │ │ ├── wazuh-kibana.yml
│ │ ├── wazuh-manager.yml
│ │ ├── wazuh-manager-oss.yml │ │ ├── wazuh-manager-oss.yml
│ │ ├── wazuh-opendistro.yml │ │ ├── wazuh-manager.yml
│ │ ├── wazuh-opendistro-kibana.yml │ │ ├── wazuh-opensearch-opensearch_dashboards.yml
│ │ ├── wazuh-opensearch-production-ready.yml
│ │ ├── wazuh-opensearch-single.yml
│ │ ├── wazuh-opensearch.yml
│ ├── README.md │ ├── README.md
│ ├── VERSION │ ├── VERSION
@ -73,89 +75,104 @@ These playbooks install and configure Wazuh agent, manager and Elastic Stack.
## Example: production-ready distributed environment ## Example: production-ready distributed environment
### Playbook ### Playbook
The hereunder example playbook uses the `wazuh-ansible` role to provision a production-ready Wazuh environment. The architecture includes 2 Wazuh nodes, 3 ODFE nodes and a mixed ODFE-Kibana node. The hereunder example playbook uses the `wazuh-ansible` role to provision a production-ready Wazuh environment. The architecture includes 2 Wazuh nodes, 3 Wazuh indexer nodes and a mixed Wazuh dashboard node.
```yaml ```yaml
--- ---
# Certificates generation # Certificates generation
- hosts: es1 - hosts: wi1
roles: roles:
- role: ../roles/opendistro/opendistro-elasticsearch - role: ../roles/wazuh/wazuh-indexer
elasticsearch_network_host: "{{ private_ip }}" indexer_network_host: "{{ private_ip }}"
elasticsearch_cluster_nodes: indexer_cluster_nodes:
- "{{ hostvars.es1.private_ip }}" - "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.es2.private_ip }}" - "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.es3.private_ip }}" - "{{ hostvars.wi3.private_ip }}"
elasticsearch_discovery_nodes: indexer_discovery_nodes:
- "{{ hostvars.es1.private_ip }}" - "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.es2.private_ip }}" - "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.es3.private_ip }}" - "{{ hostvars.wi3.private_ip }}"
perform_installation: false perform_installation: false
become: yes become: no
become_user: root
vars: vars:
elasticsearch_node_master: true indexer_node_master: true
instances: instances:
node1: node1:
name: node-1 # Important: must be equal to elasticsearch_node_name. name: node-1 # Important: must be equal to indexer_node_name.
ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert. ip: "{{ hostvars.wi1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert.
role: indexer
node2: node2:
name: node-2 name: node-2
ip: "{{ hostvars.es2.private_ip }}" ip: "{{ hostvars.wi2.private_ip }}"
role: indexer
node3: node3:
name: node-3 name: node-3
ip: "{{ hostvars.es3.private_ip }}" ip: "{{ hostvars.wi3.private_ip }}"
role: indexer
node4: node4:
name: node-4 name: node-4
ip: "{{ hostvars.manager.private_ip }}" ip: "{{ hostvars.manager.private_ip }}"
role: wazuh
node_type: master
node5: node5:
name: node-5 name: node-5
ip: "{{ hostvars.worker.private_ip }}" ip: "{{ hostvars.worker.private_ip }}"
role: wazuh
node_type: worker
node6: node6:
name: node-6 name: node-6
ip: "{{ hostvars.kibana.private_ip }}" ip: "{{ hostvars.dashboard.private_ip }}"
role: dashboard
tags: tags:
- generate-certs - generate-certs
#ODFE Cluster # Wazuh indexer cluster
- hosts: odfe_cluster - hosts: wi_cluster
strategy: free strategy: free
roles: roles:
- role: ../roles/opendistro/opendistro-elasticsearch - role: ../roles/wazuh/wazuh-indexer
elasticsearch_network_host: "{{ private_ip }}" indexer_network_host: "{{ private_ip }}"
become: yes become: yes
become_user: root become_user: root
vars: vars:
elasticsearch_cluster_nodes: indexer_cluster_nodes:
- "{{ hostvars.es1.private_ip }}" - "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.es2.private_ip }}" - "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.es3.private_ip }}" - "{{ hostvars.wi3.private_ip }}"
elasticsearch_discovery_nodes: indexer_discovery_nodes:
- "{{ hostvars.es1.private_ip }}" - "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.es2.private_ip }}" - "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.es3.private_ip }}" - "{{ hostvars.wi3.private_ip }}"
elasticsearch_node_master: true indexer_node_master: true
instances: instances:
node1: node1:
name: node-1 # Important: must be equal to elasticsearch_node_name. name: node-1 # Important: must be equal to indexer_node_name.
ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert. ip: "{{ hostvars.wi1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert.
role: indexer
node2: node2:
name: node-2 name: node-2
ip: "{{ hostvars.es2.private_ip }}" ip: "{{ hostvars.wi2.private_ip }}"
role: indexer
node3: node3:
name: node-3 name: node-3
ip: "{{ hostvars.es3.private_ip }}" ip: "{{ hostvars.wi3.private_ip }}"
role: indexer
node4: node4:
name: node-4 name: node-4
ip: "{{ hostvars.manager.private_ip }}" ip: "{{ hostvars.manager.private_ip }}"
role: wazuh
node_type: master
node5: node5:
name: node-5 name: node-5
ip: "{{ hostvars.worker.private_ip }}" ip: "{{ hostvars.worker.private_ip }}"
role: wazuh
node_type: worker
node6: node6:
name: node-6 name: node-6
ip: "{{ hostvars.kibana.private_ip }}" ip: "{{ hostvars.dashboard.private_ip }}"
role: dashboard
#Wazuh cluster # Wazuh cluster
- hosts: manager - hosts: manager
roles: roles:
- role: "../roles/wazuh/ansible-wazuh-manager" - role: "../roles/wazuh/ansible-wazuh-manager"
@ -180,10 +197,13 @@ The hereunder example playbook uses the `wazuh-ansible` role to provision a prod
nodes: nodes:
- "{{ hostvars.manager.private_ip }}" - "{{ hostvars.manager.private_ip }}"
hidden: 'no' hidden: 'no'
filebeat_output_elasticsearch_hosts: wazuh_api_users:
- "{{ hostvars.es1.private_ip }}" - username: custom-user
- "{{ hostvars.es2.private_ip }}" password: SecretPassword1!
- "{{ hostvars.es3.private_ip }}" filebeat_output_indexer_hosts:
- "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.wi3.private_ip }}"
- hosts: worker - hosts: worker
roles: roles:
@ -209,58 +229,67 @@ The hereunder example playbook uses the `wazuh-ansible` role to provision a prod
nodes: nodes:
- "{{ hostvars.manager.private_ip }}" - "{{ hostvars.manager.private_ip }}"
hidden: 'no' hidden: 'no'
filebeat_output_elasticsearch_hosts: filebeat_output_indexer_hosts:
- "{{ hostvars.es1.private_ip }}" - "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.es2.private_ip }}" - "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.es3.private_ip }}" - "{{ hostvars.wi3.private_ip }}"
#ODFE+Kibana node # Indexer + dashboard node
- hosts: kibana - hosts: dashboard
roles: roles:
- role: "../roles/opendistro/opendistro-elasticsearch" - role: "../roles/wazuh/wazuh-indexer"
- role: "../roles/opendistro/opendistro-kibana" - role: "../roles/wazuh/wazuh-dashboard"
become: yes become: yes
become_user: root become_user: root
vars: vars:
elasticsearch_network_host: "{{ hostvars.kibana.private_ip }}" indexer_network_host: "{{ hostvars.dashboard.private_ip }}"
elasticsearch_node_name: node-6 indexer_node_name: node-6
elasticsearch_node_master: false indexer_node_master: false
elasticsearch_node_ingest: false indexer_node_ingest: false
elasticsearch_node_data: false indexer_node_data: false
elasticsearch_cluster_nodes: indexer_cluster_nodes:
- "{{ hostvars.es1.private_ip }}" - "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.es2.private_ip }}" - "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.es3.private_ip }}" - "{{ hostvars.wi3.private_ip }}"
elasticsearch_discovery_nodes: indexer_discovery_nodes:
- "{{ hostvars.es1.private_ip }}" - "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.es2.private_ip }}" - "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.es3.private_ip }}" - "{{ hostvars.wi3.private_ip }}"
kibana_node_name: node-6 dashboard_node_name: node-6
wazuh_api_credentials: wazuh_api_credentials:
- id: default - id: default
url: https://{{ hostvars.manager.private_ip }} url: https://{{ hostvars.manager.private_ip }}
port: 55000 port: 55000
user: foo username: custom-user
password: bar password: SecretPassword1!
instances: instances:
node1: node1:
name: node-1 # Important: must be equal to elasticsearch_node_name. name: node-1
ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert. ip: "{{ hostvars.wi1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert.
role: indexer
node2: node2:
name: node-2 name: node-2
ip: "{{ hostvars.es2.private_ip }}" ip: "{{ hostvars.wi2.private_ip }}"
role: indexer
node3: node3:
name: node-3 name: node-3
ip: "{{ hostvars.es3.private_ip }}" ip: "{{ hostvars.wi3.private_ip }}"
role: indexer
node4: node4:
name: node-4 name: node-4
ip: "{{ hostvars.manager.private_ip }}" ip: "{{ hostvars.manager.private_ip }}"
role: wazuh
node_type: master
node5: node5:
name: node-5 name: node-5
ip: "{{ hostvars.worker.private_ip }}" ip: "{{ hostvars.worker.private_ip }}"
role: wazuh
node_type: worker
node6: node6:
name: node-6 name: node-6
ip: "{{ hostvars.kibana.private_ip }}" ip: "{{ hostvars.dashboard.private_ip }}"
role: dashboard
ansible_shell_allow_world_readable_temp: true
``` ```
### Inventory file ### Inventory file
@ -271,17 +300,17 @@ The hereunder example playbook uses the `wazuh-ansible` role to provision a prod
- The ssh credentials used by Ansible during the provision can be specified in this file too. Another option is including them directly on the playbook. - The ssh credentials used by Ansible during the provision can be specified in this file too. Another option is including them directly on the playbook.
```ini ```ini
es1 ansible_host=<es1_ec2_public_ip> private_ip=<es1_ec2_private_ip> elasticsearch_node_name=node-1 wi1 ansible_host=<wi1_ec2_public_ip> private_ip=<wi1_ec2_private_ip> indexer_node_name=node-1
es2 ansible_host=<es2_ec2_public_ip> private_ip=<es2_ec2_private_ip> elasticsearch_node_name=node-2 wi2 ansible_host=<wi2_ec2_public_ip> private_ip=<wi2_ec2_private_ip> indexer_node_name=node-2
es3 ansible_host=<es3_ec2_public_ip> private_ip=<es3_ec2_private_ip> elasticsearch_node_name=node-3 wi3 ansible_host=<wi3_ec2_public_ip> private_ip=<wi3_ec2_private_ip> indexer_node_name=node-3
kibana ansible_host=<kibana_node_public_ip> private_ip=<kibana_ec2_private_ip> dashboard ansible_host=<dashboard_node_public_ip> private_ip=<dashboard_ec2_private_ip>
manager ansible_host=<manager_node_public_ip> private_ip=<manager_ec2_private_ip> manager ansible_host=<manager_node_public_ip> private_ip=<manager_ec2_private_ip>
worker ansible_host=<worker_node_public_ip> private_ip=<worker_ec2_private_ip> worker ansible_host=<worker_node_public_ip> private_ip=<worker_ec2_private_ip>
[odfe_cluster] [wi_cluster]
es1 wi1
es2 wi2
es3 wi3
[all:vars] [all:vars]
ansible_ssh_user=vagrant ansible_ssh_user=vagrant
@ -292,47 +321,63 @@ ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
### Launching the playbook ### Launching the playbook
```bash ```bash
ansible-playbook wazuh-odfe-production-ready.yml -i inventory sudo ansible-playbook wazuh-opensearch-production-ready.yml -i inventory
``` ```
After the playbook execution, the Wazuh UI should be reachable through `https://<kibana_host>:5601` After the playbook execution, the Wazuh UI should be reachable through `https://<dashboard_host>:5601`
## Example: single-host environment ## Example: single-host environment
### Playbook ### Playbook
The hereunder example playbook uses the `wazuh-ansible` role to provision a single-host Wazuh environment. This architecture includes all the Wazuh and ODFE components in a single node. The hereunder example playbook uses the `wazuh-ansible` role to provision a single-host Wazuh environment. This architecture includes all the Wazuh and OpenSearch components in a single node.
```yaml ```yaml
--- ---
# Certificates generation
- hosts: aio
roles:
- role: ../roles/wazuh/wazuh-indexer
perform_installation: false
become: no
#become_user: root
vars:
indexer_node_master: true
instances:
node1:
name: node-1 # Important: must be equal to indexer_node_name.
ip: 127.0.0.1
role: indexer
tags:
- generate-certs
# Single node # Single node
- hosts: server - hosts: aio
become: yes become: yes
become_user: root become_user: root
roles: roles:
- role: ../roles/opendistro/opendistro-elasticsearch - role: ../roles/wazuh/wazuh-indexer
- role: "../roles/wazuh/ansible-wazuh-manager" - role: ../roles/wazuh/ansible-wazuh-manager
- role: "../roles/wazuh/ansible-filebeat-oss" - role: ../roles/wazuh/ansible-filebeat-oss
- role: "../roles/opendistro/opendistro-kibana" - role: ../roles/wazuh/wazuh-dashboard
vars: vars:
single_node: true single_node: true
minimum_master_nodes: 1 minimum_master_nodes: 1
elasticsearch_node_master: true indexer_node_master: true
elasticsearch_network_host: <your server host> indexer_network_host: 127.0.0.1
filebeat_node_name: node-1 filebeat_node_name: node-1
filebeat_output_elasticsearch_hosts: <your server host> filebeat_output_indexer_hosts:
ansible_ssh_user: vagrant - 127.0.0.1
ansible_ssh_private_key_file: /path/to/ssh/key.pem
ansible_ssh_extra_args: '-o StrictHostKeyChecking=no'
instances: instances:
node1: node1:
name: node-1 # Important: must be equal to elasticsearch_node_name. name: node-1 # Important: must be equal to indexer_node_name.
ip: <your server host> ip: 127.0.0.1
role: indexer
ansible_shell_allow_world_readable_temp: true
``` ```
### Inventory file ### Inventory file
```ini ```ini
[server] [aio]
<your server host> <your server host>
[all:vars] [all:vars]
@ -344,7 +389,7 @@ ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
### Launching the playbook ### Launching the playbook
```bash ```bash
ansible-playbook wazuh-odfe-single.yml -i inventory sudo ansible-playbook wazuh-opensearch-single.yml -i inventory
``` ```
After the playbook execution, the Wazuh UI should be reachable through `https://<your server host>:5601` After the playbook execution, the Wazuh UI should be reachable through `https://<your server host>:5601`

View File

@ -1,6 +1,6 @@
--- ---
- hosts: es1 - hosts: wi1
roles: roles:
- role: ../roles/opendistro/opendistro-kibana - role: ../roles/wazuh/wazuh-dashboard
vars: vars:
ansible_shell_allow_world_readable_temp: true ansible_shell_allow_world_readable_temp: true

View File

@ -1,17 +1,20 @@
--- ---
- hosts: es_cluster - hosts: wi_cluster
roles: roles:
- role: ../roles/opendistro/opendistro-elasticsearch - role: ../roles/wazuh/wazuh-indexer
vars: vars:
instances: # A certificate will be generated for every node using the name as CN. instances: # A certificate will be generated for every node using the name as CN.
node1: node1:
name: node-1 name: node-1
ip: <node-1 IP> ip: <node-1 IP>
role: indexer
node2: node2:
name: node-2 name: node-2
ip: <node-2 IP> ip: <node-2 IP>
role: indexer
node3: node3:
name: node-3 name: node-3
ip: <node-3 IP> ip: <node-3 IP>
role: indexer

View File

@ -3,7 +3,7 @@
roles: roles:
- role: ../roles/wazuh/ansible-wazuh-manager - role: ../roles/wazuh/ansible-wazuh-manager
- role: ../roles/wazuh/ansible-filebeat-oss - role: ../roles/wazuh/ansible-filebeat-oss
filebeat_output_elasticsearch_hosts: filebeat_output_indexer_hosts:
- "<elastic-node-1>:9200" - "<indexer-node-1>:9200"
- "<elastic-node-2>:9200" - "<indexer-node-2>:9200"
- "<elastic-node-2>:9200" - "<indexer-node-3>:9200"

View File

@ -1,189 +0,0 @@
---
# Certificates generation
- hosts: es1
roles:
- role: ../roles/opendistro/opendistro-elasticsearch
elasticsearch_network_host: "{{ private_ip }}"
elasticsearch_cluster_nodes:
- "{{ hostvars.es1.private_ip }}"
- "{{ hostvars.es2.private_ip }}"
- "{{ hostvars.es3.private_ip }}"
elasticsearch_discovery_nodes:
- "{{ hostvars.es1.private_ip }}"
- "{{ hostvars.es2.private_ip }}"
- "{{ hostvars.es3.private_ip }}"
perform_installation: false
become: yes
become_user: root
vars:
elasticsearch_node_master: true
instances:
node1:
name: node-1 # Important: must be equal to elasticsearch_node_name.
ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert.
node2:
name: node-2
ip: "{{ hostvars.es2.private_ip }}"
node3:
name: node-3
ip: "{{ hostvars.es3.private_ip }}"
node4:
name: node-4
ip: "{{ hostvars.manager.private_ip }}"
node5:
name: node-5
ip: "{{ hostvars.worker.private_ip }}"
node6:
name: node-6
ip: "{{ hostvars.kibana.private_ip }}"
tags:
- generate-certs
#ODFE Cluster
- hosts: odfe_cluster
strategy: free
roles:
- role: ../roles/opendistro/opendistro-elasticsearch
elasticsearch_network_host: "{{ private_ip }}"
become: yes
become_user: root
vars:
elasticsearch_cluster_nodes:
- "{{ hostvars.es1.private_ip }}"
- "{{ hostvars.es2.private_ip }}"
- "{{ hostvars.es3.private_ip }}"
elasticsearch_discovery_nodes:
- "{{ hostvars.es1.private_ip }}"
- "{{ hostvars.es2.private_ip }}"
- "{{ hostvars.es3.private_ip }}"
elasticsearch_node_master: true
instances:
node1:
name: node-1 # Important: must be equal to elasticsearch_node_name.
ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert.
node2:
name: node-2
ip: "{{ hostvars.es2.private_ip }}"
node3:
name: node-3
ip: "{{ hostvars.es3.private_ip }}"
node4:
name: node-4
ip: "{{ hostvars.manager.private_ip }}"
node5:
name: node-5
ip: "{{ hostvars.worker.private_ip }}"
node6:
name: node-6
ip: "{{ hostvars.kibana.private_ip }}"
#Wazuh cluster
- hosts: manager
roles:
- role: "../roles/wazuh/ansible-wazuh-manager"
- role: "../roles/wazuh/ansible-filebeat-oss"
filebeat_node_name: node-4
become: yes
become_user: root
vars:
wazuh_manager_config:
connection:
- type: 'secure'
port: '1514'
protocol: 'tcp'
queue_size: 131072
api:
https: 'yes'
cluster:
disable: 'no'
node_name: 'master'
node_type: 'master'
key: 'c98b62a9b6169ac5f67dae55ae4a9088'
nodes:
- "{{ hostvars.manager.private_ip }}"
hidden: 'no'
wazuh_api_users:
- username: custom-user
password: .S3cur3Pa55w0rd*-
filebeat_output_elasticsearch_hosts:
- "{{ hostvars.es1.private_ip }}"
- "{{ hostvars.es2.private_ip }}"
- "{{ hostvars.es3.private_ip }}"
- hosts: worker
roles:
- role: "../roles/wazuh/ansible-wazuh-manager"
- role: "../roles/wazuh/ansible-filebeat-oss"
filebeat_node_name: node-5
become: yes
become_user: root
vars:
wazuh_manager_config:
connection:
- type: 'secure'
port: '1514'
protocol: 'tcp'
queue_size: 131072
api:
https: 'yes'
cluster:
disable: 'no'
node_name: 'worker_01'
node_type: 'worker'
key: 'c98b62a9b6169ac5f67dae55ae4a9088'
nodes:
- "{{ hostvars.manager.private_ip }}"
hidden: 'no'
filebeat_output_elasticsearch_hosts:
- "{{ hostvars.es1.private_ip }}"
- "{{ hostvars.es2.private_ip }}"
- "{{ hostvars.es3.private_ip }}"
#ODFE+Kibana node
- hosts: kibana
roles:
- role: "../roles/opendistro/opendistro-elasticsearch"
- role: "../roles/opendistro/opendistro-kibana"
become: yes
become_user: root
vars:
elasticsearch_network_host: "{{ hostvars.kibana.private_ip }}"
elasticsearch_node_name: node-6
elasticsearch_node_master: false
elasticsearch_node_ingest: false
elasticsearch_node_data: false
elasticsearch_cluster_nodes:
- "{{ hostvars.es1.private_ip }}"
- "{{ hostvars.es2.private_ip }}"
- "{{ hostvars.es3.private_ip }}"
elasticsearch_discovery_nodes:
- "{{ hostvars.es1.private_ip }}"
- "{{ hostvars.es2.private_ip }}"
- "{{ hostvars.es3.private_ip }}"
kibana_node_name: node-6
wazuh_api_credentials:
- id: default
url: https://{{ hostvars.manager.private_ip }}
port: 55000
username: custom-user
password: .S3cur3Pa55w0rd*-
instances:
node1:
name: node-1 # Important: must be equal to elasticsearch_node_name.
ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert.
node2:
name: node-2
ip: "{{ hostvars.es2.private_ip }}"
node3:
name: node-3
ip: "{{ hostvars.es3.private_ip }}"
node4:
name: node-4
ip: "{{ hostvars.manager.private_ip }}"
node5:
name: node-5
ip: "{{ hostvars.worker.private_ip }}"
node6:
name: node-6
ip: "{{ hostvars.kibana.private_ip }}"
ansible_shell_allow_world_readable_temp: true

View File

@ -1,22 +0,0 @@
---
# Single node
- hosts: <your server host>
become: yes
become_user: root
roles:
- role: ../roles/opendistro/opendistro-elasticsearch
- role: ../roles/wazuh/ansible-wazuh-manager
- role: ../roles/wazuh/ansible-filebeat-oss
- role: ../roles/opendistro/opendistro-kibana
vars:
single_node: true
minimum_master_nodes: 1
elasticsearch_node_master: true
elasticsearch_network_host: 127.0.0.1
filebeat_node_name: node-1
filebeat_output_elasticsearch_hosts: 127.0.0.1
instances:
node1:
name: node-1 # Important: must be equal to elasticsearch_node_name.
ip: 127.0.0.1
ansible_shell_allow_world_readable_temp: true

View File

@ -0,0 +1,212 @@
---
# Certificates generation
- hosts: wi1
roles:
- role: ../roles/wazuh/wazuh-indexer
indexer_network_host: "{{ private_ip }}"
indexer_cluster_nodes:
- "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.wi3.private_ip }}"
indexer_discovery_nodes:
- "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.wi3.private_ip }}"
perform_installation: false
become: no
vars:
indexer_node_master: true
instances:
node1:
name: node-1 # Important: must be equal to indexer_node_name.
ip: "{{ hostvars.wi1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert.
role: indexer
node2:
name: node-2
ip: "{{ hostvars.wi2.private_ip }}"
role: indexer
node3:
name: node-3
ip: "{{ hostvars.wi3.private_ip }}"
role: indexer
node4:
name: node-4
ip: "{{ hostvars.manager.private_ip }}"
role: wazuh
node_type: master
node5:
name: node-5
ip: "{{ hostvars.worker.private_ip }}"
role: wazuh
node_type: worker
node6:
name: node-6
ip: "{{ hostvars.dashboard.private_ip }}"
role: dashboard
tags:
- generate-certs
# Wazuh indexer cluster
- hosts: wi_cluster
strategy: free
roles:
- role: ../roles/wazuh/wazuh-indexer
indexer_network_host: "{{ private_ip }}"
become: yes
become_user: root
vars:
indexer_cluster_nodes:
- "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.wi3.private_ip }}"
indexer_discovery_nodes:
- "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.wi3.private_ip }}"
indexer_node_master: true
instances:
node1:
name: node-1 # Important: must be equal to indexer_node_name.
ip: "{{ hostvars.wi1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert.
role: indexer
node2:
name: node-2
ip: "{{ hostvars.wi2.private_ip }}"
role: indexer
node3:
name: node-3
ip: "{{ hostvars.wi3.private_ip }}"
role: indexer
node4:
name: node-4
ip: "{{ hostvars.manager.private_ip }}"
role: wazuh
node_type: master
node5:
name: node-5
ip: "{{ hostvars.worker.private_ip }}"
role: wazuh
node_type: worker
node6:
name: node-6
ip: "{{ hostvars.dashboard.private_ip }}"
role: dashboard
# Wazuh cluster
- hosts: manager
roles:
- role: "../roles/wazuh/ansible-wazuh-manager"
- role: "../roles/wazuh/ansible-filebeat-oss"
filebeat_node_name: node-4
become: yes
become_user: root
vars:
wazuh_manager_config:
connection:
- type: 'secure'
port: '1514'
protocol: 'tcp'
queue_size: 131072
api:
https: 'yes'
cluster:
disable: 'no'
node_name: 'master'
node_type: 'master'
key: 'c98b62a9b6169ac5f67dae55ae4a9088'
nodes:
- "{{ hostvars.manager.private_ip }}"
hidden: 'no'
wazuh_api_users:
- username: custom-user
password: SecretPassword1!
filebeat_output_indexer_hosts:
- "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.wi3.private_ip }}"
- hosts: worker
roles:
- role: "../roles/wazuh/ansible-wazuh-manager"
- role: "../roles/wazuh/ansible-filebeat-oss"
filebeat_node_name: node-5
become: yes
become_user: root
vars:
wazuh_manager_config:
connection:
- type: 'secure'
port: '1514'
protocol: 'tcp'
queue_size: 131072
api:
https: 'yes'
cluster:
disable: 'no'
node_name: 'worker_01'
node_type: 'worker'
key: 'c98b62a9b6169ac5f67dae55ae4a9088'
nodes:
- "{{ hostvars.manager.private_ip }}"
hidden: 'no'
filebeat_output_indexer_hosts:
- "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.wi3.private_ip }}"
# Indexer + dashboard node
- hosts: dashboard
roles:
- role: "../roles/wazuh/wazuh-indexer"
- role: "../roles/wazuh/wazuh-dashboard"
become: yes
become_user: root
vars:
indexer_network_host: "{{ hostvars.dashboard.private_ip }}"
indexer_node_name: node-6
indexer_node_master: false
indexer_node_ingest: false
indexer_node_data: false
indexer_cluster_nodes:
- "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.wi3.private_ip }}"
indexer_discovery_nodes:
- "{{ hostvars.wi1.private_ip }}"
- "{{ hostvars.wi2.private_ip }}"
- "{{ hostvars.wi3.private_ip }}"
dashboard_node_name: node-6
wazuh_api_credentials:
- id: default
url: https://{{ hostvars.manager.private_ip }}
port: 55000
username: custom-user
password: SecretPassword1!
instances:
node1:
name: node-1
ip: "{{ hostvars.wi1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert.
role: indexer
node2:
name: node-2
ip: "{{ hostvars.wi2.private_ip }}"
role: indexer
node3:
name: node-3
ip: "{{ hostvars.wi3.private_ip }}"
role: indexer
node4:
name: node-4
ip: "{{ hostvars.manager.private_ip }}"
role: wazuh
node_type: master
node5:
name: node-5
ip: "{{ hostvars.worker.private_ip }}"
role: wazuh
node_type: worker
node6:
name: node-6
ip: "{{ hostvars.dashboard.private_ip }}"
role: dashboard
ansible_shell_allow_world_readable_temp: true

View File

@ -0,0 +1,40 @@
---
# Certificates generation
- hosts: aio
roles:
- role: ../roles/wazuh/wazuh-indexer
perform_installation: false
become: no
#become_user: root
vars:
indexer_node_master: true
instances:
node1:
name: node-1 # Important: must be equal to indexer_node_name.
ip: 127.0.0.1
role: indexer
tags:
- generate-certs
# Single node
- hosts: aio
become: yes
become_user: root
roles:
- role: ../roles/wazuh/wazuh-indexer
- role: ../roles/wazuh/ansible-wazuh-manager
- role: ../roles/wazuh/ansible-filebeat-oss
- role: ../roles/wazuh/wazuh-dashboard
vars:
single_node: true
minimum_master_nodes: 1
indexer_node_master: true
indexer_network_host: 127.0.0.1
filebeat_node_name: node-1
filebeat_output_indexer_hosts:
- 127.0.0.1
instances:
node1:
name: node-1 # Important: must be equal to indexer_node_name.
ip: 127.0.0.1
role: indexer
ansible_shell_allow_world_readable_temp: true

View File

@ -1,69 +0,0 @@
---
# Cluster Settings
opendistro_version: 1.13.2
single_node: false
elasticsearch_node_name: node-1
opendistro_cluster_name: wazuh
elasticsearch_network_host: '0.0.0.0'
elasticsearch_node_master: true
elasticsearch_node_data: true
elasticsearch_node_ingest: true
elasticsearch_start_timeout: 90
elasticsearch_lower_disk_requirements: false
elasticsearch_cluster_nodes:
- 127.0.0.1
elasticsearch_discovery_nodes:
- 127.0.0.1
local_certs_path: "{{ playbook_dir }}/opendistro/certificates"
# Minimum master nodes in cluster, 2 for 3 nodes elasticsearch cluster
minimum_master_nodes: 2
# Configure hostnames for Elasticsearch nodes
# Example es1.example.com, es2.example.com
domain_name: wazuh.com
# The OpenDistro package repository
package_repos:
yum:
opendistro:
baseurl: 'https://packages.wazuh.com/4.x/yum/'
gpg: 'https://packages.wazuh.com/key/GPG-KEY-WAZUH'
apt:
opendistro:
baseurl: 'deb https://packages.wazuh.com/4.x/apt/ stable main'
gpg: 'https://packages.wazuh.com/key/GPG-KEY-WAZUH'
openjdk:
baseurl: 'deb http://deb.debian.org/debian stretch-backports main'
opendistro_sec_plugin_conf_path: /usr/share/elasticsearch/plugins/opendistro_security/securityconfig
opendistro_sec_plugin_tools_path: /usr/share/elasticsearch/plugins/opendistro_security/tools
opendistro_conf_path: /etc/elasticsearch/
# Security password
opendistro_custom_user: ""
opendistro_custom_user_role: "admin"
# Set JVM memory limits
opendistro_jvm_xms: null
opendistro_http_port: 9200
certs_gen_tool_version: 1.8
# Url of Search Guard certificates generator tool
certs_gen_tool_url: "https://search.maven.org/remotecontent?filepath=com/floragunn/search-guard-tlstool/{{ certs_gen_tool_version }}/search-guard-tlstool-{{ certs_gen_tool_version }}.zip"
opendistro_admin_password: changeme
opendistro_kibana_password: changeme
# Deployment settings
generate_certs: true
perform_installation: true
opendistro_nolog_sensible: true

View File

@ -1,5 +0,0 @@
---
- name: restart elasticsearch
service:
name: elasticsearch
state: restarted

View File

@ -1,87 +0,0 @@
---
- name: Check if certificates already exists
  stat:
    path: "{{ local_certs_path }}"
  register: certificates_folder
  delegate_to: localhost
  become: false
  tags:
    - generate-certs

# Generate the node & admin certificates on the controller, only when no
# certificates folder exists yet (first run).
- block:
    - name: Local action | Create local temporary directory for certificates generation
      file:
        path: "{{ local_certs_path }}"
        mode: "0755"  # quoted so the octal mode is not parsed as a plain integer
        state: directory

    - name: Local action | Check that the generation tool exists
      stat:
        path: "{{ local_certs_path }}/search-guard-tlstool-{{ certs_gen_tool_version }}.zip"
      register: tool_package

    - name: Local action | Download certificates generation tool
      get_url:
        url: "{{ certs_gen_tool_url }}"
        dest: "{{ local_certs_path }}/search-guard-tlstool-{{ certs_gen_tool_version }}.zip"
      when: not tool_package.stat.exists

    - name: Local action | Extract the certificates generation tool
      unarchive:
        src: "{{ local_certs_path }}/search-guard-tlstool-{{ certs_gen_tool_version }}.zip"
        dest: "{{ local_certs_path }}/"

    - name: Local action | Add the execution bit to the binary
      file:
        dest: "{{ local_certs_path }}/tools/sgtlstool.sh"
        mode: "a+x"

    - name: Local action | Prepare the certificates generation template file
      template:
        src: "templates/tlsconfig.yml.j2"
        dest: "{{ local_certs_path }}/config/tlsconfig.yml"
        mode: "0644"
      register: tlsconfig_template

    - name: Create a directory if it does not exist
      file:
        path: "{{ local_certs_path }}/certs/"
        state: directory
        mode: "0755"

    - name: Local action | Check if root CA file exists
      stat:
        path: "{{ local_certs_path }}/certs/root-ca.key"
      register: root_ca_file

    # First run: create a fresh root CA (-ca) together with the certificates.
    - name: Local action | Generate the node & admin certificates in local
      command: >-
        {{ local_certs_path }}/tools/sgtlstool.sh
        -c {{ local_certs_path }}/config/tlsconfig.yml
        -ca -crt
        -t {{ local_certs_path }}/certs/
        -f -o
      when:
        - not root_ca_file.stat.exists
        - tlsconfig_template.changed

    # Subsequent runs: reuse the existing root CA and only reissue certificates.
    - name: Local action | Generate the node & admin certificates using an existing root CA
      command: >-
        {{ local_certs_path }}/tools/sgtlstool.sh
        -c {{ local_certs_path }}/config/tlsconfig.yml
        -crt
        -t {{ local_certs_path }}/certs/
        -f
      when:
        - root_ca_file.stat.exists
        - tlsconfig_template.changed
  run_once: true
  delegate_to: localhost
  become: false
  tags:
    - generate-certs
  when:
    - not certificates_folder.stat.exists

View File

@ -1,129 +0,0 @@
---
# Install and configure an OpenDistro Elasticsearch node, then wait for the
# cluster health endpoint before handing over to dependent roles.
- import_tasks: local_actions.yml
  when:
    - generate_certs

- block:
    - import_tasks: RedHat.yml
      when: ansible_os_family == 'RedHat'

    - import_tasks: Debian.yml
      when: ansible_os_family == 'Debian'

    # Best-effort removal: a "not found" message in stderr is tolerated.
    - name: Remove performance analyzer plugin from elasticsearch
      become: true
      command: ./elasticsearch-plugin remove opendistro-performance-analyzer
      ignore_errors: true
      args:
        chdir: /usr/share/elasticsearch/bin/
      register: remove_elasticsearch_performance_analyzer
      failed_when:
        - remove_elasticsearch_performance_analyzer.rc != 0
        - '"not found" not in remove_elasticsearch_performance_analyzer.stderr'
      changed_when: "remove_elasticsearch_performance_analyzer.rc == 0"

    - name: Remove elasticsearch configuration file
      file:
        path: "{{ opendistro_conf_path }}/elasticsearch.yml"
        state: absent
      tags: install

    - name: Copy Configuration File
      blockinfile:
        block: "{{ lookup('template', 'elasticsearch.yml.j2') }}"
        dest: "{{ opendistro_conf_path }}/elasticsearch.yml"
        create: true
        group: elasticsearch
        mode: "0640"  # quoted so the octal mode is not parsed as an integer
        marker: "## {mark} Opendistro general settings ##"
      tags: install

    - include_tasks: security_actions.yml
      tags:
        - security

    - name: Configure OpenDistro Elasticsearch JVM memory.
      template:
        src: "templates/jvm.options.j2"
        dest: /etc/elasticsearch/jvm.options
        owner: root
        group: elasticsearch
        mode: "0644"
        force: true
      notify: restart elasticsearch
      tags: install

    # Drops a jvm.options.d file that disables log4j lookups.
    - name: Configure disabled log4j.
      template:
        src: "templates/disabledlog4j.options.j2"
        dest: /etc/elasticsearch/jvm.options.d/disabledlog4j.options
        owner: root
        group: elasticsearch
        mode: "2750"
        force: true
      notify: restart elasticsearch
      tags: install

    - name: Ensure extra time for Elasticsearch to start on reboots
      lineinfile:
        path: /usr/lib/systemd/system/elasticsearch.service
        regexp: '^TimeoutStartSec='
        line: "TimeoutStartSec={{ elasticsearch_start_timeout }}"
      become: true
      tags: configure

    - name: Ensure Elasticsearch started and enabled
      service:
        name: elasticsearch
        enabled: true
        state: started

    # Poll the cluster health endpoint until usable: green, or yellow when
    # running as a single node.
    - name: Wait for Elasticsearch API
      uri:
        url: "https://{{ inventory_hostname if not single_node else elasticsearch_network_host }}:{{ opendistro_http_port }}/_cluster/health/"
        user: "admin"  # Default OpenDistro user is always "admin"
        password: "{{ opendistro_admin_password }}"
        validate_certs: false
        status_code: 200,401
        return_content: true
        timeout: 4
      register: _result
      until:
        - _result.json is defined
        - _result.json.status == "green" or ( _result.json.status == "yellow" and single_node )
      retries: 24
      delay: 5
      tags: debug
      when:
        - hostvars[inventory_hostname]['private_ip'] is not defined or not hostvars[inventory_hostname]['private_ip']

    - name: Wait for Elasticsearch API (Private IP)
      uri:
        url: "https://{{ hostvars[inventory_hostname]['private_ip'] if not single_node else elasticsearch_network_host }}:{{ opendistro_http_port }}/_cluster/health/"
        user: "admin"  # Default OpenDistro user is always "admin"
        password: "{{ opendistro_admin_password }}"
        validate_certs: false
        status_code: 200,401
        return_content: true
        timeout: 4
      register: _result
      until:
        - _result.json is defined
        - _result.json.status == "green" or ( _result.json.status == "yellow" and single_node )
      retries: 24
      delay: 5
      tags: debug
      when:
        - hostvars[inventory_hostname]['private_ip'] is defined and hostvars[inventory_hostname]['private_ip']

    - import_tasks: "RMRedHat.yml"
      when: ansible_os_family == "RedHat"

    - name: Reload systemd configuration
      systemd:
        daemon_reload: true
      become: true
      notify: restart elasticsearch
  when: perform_installation

View File

@ -1,129 +0,0 @@
# Security bootstrap: replace the bundled demo certificates with the locally
# generated ones, set the internal users passwords and initialize the
# OpenDistro security index.
- name: Remove demo certs
  file:
    path: "{{ item }}"
    state: absent
  with_items:
    - "{{ opendistro_conf_path }}/kirk.pem"
    - "{{ opendistro_conf_path }}/kirk-key.pem"
    - "{{ opendistro_conf_path }}/esnode.pem"
    - "{{ opendistro_conf_path }}/esnode-key.pem"

- name: Configure IP (Private address)
  set_fact:
    target_address: "{{ hostvars[inventory_hostname]['private_ip'] if not single_node else elasticsearch_network_host }}"
  when:
    - hostvars[inventory_hostname]['private_ip'] is defined

- name: Configure IP (Public address)
  set_fact:
    target_address: "{{ inventory_hostname if not single_node else elasticsearch_network_host }}"
  when:
    - hostvars[inventory_hostname]['private_ip'] is not defined

- name: Copy the node & admin certificates to Elasticsearch cluster
  copy:
    src: "{{ local_certs_path }}/certs/{{ item }}"
    dest: /etc/elasticsearch/
    mode: "0644"  # quoted so the octal mode is not parsed as an integer
  with_items:
    - root-ca.pem
    - root-ca.key
    - "{{ elasticsearch_node_name }}.key"
    - "{{ elasticsearch_node_name }}.pem"
    - "{{ elasticsearch_node_name }}_http.key"
    - "{{ elasticsearch_node_name }}_http.pem"
    - "{{ elasticsearch_node_name }}_elasticsearch_config_snippet.yml"
    - admin.key
    - admin.pem

- name: Copy the OpenDistro security configuration file to cluster
  blockinfile:
    block: "{{ lookup('file', snippet_path ) }}"
    dest: "{{ opendistro_conf_path }}/elasticsearch.yml"
    insertafter: EOF
    marker: "## {mark} Opendistro Security Node & Admin certificates configuration ##"
  vars:
    snippet_path: '{{ local_certs_path }}/certs/{{ elasticsearch_node_name }}_elasticsearch_config_snippet.yml'

# The snippet is emitted by the Search Guard TLS tool, so its option names use
# the "searchguard" prefix; rename them for the OpenDistro security plugin.
- name: Prepare the OpenDistro security configuration file
  replace:
    path: "{{ opendistro_conf_path }}/elasticsearch.yml"
    regexp: 'searchguard'
    replace: 'opendistro_security'
  tags: local

- name: Restart elasticsearch with security configuration
  systemd:
    name: elasticsearch
    state: restarted

- name: Copy the OpenDistro security internal users template
  template:
    src: "templates/internal_users.yml.j2"
    dest: "{{ opendistro_sec_plugin_conf_path }}/internal_users.yml"
    mode: "0644"
  run_once: true

- name: Hashing the custom admin password
  command: "{{ opendistro_sec_plugin_tools_path }}/hash.sh -p {{ opendistro_admin_password }}"  # noqa 301
  register: opendistro_admin_password_hashed
  no_log: '{{ opendistro_nolog_sensible | bool }}'
  run_once: true

- name: Set the Admin user password
  replace:
    path: "{{ opendistro_sec_plugin_conf_path }}/internal_users.yml"
    regexp: '(?<=admin:\n hash: )(.*)(?=)'
    replace: "{{ odfe_password_hash | quote }}"
  vars:
    odfe_password_hash: "{{ opendistro_admin_password_hashed.stdout_lines | last }}"
  run_once: true

# this can also be achieved with password_hash, but it requires dependencies on the controller
- name: Hash the kibanaserver role/user password
  command: "{{ opendistro_sec_plugin_tools_path }}/hash.sh -p {{ opendistro_kibana_password }}"  # noqa 301
  register: opendistro_kibanaserver_password_hashed
  no_log: '{{ opendistro_nolog_sensible | bool }}'
  run_once: true

- name: Set the kibanaserver user password
  replace:
    path: "{{ opendistro_sec_plugin_conf_path }}/internal_users.yml"
    regexp: '(?<=kibanaserver:\n hash: )(.*)(?=)'
    replace: "{{ odfe_password_hash | quote }}"
  vars:
    odfe_password_hash: "{{ opendistro_kibanaserver_password_hashed.stdout_lines | last }}"
  run_once: true

# -nhnv: do not verify the cluster hostname; -icl: ignore cluster name.
- name: Initialize the OpenDistro security index in elasticsearch
  command: >
    {{ opendistro_sec_plugin_tools_path }}/securityadmin.sh
    -cacert {{ opendistro_conf_path }}/root-ca.pem
    -cert {{ opendistro_conf_path }}/admin.pem
    -key {{ opendistro_conf_path }}/admin.key
    -cd {{ opendistro_sec_plugin_conf_path }}/
    -nhnv -icl
    -h {{ target_address }}
  run_once: true  # noqa 301

- name: Create custom user
  uri:
    url: "https://{{ target_address }}:{{ opendistro_http_port }}/_opendistro/_security/api/internalusers/{{ opendistro_custom_user }}"
    method: PUT
    user: "admin"  # Default OpenDistro user is always "admin"
    password: "{{ opendistro_admin_password }}"
    body: |
      {
        "password": "{{ opendistro_admin_password }}",
        "backend_roles": ["{{ opendistro_custom_user_role }}"]
      }
    body_format: json
    validate_certs: false
    status_code: 200,201,401
    return_content: true
    timeout: 4
  when:
    - opendistro_custom_user is defined and opendistro_custom_user

View File

@ -1,44 +0,0 @@
# Elasticsearch / OpenDistro configuration rendered by Ansible.
cluster.name: {{ opendistro_cluster_name }}
node.name: {{ elasticsearch_node_name }}
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: {{ elasticsearch_network_host }}
node.master: {{ elasticsearch_node_master|lower }}
{% if single_node == true %}
discovery.type: single-node
{% else %}
# Multi-node deployment: bootstrap master list and discovery seed hosts.
cluster.initial_master_nodes:
{% for item in elasticsearch_cluster_nodes %}
  - {{ item }}
{% endfor %}
discovery.seed_hosts:
{% for item in elasticsearch_discovery_nodes %}
  - {{ item }}
{% endfor %}
{% endif %}
{% if elasticsearch_node_data|lower == 'false' %}
node.data: false
{% endif %}
{% if elasticsearch_node_ingest|lower == 'false' %}
node.ingest: false
{% endif %}
{% if elasticsearch_lower_disk_requirements %}
# Relaxed absolute disk watermarks (values are amounts of FREE space) for
# small or development disks.
cluster.routing.allocation.disk.threshold_enabled: true
cluster.routing.allocation.disk.watermark.flood_stage: 200mb
cluster.routing.allocation.disk.watermark.low: 500mb
cluster.routing.allocation.disk.watermark.high: 300mb
{% endif %}
discovery.zen.minimum_master_nodes: "{{ minimum_master_nodes }}"
opendistro_security.allow_default_init_securityindex: true
opendistro_security.audit.type: internal_elasticsearch
opendistro_security.enable_snapshot_restore_privilege: true
opendistro_security.check_snapshot_restore_write_privileges: true
opendistro_security.restapi.roles_enabled: ["all_access", "security_rest_api_access"]

View File

@ -1,60 +0,0 @@
---
# Default variables for the OpenDistro Kibana role.
# Kibana configuration
elasticsearch_http_port: 9200
elastic_api_protocol: https
kibana_conf_path: /etc/kibana
kibana_node_name: node-1
kibana_server_host: "0.0.0.0"
kibana_server_port: "5601"
kibana_server_name: "kibana"
kibana_max_payload_bytes: 1048576
elastic_stack_version: 7.10.2
wazuh_version: 4.3.0
# Base URL of the prebuilt Wazuh Kibana app package.
wazuh_app_url: https://packages.wazuh.com/4.x/ui/kibana/wazuh_kibana
# The OpenDistro package repository
kibana_opendistro_version: 1.13.2-1 # Version includes the - for RedHat family compatibility, replace with = for Debian hosts
package_repos:
  yum:
    opendistro:
      baseurl: 'https://packages.wazuh.com/4.x/yum/'
      gpg: 'https://packages.wazuh.com/key/GPG-KEY-WAZUH'
  apt:
    opendistro:
      baseurl: 'deb https://packages.wazuh.com/4.x/apt/ stable main'
      gpg: 'https://packages.wazuh.com/key/GPG-KEY-WAZUH'
# API credentials
wazuh_api_credentials:
  - id: "default"
    url: "https://localhost"
    port: 55000
    username: "wazuh"
    password: "wazuh"
# opendistro Security
kibana_opendistro_security: true
kibana_newsfeed_enabled: "false"
kibana_telemetry_optin: "false"
kibana_telemetry_enabled: "false"
# Default credentials; override in production inventories.
opendistro_admin_password: changeme
opendistro_kibana_user: kibanaserver
opendistro_kibana_password: changeme
# Where the certificates generated on the controller are stored.
local_certs_path: "{{ playbook_dir }}/opendistro/certificates"
# Nodejs
nodejs:
  repo_dict:
    debian: "deb"
    redhat: "rpm"
  repo_url_ext: "nodesource.com/setup_10.x"
# Build from sources
build_from_sources: false
wazuh_plugin_branch: 4.1-7.10
#Nodejs NODE_OPTIONS
node_options: --no-warnings --max-old-space-size=2048 --max-http-header-size=65536

View File

@ -1,3 +0,0 @@
---
# Handler: restart the Kibana service when notified by configuration tasks.
- name: restart kibana
  service:
    name: kibana
    state: restarted

View File

@ -1,23 +0,0 @@
---
# Debian family: register the OpenDistro apt repository and install the
# opendistroforelasticsearch-kibana package pinned to kibana_opendistro_version.
- block:
    - include_vars: debian.yml

    - name: Add apt repository signing key
      apt_key:
        url: "{{ package_repos.apt.opendistro.gpg }}"
        state: present

    - name: Debian systems | Add OpenDistro repo
      apt_repository:
        repo: "{{ package_repos.apt.opendistro.baseurl }}"
        state: present
        update_cache: yes

    - name: Install Kibana
      apt:
        name: "opendistroforelasticsearch-kibana={{ kibana_opendistro_version }}"
        state: present
      register: install
  tags:
    - install

View File

@ -1,6 +0,0 @@
---
# Clean-up: drop the OpenDistro yum repository added during installation so the
# host does not keep pulling packages from it afterwards.
- name: Remove Elasticsearch repository (and clean up left-over metadata)
  yum_repository:
    name: opendistro_repo
    state: absent
  changed_when: false

View File

@ -1,20 +0,0 @@
---
# RedHat family: register the OpenDistro yum repository and install the
# opendistroforelasticsearch-kibana package pinned to kibana_opendistro_version.
- block:
    - name: RedHat/CentOS/Fedora | Add OpenDistro repo
      yum_repository:
        file: opendistro
        name: opendistro_repo
        description: Opendistro yum repository
        baseurl: "{{ package_repos.yum.opendistro.baseurl }}"
        gpgkey: "{{ package_repos.yum.opendistro.gpg }}"
        gpgcheck: true

    - name: Install Kibana
      package:
        name: "opendistroforelasticsearch-kibana-{{ kibana_opendistro_version }}"
        state: present
      register: install
  tags:
    - install

View File

@ -1,76 +0,0 @@
---
# Build the Wazuh Kibana plugin from sources (git + nodejs + yarn) and install
# it into the local Kibana instance.
- name: Ensure the Git package is present
  package:
    name: git
    state: present

- name: Modify repo url if host is in Debian family
  set_fact:
    node_js_repo_type: deb
  when:
    - ansible_os_family | lower == "debian"

- name: Download script to install Nodejs repository
  get_url:
    url: "https://{{ nodejs['repo_dict'][ansible_os_family|lower] }}.{{ nodejs['repo_url_ext'] }}"
    dest: "/tmp/setup_nodejs_repo.sh"
    mode: "0700"  # quoted so the octal mode is not parsed as an integer

- name: Execute downloaded script to install Nodejs repo
  command: /tmp/setup_nodejs_repo.sh
  register: node_repo_installation_result
  changed_when: false

- name: Install Nodejs
  package:
    name: nodejs
    state: present

- name: Install yarn dependency to build the Wazuh Kibana Plugin
  # Using shell due to errors when evaluating text between @ with command
  shell: "npm install -g {{ 'yarn' }}{{ '@' }}{{ '1.10.1'}}"  # noqa 305
  register: install_yarn_result
  # Bugfix: compare the return code, not the whole registered result
  # (`install_yarn_result == 0` compared a dict to 0 and was always false).
  changed_when: install_yarn_result.rc == 0

- name: Remove old wazuh-kibana-app git directory
  file:
    path: /tmp/app
    state: absent
  changed_when: false

- name: Clone wazuh-kibana-app repository # Using command as git module doesn't cover single-branch nor depth
  command: git clone https://github.com/wazuh/wazuh-kibana-app -b {{ wazuh_plugin_branch }} --single-branch --depth=1 app # noqa 303
  register: clone_app_repo_result
  changed_when: false
  args:
    chdir: "/tmp"

- name: Executing yarn to build the package
  command: "{{ item }}"
  with_items:
    - "yarn"
    - "yarn build"
  register: yarn_execution_result
  changed_when: false
  args:
    chdir: "/tmp/app/"

- name: Obtain name of generated package
  shell: "find ./ -name 'wazuh-*.zip' -printf '%f\\n'"
  register: wazuhapp_package_name
  changed_when: false
  args:
    chdir: "/tmp/app/build"

# `creates` makes this task idempotent: skipped when the plugin is installed.
- name: Install Wazuh Plugin (can take a while)
  shell: NODE_OPTIONS="{{ node_options }}" /usr/share/kibana/bin/kibana-plugin install file:///tmp/app/build/{{ wazuhapp_package_name.stdout }}
  args:
    executable: /bin/bash
    creates: /usr/share/kibana/plugins/wazuh/package.json
    chdir: /usr/share/kibana
  become: true
  become_user: kibana
  notify: restart kibana
  tags:
    - install
    - skip_ansible_lint

View File

@ -1,124 +0,0 @@
---
# Install and configure OpenDistro Kibana plus the Wazuh Kibana app.
- name: Stopping early, trying to compile Wazuh Kibana Plugin on Debian 10 is not possible
  fail:
    msg: "It's not possible to compile the Wazuh Kibana plugin on Debian 10 due to: https://github.com/wazuh/wazuh-kibana-app/issues/1924"
  when:
    - build_from_sources
    - ansible_distribution == "Debian"
    - ansible_distribution_major_version == "10"

- import_tasks: RedHat.yml
  when: ansible_os_family == 'RedHat'

- import_tasks: Debian.yml
  when: ansible_os_family == 'Debian'

- name: Remove Kibana configuration file
  file:
    # noqa 503
    path: "{{ kibana_conf_path }}/kibana.yml"
    state: absent
  tags: install

- import_tasks: security_actions.yml

- name: Copy Configuration File
  blockinfile:
    block: "{{ lookup('template', 'opendistro_kibana.yml.j2') }}"
    dest: "{{ kibana_conf_path }}/kibana.yml"
    create: true
    group: kibana
    owner: kibana
    mode: "0640"  # quoted so the octal mode is not parsed as an integer
    marker: "## {mark} Kibana general settings ##"
  notify: restart kibana
  tags:
    - install
    - configure

- name: Ensuring Kibana directory owner
  file:
    # noqa 208
    path: "/usr/share/kibana"
    state: directory
    owner: kibana
    group: kibana
    recurse: true

- name: Build and Install Wazuh Kibana Plugin from sources
  import_tasks: build_wazuh_plugin.yml
  when:
    - build_from_sources is defined
    - build_from_sources

# Prebuilt-package path; `creates` keeps the task idempotent.
- name: Install Wazuh Plugin (can take a while)
  shell: >-
    NODE_OPTIONS="{{ node_options }}" /usr/share/kibana/bin/kibana-plugin install
    {{ wazuh_app_url }}-{{ wazuh_version }}_{{ elastic_stack_version }}-1.zip
  args:
    executable: /bin/bash
    creates: /usr/share/kibana/plugins/wazuh/package.json
    chdir: /usr/share/kibana
  become: true
  become_user: kibana
  notify: restart kibana
  tags:
    - install
    - skip_ansible_lint
  when:
    - not build_from_sources

- name: Kibana optimization (can take a while)
  shell: /usr/share/kibana/node/bin/node {{ node_options }} /usr/share/kibana/src/cli/cli.js --optimize -c {{ kibana_conf_path }}/kibana.yml
  args:
    executable: /bin/bash
  become: true
  become_user: kibana
  changed_when: false
  tags:
    - skip_ansible_lint

- name: Wait for Elasticsearch port
  wait_for: host={{ elasticsearch_network_host }} port={{ elasticsearch_http_port }}

- name: Select correct API protocol
  set_fact:
    elastic_api_protocol: "{% if kibana_opendistro_security is defined and kibana_opendistro_security %}https{% else %}http{% endif %}"

- name: Attempting to delete legacy Wazuh index if exists
  uri:
    url: "{{ elastic_api_protocol }}://{{ elasticsearch_network_host }}:{{ elasticsearch_http_port }}/.wazuh"
    method: DELETE
    user: "admin"
    password: "{{ opendistro_admin_password }}"
    validate_certs: false
    status_code: 200, 404

- name: Create wazuh plugin config directory
  file:
    path: /usr/share/kibana/data/wazuh/config/
    state: directory
    recurse: true
    owner: kibana
    group: kibana
    mode: "0751"
  changed_when: false

- name: Configure Wazuh Kibana Plugin
  template:
    src: wazuh.yml.j2
    dest: /usr/share/kibana/data/wazuh/config/wazuh.yml
    owner: kibana
    group: kibana
    mode: "0751"
  changed_when: false

- name: Ensure Kibana started and enabled
  service:
    name: kibana
    enabled: true
    state: started

- import_tasks: RMRedHat.yml
  when: ansible_os_family == 'RedHat'

View File

@ -1,13 +0,0 @@
# Distribute the locally generated TLS material to the Kibana host so it can
# serve HTTPS and verify the Elasticsearch certificate chain.
- block:
    - name: Copy the certificates from local to the Kibana instance
      copy:
        src: "{{ local_certs_path }}/certs/{{ item }}"
        dest: /usr/share/kibana
        mode: "0644"  # quoted so the octal mode is not parsed as an integer
      with_items:
        - "root-ca.pem"
        - "{{ kibana_node_name }}_http.key"
        - "{{ kibana_node_name }}_http.pem"
  tags:
    - security

View File

@ -1,36 +0,0 @@
# {{ ansible_managed }}
# Description:
# Default Kibana configuration for Open Distro.
server.port: {{ kibana_server_port }}
#server.basePath: ""
server.maxPayloadBytes: {{ kibana_max_payload_bytes }}
server.name: {{ kibana_server_name }}
server.host: {{ kibana_server_host }}
{% if kibana_opendistro_security %}
elasticsearch.hosts: "https://{{ elasticsearch_network_host }}:{{ elasticsearch_http_port }}"
# Credentials are quoted so values containing YAML special characters
# (e.g. '#' or ': ') cannot break the rendered kibana.yml.
elasticsearch.username: "{{ opendistro_kibana_user }}"
elasticsearch.password: "{{ opendistro_kibana_password }}"
server.ssl.enabled: true
server.ssl.certificate: "/usr/share/kibana/{{ kibana_node_name }}_http.pem"
server.ssl.key: "/usr/share/kibana/{{ kibana_node_name }}_http.key"
elasticsearch.ssl.certificateAuthorities: ["/usr/share/kibana/root-ca.pem"]
elasticsearch.ssl.verificationMode: full
{% else %}
elasticsearch.hosts: "http://{{ elasticsearch_network_host }}:{{ elasticsearch_http_port }}"
{% endif %}
elasticsearch.requestHeadersWhitelist: ["securitytenant","Authorization"]
opendistro_security.multitenancy.enabled: true
opendistro_security.multitenancy.tenants.preferred: ["Private", "Global"]
opendistro_security.readonly_mode.roles: ["kibana_read_only"]
newsfeed.enabled: {{ kibana_newsfeed_enabled }}
telemetry.optIn: {{ kibana_telemetry_optin }}
telemetry.enabled: {{ kibana_telemetry_enabled }}
server.defaultRoute: /app/wazuh?security_tenant=global

View File

@ -1,3 +0,0 @@
---
# Debian-family override of the OpenDistro Kibana package version: no release
# suffix here; apt pinning uses '=' between name and version.
kibana_opendistro_version: 1.13.2

View File

@ -19,7 +19,7 @@ Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`): Available variables are listed below, along with default values (see `defaults/main.yml`):
``` ```
filebeat_output_elasticsearch_hosts: filebeat_output_indexer_hosts:
- "localhost:9200" - "localhost:9200"
``` ```

View File

@ -3,7 +3,7 @@ filebeat_version: 7.10.2
wazuh_template_branch: v4.3.0 wazuh_template_branch: v4.3.0
filebeat_output_elasticsearch_hosts: filebeat_output_indexer_hosts:
- "localhost:9200" - "localhost:9200"
filebeat_module_package_url: https://packages.wazuh.com/4.x/filebeat filebeat_module_package_url: https://packages.wazuh.com/4.x/filebeat
@ -11,17 +11,17 @@ filebeat_module_package_name: wazuh-filebeat-0.1.tar.gz
filebeat_module_package_path: /tmp/ filebeat_module_package_path: /tmp/
filebeat_module_destination: /usr/share/filebeat/module filebeat_module_destination: /usr/share/filebeat/module
filebeat_module_folder: /usr/share/filebeat/module/wazuh filebeat_module_folder: /usr/share/filebeat/module/wazuh
elasticsearch_security_user: admin indexer_security_user: admin
elasticsearch_security_password: changeme indexer_security_password: changeme
# Security plugin # Security plugin
filebeat_security: true filebeat_security: true
filebeat_ssl_dir: /etc/pki/filebeat filebeat_ssl_dir: /etc/pki/filebeat
# Local path to store the generated certificates (OpenDistro security plugin) # Local path to store the generated certificates (Opensearch security plugin)
local_certs_path: ./opendistro/certificates local_certs_path: ./indexer/certificates
elasticrepo: filebeatrepo:
apt: 'https://artifacts.elastic.co/packages/oss-7.x/apt' apt: 'deb https://packages.wazuh.com/4.x/apt/ stable main'
yum: 'https://artifacts.elastic.co/packages/oss-7.x/yum' yum: 'https://packages.wazuh.com/4.x/yum/'
gpg: 'https://artifacts.elastic.co/GPG-KEY-elasticsearch' gpg: 'https://packages.wazuh.com/key/GPG-KEY-WAZUH'
key_id: '46095ACC8548582C1A2699A9D27D666CD88E42B4' key_id: '0DCFCA5547B19D2A6099506096B3EE5F29111145'

View File

@ -11,13 +11,13 @@
- name: Debian/Ubuntu | Add Elasticsearch apt key. - name: Debian/Ubuntu | Add Elasticsearch apt key.
apt_key: apt_key:
url: "{{ elasticrepo.gpg }}" url: "{{ filebeatrepo.gpg }}"
id: "{{ elasticrepo.key_id }}" id: "{{ filebeatrepo.key_id }}"
state: present state: present
- name: Debian/Ubuntu | Add Filebeat-oss repository. - name: Debian/Ubuntu | Add Filebeat-oss repository.
apt_repository: apt_repository:
repo: "deb {{ elasticrepo.apt }} stable main" repo: "{{ filebeatrepo.apt }}"
state: present state: present
update_cache: true update_cache: true
changed_when: false changed_when: false

View File

@ -1,6 +1,6 @@
--- ---
- name: Debian/Ubuntu | Remove Filebeat repository (and clean up left-over metadata) - name: Debian/Ubuntu | Remove Filebeat repository (and clean up left-over metadata)
apt_repository: apt_repository:
repo: "deb {{ elasticrepo.apt }} stable main" repo: "{{ filebeatrepo.apt }}"
state: absent state: absent
changed_when: false changed_when: false

View File

@ -1,6 +1,6 @@
--- ---
- name: RedHat/CentOS/Fedora | Remove Filebeat repository (and clean up left-over metadata) - name: RedHat/CentOS/Fedora | Remove Filebeat repository (and clean up left-over metadata)
yum_repository: yum_repository:
name: elastic_oss-repo_7 name: wazuh_repo
state: absent state: absent
changed_when: false changed_when: false

View File

@ -1,9 +1,9 @@
--- ---
- name: RedHat/CentOS/Fedora/Amazon Linux | Install Filebeats repo - name: RedHat/CentOS/Fedora/Amazon Linux | Install Filebeats repo
yum_repository: yum_repository:
name: elastic_oss-repo_7 name: wazuh_repo
description: Elastic repository for 7.x packages description: Wazuh Repo
baseurl: "{{ elasticrepo.yum }}" baseurl: "{{ filebeatrepo.yum }}"
gpgkey: "{{ elasticrepo.gpg }}" gpgkey: "{{ filebeatrepo.gpg }}"
gpgcheck: true gpgcheck: true
changed_when: false changed_when: false

View File

@ -16,7 +16,7 @@
group: root group: root
mode: 0644 mode: 0644
with_items: with_items:
- "{{ filebeat_node_name }}.key" - "{{ filebeat_node_name }}-key.pem"
- "{{ filebeat_node_name }}.pem" - "{{ filebeat_node_name }}.pem"
- "root-ca.pem" - "root-ca.pem"

View File

@ -1,5 +1,3 @@
# Wazuh - Filebeat configuration file
# Wazuh - Filebeat configuration file # Wazuh - Filebeat configuration file
filebeat.modules: filebeat.modules:
- module: wazuh - module: wazuh
@ -14,19 +12,22 @@ setup.template.json.name: 'wazuh'
setup.template.overwrite: true setup.template.overwrite: true
setup.ilm.enabled: false setup.ilm.enabled: false
# Send events directly to Elasticsearch # Send events directly to Wazuh indexer
output.elasticsearch: output.elasticsearch:
hosts: {{ filebeat_output_elasticsearch_hosts | to_json }} hosts:
{% for item in filebeat_output_indexer_hosts %}
- {{ item }}:9200
{% endfor %}
{% if filebeat_security %} {% if filebeat_security %}
username: {{ elasticsearch_security_user }} username: {{ indexer_security_user }}
password: {{ elasticsearch_security_password }} password: {{ indexer_security_password }}
protocol: https protocol: https
ssl.certificate_authorities: ssl.certificate_authorities:
- {{ filebeat_ssl_dir }}/root-ca.pem - {{ filebeat_ssl_dir }}/root-ca.pem
ssl.certificate: "{{ filebeat_ssl_dir }}/{{ filebeat_node_name }}.pem" ssl.certificate: "{{ filebeat_ssl_dir }}/{{ filebeat_node_name }}.pem"
ssl.key: "{{ filebeat_ssl_dir }}/{{ filebeat_node_name }}.key" ssl.key: "{{ filebeat_ssl_dir }}/{{ filebeat_node_name }}-key.pem"
{% endif %} {% endif %}
# Optional. Send events to Logstash instead of Elasticsearch # Optional. Send events to Logstash instead of Wazuh indexer
#output.logstash.hosts: ["YOUR_LOGSTASH_SERVER_IP:5000"] #output.logstash.hosts: ["YOUR_LOGSTASH_SERVER_IP:5000"]

View File

@ -3,13 +3,17 @@ import sys
import json import json
import random import random
import string import string
import argparse
import os import os
# Set framework path # Set framework path
sys.path.append("/var/ossec/framework") sys.path.append(os.path.dirname(sys.argv[0]) + "/../framework")
USER_FILE_PATH = "/var/ossec/api/configuration/admin.json"
SPECIAL_CHARS = "@$!%*?&-_"
try: try:
from wazuh.rbac.orm import create_rbac_db
from wazuh.security import ( from wazuh.security import (
create_user, create_user,
get_users, get_users,
@ -22,6 +26,12 @@ except Exception as e:
sys.exit(1) sys.exit(1)
def read_user_file(path=USER_FILE_PATH):
with open(path) as user_file:
data = json.load(user_file)
return data["username"], data["password"]
def db_users(): def db_users():
users_result = get_users() users_result = get_users()
return {user["username"]: user["id"] for user in users_result.affected_items} return {user["username"]: user["id"] for user in users_result.affected_items}
@ -31,15 +41,35 @@ def db_roles():
roles_result = get_roles() roles_result = get_roles()
return {role["name"]: role["id"] for role in roles_result.affected_items} return {role["name"]: role["id"] for role in roles_result.affected_items}
def disable_user(uid):
random_pass = "".join(
random.choices(
string.ascii_uppercase
+ string.ascii_lowercase
+ string.digits
+ SPECIAL_CHARS,
k=8,
)
)
# assure there must be at least one character from each group
random_pass = random_pass + ''.join([random.choice(chars) for chars in [string.ascii_lowercase, string.digits, string.ascii_uppercase, SPECIAL_CHARS]])
random_pass = ''.join(random.sample(random_pass,len(random_pass)))
update_user(
user_id=[
str(uid),
],
password=random_pass,
)
if __name__ == "__main__": if __name__ == "__main__":
parser = argparse.ArgumentParser(description='add_user script') if not os.path.exists(USER_FILE_PATH):
parser.add_argument('--username', action="store", dest="username") # abort if no user file detected
parser.add_argument('--password', action="store", dest="password") sys.exit(0)
results = parser.parse_args() username, password = read_user_file()
username = results.username # create RBAC database
password = results.password create_rbac_db()
initial_users = db_users() initial_users = db_users()
if username not in initial_users: if username not in initial_users:
@ -66,28 +96,7 @@ if __name__ == "__main__":
], ],
password=password, password=password,
) )
# set a random password for all other users # disable unused default users
for name, id in initial_users.items(): #for def_user in ['wazuh', 'wazuh-wui']:
if name != username: # if def_user != username:
specials = "@$!%*?&-_" # disable_user(initial_users[def_user])
random_pass = "".join(
[
random.choice(string.ascii_uppercase),
random.choice(string.ascii_lowercase),
random.choice(string.digits),
random.choice(specials),
] +
random.choices(
string.ascii_uppercase
+ string.ascii_lowercase
+ string.digits
+ specials,
k=14,
)
)
update_user(
user_id=[
str(id),
],
password=random_pass,
)

View File

@ -260,6 +260,15 @@
group: wazuh group: wazuh
mode: 0644 mode: 0644
- name: Create admin.json
template:
src: templates/admin.json.j2
dest: "{{ wazuh_dir }}/api/configuration/admin.json"
owner: wazuh
group: wazuh
mode: 0644
no_log: true
- name: Execute create_user script - name: Execute create_user script
script: script:
chdir: "{{ wazuh_dir }}/framework/scripts/" chdir: "{{ wazuh_dir }}/framework/scripts/"

View File

@ -0,0 +1,4 @@
{% for api in wazuh_api_users %}
{"username":"{{ api['username'] }}", "password": "{{ api['password'] }}"}
{% endfor %}

View File

@ -0,0 +1,41 @@
---
# Default variables for the Wazuh dashboard role.
# Dashboard configuration
indexer_http_port: 9200
indexer_api_protocol: https
dashboard_conf_path: /etc/wazuh-dashboard/
dashboard_node_name: node-1
dashboard_server_host: "0.0.0.0"
dashboard_server_port: "5601"
dashboard_server_name: "dashboard"
wazuh_version: 4.3.0
indexer_cluster_nodes:
  - 127.0.0.1
# The Wazuh dashboard package repository
dashboard_version: "4.3.0"
package_repos:
  yum:
    dashboard:
      baseurl: 'https://packages.wazuh.com/4.x/yum/'
      gpg: 'https://packages.wazuh.com/key/GPG-KEY-WAZUH'
  apt:
    dashboard:
      baseurl: 'deb https://packages.wazuh.com/4.x/apt/ stable main'
      gpg: 'https://packages.wazuh.com/key/GPG-KEY-WAZUH'
# API credentials
wazuh_api_credentials:
  - id: "default"
    url: "https://localhost"
    port: 55000
    username: "wazuh"
    password: "wazuh"
# Dashboard Security
dashboard_security: true
# Default credentials; override in production inventories.
indexer_admin_password: changeme
dashboard_user: kibanaserver
dashboard_password: changeme
# Where the certificates generated on the controller are stored.
local_certs_path: "{{ playbook_dir }}/indexer/certificates"

View File

@ -0,0 +1,3 @@
---
# Handler: restart the Wazuh dashboard service when notified by configuration tasks.
- name: restart wazuh-dashboard
  service:
    name: wazuh-dashboard
    state: restarted

View File

@ -0,0 +1,24 @@
---
# Debian family: register the Wazuh apt repository and install the
# wazuh-dashboard package pinned to dashboard_version.
- block:
    - include_vars: debian.yml

    - name: Add apt repository signing key
      apt_key:
        url: "{{ package_repos.apt.dashboard.gpg }}"
        state: present

    - name: Debian systems | Add Wazuh dashboard repo
      apt_repository:
        repo: "{{ package_repos.apt.dashboard.baseurl }}"
        state: present
        update_cache: yes

    - name: Install Wazuh dashboard
      apt:
        name: "wazuh-dashboard={{ dashboard_version }}-1"
        state: present
        update_cache: yes
      register: install
  tags:
    - install

View File

@ -0,0 +1,6 @@
---
# Clean-up: remove the Wazuh yum repository added during installation.
- name: Remove Wazuh dashboard repository (and clean up left-over metadata)
  yum_repository:
    name: wazuh_repo
    state: absent
  changed_when: false

View File

@ -0,0 +1,21 @@
---
# RedHat family: register the Wazuh yum repository and install the
# wazuh-dashboard package pinned to dashboard_version.
- block:
    - name: RedHat/CentOS/Fedora | Add Wazuh dashboard repo
      yum_repository:
        file: wazuh
        name: wazuh_repo
        description: Wazuh yum repository
        baseurl: "{{ package_repos.yum.dashboard.baseurl }}"
        gpgkey: "{{ package_repos.yum.dashboard.gpg }}"
        gpgcheck: true

    - name: Install Wazuh dashboard
      package:
        name: "wazuh-dashboard-{{ dashboard_version }}"
        state: present
        update_cache: yes
      register: install
  tags:
    - install

View File

@ -0,0 +1,89 @@
---
# Install and configure the Wazuh dashboard (OpenSearch Dashboards based).
- import_tasks: RedHat.yml
  when: ansible_os_family == 'RedHat'

- import_tasks: Debian.yml
  when: ansible_os_family == 'Debian'

- name: Remove Dashboard configuration file
  file:
    # noqa 503
    path: "{{ dashboard_conf_path }}/opensearch_dashboards.yml"
    state: absent
  tags: install

- import_tasks: security_actions.yml

- name: Copy Configuration File
  template:
    src: "templates/opensearch_dashboards.yml.j2"
    dest: "{{ dashboard_conf_path }}/opensearch_dashboards.yml"
    group: wazuh-dashboard
    owner: wazuh-dashboard
    mode: "0640"  # quoted so the octal mode is not parsed as an integer
    force: true
  notify: restart wazuh-dashboard
  tags:
    - install
    - configure

- name: Ensuring Wazuh dashboard directory owner
  file:
    # noqa 208
    path: "/usr/share/wazuh-dashboard"
    state: directory
    owner: wazuh-dashboard
    group: wazuh-dashboard
    recurse: true

- name: Wait for Wazuh-Indexer port
  wait_for: host={{ indexer_network_host }} port={{ indexer_http_port }}

- name: Select correct API protocol
  set_fact:
    indexer_api_protocol: "{% if dashboard_security is defined and dashboard_security %}https{% else %}http{% endif %}"

- name: Attempting to delete legacy Wazuh index if exists
  uri:
    url: "{{ indexer_api_protocol }}://{{ indexer_network_host }}:{{ indexer_http_port }}/.wazuh"
    method: DELETE
    user: "admin"
    password: "{{ indexer_admin_password }}"
    validate_certs: false
    status_code: 200, 404

- name: Create Wazuh Plugin config directory
  file:
    path: /usr/share/wazuh-dashboard/data/wazuh/config/
    state: directory
    recurse: true
    owner: wazuh-dashboard
    group: wazuh-dashboard
    mode: "0751"
  changed_when: false

- name: Configure Wazuh Dashboard Plugin
  template:
    src: wazuh.yml.j2
    dest: /usr/share/wazuh-dashboard/data/wazuh/config/wazuh.yml
    owner: wazuh-dashboard
    group: wazuh-dashboard
    mode: "0751"
  changed_when: false

# NOTE(review): the password is echoed through a shell pipeline, so it can be
# briefly visible in the process list; no_log at least keeps it out of the
# Ansible output and logs.
- name: Configure opensearch.password in opensearch_dashboards.keystore
  shell: >-
    echo {{ dashboard_password }} | /usr/share/wazuh-dashboard/bin/opensearch-dashboards-keystore --allow-root add -f --stdin opensearch.password
  args:
    executable: /bin/bash
  become: true
  no_log: true

- name: Ensure Wazuh dashboard started and enabled
  service:
    name: wazuh-dashboard
    enabled: true
    state: started

- import_tasks: RMRedHat.yml
  when: ansible_os_family == 'RedHat'

View File

@ -0,0 +1,13 @@
---
# Deploy the TLS material generated on the controller to the dashboard node.
- block:
    - name: Copy the certificates from local to the Wazuh dashboard instance
      copy:
        src: "{{ local_certs_path }}/certs/{{ item }}"
        dest: /etc/wazuh-dashboard/certs/
        # NOTE(review): 0644 leaves the node's private key world-readable;
        # consider tightening once ownership for the service user is set.
        mode: 0644
      with_items:
        - "root-ca.pem"
        - "{{ dashboard_node_name }}-key.pem"
        - "{{ dashboard_node_name }}.pem"
  tags:
    - security

View File

@ -0,0 +1,16 @@
# Wazuh dashboard (OpenSearch Dashboards) configuration — rendered by Ansible.
server.host: {{ dashboard_server_host }}
server.port: {{ dashboard_server_port }}
# One HTTPS endpoint per Wazuh indexer cluster node.
opensearch.hosts:
{% for item in indexer_cluster_nodes %}
- https://{{ item }}:{{ indexer_http_port }}
{% endfor %}
opensearch.ssl.verificationMode: certificate
opensearch.requestHeadersWhitelist: ["securitytenant","Authorization"]
opensearch_security.multitenancy.enabled: true
opensearch_security.readonly_mode.roles: ["kibana_read_only"]
# TLS for the dashboard's own HTTP endpoint, using the node certificates
# copied by security_actions.yml.
server.ssl.enabled: true
server.ssl.key: "/etc/wazuh-dashboard/certs/{{ dashboard_node_name }}-key.pem"
server.ssl.certificate: "/etc/wazuh-dashboard/certs/{{ dashboard_node_name }}.pem"
opensearch.ssl.certificateAuthorities: ["/etc/wazuh-dashboard/certs/root-ca.pem"]
logging.dest: "/var/log/wazuh-dashboard/wazuh-dashboard.log"
# Land users on the Wazuh app (global tenant) by default.
uiSettings.overrides.defaultRoute: /app/wazuh?security_tenant=global

View File

@ -16,7 +16,7 @@
# https://documentation.wazuh.com/current/installation-guide/index.html # https://documentation.wazuh.com/current/installation-guide/index.html
# #
# Also, you can check our repository: # Also, you can check our repository:
# https://github.com/wazuh/wazuh-kibana-app # https://github.com/wazuh/wazuh-dashboard
# #
# ------------------------------- Index patterns ------------------------------- # ------------------------------- Index patterns -------------------------------
# #

View File

@ -0,0 +1,3 @@
---
# Wazuh dashboard package version installed by this role (apt pins
# "{{ dashboard_version }}-1", yum installs "wazuh-dashboard-{{ dashboard_version }}").
dashboard_version: 4.3.0

View File

@ -0,0 +1,68 @@
---
# Cluster Settings
indexer_version: 4.3.0
# When true, the node runs with discovery.type: single-node (see opensearch.yml.j2).
single_node: false
indexer_node_name: node-1
indexer_cluster_name: wazuh
indexer_network_host: '0.0.0.0'
indexer_node_master: true
indexer_node_data: true
indexer_node_ingest: true
# Seconds systemd waits for the service on boot (TimeoutStartSec).
indexer_start_timeout: 90
indexer_cluster_nodes:
  - 127.0.0.1
indexer_discovery_nodes:
  - 127.0.0.1
# Controller-side directory where generated certificates are stored.
local_certs_path: "{{ playbook_dir }}/indexer/certificates"
# Minimum master nodes in cluster, 2 for 3 nodes Wazuh indexer cluster
minimum_master_nodes: 2
# Configure hostnames for Wazuh indexer nodes
# Example es1.example.com, es2.example.com
domain_name: wazuh.com
# The Wazuh indexer package repository
package_repos:
  yum:
    indexer:
      baseurl: 'https://packages.wazuh.com/4.x/yum/'
      gpg: 'https://packages.wazuh.com/key/GPG-KEY-WAZUH'
  apt:
    indexer:
      baseurl: 'deb https://packages.wazuh.com/4.x/apt/ stable main'
      gpg: 'https://packages.wazuh.com/key/GPG-KEY-WAZUH'
    openjdk:
      baseurl: 'deb http://deb.debian.org/debian stretch-backports main'
indexer_sec_plugin_conf_path: /usr/share/wazuh-indexer/plugins/opensearch-security/securityconfig
indexer_sec_plugin_tools_path: /usr/share/wazuh-indexer/plugins/opensearch-security/tools
# NOTE(review): trailing slashes here produce double slashes when joined as
# "{{ indexer_conf_path }}/opensearch.yml" — harmless on Linux, but sloppy.
indexer_conf_path: /etc/wazuh-indexer/
indexer_index_path: /var/lib/wazuh-indexer/
# Security password
# Optional extra internal user created via the security API (empty = skip).
indexer_custom_user: ""
indexer_custom_user_role: "admin"
# Set JVM memory limits
# null keeps the template's default heap sizing (see jvm.options.j2).
indexer_jvm_xms: null
indexer_http_port: 9200
# NOTE(review): unquoted 4.3 parses as a float; fine when interpolated into
# the URL below, but quoting ("4.3") would be safer against reformatting.
certs_gen_tool_version: 4.3
# Url of certificates generator tool
certs_gen_tool_url: "https://packages.wazuh.com/resources/{{ certs_gen_tool_version }}/install_functions/opendistro/wazuh-cert-tool.sh"
# NOTE(review): "changeme" defaults are placeholders — override them.
indexer_admin_password: changeme
dashboard_password: changeme
# Deployment settings
generate_certs: true
perform_installation: true
# When true, tasks that handle passwords run with no_log enabled.
indexer_nolog_sensible: true

View File

@ -0,0 +1,5 @@
---
# Handler: restart the Wazuh indexer service after configuration changes.
- name: restart wazuh-indexer
  service:
    name: wazuh-indexer
    state: restarted

View File

@ -1,7 +1,7 @@
--- ---
galaxy_info: galaxy_info:
author: Wazuh author: Wazuh
description: Installing and maintaining Opendistro server. description: Installing and maintaining Wazuh indexer.
company: wazuh.com company: wazuh.com
license: license (GPLv3) license: license (GPLv3)
min_ansible_version: 2.0 min_ansible_version: 2.0

View File

@ -9,7 +9,7 @@
when: (ansible_facts['distribution'] == "Debian" and ansible_facts['distribution_major_version'] == "9") when: (ansible_facts['distribution'] == "Debian" and ansible_facts['distribution_major_version'] == "9")
block: block:
- name: Install OpenDistro dependencies - name: Install Wazuh indexer dependencies
apt: apt:
name: [ name: [
'unzip', 'wget', 'curl', 'apt-transport-https', software-properties-common 'unzip', 'wget', 'curl', 'apt-transport-https', software-properties-common
@ -35,23 +35,23 @@
environment: environment:
JAVA_HOME: /usr JAVA_HOME: /usr
- name: Add Opendistro repository - name: Add Wazuh indexer repository
block: block:
- name: Add apt repository signing key - name: Add apt repository signing key
apt_key: apt_key:
url: "{{ package_repos.apt.opendistro.gpg }}" url: "{{ package_repos.apt.indexer.gpg }}"
state: present state: present
- name: Add Opendistro repository - name: Add Wazuh indexer repository
apt_repository: apt_repository:
repo: "{{ package_repos.apt.opendistro.baseurl }}" repo: "{{ package_repos.apt.indexer.baseurl }}"
state: present state: present
filename: 'wazuh-opendistro' filename: 'wazuh-indexer'
update_cache: yes update_cache: yes
- name: Install OpenDistro - name: Install Wazuh indexer
apt: apt:
name: opendistroforelasticsearch={{ opendistro_version }}-1 name: wazuh-indexer={{ indexer_version }}-1
state: present state: present
register: install register: install
tags: install tags: install

View File

@ -1,6 +1,6 @@
--- ---
- name: RedHat/CentOS/Fedora | Remove Elasticsearch repository (and clean up left-over metadata) - name: RedHat/CentOS/Fedora | Remove Wazuh indexer repository (and clean up left-over metadata)
yum_repository: yum_repository:
name: opendistro_repo name: wazuh_repo
state: absent state: absent
changed_when: false changed_when: false

View File

@ -1,13 +1,13 @@
--- ---
- block: - block:
- name: RedHat/CentOS/Fedora | Add OpenDistro repo - name: RedHat/CentOS/Fedora | Add Wazuh indexer repo
yum_repository: yum_repository:
file: opendistro file: wazuh
name: opendistro_repo name: wazuh_repo
description: Opendistro yum repository description: Wazuh yum repository
baseurl: "{{ package_repos.yum.opendistro.baseurl }}" baseurl: "{{ package_repos.yum.indexer.baseurl }}"
gpgkey: "{{ package_repos.yum.opendistro.gpg }}" gpgkey: "{{ package_repos.yum.indexer.gpg }}"
gpgcheck: true gpgcheck: true
changed_when: false changed_when: false
@ -28,10 +28,22 @@
- name: Install OpenJDK 11 - name: Install OpenJDK 11
shell: amazon-linux-extras install java-openjdk11 -y shell: amazon-linux-extras install java-openjdk11 -y
- name: Configure vm.max_map_count
lineinfile:
line: "vm.max_map_count=262144"
dest: "/etc/sysctl.conf"
insertafter: EOF
create: true
become: yes
- name: Update vm.max_map_count
shell: sysctl -p
become: yes
when: when:
- ansible_distribution == 'Amazon' - ansible_distribution == 'Amazon'
- name: RedHat/CentOS/Fedora | Install OpenDistro dependencies - name: RedHat/CentOS/Fedora | Install Indexer dependencies
yum: yum:
name: "{{ packages }}" name: "{{ packages }}"
vars: vars:
@ -39,9 +51,9 @@
- wget - wget
- unzip - unzip
- name: Install OpenDistro - name: Install Wazuh indexer
package: package:
name: opendistroforelasticsearch-{{ opendistro_version }} name: wazuh-indexer-{{ indexer_version }}
state: present state: present
register: install register: install
tags: install tags: install

View File

@ -0,0 +1,63 @@
---
# Controller-side (localhost) actions: generate the cluster certificates once
# and fix their ownership, skipping everything if the folder already exists.
- name: Check if certificates already exists
  stat:
    path: "{{ local_certs_path }}"
  register: certificates_folder
  delegate_to: localhost
  become: no
  tags:
    - generate-certs
- block:
    - name: Local action | Create local temporary directory for certificates generation
      file:
        path: "{{ local_certs_path }}"
        mode: 0755
        state: directory
    - name: Local action | Check that the generation tool exists
      stat:
        path: "{{ local_certs_path }}/wazuh-cert-tool.sh"
      register: tool_package
    # NOTE(review): the tool is downloaded without a checksum and later
    # executed with become — consider pinning a checksum on get_url.
    - name: Local action | Download certificates generation tool
      get_url:
        url: "{{ certs_gen_tool_url }}"
        dest: "{{ local_certs_path }}/wazuh-cert-tool.sh"
      when: not tool_package.stat.exists
    - name: Local action | Prepare the certificates generation template file
      template:
        src: "templates/config.yml.j2"
        dest: "{{ local_certs_path }}/config.yml"
        mode: 0644
      register: tlsconfig_template
    - name: Local action | Generate the node & admin certificates in local
      command: >-
        bash {{ local_certs_path }}/wazuh-cert-tool.sh
      become: yes
    - name: Get Certificate files
      find:
        paths: "{{ local_certs_path }}/certs"
        patterns: "*"
      register: certificate_files
    # The generator ran as root; hand the files back to the invoking user.
    - name: Change Certificates Ownership
      file:
        path: "{{ item.path }}"
        owner: "{{ ansible_effective_user_id }}"
        group: "{{ ansible_effective_user_id }}"
      become: yes
      with_items: "{{ certificate_files.files }}"
  run_once: true
  delegate_to: localhost
  become: no
  tags:
    - generate-certs
  when:
    - not certificates_folder.stat.exists

View File

@ -0,0 +1,130 @@
---
# Entry point for the Wazuh indexer role: optional controller-side certificate
# generation, per-OS install, configuration, security bootstrap and start-up.
- import_tasks: local_actions.yml
  when:
    - generate_certs
- block:
    - import_tasks: RedHat.yml
      when: ansible_os_family == 'RedHat'
    - import_tasks: Debian.yml
      when: ansible_os_family == 'Debian'
    # The plugin is optional; a "not found" error from opensearch-plugin is
    # treated as success so the task stays idempotent.
    - name: Remove performance analyzer plugin from Wazuh indexer
      become: true
      command: ./opensearch-plugin remove opensearch-performance-analyzer
      ignore_errors: true
      args:
        chdir: /usr/share/wazuh-indexer/bin/
      register: remove_opensearch_performance_analyzer
      failed_when:
        - remove_opensearch_performance_analyzer.rc != 0
        - '"not found" not in remove_opensearch_performance_analyzer.stderr'
      changed_when: "remove_opensearch_performance_analyzer.rc == 0"
    # Replace the packaged configuration with our template.
    - name: Remove Opensearch configuration file
      file:
        path: "{{ indexer_conf_path }}/opensearch.yml"
        state: absent
      tags: install
    - name: Copy Opensearch Configuration File
      template:
        src: "templates/opensearch.yml.j2"
        dest: "{{ indexer_conf_path }}/opensearch.yml"
        owner: root
        group: wazuh-indexer
        mode: 0640
        force: yes
      tags: install
    - include_tasks: security_actions.yml
      tags:
        - security
    # Apply JVM heap sizing (see jvm.options.j2); changes trigger a restart.
    - name: Configure Wazuh indexer JVM memory.
      template:
        src: "templates/jvm.options.j2"
        dest: "{{ indexer_conf_path }}/jvm.options"
        owner: root
        group: wazuh-indexer
        mode: 0644
        force: yes
      notify: restart wazuh-indexer
      tags: install
    - name: Ensure extra time for Wazuh indexer to start on reboots
      lineinfile:
        path: /usr/lib/systemd/system/wazuh-indexer.service
        regexp: '^TimeoutStartSec='
        line: "TimeoutStartSec={{ indexer_start_timeout }}"
      become: yes
      tags: configure
    # Start from an empty data directory so stale index data from a previous
    # deployment cannot clash with the fresh security configuration.
    - name: Index files to remove
      find:
        paths: "{{ indexer_index_path }}"
        patterns: "*"
      register: files_to_delete
    - name: Remove Index Files
      file:
        path: "{{ item.path }}"
        state: absent
      with_items: "{{ files_to_delete.files }}"
    - name: Ensure Wazuh indexer started and enabled
      service:
        name: wazuh-indexer
        enabled: true
        state: started
    # Poll cluster health (up to ~2 minutes). 401 is accepted while the
    # security index is still initializing.
    - name: Wait for Wazuh indexer API
      uri:
        url: "https://{{ inventory_hostname if not single_node else indexer_network_host }}:{{ indexer_http_port }}/_cat/health/"
        user: "admin"  # Default Indexer user is always "admin"
        password: "{{ indexer_admin_password }}"
        validate_certs: no
        status_code: 200,401
        return_content: yes
        timeout: 4
      register: _result
      until:
        - _result is defined
        - '"green" in _result.content or ( "yellow" in _result.content and single_node )'
      retries: 24
      delay: 5
      tags: debug
      when:
        - hostvars[inventory_hostname]['private_ip'] is not defined or not hostvars[inventory_hostname]['private_ip']
    - name: Wait for Wazuh indexer API (Private IP)
      uri:
        url: "https://{{ hostvars[inventory_hostname]['private_ip'] if not single_node else indexer_network_host }}:{{ indexer_http_port }}/_cat/health/"
        user: "admin"  # Default Indexer user is always "admin"
        password: "{{ indexer_admin_password }}"
        validate_certs: no
        status_code: 200,401
        return_content: yes
        timeout: 4
      register: _result
      until:
        - _result is defined
        - '"green" in _result.content or ( "yellow" in _result.content and single_node )'
      retries: 24
      delay: 5
      tags: debug
      when:
        - hostvars[inventory_hostname]['private_ip'] is defined and hostvars[inventory_hostname]['private_ip']
    - import_tasks: "RMRedHat.yml"
      when: ansible_os_family == "RedHat"
    - name: Reload systemd configuration
      systemd:
        daemon_reload: true
      become: yes
      notify: restart wazuh-indexer
  when: perform_installation

View File

@ -0,0 +1,116 @@
---
# Security bootstrap for the Wazuh indexer: install certificates, set the
# internal user password hashes, and initialize the security index.
- name: Remove demo certs
  file:
    path: "{{ item }}"
    state: absent
  with_items:
    - "{{ indexer_conf_path }}/demo-indexer-key.pem"
    - "{{ indexer_conf_path }}/demo-indexer.pem"
# Pick the address securityadmin.sh and the security API calls should target.
- name: Configure IP (Private address)
  set_fact:
    target_address: "{{ hostvars[inventory_hostname]['private_ip'] if not single_node else indexer_network_host }}"
  when:
    - hostvars[inventory_hostname]['private_ip'] is defined
- name: Configure IP (Public address)
  set_fact:
    target_address: "{{ inventory_hostname if not single_node else indexer_network_host }}"
  when:
    - hostvars[inventory_hostname]['private_ip'] is not defined
# NOTE(review): 0644 leaves private keys (root-ca.key, node key, admin key)
# world-readable on the node — consider tightening modes per file.
- name: Copy the node & admin certificates to Wazuh indexer cluster
  copy:
    src: "{{ local_certs_path }}/certs/{{ item }}"
    dest: "{{ indexer_conf_path }}/certs/"
    mode: 0644
  become: yes
  with_items:
    - root-ca.pem
    - root-ca.key
    - "{{ indexer_node_name }}-key.pem"
    - "{{ indexer_node_name }}.pem"
    - admin-key.pem
    - admin.pem
- name: Restart Wazuh indexer with security configuration
  systemd:
    name: wazuh-indexer
    state: restarted
- name: Copy the Opensearch security internal users template
  template:
    src: "templates/internal_users.yml.j2"
    dest: "{{ indexer_sec_plugin_conf_path }}/internal_users.yml"
    mode: 0644
  run_once: true
- name: Hashing the custom admin password
  command: "{{ indexer_sec_plugin_tools_path }}/hash.sh -p {{ indexer_admin_password }}"  # noqa 301
  register: indexer_admin_password_hashed
  no_log: '{{ indexer_nolog_sensible | bool }}'
  run_once: true
- name: Set the Admin user password
  replace:
    path: "{{ indexer_sec_plugin_conf_path }}/internal_users.yml"
    # NOTE(review): the lookbehind must match the exact indentation of the
    # "hash:" line rendered by internal_users.yml.j2 — verify the spacing.
    regexp: '(?<=admin:\n hash: )(.*)(?=)'
    replace: "{{ indexer_password_hash | quote }}"
  vars:
    indexer_password_hash: "{{ indexer_admin_password_hashed.stdout_lines | last }}"
  run_once: true
# this can also be achieved with password_hash, but it requires dependencies on the controller
- name: Hash the kibanaserver role/user password
  command: "{{ indexer_sec_plugin_tools_path }}/hash.sh -p {{ dashboard_password }}"  # noqa 301
  register: indexer_kibanaserver_password_hashed
  no_log: '{{ indexer_nolog_sensible | bool }}'
  run_once: true
- name: Set the kibanaserver user password
  replace:
    path: "{{ indexer_sec_plugin_conf_path }}/internal_users.yml"
    regexp: '(?<=kibanaserver:\n hash: )(.*)(?=)'
    replace: "{{ indexer_password_hash | quote }}"
  vars:
    indexer_password_hash: "{{ indexer_kibanaserver_password_hashed.stdout_lines | last }}"
  run_once: true
# Push internal_users.yml (and the rest of the security config) into the
# cluster with the admin client certificate.
# NOTE(review): "-cd {{ indexer_sec_plugin_conf_path }}/" is passed twice —
# looks redundant; confirm against securityadmin.sh usage before removing.
- name: Initialize the Opensearch security index in Wazuh indexer
  command: >
    sudo -u wazuh-indexer OPENSEARCH_PATH_CONF={{ indexer_conf_path }}
    JAVA_HOME=/usr/share/wazuh-indexer/jdk
    {{ indexer_sec_plugin_tools_path }}/securityadmin.sh
    -cd {{ indexer_sec_plugin_conf_path }}/
    -icl -p 9300 -cd {{ indexer_sec_plugin_conf_path }}/
    -nhnv
    -cacert {{ indexer_conf_path }}/certs/root-ca.pem
    -cert {{ indexer_conf_path }}/certs/admin.pem
    -key {{ indexer_conf_path }}/certs/admin-key.pem
    -h {{ target_address }}
  retries: 2
  delay: 5
  register: result
  until: result.rc == 0
- name: Create custom user
  uri:
    url: "https://{{ target_address }}:{{ indexer_http_port }}/_plugins/_security/api/internalusers/{{ indexer_custom_user }}"
    method: PUT
    user: "admin"  # Default Indexer user is always "admin"
    password: "{{ indexer_admin_password }}"
    body: |
      {
        "password": "{{ indexer_admin_password }}",
        "backend_roles": ["{{ indexer_custom_user_role }}"]
      }
    body_format: json
    validate_certs: no
    status_code: 200,201,401
    return_content: yes
    timeout: 4
  when:
    - indexer_custom_user is defined and indexer_custom_user

View File

@ -0,0 +1,33 @@
nodes:
  # Wazuh indexer nodes.
  # NOTE(review): the "elasticsearch"/"kibana" key names appear to be what
  # wazuh-cert-tool.sh expects — confirm before renaming them.
  # NOTE(review): with more than one matching instance per section this loop
  # emits duplicate "name:"/"ip:" keys — verify multi-node rendering.
  elasticsearch:
{% for (key,value) in instances.items() %}
{% if (value.role is defined and value.role == 'indexer') %}
    name: {{ value.name }}
    ip: {{ value.ip }}
{% endif %}
{% endfor %}
  # Wazuh server nodes
  # Use node_type only with more than one Wazuh manager
  wazuh_servers:
{% for (key,value) in instances.items() %}
{% if (value.role is defined and value.role == 'wazuh') %}
    name: {{ value.name }}
    ip: {{ value.ip }}
{% endif %}
{% if (value.node_type is defined and value.node_type == 'master') %}
    node_type: master
{% elif (value.node_type is defined and value.node_type == 'worker') %}
    node_type: worker
{% endif %}
{% endfor %}
  # Wazuh dashboard node
  kibana:
{% for (key,value) in instances.items() %}
{% if (value.role is defined and value.role == 'dashboard') %}
    name: {{ value.name }}
    ip: {{ value.ip }}
{% endif %}
{% endfor %}

View File

@ -9,13 +9,13 @@ _meta:
# Define your internal users here # Define your internal users here
admin: admin:
hash: "{{ opendistro_admin_password }}" hash: "{{ indexer_admin_password }}"
reserved: true reserved: true
backend_roles: backend_roles:
- "admin" - "admin"
description: "admin user" description: "admin user"
kibanaserver: kibanaserver:
hash: "{{ opendistro_kibana_password }}" hash: "{{ dashboard_password }}"
reserved: true reserved: true
description: "kibanaserver user" description: "kibanaserver user"

View File

@ -11,19 +11,17 @@
## -Xms4g ## -Xms4g
## -Xmx4g ## -Xmx4g
## ##
## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
## for more information
## ##
################################################################ ################################################################
# Xms represents the initial size of total heap space # Xms represents the initial size of total heap space
# Xmx represents the maximum size of total heap space # Xmx represents the maximum size of total heap space
{% if opendistro_jvm_xms is not none %} {% if indexer_jvm_xms is not none %}
{% if opendistro_jvm_xms < 32000 %} {% if indexer_jvm_xms < 32000 %}
-Xms{{ opendistro_jvm_xms }}m -Xms{{ indexer_jvm_xms }}m
-Xmx{{ opendistro_jvm_xms }}m -Xmx{{ indexer_jvm_xms }}m
{% else %} {% else %}
-Xms32000m -Xms32000m
@ -62,7 +60,7 @@
14-:-XX:InitiatingHeapOccupancyPercent=30 14-:-XX:InitiatingHeapOccupancyPercent=30
## JVM temporary directory ## JVM temporary directory
-Djava.io.tmpdir=${ES_TMPDIR} -Djava.io.tmpdir=${OPENSEARCH_TMPDIR}
## heap dumps ## heap dumps
@ -72,25 +70,25 @@
# specify an alternative path for heap dumps; ensure the directory exists and # specify an alternative path for heap dumps; ensure the directory exists and
# has sufficient space # has sufficient space
-XX:HeapDumpPath=/var/lib/elasticsearch -XX:HeapDumpPath=data
# specify an alternative path for JVM fatal error logs # specify an alternative path for JVM fatal error logs
-XX:ErrorFile=/var/log/elasticsearch/hs_err_pid%p.log -XX:ErrorFile=/var/log/wazuh-indexer/hs_err_pid%p.log
## JDK 8 GC logging ## JDK 8 GC logging
8:-XX:+PrintGCDetails 8:-XX:+PrintGCDetails
8:-XX:+PrintGCDateStamps 8:-XX:+PrintGCDateStamps
8:-XX:+PrintTenuringDistribution 8:-XX:+PrintTenuringDistribution
8:-XX:+PrintGCApplicationStoppedTime 8:-XX:+PrintGCApplicationStoppedTime
8:-Xloggc:/var/log/elasticsearch/gc.log 8:-Xloggc:/var/log/wazuh-indexer/gc.log
8:-XX:+UseGCLogFileRotation 8:-XX:+UseGCLogFileRotation
8:-XX:NumberOfGCLogFiles=32 8:-XX:NumberOfGCLogFiles=32
8:-XX:GCLogFileSize=64m 8:-XX:GCLogFileSize=64m
# JDK 9+ GC logging # JDK 9+ GC logging
9-:-Xlog:gc*,gc+age=trace,safepoint:file=/var/log/elasticsearch/gc.log:utctime,pid,tags:filecount=32,filesize=64m 9-:-Xlog:gc*,gc+age=trace,safepoint:file=/var/log/wazuh-indexer/gc.log:utctime,pid,tags:filecount=32,filesize=64m
## OpenDistro Performance Analyzer ## OpenDistro Performance Analyzer
-Dclk.tck=100 -Dclk.tck=100
-Djdk.attach.allowAttachSelf=true -Djdk.attach.allowAttachSelf=true
-Djava.security.policy=file:///usr/share/elasticsearch/plugins/opendistro_performance_analyzer/pa_config/es_security.policy -Djava.security.policy=file:///usr/share/wazuh-indexer/plugins/opensearch-performance-analyzer/pa_config/opensearch_security.policy

View File

@ -0,0 +1,60 @@
# Wazuh indexer (OpenSearch) configuration — rendered by Ansible.
network.host: {{ indexer_network_host }}
node.name: {{ indexer_node_name }}
{% if single_node == true %}
discovery.type: single-node
{% else %}
cluster.initial_master_nodes:
{% for item in indexer_cluster_nodes %}
- {{ item }}
{% endfor %}
discovery.seed_hosts:
{% for item in indexer_discovery_nodes %}
- {{ item }}
{% endfor %}
{% endif %}
cluster.name: {{ indexer_cluster_name }}
# Port ranges: the node binds the first free port of each range.
# NOTE(review): other roles assume the fixed indexer_http_port (9200) —
# a node falling back to 9201+ would not be reached; verify this is intended.
http.port: 9200-9299
transport.tcp.port: 9300-9399
node.max_local_storage_nodes: "3"
path.data: /var/lib/wazuh-indexer
path.logs: /var/log/wazuh-indexer
###############################################################################
#                                                                             #
#                       WARNING: Demo certificates set up in this file.       #
#                       Please change on production cluster!                  #
#                                                                             #
###############################################################################
# TLS material deployed by security_actions.yml.
plugins.security.ssl.http.pemcert_filepath: /etc/wazuh-indexer/certs/{{ indexer_node_name }}.pem
plugins.security.ssl.http.pemkey_filepath: /etc/wazuh-indexer/certs/{{ indexer_node_name }}-key.pem
plugins.security.ssl.http.pemtrustedcas_filepath: /etc/wazuh-indexer/certs/root-ca.pem
plugins.security.ssl.transport.pemcert_filepath: /etc/wazuh-indexer/certs/{{ indexer_node_name }}.pem
plugins.security.ssl.transport.pemkey_filepath: /etc/wazuh-indexer/certs/{{ indexer_node_name }}-key.pem
plugins.security.ssl.transport.pemtrustedcas_filepath: /etc/wazuh-indexer/certs/root-ca.pem
plugins.security.ssl.http.enabled: true
plugins.security.ssl.transport.enforce_hostname_verification: false
plugins.security.ssl.transport.resolve_hostname: false
plugins.security.audit.type: internal_opensearch
# DN of the admin client certificate accepted by securityadmin.sh.
plugins.security.authcz.admin_dn:
- "CN=admin,OU=Docu,O=Wazuh,L=California,C=US"
plugins.security.check_snapshot_restore_write_privileges: true
plugins.security.enable_snapshot_restore_privilege: true
# One DN per cluster node, matching the generated node certificates.
plugins.security.nodes_dn:
{% for (key,value) in instances.items() %}
- "CN={{ value.name }},OU=Docu,O=Wazuh,L=California,C=US"
{% endfor %}
plugins.security.restapi.roles_enabled:
- "all_access"
- "security_rest_api_access"
plugins.security.system_indices.enabled: true
plugins.security.system_indices.indices: [".opendistro-alerting-config", ".opendistro-alerting-alert*", ".opendistro-anomaly-results*", ".opendistro-anomaly-detector*", ".opendistro-anomaly-checkpoints", ".opendistro-anomaly-detection-state", ".opendistro-reports-*", ".opendistro-notifications-*", ".opendistro-notebooks", ".opensearch-observability", ".opendistro-asynchronous-search-response*", ".replication-metadata-store"]
### Option to allow Filebeat-oss 7.10.2 to work ###
compatibility.override_main_response_version: true