Merge master changes

Commit 583f38f19c

CHANGELOG.md (13 changed lines)

@@ -7,6 +7,19 @@ All notable changes to this project will be documented in this file.

- Update to Wazuh v4.0.0

## [v3.13.2]

### Added

- Update to Wazuh v3.13.2
- Add kibana extra ssl option ([@xr09](https://github.com/xr09)) [PR#451](https://github.com/wazuh/wazuh-ansible/pull/451)
- Force basic auth ([@xr09](https://github.com/xr09)) [PR#456](https://github.com/wazuh/wazuh-ansible/pull/456)

### Fixed

- Fix check_mode condition ([@manuasir](https://github.com/manuasir)) [PR#452](https://github.com/wazuh/wazuh-ansible/pull/452)
- Fixes for opendistro role ([@xr09](https://github.com/xr09)) [PR#453](https://github.com/wazuh/wazuh-ansible/pull/453)

## [v3.13.1_7.8.0]

### Added

README.md (294 changed lines)

@@ -7,6 +7,10 @@

These playbooks install and configure Wazuh agent, manager and Elastic Stack.

## Branches

* `master` branch corresponds to the latest Wazuh Ansible changes. It might be unstable.
* `3.13` branch corresponds to the latest stable Wazuh Ansible version.

## Documentation

* [Wazuh Ansible documentation](https://documentation.wazuh.com/current/deploying-with-ansible/index.html)

@@ -20,8 +24,13 @@ These playbooks install and configure Wazuh agent, manager and Elastic Stack.

│ │ │ ├── ansible-elasticsearch
│ │ │ ├── ansible-kibana
│ │
│ │ ├── opendistro
│ │ │ ├── opendistro-elasticsearch
│ │ │ ├── opendistro-kibana
│ │
│ │ ├── wazuh
│ │ │ ├── ansible-filebeat
│ │ │ ├── ansible-filebeat-oss
│ │ │ ├── ansible-wazuh-manager
│ │ │ ├── ansible-wazuh-agent
│ │

@@ -35,40 +44,293 @@ These playbooks install and configure Wazuh agent, manager and Elastic Stack.

│ │ ├── wazuh-elastic_stack-single.yml
│ │ ├── wazuh-kibana.yml
│ │ ├── wazuh-manager.yml
│ │ ├── wazuh-manager-oss.yml
│ │ ├── wazuh-opendistro.yml
│ │ ├── wazuh-opendistro-kibana.yml
│
│ ├── README.md
│ ├── VERSION
│ ├── CHANGELOG.md

## Example: production-ready distributed environment

### Playbook

The example playbook below uses the `wazuh-ansible` roles to provision a production-ready Wazuh environment. The architecture includes two Wazuh nodes, three ODFE nodes and a mixed ODFE-Kibana node.

```yaml
---
# Certificates generation
- hosts: es1
  roles:
    - role: ../roles/opendistro/opendistro-elasticsearch
      elasticsearch_network_host: "{{ private_ip }}"
      elasticsearch_cluster_nodes:
        - "{{ hostvars.es1.private_ip }}"
        - "{{ hostvars.es2.private_ip }}"
        - "{{ hostvars.es3.private_ip }}"
      elasticsearch_discovery_nodes:
        - "{{ hostvars.es1.private_ip }}"
        - "{{ hostvars.es2.private_ip }}"
        - "{{ hostvars.es3.private_ip }}"
      perform_installation: false
  become: yes
  become_user: root
  vars:
    elasticsearch_node_master: true
    instances:
      node1:
        name: node-1 # Important: must be equal to elasticsearch_node_name.
        ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert.
      node2:
        name: node-2
        ip: "{{ hostvars.es2.private_ip }}"
      node3:
        name: node-3
        ip: "{{ hostvars.es3.private_ip }}"
      node4:
        name: node-4
        ip: "{{ hostvars.manager.private_ip }}"
      node5:
        name: node-5
        ip: "{{ hostvars.worker.private_ip }}"
      node6:
        name: node-6
        ip: "{{ hostvars.kibana.private_ip }}"
  tags:
    - generate-certs

#ODFE Cluster
- hosts: odfe_cluster
  strategy: free
  roles:
    - role: ../roles/opendistro/opendistro-elasticsearch
      elasticsearch_network_host: "{{ private_ip }}"
  become: yes
  become_user: root
  vars:
    elasticsearch_cluster_nodes:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"
    elasticsearch_discovery_nodes:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"
    elasticsearch_node_master: true
    instances:
      node1:
        name: node-1 # Important: must be equal to elasticsearch_node_name.
        ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert.
      node2:
        name: node-2
        ip: "{{ hostvars.es2.private_ip }}"
      node3:
        name: node-3
        ip: "{{ hostvars.es3.private_ip }}"
      node4:
        name: node-4
        ip: "{{ hostvars.manager.private_ip }}"
      node5:
        name: node-5
        ip: "{{ hostvars.worker.private_ip }}"
      node6:
        name: node-6
        ip: "{{ hostvars.kibana.private_ip }}"

#Wazuh cluster
- hosts: manager
  roles:
    - role: "../roles/wazuh/ansible-wazuh-manager"
    - role: "../roles/wazuh/ansible-filebeat-oss"
      filebeat_node_name: node-4
  become: yes
  become_user: root
  vars:
    wazuh_manager_config:
      connection:
        - type: 'secure'
          port: '1514'
          protocol: 'tcp'
          queue_size: 131072
      api:
        https: 'yes'
      cluster:
        disable: 'no'
        node_name: 'master'
        node_type: 'master'
        nodes:
          - '"{{ hostvars.manager.private_ip }}"'
        hidden: 'no'
    filebeat_output_elasticsearch_hosts:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"

- hosts: worker
  roles:
    - role: "../roles/wazuh/ansible-wazuh-manager"
    - role: "../roles/wazuh/ansible-filebeat-oss"
      filebeat_node_name: node-5
  become: yes
  become_user: root
  vars:
    wazuh_manager_config:
      connection:
        - type: 'secure'
          port: '1514'
          protocol: 'tcp'
          queue_size: 131072
      api:
        https: 'yes'
      cluster:
        disable: 'no'
        node_name: 'worker_01'
        node_type: 'worker'
        key: 'c98b62a9b6169ac5f67dae55ae4a9088'
        nodes:
          - '"{{ hostvars.manager.private_ip }}"'
        hidden: 'no'
    filebeat_output_elasticsearch_hosts:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"

#ODFE+Kibana node
- hosts: kibana
  roles:
    - role: "../roles/opendistro/opendistro-elasticsearch"
    - role: "../roles/opendistro/opendistro-kibana"
  become: yes
  become_user: root
  vars:
    elasticsearch_network_host: "{{ hostvars.kibana.private_ip }}"
    elasticsearch_node_name: node-6
    elasticsearch_node_master: false
    elasticsearch_node_ingest: false
    elasticsearch_node_data: false
    elasticsearch_cluster_nodes:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"
    elasticsearch_discovery_nodes:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"
    kibana_node_name: node-6
    wazuh_api_credentials:
      - id: default
        url: https://{{ hostvars.manager.private_ip }}
        port: 55000
        user: foo
        password: bar
    instances:
      node1:
        name: node-1 # Important: must be equal to elasticsearch_node_name.
        ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert.
      node2:
        name: node-2
        ip: "{{ hostvars.es2.private_ip }}"
      node3:
        name: node-3
        ip: "{{ hostvars.es3.private_ip }}"
      node4:
        name: node-4
        ip: "{{ hostvars.manager.private_ip }}"
      node5:
        name: node-5
        ip: "{{ hostvars.worker.private_ip }}"
      node6:
        name: node-6
        ip: "{{ hostvars.kibana.private_ip }}"
```

### Inventory file

- The `ansible_host` variable should contain the address or FQDN used to gather facts and provision each node.
- The `private_ip` variable should contain the address or FQDN used for the internal cluster communications.
- If the environment is located in a local subnet, the `ansible_host` and `private_ip` variables should match.
- The SSH credentials used by Ansible during provisioning can be specified in this file too; another option is to include them directly in the playbook, as shown in the sketch after the inventory below.

```ini
es1 ansible_host=<es1_ec2_public_ip> private_ip=<es1_ec2_private_ip> elasticsearch_node_name=node-1
es2 ansible_host=<es2_ec2_public_ip> private_ip=<es2_ec2_private_ip> elasticsearch_node_name=node-2
es3 ansible_host=<es3_ec2_public_ip> private_ip=<es3_ec2_private_ip> elasticsearch_node_name=node-3
kibana ansible_host=<kibana_node_public_ip> private_ip=<kibana_ec2_private_ip>
manager ansible_host=<manager_node_public_ip> private_ip=<manager_ec2_private_ip>
worker ansible_host=<worker_node_public_ip> private_ip=<worker_ec2_private_ip>

[odfe_cluster]
es1
es2
es3

[all:vars]
ansible_ssh_user=vagrant
ansible_ssh_private_key_file=/path/to/ssh/key.pem
ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
```
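
As noted in the list above, the SSH connection settings do not have to live in the inventory; they can also be set per play. A minimal sketch, assuming the same placeholder credentials used in the `[all:vars]` block:

```yaml
# Hypothetical alternative: define the connection settings as play variables
# instead of inventory variables (the values shown are placeholders).
- hosts: odfe_cluster
  vars:
    ansible_ssh_user: vagrant
    ansible_ssh_private_key_file: /path/to/ssh/key.pem
    ansible_ssh_extra_args: '-o StrictHostKeyChecking=no'
  roles:
    - role: ../roles/opendistro/opendistro-elasticsearch
```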

### Launching the playbook

```bash
ansible-playbook wazuh-odfe-production-ready.yml -i inventory
```

After the playbook execution, the Wazuh UI should be reachable through `https://<kibana_host>:5601`
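
One way to confirm this from the same inventory is an ad-hoc check against Kibana; a minimal sketch (the accepted status codes and `validate_certs: no` are assumptions matching the self-signed certificates used in this example):

```yaml
# Hypothetical post-deployment smoke test for the Wazuh UI (Kibana on port 5601).
- hosts: kibana
  gather_facts: no
  tasks:
    - name: Check that the Wazuh UI is answering
      uri:
        url: "https://{{ private_ip }}:5601"
        validate_certs: no    # the example uses self-signed certificates
        status_code: 200, 302 # a redirect to the login page is also acceptable
      register: ui_check
```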

## Example: single-host environment

### Playbook

The example playbook below uses the `wazuh-ansible` roles to provision a single-host Wazuh environment. This architecture includes all the Wazuh and ODFE components on a single node.

```yaml
---
# Single node
- hosts: server
  become: yes
  become_user: root
  roles:
    - role: ../roles/opendistro/opendistro-elasticsearch
    - role: "../roles/wazuh/ansible-wazuh-manager"
    - role: "../roles/wazuh/ansible-filebeat-oss"
    - role: "../roles/opendistro/opendistro-kibana"
  vars:
    single_node: true
    minimum_master_nodes: 1
    elasticsearch_node_master: true
    elasticsearch_network_host: <your server host>
    filebeat_node_name: node-1
    filebeat_output_elasticsearch_hosts: <your server host>
    ansible_ssh_user: vagrant
    ansible_ssh_private_key_file: /path/to/ssh/key.pem
    ansible_ssh_extra_args: '-o StrictHostKeyChecking=no'
    instances:
      node1:
        name: node-1 # Important: must be equal to elasticsearch_node_name.
        ip: <your server host>
```

### Inventory file

```ini
[server]
<your server host>

[all:vars]
ansible_ssh_user=vagrant
ansible_ssh_private_key_file=/path/to/ssh/key.pem
ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
```

### Launching the playbook

```bash
ansible-playbook wazuh-odfe-single.yml -i inventory
```

After the playbook execution, the Wazuh UI should be reachable through `https://<your server host>:5601`

## Contribute

If you want to contribute to our repository, please fork our GitHub repository and submit a pull request.

@@ -88,7 +350,7 @@ https://github.com/dj-wasabi/ansible-ossec-server

## License and copyright

WAZUH
Copyright (C) 2016-2018 Wazuh Inc. (License GPLv2)
Copyright (C) 2016-2020 Wazuh Inc. (License GPLv2)

## Web references

@@ -1,6 +1,7 @@
import os
import pytest
import testinfra.utils.ansible_runner
import re

testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')

@@ -10,7 +11,7 @@ def get_wazuh_version():
    return "4.0.0"


def test_wazuh_packages_are_installed(host):
    """Test if the main packages are installed."""
    """Test the main packages are installed."""
    manager = host.package("wazuh-manager")
    api = host.package("wazuh-api")
    assert manager.is_installed

@@ -19,15 +20,27 @@ def test_wazuh_packages_are_installed(host):
    assert api.version.startswith(get_wazuh_version())


def test_wazuh_services_are_running(host):
    """Test if the services are enabled and running.
    """Test the services are enabled and running.

    When assert commands are commented, this means that the service command has
    a wrong exit code: https://github.com/wazuh/wazuh-ansible/issues/107
    """
    manager = host.service("wazuh-manager")
    api = host.service("wazuh-api")
    assert manager.is_running
    assert api.is_running
    # This currently doesn't work with our current Docker base images
    # manager = host.service("wazuh-manager")
    # api = host.service("wazuh-api")
    # assert manager.is_running
    # assert api.is_running
    output = host.check_output('ps aux | grep ossec | tr -s " " | cut -d" " -f11')
    assert 'ossec-authd' in output
    assert 'wazuh-modulesd' in output
    assert 'wazuh-db' in output
    assert 'ossec-execd' in output
    assert 'ossec-monitord' in output
    assert 'ossec-remoted' in output
    assert 'ossec-logcollector' in output
    assert 'ossec-analysisd' in output
    assert 'ossec-syscheckd' in output


@pytest.mark.parametrize("wazuh_file, wazuh_owner, wazuh_group, wazuh_mode", [
    ("/var/ossec/etc/sslmanager.cert", "root", "root", 0o640),

@@ -37,14 +50,14 @@ def test_wazuh_services_are_running(host):
])


def test_wazuh_files(host, wazuh_file, wazuh_owner, wazuh_group, wazuh_mode):
    """Test if Wazuh related files exist and have proper owners and mode."""
    """Test Wazuh related files exist and have proper owners and mode."""
    wazuh_file_host = host.file(wazuh_file)
    assert wazuh_file_host.user == wazuh_owner
    assert wazuh_file_host.group == wazuh_group
    assert wazuh_file_host.mode == wazuh_mode


def test_filebeat_is_installed(host):
    """Test if the elasticsearch package is installed."""
    """Test the elasticsearch package is installed."""
    filebeat = host.package("filebeat")
    assert filebeat.is_installed
    assert filebeat.version.startswith('7.9.1')

playbooks/wazuh-odfe-production-ready.yml (new file, 184 lines)

@@ -0,0 +1,184 @@
---
# Certificates generation
- hosts: es1
  roles:
    - role: ../roles/opendistro/opendistro-elasticsearch
      elasticsearch_network_host: "{{ private_ip }}"
      elasticsearch_cluster_nodes:
        - "{{ hostvars.es1.private_ip }}"
        - "{{ hostvars.es2.private_ip }}"
        - "{{ hostvars.es3.private_ip }}"
      elasticsearch_discovery_nodes:
        - "{{ hostvars.es1.private_ip }}"
        - "{{ hostvars.es2.private_ip }}"
        - "{{ hostvars.es3.private_ip }}"
      perform_installation: false
  become: yes
  become_user: root
  vars:
    elasticsearch_node_master: true
    instances:
      node1:
        name: node-1 # Important: must be equal to elasticsearch_node_name.
        ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert.
      node2:
        name: node-2
        ip: "{{ hostvars.es2.private_ip }}"
      node3:
        name: node-3
        ip: "{{ hostvars.es3.private_ip }}"
      node4:
        name: node-4
        ip: "{{ hostvars.manager.private_ip }}"
      node5:
        name: node-5
        ip: "{{ hostvars.worker.private_ip }}"
      node6:
        name: node-6
        ip: "{{ hostvars.kibana.private_ip }}"
  tags:
    - generate-certs

#ODFE Cluster
- hosts: odfe_cluster
  strategy: free
  roles:
    - role: ../roles/opendistro/opendistro-elasticsearch
      elasticsearch_network_host: "{{ private_ip }}"
  become: yes
  become_user: root
  vars:
    elasticsearch_cluster_nodes:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"
    elasticsearch_discovery_nodes:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"
    elasticsearch_node_master: true
    instances:
      node1:
        name: node-1 # Important: must be equal to elasticsearch_node_name.
        ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert.
      node2:
        name: node-2
        ip: "{{ hostvars.es2.private_ip }}"
      node3:
        name: node-3
        ip: "{{ hostvars.es3.private_ip }}"
      node4:
        name: node-4
        ip: "{{ hostvars.manager.private_ip }}"
      node5:
        name: node-5
        ip: "{{ hostvars.worker.private_ip }}"
      node6:
        name: node-6
        ip: "{{ hostvars.kibana.private_ip }}"

#Wazuh cluster
- hosts: manager
  roles:
    - role: "../roles/wazuh/ansible-wazuh-manager"
    - role: "../roles/wazuh/ansible-filebeat-oss"
      filebeat_node_name: node-4
  become: yes
  become_user: root
  vars:
    wazuh_manager_config:
      connection:
        - type: 'secure'
          port: '1514'
          protocol: 'tcp'
          queue_size: 131072
      api:
        https: 'yes'
      cluster:
        disable: 'no'
        node_name: 'master'
        node_type: 'master'
        nodes:
          - '"{{ hostvars.manager.private_ip }}"'
        hidden: 'no'
    filebeat_output_elasticsearch_hosts:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"

- hosts: worker
  roles:
    - role: "../roles/wazuh/ansible-wazuh-manager"
    - role: "../roles/wazuh/ansible-filebeat-oss"
      filebeat_node_name: node-5
  become: yes
  become_user: root
  vars:
    wazuh_manager_config:
      connection:
        - type: 'secure'
          port: '1514'
          protocol: 'tcp'
          queue_size: 131072
      api:
        https: 'yes'
      cluster:
        disable: 'no'
        node_name: 'worker_01'
        node_type: 'worker'
        key: 'c98b62a9b6169ac5f67dae55ae4a9088'
        nodes:
          - '"{{ hostvars.manager.private_ip }}"'
        hidden: 'no'
    filebeat_output_elasticsearch_hosts:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"

#ODFE+Kibana node
- hosts: kibana
  roles:
    - role: "../roles/opendistro/opendistro-elasticsearch"
    - role: "../roles/opendistro/opendistro-kibana"
  become: yes
  become_user: root
  vars:
    elasticsearch_network_host: "{{ hostvars.kibana.private_ip }}"
    elasticsearch_node_name: node-6
    elasticsearch_node_master: false
    elasticsearch_node_ingest: false
    elasticsearch_node_data: false
    elasticsearch_cluster_nodes:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"
    elasticsearch_discovery_nodes:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"
    kibana_node_name: node-6
    wazuh_api_credentials:
      - id: default
        url: https://{{ hostvars.manager.private_ip }}
        port: 55000
        user: foo
        password: bar
    instances:
      node1:
        name: node-1 # Important: must be equal to elasticsearch_node_name.
        ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert.
      node2:
        name: node-2
        ip: "{{ hostvars.es2.private_ip }}"
      node3:
        name: node-3
        ip: "{{ hostvars.es3.private_ip }}"
      node4:
        name: node-4
        ip: "{{ hostvars.manager.private_ip }}"
      node5:
        name: node-5
        ip: "{{ hostvars.worker.private_ip }}"
      node6:
        name: node-6
        ip: "{{ hostvars.kibana.private_ip }}"

playbooks/wazuh-odfe-single.yml (new file, 21 lines)

@@ -0,0 +1,21 @@
---
# Single node
- hosts: <your server host>
  become: yes
  become_user: root
  roles:
    - role: ../roles/opendistro/opendistro-elasticsearch
    - role: ../roles/wazuh/ansible-wazuh-manager
    - role: ../roles/wazuh/ansible-filebeat-oss
    - role: ../roles/opendistro/opendistro-kibana
  vars:
    single_node: true
    minimum_master_nodes: 1
    elasticsearch_node_master: true
    elasticsearch_network_host: <your server host>
    filebeat_node_name: node-1
    filebeat_output_elasticsearch_hosts: <your server host>
    instances:
      node1:
        name: node-1 # Important: must be equal to elasticsearch_node_name.
        ip: <your server host>

@@ -4,7 +4,7 @@ elasticsearch_http_port: 9200
elasticsearch_network_host: 127.0.0.1
elasticsearch_reachable_host: 127.0.0.1
elasticsearch_jvm_xms: null
elastic_stack_version: 7.8.0
elastic_stack_version: 7.9.1
elasticsearch_lower_disk_requirements: false
elasticsearch_path_repo: []

roles/elastic-stack/ansible-elasticsearch/tasks/main.yml (2 changed lines, Normal file → Executable file)

@@ -128,6 +128,7 @@
validate_certs: no
status_code: 200,401
return_content: yes
force_basic_auth: yes
timeout: 4
register: _result
until: ( _result.json is defined) and (_result.json.status == "green")

@@ -145,6 +146,7 @@
password: "{{ elasticsearch_xpack_security_password }}"
body: '{ "password" : "{{ item.value["password"] }}", "roles" : {{ item.value["roles"] }} }'
validate_certs: no
force_basic_auth: yes
loop: "{{ elasticsearch_xpack_users|default({})|dict2items }}"
register: http_response
failed_when: http_response.status != 200

roles/elastic-stack/ansible-kibana/tasks/main.yml (9 changed lines, Normal file → Executable file)

@@ -98,6 +98,14 @@
name: kibana
state: started

- name: Ensuring Kibana directory owner
  file:
    path: "/usr/share/kibana"
    state: directory
    owner: kibana
    group: kibana
    recurse: yes

- name: Build and Install Wazuh Kibana Plugin from sources
  import_tasks: build_wazuh_plugin.yml
  when:

@@ -146,6 +154,7 @@
password: "{{ elasticsearch_xpack_security_password }}"
validate_certs: no
status_code: 200, 404
force_basic_auth: yes

- name: Create wazuh plugin config directory
  file:

@@ -1,12 +1,12 @@
---
# Cluster Settings
es_version: "7.3.2"
es_version: "7.8.0"
es_major_version: "7.x"

opendistro_version: 1.8.0
opendistro_version: 1.10.1

elasticsearch_cluster_name: wazuh-cluster
single_node: true
single_node: false
elasticsearch_node_name: node-1
opendistro_cluster_name: wazuh
elasticsearch_node_data: true
elasticsearch_node_ingest: true

@@ -56,7 +56,7 @@ opendistro_http_port: 9200
certs_gen_tool_version: 1.8

# Url of Search Guard certificates generator tool
certs_gen_tool_url: "https://maven.search-guard.com/search-guard-tlstool/{{ certs_gen_tool_version }}/search-guard-tlstool-{{ certs_gen_tool_version }}.zip"
certs_gen_tool_url: "https://search.maven.org/remotecontent?filepath=com/floragunn/search-guard-tlstool/{{ certs_gen_tool_version }}/search-guard-tlstool-{{ certs_gen_tool_version }}.zip"

elasticrepo:
  apt: 'https://artifacts.elastic.co/packages/7.x/apt'

@@ -70,6 +70,7 @@
tags: debug
when:
  - hostvars[inventory_hostname]['private_ip'] is not defined or not hostvars[inventory_hostname]['private_ip']
  - single_node == false

- name: Wait for Elasticsearch API (Private IP)
  uri:

@@ -87,6 +88,7 @@
tags: debug
when:
  - hostvars[inventory_hostname]['private_ip'] is defined and hostvars[inventory_hostname]['private_ip']
  - single_node == false

- import_tasks: "RMRedHat.yml"
  when: ansible_os_family == "RedHat"

@@ -1,4 +1,4 @@
cluster.name: {{ elasticsearch_cluster_name }}
cluster.name: {{ opendistro_cluster_name }}
node.name: {{ elasticsearch_node_name }}
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch

@@ -6,6 +6,9 @@ network.host: {{ elasticsearch_network_host }}

node.master: {{ elasticsearch_node_master|lower }}

{% if single_node == true %}
discovery.type: single-node
{% else %}
cluster.initial_master_nodes:
{% for item in elasticsearch_cluster_nodes %}
  - {{ item }}

@@ -15,6 +18,7 @@ discovery.seed_hosts:
{% for item in elasticsearch_discovery_nodes %}
  - {{ item }}
{% endfor %}
{% endif %}

{% if elasticsearch_node_data|lower == 'false' %}
node.data: false

@@ -6,7 +6,6 @@ elasticsearch_nodes: |-
{% for item in groups['es_cluster'] -%}
{{ hostvars[item]['ip'] }}{% if not loop.last %}","{% endif %}
{%- endfor %}
elasticsearch_network_host: 172.16.0.161
elastic_api_protocol: https
kibana_conf_path: /etc/kibana
kibana_node_name: node-1

@@ -41,6 +41,14 @@
  - install
  - configure

- name: Ensuring Kibana directory owner
  file:
    path: "/usr/share/kibana"
    state: directory
    owner: kibana
    group: kibana
    recurse: yes

- name: Build and Install Wazuh Kibana Plugin from sources
  import_tasks: build_wazuh_plugin.yml
  when:

@@ -237,16 +237,6 @@ wazuh_agent_config:
java_path_win: '\\server\jre\bin\java.exe'
ciscat_path: 'wodles/ciscat'
ciscat_path_win: 'C:\cis-cat'
vuls:
  disable: 'yes'
  interval: '1d'
  run_on_start: 'yes'
  args:
    - 'mincvss 5'
    - 'antiquity-limit 20'
    - 'updatenvd'
    - 'nvd-year 2016'
    - 'autoupdate'
localfiles:
  debian:
    - format: 'syslog'

@@ -194,19 +194,6 @@
  - config
  - api

- name: Linux | Vuls integration deploy (runs in background, can take a while)
  command: /var/ossec/wodles/vuls/deploy_vuls.sh {{ ansible_distribution|lower }} {{ ansible_distribution_major_version|int }}
  args:
    creates: /var/ossec/wodles/vuls/config.toml
  async: 3600
  poll: 0
  when:
    - wazuh_agent_config.vuls.disable != 'yes'
    - ansible_distribution in ['Redhat', 'CentOS', 'Ubuntu', 'Debian', 'Oracle']
    - not ansible_check_mode
  tags:
    - init

- name: Linux | Installing agent configuration (ossec.conf)
  template: src=var-ossec-etc-ossec-agent.conf.j2
            dest=/var/ossec/etc/ossec.conf

@@ -339,18 +339,6 @@
</syscheck>
{% endif %}

{% if ansible_system == "Linux" and wazuh_agent_config.vuls.disable == 'no' %}
<wodle name="command">
  <disabled>no</disabled>
  <tag>Wazuh-VULS</tag>
  <command>/usr/bin/python /var/ossec/wodles/vuls/vuls.py{% for arg in wazuh_agent_config.vuls.args %} --{{ arg }}{% endfor %}</command>
  <interval>{{ wazuh_agent_config.vuls.interval }}</interval>
  <ignore_output>yes</ignore_output>
  <run_on_start>{{ wazuh_agent_config.vuls.run_on_start }}</run_on_start>
</wodle>
{% endif %}

<!-- Files to monitor (localfiles) -->
{% if ansible_system == "Linux" %}
{% for localfile in wazuh_agent_config.localfiles.linux %}

@@ -244,16 +244,6 @@ wazuh_manager_config:
update_from_year: '2010'
update_interval: '1h'
name: '"nvd"'
vuls:
  disable: 'yes'
  interval: '1d'
  run_on_start: 'yes'
  args:
    - 'mincvss 5'
    - 'antiquity-limit 20'
    - 'updatenvd'
    - 'nvd-year 2016'
    - 'autoupdate'
log_level: 3
email_level: 12
localfiles:

@@ -250,19 +250,6 @@
  - init
  - config

- name: Linux | Vuls integration deploy (runs in background, can take a while)
  command: /var/ossec/wodles/vuls/deploy_vuls.sh {{ ansible_distribution|lower }} {{ ansible_distribution_major_version|int }}
  args:
    creates: /var/ossec/wodles/vuls/config.toml
  async: 3600
  poll: 0
  when:
    - wazuh_manager_config.vuls.disable != 'yes'
    - ansible_distribution in ['Redhat', 'CentOS', 'Ubuntu', 'Debian', 'Oracle', 'Amazon']
    - not ansible_check_mode
  tags:
    - init

- name: Configure ossec.conf
  template: src=var-ossec-etc-ossec-server.conf.j2
            dest=/var/ossec/etc/ossec.conf

@@ -374,17 +374,6 @@
</command>
{% endfor %}

{% if ansible_system == "Linux" and wazuh_manager_config.vuls.disable == 'no' %}
<wodle name="command">
  <disabled>no</disabled>
  <tag>Wazuh-VULS</tag>
  <command>/usr/bin/python /var/ossec/wodles/vuls/vuls.py{% for arg in wazuh_manager_config.vuls.args %} --{{ arg }}{% endfor %}</command>
  <interval>{{ wazuh_manager_config.vuls.interval }}</interval>
  <ignore_output>yes</ignore_output>
  <run_on_start>{{ wazuh_manager_config.vuls.run_on_start }}</run_on_start>
</wodle>
{% endif -%}

{% if agentless_creds is defined %}
{% for agentless in agentless_creds %}
<agentless>