Update README

zenidd 2020-10-15 16:44:54 +02:00
parent 32d38d70bb
commit 25b2a8a946

README.md (487 lines changed)

@@ -48,18 +48,63 @@ These playbooks install and configure Wazuh agent, manager and Elastic Stack.
│ ├── VERSION
│ ├── CHANGELOG.md

## Example: production-ready distributed environment

### Playbook

The following example playbook uses the `wazuh-ansible` roles to provision a production-ready Wazuh environment. The architecture includes two Wazuh nodes (master and worker), three ODFE nodes, and a mixed ODFE-Kibana node.
```yaml
---
# Certificates generation
- hosts: es1
  roles:
    - role: ../roles/opendistro/opendistro-elasticsearch
      elasticsearch_network_host: "{{ private_ip }}"
      elasticsearch_cluster_nodes:
        - "{{ hostvars.es1.private_ip }}"
        - "{{ hostvars.es2.private_ip }}"
        - "{{ hostvars.es3.private_ip }}"
      elasticsearch_discovery_nodes:
        - "{{ hostvars.es1.private_ip }}"
        - "{{ hostvars.es2.private_ip }}"
        - "{{ hostvars.es3.private_ip }}"
      perform_installation: false
  become: yes
  become_user: root
  vars:
    elasticsearch_node_master: true
    instances:
      node1:
        name: node-1                          # Important: must be equal to elasticsearch_node_name.
        ip: "{{ hostvars.es1.private_ip }}"   # When unzipping, the node will search for its node name folder to get the cert.
      node2:
        name: node-2
        ip: "{{ hostvars.es2.private_ip }}"
      node3:
        name: node-3
        ip: "{{ hostvars.es3.private_ip }}"
      node4:
        name: node-4
        ip: "{{ hostvars.manager.private_ip }}"
      node5:
        name: node-5
        ip: "{{ hostvars.worker.private_ip }}"
      node6:
        name: node-6
        ip: "{{ hostvars.kibana.private_ip }}"
  tags:
    - generate-certs

#ODFE Cluster
- hosts: odfe_cluster
  strategy: free
  roles:
    - role: ../roles/opendistro/opendistro-elasticsearch
      elasticsearch_network_host: "{{ private_ip }}"
  become: yes
  become_user: root
  vars:
    elasticsearch_cluster_nodes:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"
    elasticsearch_discovery_nodes:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"
    elasticsearch_node_master: true
    instances:
      node1:
        name: node-1                          # Important: must be equal to elasticsearch_node_name.
        ip: "{{ hostvars.es1.private_ip }}"   # When unzipping, the node will search for its node name folder to get the cert.
      node2:
        name: node-2
        ip: "{{ hostvars.es2.private_ip }}"
      node3:
        name: node-3
        ip: "{{ hostvars.es3.private_ip }}"
      node4:
        name: node-4
        ip: "{{ hostvars.manager.private_ip }}"
      node5:
        name: node-5
        ip: "{{ hostvars.worker.private_ip }}"
      node6:
        name: node-6
        ip: "{{ hostvars.kibana.private_ip }}"

#Wazuh cluster
- hosts: manager
  roles:
    - role: "../roles/wazuh/ansible-wazuh-manager"
    - role: "../roles/wazuh/ansible-filebeat-oss"
      filebeat_node_name: node-4
  become: yes
  become_user: root
  vars:
    wazuh_manager_config:
      connection:
        - type: 'secure'
          port: '1514'
          protocol: 'tcp'
          queue_size: 131072
      api:
        https: 'yes'
      cluster:
        disable: 'no'
        node_name: 'master'
        node_type: 'master'
        nodes:
          - '"{{ hostvars.manager.private_ip }}"'
        hidden: 'no'
    filebeat_output_elasticsearch_hosts:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"

- hosts: worker
  roles:
    - role: "../roles/wazuh/ansible-wazuh-manager"
    - role: "../roles/wazuh/ansible-filebeat-oss"
      filebeat_node_name: node-5
  become: yes
  become_user: root
  vars:
    wazuh_manager_config:
      connection:
        - type: 'secure'
          port: '1514'
          protocol: 'tcp'
          queue_size: 131072
      api:
        https: 'yes'
      cluster:
        disable: 'no'
        node_name: 'worker_01'
        node_type: 'worker'
        key: 'c98b62a9b6169ac5f67dae55ae4a9088'
        nodes:
          - '"{{ hostvars.manager.private_ip }}"'
        hidden: 'no'
    filebeat_output_elasticsearch_hosts:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"

#ODFE+Kibana node
- hosts: kibana
  roles:
    - role: "../roles/opendistro/opendistro-elasticsearch"
    - role: "../roles/opendistro/opendistro-kibana"
  become: yes
  become_user: root
  vars:
    elasticsearch_network_host: "{{ hostvars.kibana.private_ip }}"
    elasticsearch_node_name: node-6
    elasticsearch_node_master: false
    elasticsearch_node_ingest: false
    elasticsearch_node_data: false
    elasticsearch_cluster_nodes:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"
    elasticsearch_discovery_nodes:
      - "{{ hostvars.es1.private_ip }}"
      - "{{ hostvars.es2.private_ip }}"
      - "{{ hostvars.es3.private_ip }}"
    kibana_node_name: node-6
    wazuh_api_credentials:
      - id: default
        url: https://{{ hostvars.manager.private_ip }}
        port: 55000
        user: foo
        password: bar
    instances:
      node1:
        name: node-1                          # Important: must be equal to elasticsearch_node_name.
        ip: "{{ hostvars.es1.private_ip }}"   # When unzipping, the node will search for its node name folder to get the cert.
      node2:
        name: node-2
        ip: "{{ hostvars.es2.private_ip }}"
      node3:
        name: node-3
        ip: "{{ hostvars.es3.private_ip }}"
      node4:
        name: node-4
        ip: "{{ hostvars.manager.private_ip }}"
      node5:
        name: node-5
        ip: "{{ hostvars.worker.private_ip }}"
      node6:
        name: node-6
        ip: "{{ hostvars.kibana.private_ip }}"
```
### Inventory file

The inventory file sets the public and private addresses of each node. The public addresses are used to gather facts and provision the instances, while the private addresses are used for cluster communication.
The SSH credentials used by Ansible during provisioning can also be specified in this file; another option is to include them directly in the playbook, as the short fragment after the inventory listing illustrates.
```ini
es1 ansible_host=<es1_ec2_public_ip> private_ip=<es1_ec2_private_ip> elasticsearch_node_name=node-1
es2 ansible_host=<es2_ec2_public_ip> private_ip=<es2_ec2_private_ip> elasticsearch_node_name=node-2
es3 ansible_host=<es3_ec2_public_ip> private_ip=<es3_ec2_private_ip> elasticsearch_node_name=node-3
kibana ansible_host=<kibana_node_public_ip> private_ip=<kibana_ec2_private_ip>
manager ansible_host=<manager_node_public_ip> private_ip=<manager_ec2_private_ip>
worker ansible_host=<worker_node_public_ip> private_ip=<worker_ec2_private_ip>

[odfe_cluster]
es1
es2
es3

[all:vars]
ansible_ssh_user=vagrant
ansible_ssh_private_key_file=/path/to/ssh/key.pem
ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
```
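For reference, the same SSH settings can instead be defined as play variables; the variable names below appear elsewhere in this README, while the host group and key path are placeholders:

```yaml
# Setting the Ansible SSH credentials at play level instead of in the inventory
- hosts: odfe_cluster
  become: yes
  become_user: root
  vars:
    ansible_ssh_user: vagrant
    ansible_ssh_private_key_file: /path/to/ssh/key.pem
    ansible_ssh_extra_args: '-o StrictHostKeyChecking=no'
  roles:
    - role: ../roles/opendistro/opendistro-elasticsearch
```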
### Launching playbook
```bash
ansible-playbook wazuh-odfe-production-ready.yml -i inventory
```
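Since the first play is tagged `generate-certs`, the certificate generation step can also be run or skipped on its own with the standard `ansible-playbook` tag options, for example:

```bash
# Run only the certificate generation play
ansible-playbook wazuh-odfe-production-ready.yml -i inventory --tags generate-certs

# Skip it on later runs, once the certificates already exist
ansible-playbook wazuh-odfe-production-ready.yml -i inventory --skip-tags generate-certs
```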
## Example: single-host environment
### Playbook
The following example playbook uses the `wazuh-ansible` roles to provision a single-host Wazuh environment. This architecture runs all the Wazuh and ODFE components on a single node.
```yaml
```
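The playbook body is not included in this revision of the README. Purely as an illustration of the idea (the host alias and values below are placeholders, not the repository's actual `wazuh-odfe-single.yml`), a minimal single-node play could combine the same roles used in the distributed example:

```yaml
---
# Illustrative sketch only: placeholder host and values, not the repository playbook.
- hosts: server
  become: yes
  become_user: root
  roles:
    - role: ../roles/opendistro/opendistro-elasticsearch
    - role: ../roles/wazuh/ansible-wazuh-manager
    - role: ../roles/wazuh/ansible-filebeat-oss
      filebeat_node_name: node-1
    - role: ../roles/opendistro/opendistro-kibana
  vars:
    elasticsearch_network_host: "{{ private_ip }}"
    elasticsearch_node_name: node-1
    kibana_node_name: node-1
    filebeat_output_elasticsearch_hosts:
      - "{{ private_ip }}"
    instances:
      node1:
        name: node-1                          # Must match elasticsearch_node_name.
        ip: "{{ private_ip }}"
```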
### Launching playbook
```bash
ansible-playbook wazuh-odfe-single.yml -i inventory
```
## Branches
* `stable` branch corresponds to the last Wazuh-Ansible stable version.
* `master` branch contains the latest code; be aware of possible bugs on this branch.
## Testing
1. Get the `wazuh-ansible` folder from the `wazuh-qa` [repository](https://github.com/wazuh/wazuh-qa/tree/master/ansible/wazuh-ansible).
```bash
git clone https://github.com/wazuh/wazuh-qa
```
2. Copy the `Pipfile` and the `molecule` folder into the root wazuh-ansible directory:
```bash
cp wazuh-qa/ansible/wazuh-ansible/* . -R
```
3. Follow these steps to launch the tests. Check the `Pipfile` for the different available scenarios:
```bash
pip install pipenv
sudo pipenv install
pipenv run test
pipenv run agent
```
## Contribute

@@ -387,7 +310,7 @@ https://github.com/dj-wasabi/ansible-ossec-server

## License and copyright

WAZUH
Copyright (C) 2016-2020 Wazuh Inc. (License GPLv2)

## Web references