From 25b2a8a946ae69f5111f17a6b8c0455ffa3b76fb Mon Sep 17 00:00:00 2001 From: zenidd Date: Thu, 15 Oct 2020 16:44:54 +0200 Subject: [PATCH] Updat README --- README.md | 491 +++++++++++++++++++++++------------------------------- 1 file changed, 207 insertions(+), 284 deletions(-) diff --git a/README.md b/README.md index f591ca1a..b6755343 100644 --- a/README.md +++ b/README.md @@ -48,18 +48,63 @@ These playbooks install and configure Wazuh agent, manager and Elastic Stack. │ ├── VERSION │ ├── CHANGELOG.md -## Example custom deploy: Wazuh cluster, ODFE cluster, Kibana + +## Example: production-ready distributed environment ### Playbook -The hereunder example playbook deploys a complete Wazuh distributed architecture with two Wazuh nodes (master+worker), 3 ODFE nodes and a mixed ODFE and Kibana node. +The hereunder example playbook uses the `wazuh-ansible` role to provision a production-ready Wazuh environment. The architecture includes 2 Wazuh nodes, 3 ODFE nodes and a mixed ODFE-Kibana node. ```yaml --- # Certificates generation - - hosts: es1 - roles: - - role: ../roles/opendistro/opendistro-elasticsearch - elasticsearch_network_host: "{{ private_ip }}" + - hosts: es1 + roles: + - role: ../roles/opendistro/opendistro-elasticsearch + elasticsearch_network_host: "{{ private_ip }}" + elasticsearch_cluster_nodes: + - "{{ hostvars.es1.private_ip }}" + - "{{ hostvars.es2.private_ip }}" + - "{{ hostvars.es3.private_ip }}" + elasticsearch_discovery_nodes: + - "{{ hostvars.es1.private_ip }}" + - "{{ hostvars.es2.private_ip }}" + - "{{ hostvars.es3.private_ip }}" + perform_installation: false + become: yes + become_user: root + vars: + elasticsearch_node_master: true + instances: + node1: + name: node-1 # Important: must be equal to elasticsearch_node_name. + ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert. 
+ node2: + name: node-2 + ip: "{{ hostvars.es2.private_ip }}" + node3: + name: node-3 + ip: "{{ hostvars.es3.private_ip }}" + node4: + name: node-4 + ip: "{{ hostvars.manager.private_ip }}" + node5: + name: node-5 + ip: "{{ hostvars.worker.private_ip }}" + node6: + name: node-6 + ip: "{{ hostvars.kibana.private_ip }}" + tags: + - generate-certs + +#ODFE Cluster + - hosts: odfe_cluster + strategy: free + roles: + - role: ../roles/opendistro/opendistro-elasticsearch + elasticsearch_network_host: "{{ private_ip }}" + become: yes + become_user: root + vars: elasticsearch_cluster_nodes: - "{{ hostvars.es1.private_ip }}" - "{{ hostvars.es2.private_ip }}" @@ -68,305 +113,183 @@ The hereunder example playbook deploys a complete Wazuh distributed architecture - "{{ hostvars.es1.private_ip }}" - "{{ hostvars.es2.private_ip }}" - "{{ hostvars.es3.private_ip }}" - perform_installation: false - become: yes - become_user: root - vars: - opendistro_standalone_installation: false - ansible_ssh_user: centos - ansible_ssh_private_key_file: /home/zenid/.ssh/core-dev-nv.pem - ansible_ssh_extra_args: '-o StrictHostKeyChecking=no' - elasticsearch_node_master: true - elasticsearch_cluster_name: wazuh - opendistro_version: 1.10.1 - opendistro_admin_password: T3stP4ssw0rd - certs_gen_tool_url: https://wazuh-demo.s3-us-west-1.amazonaws.com/search-guard-tlstool-1.7.zip - instances: - node1: - name: node-1 # Important: must be equal to elasticsearch_node_name. - ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert. 
- node2: - name: node-2 - ip: "{{ hostvars.es2.private_ip }}" - node3: - name: node-3 - ip: "{{ hostvars.es3.private_ip }}" - node4: - name: node-4 - ip: "{{ hostvars.manager.private_ip }}" - node5: - name: node-5 - ip: "{{ hostvars.worker.private_ip }}" - node6: - name: node-6 - ip: "{{ hostvars.kibana.private_ip }}" - tags: - - generate-certs + elasticsearch_node_master: true + instances: + node1: + name: node-1 # Important: must be equal to elasticsearch_node_name. + ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert. + node2: + name: node-2 + ip: "{{ hostvars.es2.private_ip }}" + node3: + name: node-3 + ip: "{{ hostvars.es3.private_ip }}" + node4: + name: node-4 + ip: "{{ hostvars.manager.private_ip }}" + node5: + name: node-5 + ip: "{{ hostvars.worker.private_ip }}" + node6: + name: node-6 + ip: "{{ hostvars.kibana.private_ip }}" -#ODFE Cluster - - hosts: odfe_cluster - strategy: free - roles: - - role: ../roles/opendistro/opendistro-elasticsearch - elasticsearch_network_host: "{{ private_ip }}" - become: yes - become_user: root - vars: - elasticsearch_cluster_nodes: - - "{{ hostvars.es1.private_ip }}" - - "{{ hostvars.es2.private_ip }}" - - "{{ hostvars.es3.private_ip }}" - elasticsearch_discovery_nodes: - - "{{ hostvars.es1.private_ip }}" - - "{{ hostvars.es2.private_ip }}" - - "{{ hostvars.es3.private_ip }}" - opendistro_standalone_installation: false - ansible_ssh_user: centos - ansible_ssh_private_key_file: /home/zenid/.ssh/core-dev-nv.pem - ansible_ssh_extra_args: '-o StrictHostKeyChecking=no' - elasticsearch_node_master: true - elasticsearch_cluster_name: wazuh - opendistro_version: 1.10.1 - opendistro_admin_password: T3stP4ssw0rd - opendistro_custom_user_role: admin - certs_gen_tool_url: https://wazuh-demo.s3-us-west-1.amazonaws.com/search-guard-tlstool-1.7.zip - instances: - node1: - name: node-1 # Important: must be equal to elasticsearch_node_name. 
- ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert. - node2: - name: node-2 - ip: "{{ hostvars.es2.private_ip }}" - node3: - name: node-3 - ip: "{{ hostvars.es3.private_ip }}" - node4: - name: node-4 - ip: "{{ hostvars.manager.private_ip }}" - node5: - name: node-5 - ip: "{{ hostvars.worker.private_ip }}" - node6: - name: node-6 - ip: "{{ hostvars.kibana.private_ip }}" - -#Wazuh cluster - - hosts: manager - roles: - - role: "../roles/wazuh/ansible-wazuh-manager" - - role: "../roles/wazuh/ansible-filebeat-oss" - filebeat_node_name: node-4 - become: yes - become_user: root - vars: - ansible_ssh_user: "centos" - ansible_ssh_private_key_file: /home/zenid/.ssh/core-dev-nv.pem - ansible_ssh_extra_args: '-o StrictHostKeyChecking=no' - wazuh_manager_version: 3.13.2 - wazuh_manager_config: + #Wazuh cluster + - hosts: manager + roles: + - role: "../roles/wazuh/ansible-wazuh-manager" + - role: "../roles/wazuh/ansible-filebeat-oss" + filebeat_node_name: node-4 + become: yes + become_user: root + vars: + wazuh_manager_config: connection: - - type: 'secure' - port: '1514' - protocol: 'tcp' - queue_size: 131072 + - type: 'secure' + port: '1514' + protocol: 'tcp' + queue_size: 131072 api: - port: "55000" - https: 'yes' + https: 'yes' cluster: - disable: 'no' - name: 'wazuh' - node_name: 'master' - node_type: 'master' - key: 'c98b62a9b6169ac5f67dae55ae4a9088' - port: '1516' - bind_addr: '0.0.0.0' - nodes: - - '"{{ hostvars.manager.private_ip }}"' - hidden: 'no' - filebeat_version: 7.9.1 - filebeat_security: true - elasticsearch_security_user: wazuh - elasticsearch_security_password: T3stP4ssw0rd - filebeat_output_elasticsearch_hosts: - - "{{ hostvars.es1.private_ip }}" - - "{{ hostvars.es2.private_ip }}" - - "{{ hostvars.es3.private_ip }}" - - - hosts: worker - roles: - - role: "../roles/wazuh/ansible-wazuh-manager" - - role: "../roles/wazuh/ansible-filebeat-oss" - filebeat_node_name: node-5 - become: yes - 
become_user: root - vars: - wazuh_manager_config: - authd: - enable: false - port: 1515 - use_source_ip: 'no' - force_insert: 'yes' - force_time: 0 - purge: 'yes' - use_password: 'no' - limit_maxagents: 'yes' - ciphers: 'HIGH:!ADH:!EXP:!MD5:!RC4:!3DES:!CAMELLIA:@STRENGTH' - ssl_agent_ca: null - ssl_verify_host: 'no' - ssl_manager_cert: 'sslmanager.cert' - ssl_manager_key: 'sslmanager.key' - ssl_auto_negotiate: 'no' - connection: - - type: 'secure' - port: '1514' - protocol: 'tcp' - queue_size: 131072 - api: - port: "55000" - https: 'yes' - cluster: - disable: 'no' - name: 'wazuh' - node_name: 'worker_01' - node_type: 'worker' - key: 'c98b62a9b6169ac5f67dae55ae4a9088' - port: '1516' - bind_addr: '0.0.0.0' - nodes: - - '"{{ hostvars.manager.private_ip }}"' - hidden: 'no' - ansible_ssh_user: centos - ansible_ssh_private_key_file: /home/zenid/.ssh/core-dev-nv.pem - ansible_ssh_extra_args: '-o StrictHostKeyChecking=no' - wazuh_manager_version: 3.13.2 - filebeat_version: 7.9.1 - filebeat_security: true - elasticsearch_security_user: wazuh - elasticsearch_security_password: T3stP4ssw0rd - filebeat_output_elasticsearch_hosts: - - "{{ hostvars.es1.private_ip }}" - - "{{ hostvars.es2.private_ip }}" - - "{{ hostvars.es3.private_ip }}" - -#ODFE+Kibana node - - hosts: kibana - roles: - - role: "../roles/opendistro/opendistro-elasticsearch" - - role: "../roles/opendistro/opendistro-kibana" - become: yes - become_user: root - vars: - elasticsearch_jvm_xms: 2560 - elasticsearch_network_host: "{{ hostvars.kibana.private_ip }}" - elasticsearch_node_name: node-6 - opendistro_kibana_user: wazuh - opendistro_kibana_password: T3stP4ssw0rd - elasticsearch_node_master: false - elasticsearch_node_ingest: false - elasticsearch_node_data: false - elasticsearch_cluster_nodes: - - "{{ hostvars.es1.private_ip }}" - - "{{ hostvars.es2.private_ip }}" - - "{{ hostvars.es3.private_ip }}" - elasticsearch_discovery_nodes: - - "{{ hostvars.es1.private_ip }}" - - "{{ hostvars.es2.private_ip }}" - - "{{ 
hostvars.es3.private_ip }}" - kibana_node_name: node-6 - opendistro_standalone_installation: false - ansible_ssh_user: centos - ansible_ssh_private_key_file: /home/zenid/.ssh/core-dev-nv.pem - ansible_ssh_extra_args: '-o StrictHostKeyChecking=no' - wazuh_version: 3.13.2 - elastic_stack_version: 7.9.1 - opendistro_version: 1.10.1 - kibana_opendistro_version: -1.10.1-1 - elasticsearch_cluster_name: wazuh - kibana_opendistro_security: true - opendistro_admin_password: T3stP4ssw0rd - opendistro_custom_user: wazuh - opendistro_custom_user_role: admin - node_options: "--max-old-space-size=2048" - certs_gen_tool_url: https://wazuh-demo.s3-us-west-1.amazonaws.com/search-guard-tlstool-1.7.zip - wazuh_api_credentials: - - id: default - url: https://{{ hostvars.manager.private_ip }} - port: 55000 - user: foo - password: bar - instances: - node1: - name: node-1 # Important: must be equal to elasticsearch_node_name. - ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert. 
- node2: - name: node-2 - ip: "{{ hostvars.es2.private_ip }}" - node3: - name: node-3 - ip: "{{ hostvars.es3.private_ip }}" - node4: - name: node-4 - ip: "{{ hostvars.manager.private_ip }}" - node5: - name: node-5 - ip: "{{ hostvars.worker.private_ip }}" - node6: - name: node-6 - ip: "{{ hostvars.kibana.private_ip }}" + disable: 'no' + node_name: 'master' + node_type: 'master' + nodes: + - '"{{ hostvars.manager.private_ip }}"' + hidden: 'no' + filebeat_output_elasticsearch_hosts: + - "{{ hostvars.es1.private_ip }}" + - "{{ hostvars.es2.private_ip }}" + - "{{ hostvars.es3.private_ip }}" + + - hosts: worker + roles: + - role: "../roles/wazuh/ansible-wazuh-manager" + - role: "../roles/wazuh/ansible-filebeat-oss" + filebeat_node_name: node-5 + become: yes + become_user: root + vars: + wazuh_manager_config: + connection: + - type: 'secure' + port: '1514' + protocol: 'tcp' + queue_size: 131072 + api: + https: 'yes' + cluster: + disable: 'no' + node_name: 'worker_01' + node_type: 'worker' + key: 'c98b62a9b6169ac5f67dae55ae4a9088' + nodes: + - '"{{ hostvars.manager.private_ip }}"' + hidden: 'no' + filebeat_output_elasticsearch_hosts: + - "{{ hostvars.es1.private_ip }}" + - "{{ hostvars.es2.private_ip }}" + - "{{ hostvars.es3.private_ip }}" + + #ODFE+Kibana node + - hosts: kibana + roles: + - role: "../roles/opendistro/opendistro-elasticsearch" + - role: "../roles/opendistro/opendistro-kibana" + become: yes + become_user: root + vars: + elasticsearch_network_host: "{{ hostvars.kibana.private_ip }}" + elasticsearch_node_name: node-6 + elasticsearch_node_master: false + elasticsearch_node_ingest: false + elasticsearch_node_data: false + elasticsearch_cluster_nodes: + - "{{ hostvars.es1.private_ip }}" + - "{{ hostvars.es2.private_ip }}" + - "{{ hostvars.es3.private_ip }}" + elasticsearch_discovery_nodes: + - "{{ hostvars.es1.private_ip }}" + - "{{ hostvars.es2.private_ip }}" + - "{{ hostvars.es3.private_ip }}" + kibana_node_name: node-6 + wazuh_api_credentials: + - id: default 
+ url: https://{{ hostvars.manager.private_ip }} + port: 55000 + user: foo + password: bar + instances: + node1: + name: node-1 # Important: must be equal to elasticsearch_node_name. + ip: "{{ hostvars.es1.private_ip }}" # When unzipping, the node will search for its node name folder to get the cert. + node2: + name: node-2 + ip: "{{ hostvars.es2.private_ip }}" + node3: + name: node-3 + ip: "{{ hostvars.es3.private_ip }}" + node4: + name: node-4 + ip: "{{ hostvars.manager.private_ip }}" + node5: + name: node-5 + ip: "{{ hostvars.worker.private_ip }}" + node6: + name: node-6 + ip: "{{ hostvars.kibana.private_ip }}" ``` +### Inventory file -### Example inventory file +The inventory file sets the public and private address of each node. The public addresses are used to gather facts and provision the instances while the private addresses are used for the cluster communications. +The ssh credentials used by Ansible during the provision can be specified in this file too. Another option is including them directly on the playbook. ```ini es1 ansible_host= private_ip= elasticsearch_node_name=node-1 es2 ansible_host= private_ip= elasticsearch_node_name=node-2 -es3 ansible_host= private_ip= elasticsearch_node_name=node-3 opendistro_custom_user=wazuh -kibana ansible_host= private_ip= +es3 ansible_host= private_ip= elasticsearch_node_name=node-3 +kibana ansible_host= private_ip= manager ansible_host= private_ip= -worker ansible_host= private_ip= - +worker ansible_host= private_ip= [odfe_cluster] es1 es2 es3 -[wui] -kibana -[managers] -manager -worker + +[all:vars] +ansible_ssh_user=vagrant +ansible_ssh_private_key_file=/path/to/ssh/key.pem +ansible_ssh_extra_args='-o StrictHostKeyChecking=no' +``` + +### Launching playbook + +```bash +ansible-playbook wazuh-odfe-production-ready.yml -i inventory +``` + +## Example: single-host environment + +### Playbook +The hereunder example playbook uses the `wazuh-ansible` role to provision a single-host Wazuh environment. 
This architecture includes all the Wazuh and ODFE components in a single node. + +```yaml + +``` + +### Launching playbook + +```bash +ansible-playbook wazuh-odfe-single.yml -i inventory ``` ## Branches - -* `stable` branch on correspond to the last Wazuh-Ansible stable version. -* `master` branch contains the latest code, be aware of possible bugs on this branch. - -## Testing - -1. Get the `wazuh-ansible` folder from the `wazuh-qa` [repository](https://github.com/wazuh/wazuh-qa/tree/master/ansible/wazuh-ansible). - -``` -git clone https://github.com/wazuh/wazuh-qa -``` - -2. Copy the `Pipfile` and the `molecule` folder into the root wazuh-ansible directory: - -``` -cp wazuh-qa/ansible/wazuh-ansible/* . -R -``` - -3. Follow these steps for launching the tests. Check the Pipfile for running different scenarios: - -``` -pip install pipenv -sudo pipenv install -pipenv run test -pipenv run agent -``` +* `master` branch corresponds to the last Wazuh-Ansible stable version. ## Contribute @@ -387,7 +310,7 @@ https://github.com/dj-wasabi/ansible-ossec-server ## License and copyright WAZUH -Copyright (C) 2016-2018 Wazuh Inc. (License GPLv2) +Copyright (C) 2016-2020 Wazuh Inc. (License GPLv2) ## Web references