6cddd9f2 by Javier Franco

first commit

0 parents
Showing 52 changed files with 1747 additions and 0 deletions
1 files/*
2 roles/beat/files/*.rpm
3 roles/elasticsearch/files/*.rpm
4 roles/kibana/files/*.rpm
5 roles/beat/files/*.deb
6 roles/elasticsearch/files/*.deb
7 roles/kibana/files/*.deb
8 *.swp
[defaults]
inventory = inventories/lab/hosts
remote_user = root
retry_file_enabled = false
# default module for ad-hoc `ansible` runs
module_name = shell
nocows = 1
stdout_callback = debug
roles_path = roles
# NOTE(review): /usr/bin/python is Python 2 on most modern distros — confirm
# the managed hosts actually provide an interpreter at this path
interpreter_python = /usr/bin/python
# NOTE(review): callback_whitelist is deprecated in ansible-core 2.11+ in
# favour of callbacks_enabled — fine on older releases, verify yours
callback_whitelist = profile_tasks


[privilege_escalation]
#become = true
#become_method = sudo
#become_user = root
#become_ask_pass = false
---
# Installs metricbeat, filebeat and packetbeat on the target group.
# Target defaults to the "beat" group; override with -e server=<pattern>.
- name: Playbook para instalar beats
  hosts: "{{ server | default('beat') }}"
  become: true
  tasks:
    - name: Install beats
      include_role:
        name: beat
      # FIX: task-level vars must be a mapping — the original list-of-dicts
      # form is deprecated and rejected by current Ansible releases.
      vars:
        beat_name: "{{ item.beat_name }}"
        rpm_name: "{{ item.rpm_name }}"
        elastic_master_ip: "10.9.3.68"
        repo_url: "http://10.9.3.54/repo/"
      loop:
        - { rpm_name: metricbeat-7.15.2-x86_64.rpm, beat_name: metricbeat }
        - { rpm_name: filebeat-7.15.2-x86_64.rpm, beat_name: filebeat }
        - { rpm_name: packetbeat-7.15.2-x86_64.rpm, beat_name: packetbeat }
---
# Installs OS prerequisites on the elasticsearch group, then Elasticsearch.
# FIX: the first play was named "Playbook para instalar elasticsearch" even
# though it only applies the prerequisites role — renamed to match what it does.
- name: Playbook para instalar prerequisitos de ELK
  hosts: elasticsearch
  become: true
  roles:
    - role: elk-prerequisitos

- name: Install Elasticsearch
  hosts: elasticsearch
  become: true
  roles:
    - role: elasticsearch
      vars:
        repo_url: "http://10.9.3.54/repo/"
        rpm_name: "elasticsearch-7.15.2-x86_64.rpm"
        # keep the master node's own configuration in sync with this list
        discovery_seed_hosts: '["10.9.3.68"]'
        elastic_master_ip: "10.9.3.68"
---
# Localhost/single-node fallback values for the elasticsearch role;
# the environment playbooks override these per deployment.
node_master_name: "master-1"
cluster_name: elasticsearch
# NOTE(review): kept as the literal string "[_local_, _site_]" — presumably
# substituted verbatim into elasticsearch.yml; verify against the template
network_host: "[_local_, _site_]"
discovery_seed_hosts: '["127.0.0.1"]'
cluster_initial_master_nodes: '["master-1"]'
elastic_master_ip: "127.0.0.1"
elasticsearch_hosts: '["http://127.0.0.1:9200"]'
1 ---
2 node_name: "master-1"
3 node_master: "true"
4 node_data: "false"
5 node_ingest: "true"
1 ---
2 node_name: node-1
3 node_master: "false"
4 node_data: "true"
5 node_ingest: "true"
1 ---
2 node_name: node-2
3 node_master: "false"
4 node_data: "true"
5 node_ingest: "true"
1 [all]
2 elk-nodo1 ansible_host=127.0.0.1
3 elk-nodo2 ansible_host=127.0.0.2
4 elk-nodo3 ansible_host=127.0.0.3
5
6 [kibana]
7 elk-nodo1
8
9 [beat]
10
11 [elasticsearch]
12 elk-nodo1
13 elk-nodo2
14 elk-nodo3
---
# Installs Kibana from the internal repository and points it at the
# Elasticsearch master.
- name: Playbook para instalar Kibana
  hosts: kibana
  become: true
  roles:
    - role: kibana
      vars:
        repo_url: "http://10.9.3.54/repo/"
        rpm_name: "kibana-7.15.2-x86_64.rpm"
        elasticsearch_hosts: '["http://10.9.3.68:9200"]' # where Kibana publishes
---
# Full-stack site playbook: ELK prerequisites + Elasticsearch, then Kibana,
# then the beats on every monitored host.

- name: Playbook para instalar prerequisitos de ELK
  hosts: elasticsearch
  become: true
  roles:
    - role: elk-prerequisitos

- name: Install Elasticsearch
  hosts: elasticsearch
  become: true
  roles:
    - role: elasticsearch
      vars:
        repo_url: "http://10.9.3.54/repo/"
        rpm_name: "elasticsearch-7.15.2-x86_64.rpm"
        # keep the master node's own configuration in sync with this list
        discovery_seed_hosts: '["10.9.3.68"]'
        elastic_master_ip: "10.9.3.68"

- name: Playbook para instalar Kibana
  hosts: kibana
  become: true
  roles:
    - role: kibana
      vars:
        repo_url: "http://10.9.3.54/repo/"
        rpm_name: "kibana-7.15.2-x86_64.rpm"
        elasticsearch_hosts: '["http://10.9.3.68:9200"]' # where Kibana publishes

- name: Playbook para instalar beats
  hosts: beat
  become: true
  tasks:
    - name: Install beats
      include_role:
        name: beat
      # FIX: task-level vars must be a mapping — the original list-of-dicts
      # form is deprecated and rejected by current Ansible releases.
      vars:
        beat_name: "{{ item.beat_name }}"
        rpm_name: "{{ item.rpm_name }}"
        # FIX: was 192.168.102.224, inconsistent with the Elasticsearch and
        # Kibana plays above (10.9.3.68) — the beats must ship to the same
        # stack this playbook deploys.
        elastic_master_ip: "10.9.3.68"
        repo_url: "http://10.9.3.54/repo/"
      loop:
        - { rpm_name: metricbeat-7.15.2-x86_64.rpm, beat_name: metricbeat }
        - { rpm_name: filebeat-7.15.2-x86_64.rpm, beat_name: filebeat }
        - { rpm_name: packetbeat-7.15.2-x86_64.rpm, beat_name: packetbeat }
1 Role Name
2 =========
3
4 A brief description of the role goes here.
5
6 Requirements
7 ------------
8
9 Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10
11 Role Variables
12 --------------
13
14 A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15
16 Dependencies
17 ------------
18
19 A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20
21 Example Playbook
22 ----------------
23
24 Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25
26 - hosts: servers
27 roles:
28 - { role: username.rolename, x: 42 }
29
30 License
31 -------
32
33 BSD
34
35 Author Information
36 ------------------
37
38 An optional section for the role authors to include contact information, or a website (HTML is not allowed).
1 ---
2 # defaults file for beat
...\ No newline at end of file ...\ No newline at end of file
1 ---
2 # handlers file for beat
...\ No newline at end of file ...\ No newline at end of file
---
galaxy_info:
  author: Javier Franco
  description: Elastic Beats for linux
  company: Datasystems

  # FIX: quoted so YAML keeps this as the string "2.4" — a plain 2.4 parses
  # as a float, and e.g. 2.10 would silently collapse to 2.1.
  min_ansible_version: "2.4"

  platforms:
    - name: Fedora
      versions:
        - "28+"
    - name: RHEL
      versions:
        - "7"

  galaxy_tags:
    - web
    - system
    - monitoring
    - logging
    - elk
    - elasticsearch
---
# tasks file for beat: install the requested beat for the host's OS family,
# configure it, verify the output connection, enable default modules and
# load the Kibana dashboards, then start the service.

- include_tasks: os-RedHat.yml
  when: ansible_os_family == "RedHat"

- include_tasks: os-Debian.yml
  when: ansible_os_family == "Debian"

- name: Configure {{ beat_name }}.yml
  template:
    src: templates/{{ beat_name }}.yml.j2
    dest: /etc/{{ beat_name }}/{{ beat_name }}.yml

# Best-effort connectivity check; failure is reported but does not abort.
- name: Test {{ beat_name }} connection
  shell: '{{ beat_name }} test output'
  register: result
  ignore_errors: true

- name: Test connection output
  debug:
    msg: "{{ result.stdout }}"

- name: Reload systemd daemons
  systemd:
    daemon_reload: true
  # FIX: compare numerically — the original string comparison
  # (ansible_distribution_major_version >= "7") is wrong for two-digit
  # versions, since "10" < "7" lexicographically.
  when: ansible_distribution_major_version | int >= 7

# packetbeat has no "system" module, so only metricbeat/filebeat qualify.
- name: Configure system module of {{ beat_name }}
  shell: '{{ beat_name }} modules enable system'
  when: (beat_name == "metricbeat") or
        (beat_name == "filebeat")

- name: Run {{ beat_name }} setup and dashboard config on kibana
  shell: '{{ beat_name }} setup'
  ignore_errors: true

- name: Start {{ beat_name }} service
  service:
    name: '{{ beat_name }}'
    state: started
    enabled: true
---
# Debian install path for a beat: copy a local .deb to the host and install
# it with apt.
# Generalized: `deb_name` may override the package filename and defaults to
# "<beat_name>.deb" (the original behaviour), so callers that pass versioned
# filenames — as the RedHat path does with rpm_name — can reuse this file.
- name: Copy the .deb to the server
  copy:
    src: "{{ pkg_location | default('files') }}/{{ deb_name | default(beat_name ~ '.deb') }}"
    dest: /root/

- name: Install the {{ beat_name }} package
  apt:
    deb: "/root/{{ deb_name | default(beat_name ~ '.deb') }}"
---
# RedHat install path for a beat: fetch the versioned rpm from the internal
# repository, then install it with yum.
#- name: Copy the rpm to the server
#  copy:
#    src: "{{ pkg_location | default('files') }}/{{ beat_name }}.rpm"
#    dest: /root/
- name: download the {{ rpm_name }} file to install
  get_url:
    url: "{{ repo_url }}{{ rpm_name }}"
    dest: /root/
    owner: root
    group: root

- name: Install the {{ rpm_name }} package
  yum:
    #name: /root/{{ beat_name }}.rpm
    name: /root/{{ rpm_name }}
    state: present
1 ###################### Auditbeat Configuration Example #########################
2
3 # This is an example configuration file highlighting only the most common
4 # options. The auditbeat.reference.yml file from the same directory contains all
5 # the supported options with more comments. You can use it as a reference.
6 #
7 # You can find the full configuration reference here:
8 # https://www.elastic.co/guide/en/beats/auditbeat/index.html
9
10 #========================== Modules configuration =============================
11 auditbeat.modules:
12
13 - module: auditd
14 # Load audit rules from separate files. Same format as audit.rules(7).
15 audit_rule_files: [ '${path.config}/audit.rules.d/*.conf' ]
16 audit_rules: |
17 ## Define audit rules here.
18 ## Create file watches (-w) or syscall audits (-a or -A). Uncomment these
19 ## examples or add your own rules.
20
21 ## If you are on a 64 bit platform, everything should be running
22 ## in 64 bit mode. This rule will detect any use of the 32 bit syscalls
23 ## because this might be a sign of someone exploiting a hole in the 32
24 ## bit API.
25 #-a always,exit -F arch=b32 -S all -F key=32bit-abi
26
27 ## Executions.
28 #-a always,exit -F arch=b64 -S execve,execveat -k exec
29
30 ## External access (warning: these can be expensive to audit).
31 #-a always,exit -F arch=b64 -S accept,bind,connect -F key=external-access
32
33 ## Identity changes.
34 #-w /etc/group -p wa -k identity
35 #-w /etc/passwd -p wa -k identity
36 #-w /etc/gshadow -p wa -k identity
37
38 ## Unauthorized access attempts.
39 #-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access
40 #-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access
41
42 - module: file_integrity
43 paths:
44 - /bin
45 - /usr/bin
46 - /sbin
47 - /usr/sbin
48 - /etc
49
50 - module: system
51 datasets:
52 - host # General host information, e.g. uptime, IPs
53 - login # User logins, logouts, and system boots.
54 - package # Installed, updated, and removed packages
55 - process # Started and stopped processes
56 - socket # Opened and closed sockets
57 - user # User information
58
59 # How often datasets send state updates with the
60 # current state of the system (e.g. all currently
61 # running processes, all open sockets).
62 state.period: 1m
63
64 # Enabled by default. Auditbeat will read password fields in
65 # /etc/passwd and /etc/shadow and store a hash locally to
66 # detect any changes.
67 user.detect_password_changes: true
68
69 # File patterns of the login record files.
70 login.wtmp_file_pattern: /var/log/wtmp*
71 login.btmp_file_pattern: /var/log/btmp*
72
73 #==================== Elasticsearch template setting ==========================
74 setup.template.settings:
75 index.number_of_shards: 1
76 #index.codec: best_compression
77 #_source.enabled: false
78
79 #================================ General =====================================
80
81 # The name of the shipper that publishes the network data. It can be used to group
82 # all the transactions sent by a single shipper in the web interface.
83 #name:
84
85 # The tags of the shipper are included in their own field with each
86 # transaction published.
87 #tags: ["service-X", "web-tier"]
88
89 # Optional fields that you can specify to add additional information to the
90 # output.
91 #fields:
92 # env: staging
93
94
95 #============================== Dashboards =====================================
96 # These settings control loading the sample dashboards to the Kibana index. Loading
97 # the dashboards is disabled by default and can be enabled either by setting the
98 # options here or by using the `setup` command.
99 #setup.dashboards.enabled: false
100
101 # The URL from where to download the dashboards archive. By default this URL
102 # has a value which is computed based on the Beat name and version. For released
103 # versions, this URL points to the dashboard archive on the artifacts.elastic.co
104 # website.
105 #setup.dashboards.url:
106
107 #============================== Kibana =====================================
108
109 # Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
110 # This requires a Kibana endpoint configuration.
111 setup.kibana:
112
113 # Kibana Host
114 # Scheme and port can be left out and will be set to the default (http and 5601)
115 # In case you specify and additional path, the scheme is required: http://localhost:5601/path
116 # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
117 host: "{{ elastic_master_ip }}:5601"
118
119 # Kibana Space ID
120 # ID of the Kibana Space into which the dashboards should be loaded. By default,
121 # the Default Space will be used.
122 #space.id:
123
124 #============================= Elastic Cloud ==================================
125
126 # These settings simplify using Auditbeat with the Elastic Cloud (https://cloud.elastic.co/).
127
128 # The cloud.id setting overwrites the `output.elasticsearch.hosts` and
129 # `setup.kibana.host` options.
130 # You can find the `cloud.id` in the Elastic Cloud web UI.
131 #cloud.id:
132
133 # The cloud.auth setting overwrites the `output.elasticsearch.username` and
134 # `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
135 #cloud.auth:
136
137 #================================ Outputs =====================================
138
139 # Configure what output to use when sending the data collected by the beat.
140
141 #-------------------------- Elasticsearch output ------------------------------
142 output.elasticsearch:
143 # Array of hosts to connect to.
144   hosts: ["{{ elastic_master_ip }}:9200"]
145
146 # Optional protocol and basic auth credentials.
147 #protocol: "https"
148 #username: "elastic"
149 #password: "changeme"
150
151 #----------------------------- Logstash output --------------------------------
152 #output.logstash:
153 # The Logstash hosts
154 #hosts: ["localhost:5044"]
155
156 # Optional SSL. By default is off.
157 # List of root certificates for HTTPS server verifications
158 #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
159
160 # Certificate for SSL client authentication
161 #ssl.certificate: "/etc/pki/client/cert.pem"
162
163 # Client Certificate Key
164 #ssl.key: "/etc/pki/client/cert.key"
165
166 #================================ Processors =====================================
167
168 # Configure processors to enhance or manipulate events generated by the beat.
169
170 processors:
171 - add_host_metadata: ~
172 - add_cloud_metadata: ~
173 - add_docker_metadata: ~
174
175 #================================ Logging =====================================
176
177 # Sets log level. The default log level is info.
178 # Available log levels are: error, warning, info, debug
179 #logging.level: debug
180
181 # At debug level, you can selectively enable logging only for some components.
182 # To enable all selectors use ["*"]. Examples of other selectors are "beat",
183 # "publish", "service".
184 #logging.selectors: ["*"]
185
186 #============================== X-Pack Monitoring ===============================
187 # auditbeat can export internal metrics to a central Elasticsearch monitoring
188 # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
189 # reporting is disabled by default.
190
191 # Set to true to enable the monitoring reporter.
192 #monitoring.enabled: false
193
194 # Sets the UUID of the Elasticsearch cluster under which monitoring data for this
195 # Auditbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
196 # is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
197 #monitoring.cluster_uuid:
198
199 # Uncomment to send the metrics to Elasticsearch. Most settings from the
200 # Elasticsearch output are accepted here as well.
201 # Note that the settings should point to your Elasticsearch *monitoring* cluster.
202 # Any setting that is not set is automatically inherited from the Elasticsearch
203 # output configuration, so if you have the Elasticsearch output configured such
204 # that it is pointing to your Elasticsearch monitoring cluster, you can simply
205 # uncomment the following line.
206 #monitoring.elasticsearch:
207
208 #================================= Migration ==================================
209
210 # This allows to enable 6.7 migration aliases
211 #migration.6_to_7.enabled: true
This diff could not be displayed because it is too large.
1 ###################### Filebeat Configuration Example #########################
2 # This file is an example configuration file highlighting only the most common
3 # options. The filebeat.reference.yml file from the same directory contains all the
4 # supported options with more comments. You can use it as a reference.
5 #
6 # You can find the full configuration reference here:
7 # https://www.elastic.co/guide/en/beats/filebeat/index.html
8
9 # For more available modules and options, please see the filebeat.reference.yml sample
10 # configuration file.
11
12 #=========================== Filebeat inputs =============================
13
14 filebeat.inputs:
15
16 # Each - is an input. Most options can be set at the input level, so
17 # you can use different inputs for various configurations.
18 # Below are the input specific configurations.
19
20 - type: log
21
22 # Change to true to enable this input configuration.
23 enabled: false
24
25 # Paths that should be crawled and fetched. Glob based paths.
26 paths:
27 - /var/log/*.log
28 #- c:\programdata\elasticsearch\logs\*
29
30 # Exclude lines. A list of regular expressions to match. It drops the lines that are
31 # matching any regular expression from the list.
32 #exclude_lines: ['^DBG']
33
34 # Include lines. A list of regular expressions to match. It exports the lines that are
35 # matching any regular expression from the list.
36 #include_lines: ['^ERR', '^WARN']
37
38 # Exclude files. A list of regular expressions to match. Filebeat drops the files that
39 # are matching any regular expression from the list. By default, no files are dropped.
40 #exclude_files: ['.gz$']
41
42 # Optional additional fields. These fields can be freely picked
43 # to add additional information to the crawled log files for filtering
44 #fields:
45 # level: debug
46 # review: 1
47
48 ### Multiline options
49
50 # Multiline can be used for log messages spanning multiple lines. This is common
51 # for Java Stack Traces or C-Line Continuation
52
53 # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
54 #multiline.pattern: ^\[
55
56 # Defines if the pattern set under pattern should be negated or not. Default is false.
57 #multiline.negate: false
58
59 # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern
60 # that was (not) matched before or after or as long as a pattern is not matched based on negate.
61 # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash
62 #multiline.match: after
63
64
65 #============================= Filebeat modules ===============================
66
67 filebeat.config.modules:
68 # Glob pattern for configuration loading
69 path: ${path.config}/modules.d/*.yml
70
71 # Set to true to enable config reloading
72 reload.enabled: false
73
74 # Period on which files under path should be checked for changes
75 #reload.period: 10s
76
77 #==================== Elasticsearch template setting ==========================
78
79 setup.template.settings:
80 index.number_of_shards: 1
81 #index.codec: best_compression
82 #_source.enabled: false
83
84 #================================ General =====================================
85
86 # The name of the shipper that publishes the network data. It can be used to group
87 # all the transactions sent by a single shipper in the web interface.
88 #name:
89
90 # The tags of the shipper are included in their own field with each
91 # transaction published.
92 #tags: ["service-X", "web-tier"]
93
94 # Optional fields that you can specify to add additional information to the
95 # output.
96 #fields:
97 # env: staging
98
99
100 #============================== Dashboards =====================================
101 # These settings control loading the sample dashboards to the Kibana index. Loading
102 # the dashboards is disabled by default and can be enabled either by setting the
103 # options here or by using the `setup` command.
104 #setup.dashboards.enabled: false
105
106 # The URL from where to download the dashboards archive. By default this URL
107 # has a value which is computed based on the Beat name and version. For released
108 # versions, this URL points to the dashboard archive on the artifacts.elastic.co
109 # website.
110 #setup.dashboards.url:
111
112 #============================== Kibana =====================================
113
114 # Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
115 # This requires a Kibana endpoint configuration.
116 setup.kibana:
117
118 # Kibana Host
119 # Scheme and port can be left out and will be set to the default (http and 5601)
120 # In case you specify and additional path, the scheme is required: http://localhost:5601/path
121 # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
122 host: "{{ elastic_master_ip }}:5601"
123
124 # Kibana Space ID
125 # ID of the Kibana Space into which the dashboards should be loaded. By default,
126 # the Default Space will be used.
127 #space.id:
128
129 #============================= Elastic Cloud ==================================
130
131 # These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).
132
133 # The cloud.id setting overwrites the `output.elasticsearch.hosts` and
134 # `setup.kibana.host` options.
135 # You can find the `cloud.id` in the Elastic Cloud web UI.
136 #cloud.id:
137
138 # The cloud.auth setting overwrites the `output.elasticsearch.username` and
139 # `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
140 #cloud.auth:
141
142 #================================ Outputs =====================================
143
144 # Configure what output to use when sending the data collected by the beat.
145
146 #-------------------------- Elasticsearch output ------------------------------
147 output.elasticsearch:
148 # Array of hosts to connect to.
149   hosts: ["{{ elastic_master_ip }}:9200"]
150
151 # Optional protocol and basic auth credentials.
152 #protocol: "https"
153 #username: "elastic"
154 #password: "changeme"
155
156 #----------------------------- Logstash output --------------------------------
157 #output.logstash:
158 # The Logstash hosts
159 #hosts: ["localhost:5044"]
160
161 # Optional SSL. By default is off.
162 # List of root certificates for HTTPS server verifications
163 #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
164
165 # Certificate for SSL client authentication
166 #ssl.certificate: "/etc/pki/client/cert.pem"
167
168 # Client Certificate Key
169 #ssl.key: "/etc/pki/client/cert.key"
170
171 #================================ Processors =====================================
172
173 # Configure processors to enhance or manipulate events generated by the beat.
174
175 processors:
176 - add_host_metadata: ~
177 - add_cloud_metadata: ~
178 - add_docker_metadata: ~
179 - add_kubernetes_metadata: ~
180
181 #================================ Logging =====================================
182
183 # Sets log level. The default log level is info.
184 # Available log levels are: error, warning, info, debug
185 #logging.level: debug
186
187 # At debug level, you can selectively enable logging only for some components.
188 # To enable all selectors use ["*"]. Examples of other selectors are "beat",
189 # "publish", "service".
190 #logging.selectors: ["*"]
191
192 #============================== X-Pack Monitoring ===============================
193 # filebeat can export internal metrics to a central Elasticsearch monitoring
194 # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
195 # reporting is disabled by default.
196
197 # Set to true to enable the monitoring reporter.
198 #monitoring.enabled: false
199
200 # Sets the UUID of the Elasticsearch cluster under which monitoring data for this
201 # Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
202 # is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
203 #monitoring.cluster_uuid:
204
205 # Uncomment to send the metrics to Elasticsearch. Most settings from the
206 # Elasticsearch output are accepted here as well.
207 # Note that the settings should point to your Elasticsearch *monitoring* cluster.
208 # Any setting that is not set is automatically inherited from the Elasticsearch
209 # output configuration, so if you have the Elasticsearch output configured such
210 # that it is pointing to your Elasticsearch monitoring cluster, you can simply
211 # uncomment the following line.
212 #monitoring.elasticsearch:
213
214 #================================= Migration ==================================
215
216 # This allows to enable 6.7 migration aliases
217 #migration.6_to_7.enabled: true
1 ###################### Metricbeat Configuration Example #######################
2 # This file is an example configuration file highlighting only the most common
3 # options. The metricbeat.reference.yml file from the same directory contains all the
4 # supported options with more comments. You can use it as a reference.
5 #
6 # You can find the full configuration reference here:
7 # https://www.elastic.co/guide/en/beats/metricbeat/index.html
8
9 #========================== Modules configuration ============================
10
11 metricbeat.config.modules:
12 # Glob pattern for configuration loading
13 path: ${path.config}/modules.d/*.yml
14
15 # Set to true to enable config reloading
16 reload.enabled: false
17
18 # Period on which files under path should be checked for changes
19 #reload.period: 10s
20
21 #==================== Elasticsearch template setting ==========================
22
23 setup.template.settings:
24 index.number_of_shards: 1
25 index.codec: best_compression
26 #_source.enabled: false
27
28 #================================ General =====================================
29
30 # The name of the shipper that publishes the network data. It can be used to group
31 # all the transactions sent by a single shipper in the web interface.
32 #name:
33
34 # The tags of the shipper are included in their own field with each
35 # transaction published.
36 #tags: ["service-X", "web-tier"]
37
38 # Optional fields that you can specify to add additional information to the
39 # output.
40 #fields:
41 # env: staging
42
43
44 #============================== Dashboards =====================================
45 # These settings control loading the sample dashboards to the Kibana index. Loading
46 # the dashboards is disabled by default and can be enabled either by setting the
47 # options here or by using the `setup` command.
48 #setup.dashboards.enabled: false
49
50 # The URL from where to download the dashboards archive. By default this URL
51 # has a value which is computed based on the Beat name and version. For released
52 # versions, this URL points to the dashboard archive on the artifacts.elastic.co
53 # website.
54 #setup.dashboards.url:
55
56 #============================== Kibana =====================================
57
58 # Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
59 # This requires a Kibana endpoint configuration.
60 setup.kibana:
61
62 # Kibana Host
63 # Scheme and port can be left out and will be set to the default (http and 5601)
64 # In case you specify and additional path, the scheme is required: http://localhost:5601/path
65 # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
66 host: "{{ elastic_master_ip }}:5601"
67
68 # Kibana Space ID
69 # ID of the Kibana Space into which the dashboards should be loaded. By default,
70 # the Default Space will be used.
71 #space.id:
72
73 #============================= Elastic Cloud ==================================
74
75 # These settings simplify using Metricbeat with the Elastic Cloud (https://cloud.elastic.co/).
76
77 # The cloud.id setting overwrites the `output.elasticsearch.hosts` and
78 # `setup.kibana.host` options.
79 # You can find the `cloud.id` in the Elastic Cloud web UI.
80 #cloud.id:
81
82 # The cloud.auth setting overwrites the `output.elasticsearch.username` and
83 # `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
84 #cloud.auth:
85
86 #================================ Outputs =====================================
87
88 # Configure what output to use when sending the data collected by the beat.
89
90 #-------------------------- Elasticsearch output ------------------------------
91 output.elasticsearch:
92 # Array of hosts to connect to.
93   hosts: ["{{ elastic_master_ip }}:9200"]
94
95 # Optional protocol and basic auth credentials.
96 #protocol: "https"
97 #username: "elastic"
98 #password: "changeme"
99
100 #----------------------------- Logstash output --------------------------------
101 #output.logstash:
102 # The Logstash hosts
103 #hosts: ["localhost:5044"]
104
105 # Optional SSL. By default is off.
106 # List of root certificates for HTTPS server verifications
107 #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
108
109 # Certificate for SSL client authentication
110 #ssl.certificate: "/etc/pki/client/cert.pem"
111
112 # Client Certificate Key
113 #ssl.key: "/etc/pki/client/cert.key"
114
115 #================================ Processors =====================================
116
117 # Configure processors to enhance or manipulate events generated by the beat.
118
119 processors:
120 - add_host_metadata: ~
121 - add_cloud_metadata: ~
122 - add_docker_metadata: ~
123 - add_kubernetes_metadata: ~
124
125 #================================ Logging =====================================
126
127 # Sets log level. The default log level is info.
128 # Available log levels are: error, warning, info, debug
129 #logging.level: debug
130
131 # At debug level, you can selectively enable logging only for some components.
132 # To enable all selectors use ["*"]. Examples of other selectors are "beat",
133 # "publish", "service".
134 #logging.selectors: ["*"]
135
136 #============================== X-Pack Monitoring ===============================
137 # metricbeat can export internal metrics to a central Elasticsearch monitoring
138 # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
139 # reporting is disabled by default.
140
141 # Set to true to enable the monitoring reporter.
142 #monitoring.enabled: false
143
144 # Sets the UUID of the Elasticsearch cluster under which monitoring data for this
145 # Metricbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
146 # is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
147 #monitoring.cluster_uuid:
148
149 # Uncomment to send the metrics to Elasticsearch. Most settings from the
150 # Elasticsearch output are accepted here as well.
151 # Note that the settings should point to your Elasticsearch *monitoring* cluster.
152 # Any setting that is not set is automatically inherited from the Elasticsearch
153 # output configuration, so if you have the Elasticsearch output configured such
154 # that it is pointing to your Elasticsearch monitoring cluster, you can simply
155 # uncomment the following line.
156 #monitoring.elasticsearch:
157
158 #================================= Migration ==================================
159
160 # This allows to enable 6.7 migration aliases
161 #migration.6_to_7.enabled: true
# Module: oracle
# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-oracle.html

- module: oracle
  metricsets: ["tablespace"]
  #enabled: false
  enabled: true
  period: 10s
  #hosts: ["oracle://user:pass@localhost:1521/ORCLPDB1.localdomain?sysdba=1"]
  # SECURITY(review): sys credentials are hard-coded in the DSN below and
  # committed to version control — move them into the username/password
  # options sourced from a vault/secret store instead of the URL.
  hosts: ["oracle://sys:12345678@exodb.lab.data.com.py:1521/exodb.lab.data.com.py?sysdba=1"]

  # username: ""
  # password: ""
1 #################### Packetbeat Configuration Example #########################
2
3 # This file is an example configuration file highlighting only the most common
4 # options. The packetbeat.reference.yml file from the same directory contains all the
5 # supported options with more comments. You can use it as a reference.
6 #
7 # You can find the full configuration reference here:
8 # https://www.elastic.co/guide/en/beats/packetbeat/index.html
9
10 #============================== Network device ================================
11
12 # Select the network interface to sniff the data. On Linux, you can use the
13 # "any" keyword to sniff on all connected interfaces.
14 packetbeat.interfaces.device: any
15
16 #================================== Flows =====================================
17
18 # Set `enabled: false` or comment out all options to disable flows reporting.
19 packetbeat.flows:
20 # Set network flow timeout. Flow is killed if no packet is received before being
21 # timed out.
22 timeout: 30s
23
24 # Configure reporting period. If set to -1, only killed flows will be reported
25 period: 10s
26
27 #========================== Transaction protocols =============================
28
29 packetbeat.protocols:
30 - type: icmp
31 # Enable ICMPv4 and ICMPv6 monitoring. Default: false
32 enabled: true
33
34 - type: amqp
35 # Configure the ports where to listen for AMQP traffic. You can disable
36 # the AMQP protocol by commenting out the list of ports.
37 ports: [5672]
38
39 - type: cassandra
40 #Cassandra port for traffic monitoring.
41 ports: [9042]
42
43 - type: dhcpv4
44 # Configure the DHCP for IPv4 ports.
45 ports: [67, 68]
46
47 - type: dns
48 # Configure the ports where to listen for DNS traffic. You can disable
49 # the DNS protocol by commenting out the list of ports.
50 ports: [53]
51
52 - type: http
53 # Configure the ports where to listen for HTTP traffic. You can disable
54 # the HTTP protocol by commenting out the list of ports.
55 ports: [80, 8080, 8000, 5000, 8002]
56
57 - type: memcache
58 # Configure the ports where to listen for memcache traffic. You can disable
59 # the Memcache protocol by commenting out the list of ports.
60 ports: [11211]
61
62 - type: mysql
63 # Configure the ports where to listen for MySQL traffic. You can disable
64 # the MySQL protocol by commenting out the list of ports.
65 ports: [3306,3307]
66
67 - type: pgsql
68 # Configure the ports where to listen for Pgsql traffic. You can disable
69 # the Pgsql protocol by commenting out the list of ports.
70 ports: [5432]
71
72 - type: redis
73 # Configure the ports where to listen for Redis traffic. You can disable
74 # the Redis protocol by commenting out the list of ports.
75 ports: [6379]
76
77 - type: thrift
78 # Configure the ports where to listen for Thrift-RPC traffic. You can disable
79 # the Thrift-RPC protocol by commenting out the list of ports.
80 ports: [9090]
81
82 - type: mongodb
83 # Configure the ports where to listen for MongoDB traffic. You can disable
84 # the MongoDB protocol by commenting out the list of ports.
85 ports: [27017]
86
87 - type: nfs
88 # Configure the ports where to listen for NFS traffic. You can disable
89 # the NFS protocol by commenting out the list of ports.
90 ports: [2049]
91
92 - type: tls
93 # Configure the ports where to listen for TLS traffic. You can disable
94 # the TLS protocol by commenting out the list of ports.
95 ports:
96 - 443 # HTTPS
97 - 993 # IMAPS
98 - 995 # POP3S
99 - 5223 # XMPP over SSL
100 - 8443
101 - 8883 # Secure MQTT
102 - 9243 # Elasticsearch
103
104 #==================== Elasticsearch template setting ==========================
105
106 setup.template.settings:
107 index.number_of_shards: 1
108 #index.codec: best_compression
109 #_source.enabled: false
110
111 #================================ General =====================================
112
113 # The name of the shipper that publishes the network data. It can be used to group
114 # all the transactions sent by a single shipper in the web interface.
115 #name:
116
117 # The tags of the shipper are included in their own field with each
118 # transaction published.
119 #tags: ["service-X", "web-tier"]
120
121 # Optional fields that you can specify to add additional information to the
122 # output.
123 #fields:
124 # env: staging
125
126
127 #============================== Dashboards =====================================
128 # These settings control loading the sample dashboards to the Kibana index. Loading
129 # the dashboards is disabled by default and can be enabled either by setting the
130 # options here or by using the `setup` command.
131 #setup.dashboards.enabled: false
132
133 # The URL from where to download the dashboards archive. By default this URL
134 # has a value which is computed based on the Beat name and version. For released
135 # versions, this URL points to the dashboard archive on the artifacts.elastic.co
136 # website.
137 #setup.dashboards.url:
138
139 #============================== Kibana =====================================
140
141 # Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
142 # This requires a Kibana endpoint configuration.
143 setup.kibana:
144
145 # Kibana Host
146 # Scheme and port can be left out and will be set to the default (http and 5601)
147 # In case you specify and additional path, the scheme is required: http://localhost:5601/path
148 # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
149 host: "{{ elastic_master_ip }}:5601"
150
151 # Kibana Space ID
152 # ID of the Kibana Space into which the dashboards should be loaded. By default,
153 # the Default Space will be used.
154 #space.id:
155
156 #============================= Elastic Cloud ==================================
157
158 # These settings simplify using Packetbeat with the Elastic Cloud (https://cloud.elastic.co/).
159
160 # The cloud.id setting overwrites the `output.elasticsearch.hosts` and
161 # `setup.kibana.host` options.
162 # You can find the `cloud.id` in the Elastic Cloud web UI.
163 #cloud.id:
164
165 # The cloud.auth setting overwrites the `output.elasticsearch.username` and
166 # `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
167 #cloud.auth:
168
169 #================================ Outputs =====================================
170
171 # Configure what output to use when sending the data collected by the beat.
172
173 #-------------------------- Elasticsearch output ------------------------------
174 output.elasticsearch:
175 # Array of hosts to connect to.
  hosts: ["{{ elastic_master_ip }}:9200"]
177
178 # Optional protocol and basic auth credentials.
179 #protocol: "https"
180 #username: "elastic"
181 #password: "changeme"
182
183 #----------------------------- Logstash output --------------------------------
184 #output.logstash:
185 # The Logstash hosts
186 #hosts: ["localhost:5044"]
187
188 # Optional SSL. By default is off.
189 # List of root certificates for HTTPS server verifications
190 #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
191
192 # Certificate for SSL client authentication
193 #ssl.certificate: "/etc/pki/client/cert.pem"
194
195 # Client Certificate Key
196 #ssl.key: "/etc/pki/client/cert.key"
197
198 #================================ Processors =====================================
199
200 # Configure processors to enhance or manipulate events generated by the beat.
201
202 processors:
203 - add_host_metadata: ~
204 - add_cloud_metadata: ~
205 - add_docker_metadata: ~
206
207 #================================ Logging =====================================
208
209 # Sets log level. The default log level is info.
210 # Available log levels are: error, warning, info, debug
211 #logging.level: debug
212
213 # At debug level, you can selectively enable logging only for some components.
214 # To enable all selectors use ["*"]. Examples of other selectors are "beat",
215 # "publish", "service".
216 #logging.selectors: ["*"]
217
218 #============================== X-Pack Monitoring ===============================
219 # packetbeat can export internal metrics to a central Elasticsearch monitoring
220 # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
221 # reporting is disabled by default.
222
223 # Set to true to enable the monitoring reporter.
224 #monitoring.enabled: false
225
226 # Sets the UUID of the Elasticsearch cluster under which monitoring data for this
227 # Packetbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
228 # is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
229 #monitoring.cluster_uuid:
230
231 # Uncomment to send the metrics to Elasticsearch. Most settings from the
232 # Elasticsearch output are accepted here as well.
233 # Note that the settings should point to your Elasticsearch *monitoring* cluster.
234 # Any setting that is not set is automatically inherited from the Elasticsearch
235 # output configuration, so if you have the Elasticsearch output configured such
236 # that it is pointing to your Elasticsearch monitoring cluster, you can simply
237 # uncomment the following line.
238 #monitoring.elasticsearch:
239
240 #================================= Migration ==================================
241
242 # This allows to enable 6.7 migration aliases
243 #migration.6_to_7.enabled: true
1 Role Name
2 =========
3
4 A brief description of the role goes here.
5
6 Requirements
7 ------------
8
9 Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10
11 Role Variables
12 --------------
13
14 A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15
16 Dependencies
17 ------------
18
19 A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20
21 Example Playbook
22 ----------------
23
24 Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25
26 - hosts: servers
27 roles:
28 - { role: username.rolename, x: 42 }
29
30 License
31 -------
32
33 BSD
34
35 Author Information
36 ------------------
37
38 An optional section for the role authors to include contact information, or a website (HTML is not allowed).
1 ---
2 # defaults file for elasticsearch
...\ No newline at end of file ...\ No newline at end of file
1 ---
2 # handlers file for elasticsearch
...\ No newline at end of file ...\ No newline at end of file
1 galaxy_info:
2 author: Javier Franco
  description: Elasticsearch for linux
4 company: Datasystems
5
6 min_ansible_version: 2.4
7
8 platforms:
9 - name: Fedora
10 versions:
11 - 28+
12 - name: RHEL
13 versions:
14 - 7
15
16 galaxy_tags:
17 - web
18 - system
19 - monitoring
20 - logging
21 - elk
22 - elasticsearch
1 ---
2 # tasks file for elasticsearch---
3
4 - include: os-RedHat.yml
5 when: ansible_os_family == "RedHat"
6
7 - include: os-Debian.yml
8 when: ansible_os_family == "Debian"
9
10 - name: Configure elasticsearch.yml
11 template:
12 src: templates/elasticsearch.yml.j2
13 dest: /etc/elasticsearch/elasticsearch.yml
14 owner: root
15 group: elasticsearch
16
17 - name: Reload systemd daemons
18 systemd:
19 daemon_reload: yes
20
21 - name: Start the elasticsearch service
22 service:
23 name: elasticsearch
24 state: started
25 enabled: yes
1 ---
2 - name: Copy the .deb to the server
3 copy:
4 src: "{{ pkg_location | default('files') }}/elasticsearch.deb"
5 dest: /root/
6
7 - name: Install the elasticsearch package
8 apt:
9 deb: /root/elasticsearch.deb
1 ---
2 #- name: Copy the rpm to the server
3 # copy:
4 # src: "{{ pkg_location | default('files') }}/elasticsearch.rpm"
5 # dest: /root/
6
7 - name: download the rpm file to install
8 get_url:
9 url: "{{ repo_url }}{{ rpm_name }}"
10 dest: /root/
11 owner: root
12 group: root
13
14 - name: Install the elasticsearch package
15 yum:
16 #name: /root/elasticsearch.rpm
17 name: /root/{{ rpm_name }}
18 state: present
1 # ======================== Elasticsearch Configuration =========================
2 #
3 # NOTE: Elasticsearch comes with reasonable defaults for most settings.
4 # Before you set out to tweak and tune the configuration, make sure you
5 # understand what are you trying to accomplish and the consequences.
6 #
7 # The primary way of configuring a node is via this file. This template lists
8 # the most important settings you may want to configure for a production cluster.
9 #
10 # Please consult the documentation for further information on configuration options:
11 # https://www.elastic.co/guide/en/elasticsearch/reference/index.html
12 #
13 # ---------------------------------- Cluster -----------------------------------
14 #
15 # Use a descriptive name for your cluster:
16 #
17 cluster.name: {{ cluster_name | default('elasticsearch') }}
18 #
19 # ------------------------------------ Node ------------------------------------
20 #
21 # Use a descriptive name for the node:
22 #
23 node.name: {{ node_name | default( "master-1" ) }}
24 node.master: {{ node_master | default('true') }}
25 node.data: {{ node_data | default('true') }}
26 #
27 # Add custom attributes to the node:
28 #
29 #node.attr.rack: r1
30 #
31 # ----------------------------------- Paths ------------------------------------
32 #
33 # Path to directory where to store the data (separate multiple locations by comma):
34 #
35 path.data: /var/lib/elasticsearch
36 #
37 # Path to log files:
38 #
39 path.logs: /var/log/elasticsearch
40 #
41 # ----------------------------------- Memory -----------------------------------
42 #
43 # Lock the memory on startup:
44 #
45 #bootstrap.memory_lock: true
46 #
47 # Make sure that the heap size is set to about half the memory available
48 # on the system and that the owner of the process is allowed to use this
49 # limit.
50 #
51 # Elasticsearch performs poorly when the system is swapping the memory.
52 #
53 # ---------------------------------- Network -----------------------------------
54 #
55 # Set the bind address to a specific IP (IPv4 or IPv6):
56 #
network.host: {{ network_host | default('[_local_, _site_]') }}
58 #
59 # Set a custom port for HTTP:
60 #
61 #http.port: 9200
62 #
63 # For more information, consult the network module documentation.
64 #
65 # --------------------------------- Discovery ----------------------------------
66 #
67 # Pass an initial list of hosts to perform discovery when this node is started:
68 # The default list of hosts is ["127.0.0.1", "[::1]"]
69 #
70 discovery.seed_hosts: {{ discovery_seed_hosts }}
71 #
72 # Bootstrap the cluster using an initial set of master-eligible nodes:
73 #
74 cluster.initial_master_nodes: {{ cluster_initial_master_nodes | default('["master-1"]') }}
75 #
76 # For more information, consult the discovery and cluster formation module documentation.
77 #
78 # ---------------------------------- Gateway -----------------------------------
79 #
80 # Block initial recovery after a full cluster restart until N nodes are started:
81 #
82 #gateway.recover_after_nodes: 3
83 #
84 # For more information, consult the gateway module documentation.
85 #
86 # ---------------------------------- Various -----------------------------------
87 #
88 # Require explicit names when deleting indices:
89 #
90 #action.destructive_requires_name: true
1 ---
2 language: python
3 python: "2.7"
4
5 # Use the new container infrastructure
6 sudo: false
7
8 # Install ansible
9 addons:
10 apt:
11 packages:
12 - python-pip
13
14 install:
15 # Install ansible
16 - pip install ansible
17
18 # Check ansible version
19 - ansible --version
20
21 # Create ansible.cfg with correct roles_path
22 - printf '[defaults]\nroles_path=../' >ansible.cfg
23
24 script:
25 # Basic role syntax check
26 - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
27
28 notifications:
29 webhooks: https://galaxy.ansible.com/api/v1/notifications/
...\ No newline at end of file ...\ No newline at end of file
1 Role Name
2 =========
3
4 A brief description of the role goes here.
5
6 Requirements
7 ------------
8
9 Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10
11 Role Variables
12 --------------
13
14 A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15
16 Dependencies
17 ------------
18
19 A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20
21 Example Playbook
22 ----------------
23
24 Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25
26 - hosts: servers
27 roles:
28 - { role: username.rolename, x: 42 }
29
30 License
31 -------
32
33 BSD
34
35 Author Information
36 ------------------
37
38 An optional section for the role authors to include contact information, or a website (HTML is not allowed).
1 ---
2 # defaults file for roles/elk-prerequisitos
...\ No newline at end of file ...\ No newline at end of file
1 ---
2 # handlers file for roles/elk-prerequisitos
...\ No newline at end of file ...\ No newline at end of file
1 galaxy_info:
2 author: Javier Franco
3 description: Elastic Stack prerequisitos
4 company: Datasystems
5
6 min_ansible_version: 2.4
7
8 platforms:
9 - name: Fedora
10 versions:
11 - 28+
12 - name: RHEL
13 versions:
14 - 7
15 - name: CentOS
16 versions:
17 - 7
18
19 galaxy_tags:
20 - web
21 - system
22 - monitoring
23 - logging
24 - elk
25 - elasticsearch
1 ---
2 # tasks file for roles/elk-prerequisitos
3
4 - name: Add the fs.file-max on the sysctl.conf
5 sysctl:
6 name: fs.file-max
7 value: '{{ fsfile_max_value }}'
8 reload: yes
9 tags:
10 - add_the_fsfilemax_on_the_sysctlconf
11 - elk_prerequisitos
12
13 - name: Add the max map count on sysctl.conf
14 sysctl:
15 name: vm.max_map_count
16 value: '{{ vm_max_map_count_value }}'
17 reload: yes
18 tags:
19 - add_the_max_map_count_on_sysctlconf
20 - elk_prerequisitos
21
22 - name: Configure the fs.file max per user
23 pam_limits:
24 domain: "{{ fsfile_max_user }}"
25 limit_type: "{{ item.lim_type }}"
26 limit_item: "{{ item.lim_item }}"
27 value: "{{ item.value }}"
28 loop:
29 - {lim_type: 'soft', lim_item: 'nproc', value: '{{ fsfile_max_value }}'}
30 - {lim_type: 'soft', lim_item: 'nofile', value: '{{ fsfile_max_value }}'}
31 - {lim_type: 'hard', lim_item: 'nproc', value: '{{ fsfile_max_value }}'}
32 - {lim_type: 'hard', lim_item: 'nofile', value: '{{ fsfile_max_value }}'}
33 tags:
34 - configure_the_fsfilemax_per_user
35 - elk_prerequisitos
36
37 - name: Start firewall
38 service:
39 name: firewalld
40 state: started
41 tags:
42 - start_firewall
43 - elk_prerequisitos
44
45 - name: Configure firewall ports
46 firewalld:
47 port: "{{ item.port }}"
48 permanent: yes
49 state: "{{ item.state }}"
50 immediate: yes
51 loop:
52 - {port: '9200/tcp', state: 'enabled'}
53 - {port: '9300/tcp', state: 'enabled'}
54 - {port: '5601/tcp', state: 'enabled'}
55 when: ansible_os_family == "RedHat"
56 tags:
57 - configure_firewll_ports
58 - elk_prerequisitos
1 ---
2 - hosts: localhost
3 remote_user: root
4 roles:
5 - roles/elk-prerequisitos
...\ No newline at end of file ...\ No newline at end of file
1 ---
2 # vars file for roles/elk-prerequisitos
3 fsfile_max_value: "65536"
4 fsfile_max_user: "elasticsearch"
5 vm_max_map_count_value: "262144"
1 Role Name
2 =========
3
4 A brief description of the role goes here.
5
6 Requirements
7 ------------
8
9 Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10
11 Role Variables
12 --------------
13
14 A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15
16 Dependencies
17 ------------
18
19 A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20
21 Example Playbook
22 ----------------
23
24 Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25
26 - hosts: servers
27 roles:
28 - { role: username.rolename, x: 42 }
29
30 License
31 -------
32
33 BSD
34
35 Author Information
36 ------------------
37
38 An optional section for the role authors to include contact information, or a website (HTML is not allowed).
1 ---
2 # defaults file for kibana
...\ No newline at end of file ...\ No newline at end of file
1 ---
2 # handlers file for kibana
...\ No newline at end of file ...\ No newline at end of file
1 galaxy_info:
2 author: Javier Franco
  description: Kibana for linux
4 company: Datasystems
5
6 min_ansible_version: 2.4
7
8 platforms:
9 - name: Fedora
10 versions:
11 - 28+
12 - name: RHEL
13 versions:
14 - 7
15
16 galaxy_tags:
17 - web
18 - system
19 - monitoring
20 - logging
21 - elk
22 - elasticsearch
1 ---
2 # tasks file for kibana---
3
4 - include_tasks: os-RedHat.yml
5 when: ansible_os_family == "RedHat"
6
7 - include_tasks: os-Debian.yml
8 when: ansible_os_family == "Debian"
9
10 - name: Configure kibana.yml
11 template:
12 src: templates/kibana.yml.j2
13 dest: /etc/kibana/kibana.yml
14
15 - name: Reload systemd daemons
16 systemd:
17 daemon_reload: yes
18
19 - name: Start kibana service
20 service:
21 name: kibana
22 state: started
23 enabled: yes
24
25 - name: Wait for kibana to fully start
26 uri:
27 url: "http://{{ ansible_facts.default_ipv4.address }}:5601/status"
28 status_code: 200
29 register: kibana_status
30 until: kibana_status.status == 200
31 retries: 90
32 delay: 5
1 ---
2 - name: Copy the .deb to the server
3 copy:
4 src: "{{ pkg_location | default('files') }}/kibana.deb"
5 dest: /root/
6
7 - name: Install the kibana package
8 apt:
9 deb: /root/kibana.deb
10 ...
1 ---
2 #- name: Copy the rpm to the server
3 # copy:
4 # src: "{{ pkg_location | default('files') }}/kibana.rpm"
5 # dest: /root/
6
7 - name: download the rpm file to install
8 get_url:
9 url: "{{ repo_url }}{{ rpm_name }}"
10 dest: /root/
11 owner: root
12 group: root
13
14
15 - name: Install the kibana package
16 yum:
17 #name: /root/kibana.rpm
18 name: /root/{{ rpm_name }}
19 state: present
20 ...
1 # Kibana is served by a back end server. This setting specifies the port to use.
2 #server.port: 5601
3
4 # Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
5 # The default is 'localhost', which usually means remote machines will not be able to connect.
6 # To allow connections from remote users, set this parameter to a non-loopback address.
7 server.host: "{{ ansible_facts.default_ipv4.address }}"
8
9 # Enables you to specify a path to mount Kibana at if you are running behind a proxy.
10 # Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath
11 # from requests it receives, and to prevent a deprecation warning at startup.
12 # This setting cannot end in a slash.
13 #server.basePath: ""
14
15 # Specifies whether Kibana should rewrite requests that are prefixed with
16 # `server.basePath` or require that they are rewritten by your reverse proxy.
17 # This setting was effectively always `false` before Kibana 6.3 and will
18 # default to `true` starting in Kibana 7.0.
19 #server.rewriteBasePath: false
20
21 # The maximum payload size in bytes for incoming server requests.
22 #server.maxPayloadBytes: 1048576
23
24 # The Kibana server's name. This is used for display purposes.
25 #server.name: "your-hostname"
26
27 # The URLs of the Elasticsearch instances to use for all your queries.
28 #elasticsearch.hosts: ["http://localhost:9200"]
elasticsearch.hosts: {{ elasticsearch_hosts | default('["http://localhost:9200"]') }}
30
31 # When this setting's value is true Kibana uses the hostname specified in the server.host
32 # setting. When the value of this setting is false, Kibana uses the hostname of the host
33 # that connects to this Kibana instance.
34 #elasticsearch.preserveHost: true
35
36 # Kibana uses an index in Elasticsearch to store saved searches, visualizations and
37 # dashboards. Kibana creates a new index if the index doesn't already exist.
38 #kibana.index: ".kibana"
39
40 # The default application to load.
41 #kibana.defaultAppId: "home"
42
43 # If your Elasticsearch is protected with basic authentication, these settings provide
44 # the username and password that the Kibana server uses to perform maintenance on the Kibana
45 # index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
46 # is proxied through the Kibana server.
47 #elasticsearch.username: "kibana"
48 #elasticsearch.password: "pass"
49
50 # Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
51 # These settings enable SSL for outgoing requests from the Kibana server to the browser.
52 #server.ssl.enabled: false
53 #server.ssl.certificate: /path/to/your/server.crt
54 #server.ssl.key: /path/to/your/server.key
55
56 # Optional settings that provide the paths to the PEM-format SSL certificate and key files.
57 # These files validate that your Elasticsearch backend uses the same key files.
58 #elasticsearch.ssl.certificate: /path/to/your/client.crt
59 #elasticsearch.ssl.key: /path/to/your/client.key
60
61 # Optional setting that enables you to specify a path to the PEM file for the certificate
62 # authority for your Elasticsearch instance.
63 #elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
64
65 # To disregard the validity of SSL certificates, change this setting's value to 'none'.
66 #elasticsearch.ssl.verificationMode: full
67
68 # Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
69 # the elasticsearch.requestTimeout setting.
70 #elasticsearch.pingTimeout: 1500
71
72 # Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
73 # must be a positive integer.
74 #elasticsearch.requestTimeout: 30000
75
76 # List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
77 # headers, set this value to [] (an empty list).
78 #elasticsearch.requestHeadersWhitelist: [ authorization ]
79
80 # Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
81 # by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
82 #elasticsearch.customHeaders: {}
83
84 # Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
85 #elasticsearch.shardTimeout: 30000
86
87 # Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.
88 #elasticsearch.startupTimeout: 5000
89
90 # Logs queries sent to Elasticsearch. Requires logging.verbose set to true.
91 #elasticsearch.logQueries: false
92
93 # Specifies the path where Kibana creates the process ID file.
94 #pid.file: /var/run/kibana.pid
95
96 # Enables you specify a file where Kibana stores log output.
97 #logging.dest: stdout
98
99 # Set the value of this setting to true to suppress all logging output.
100 #logging.silent: false
101
102 # Set the value of this setting to true to suppress all logging output other than error messages.
103 #logging.quiet: false
104
105 # Set the value of this setting to true to log all events, including system usage information
106 # and all requests.
107 #logging.verbose: false
108
109 # Set the interval in milliseconds to sample system and process performance
110 # metrics. Minimum is 100ms. Defaults to 5000.
111 #ops.interval: 5000
112
113 # Specifies locale to be used for all localizable strings, dates and number formats.
114 # Supported languages are the following: English - en , by default , Chinese - zh-CN .
115 #i18n.locale: "en"
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!