migrate_monitor.yml
zhangjinpeng1987 committed on 2019-04-08 13:59 · TiKV 3.0 (#724)
---
# Copyright 2016 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The rolling update playbook of TiDB
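#
# This playbook rolling-updates (migrates) the monitoring components of a TiDB
# cluster: node_exporter, blackbox_exporter, pushgateway, prometheus and grafana.
# Each play stops the component (via supervise scripts or systemd, depending on
# `process_supervision`), re-deploys it through its role, restarts it, and waits
# for its port to answer before moving on.
#
# Example invocation (a sketch -- assumes the usual tidb-ansible inventory file
# `inventory.ini`; adjust inventory and tags to your deployment):
#
#   ansible-playbook -i inventory.ini migrate_monitor.yml              # all components
#   ansible-playbook -i inventory.ini migrate_monitor.yml -t grafana   # grafana only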

- name: check config locally
  hosts: localhost
  any_errors_fatal: true
  tags:
    - always
  roles:
    - check_config_static

- name: gather all facts, and check dest
  hosts: all
  any_errors_fatal: true
  tags:
    - always
  roles:
    - check_config_dynamic
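
# node_exporter: stop the old instance, clean up its legacy systemd unit
# (node_exporter.service), re-deploy via the node_exporter role, then start the
# new port-suffixed unit and probe /metrics until it returns 200 OK.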
- name: rolling update node_exporter
  hosts: monitored_servers
  any_errors_fatal: true
  tags:
    - node_exporter

  pre_tasks:
    - name: stop node_exporter by supervise
      shell: cd {{ deploy_dir }}/scripts && ./stop_node_exporter.sh
      when: process_supervision == 'supervise'

    - name: stop node_exporter by systemd
      systemd: name=node_exporter.service state=stopped enabled=no
      become: true
      when: process_supervision == 'systemd'

    - name: wait for node_exporter down
      wait_for: host={{ ansible_host }} port={{ node_exporter_port }} state=stopped

    - name: clean systemd config
      file: path="/etc/systemd/system/{{ item }}" state=absent
      become: true
      when: process_supervision == 'systemd'
      with_items:
        - node_exporter.service

  roles:
    - node_exporter

  post_tasks:
    - name: start node_exporter by supervise
      shell: cd {{ deploy_dir }}/scripts && ./start_node_exporter.sh
      when: process_supervision == 'supervise'

    - name: start node_exporter by systemd
      systemd: name=node_exporter-{{ node_exporter_port }}.service state=started
      become: true
      when: process_supervision == 'systemd'

    - name: wait for node_exporter up
      wait_for: |
        host={{ ansible_host }} port={{ node_exporter_port }} state=present
        send='GET /metrics HTTP/1.0\r\n\r\n' search_regex='200 OK'
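
# blackbox_exporter: the stop/clean-up steps only run if an existing deployment
# is detected (conf/blackbox.yml present); otherwise the role performs a fresh
# install and the exporter is simply started afterwards.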
- name: rolling update blackbox_exporter
  hosts: monitored_servers
  any_errors_fatal: true
  tags:
    - blackbox_exporter

  pre_tasks:
    - name: check blackbox_exporter existed
      stat:
        path: "{{ deploy_dir }}/conf/blackbox.yml"
      register: blackbox_exporter_configure_file

    - name: stop blackbox_exporter by supervise
      shell: cd {{ deploy_dir }}/scripts && ./stop_blackbox_exporter.sh
      when: process_supervision == 'supervise' and blackbox_exporter_configure_file.stat.exists == True

    - name: stop blackbox_exporter by systemd
      systemd: name=blackbox_exporter.service state=stopped enabled=no
      become: true
      when: process_supervision == 'systemd' and blackbox_exporter_configure_file.stat.exists == True

    - name: wait for blackbox_exporter down
      wait_for: host={{ ansible_host }} port={{ blackbox_exporter_port }} state=stopped

    - name: clean systemd config
      file: path="/etc/systemd/system/{{ item }}" state=absent
      become: true
      when: process_supervision == 'systemd' and blackbox_exporter_configure_file.stat.exists == True
      with_items:
        - blackbox_exporter.service

  roles:
    - blackbox_exporter

  post_tasks:
    - name: start blackbox_exporter by supervise
      shell: cd {{ deploy_dir }}/scripts && ./start_blackbox_exporter.sh
      when: process_supervision == 'supervise'

    - name: start blackbox_exporter by systemd
      systemd: name=blackbox_exporter-{{ blackbox_exporter_port }}.service state=started
      become: true
      when: process_supervision == 'systemd'

    - name: wait for blackbox_exporter up
      wait_for: |
        host={{ ansible_host }} port={{ blackbox_exporter_port }} state=present
        send='GET / HTTP/1.0\r\n\r\n' search_regex='200 OK'
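
# pushgateway: runs on the monitoring servers; the old pushgateway.service unit
# is stopped and removed, and the new pushgateway-<port>.service unit (or the
# supervise scripts) is started once the role has been re-applied.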
- name: rolling update pushgateway
  hosts: monitoring_servers
  any_errors_fatal: true
  tags:
    - pushgateway

  pre_tasks:
    - name: stop pushgateway by supervise
      shell: cd {{ deploy_dir }}/scripts && ./stop_{{ item }}.sh
      with_items:
        - pushgateway
      when: process_supervision == 'supervise'

    - name: stop pushgateway by systemd
      systemd: name={{ item }} state=stopped enabled=no
      when: process_supervision == 'systemd'
      become: true
      with_items:
        - pushgateway.service

    - name: wait for pushgateway down
      wait_for: host={{ ansible_host }} port={{ pushgateway_port }} state=stopped

    - name: clean systemd config
      file: path="/etc/systemd/system/{{ item }}" state=absent
      become: true
      when: process_supervision == 'systemd'
      with_items:
        - pushgateway.service

  roles:
    - pushgateway

  post_tasks:
    - name: start pushgateway by supervise
      shell: cd {{ deploy_dir }}/scripts && ./start_{{ item }}.sh
      when: process_supervision == 'supervise'
      with_items:
        - pushgateway

    - name: start pushgateway by systemd
      systemd: name={{ item }} state=started enabled=no
      when: process_supervision == 'systemd'
      become: true
      with_items:
        - pushgateway-{{ pushgateway_port }}.service

    - name: wait for pushgateway up
      wait_for: |
        host={{ ansible_host }} port={{ pushgateway_port }} state=present
        send='GET /metrics HTTP/1.0\r\n\r\n' search_regex='200 OK'
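
# prometheus: same pattern as pushgateway -- stop prometheus.service, remove the
# old unit file, re-deploy the prometheus role, then start prometheus-<port>.service
# and wait for /metrics to respond.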
- name: rolling update prometheus
  hosts: monitoring_servers
  any_errors_fatal: true
  tags:
    - prometheus

  pre_tasks:
    - name: stop prometheus by supervise
      shell: cd {{ deploy_dir }}/scripts && ./stop_{{ item }}.sh
      with_items:
        - prometheus
      when: process_supervision == 'supervise'

    - name: stop prometheus by systemd
      systemd: name={{ item }} state=stopped enabled=no
      when: process_supervision == 'systemd'
      become: true
      with_items:
        - prometheus.service

    - name: wait for prometheus down
      wait_for: host={{ ansible_host }} port={{ prometheus_port }} state=stopped

    - name: clean systemd config
      file: path="/etc/systemd/system/{{ item }}" state=absent
      become: true
      when: process_supervision == 'systemd'
      with_items:
        - prometheus.service

  roles:
    - prometheus

  post_tasks:
    - name: start prometheus by supervise
      shell: cd {{ deploy_dir }}/scripts && ./start_{{ item }}.sh
      when: process_supervision == 'supervise'
      with_items:
        - prometheus

    - name: start prometheus by systemd
      systemd: name={{ item }} state=started enabled=no
      when: process_supervision == 'systemd'
      become: true
      with_items:
        - prometheus-{{ prometheus_port }}.service

    - name: wait for prometheus up
      wait_for: |
        host={{ ansible_host }} port={{ prometheus_port }} state=present
        send='GET /metrics HTTP/1.0\r\n\r\n' search_regex='200 OK'
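
# grafana: besides the stop/re-deploy/start cycle, this play also brings up
# grafana_collector and then re-provisions Grafana itself (data source, API key,
# dashboards) -- see the tasks after the two wait_for checks below.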
- name: rolling update grafana
  hosts: grafana_servers
  any_errors_fatal: true
  tags:
    - grafana

  pre_tasks:
    - name: stop grafana by supervise
      shell: cd {{ deploy_dir }}/scripts && ./stop_{{ item }}.sh
      when: process_supervision == 'supervise'
      with_items:
        - grafana

    - name: stop grafana by systemd
      systemd: name=grafana.service state=stopped enabled=no
      become: true
      when: process_supervision == 'systemd'

    - name: wait for grafana down
      wait_for: host={{ ansible_host }} port={{ grafana_port }} state=stopped

    - name: clean systemd config
      file: path="/etc/systemd/system/{{ item }}" state=absent
      become: true
      when: process_supervision == 'systemd'
      with_items:
        - grafana.service

  roles:
    - grafana
    - grafana_collector

  post_tasks:
    - name: start grafana by supervise
      shell: cd {{ deploy_dir }}/scripts && ./start_{{ item }}.sh
      when: process_supervision == 'supervise'
      with_items:
        - grafana

    - name: start grafana by systemd
      systemd: name=grafana-{{ grafana_port }}.service state=started enabled=no
      when: process_supervision == 'systemd'
      become: true

    - name: wait for grafana up
      wait_for: |
        host={{ ansible_host }} port={{ grafana_port }} state=present
        send='GET /login HTTP/1.0\r\n\r\n' search_regex='200 OK'

    - name: start grafana_collector by supervise
      shell: cd {{ deploy_dir }}/scripts && ./start_{{ item }}.sh
      when: process_supervision == 'supervise'
      with_items:
        - grafana_collector

    - name: start grafana_collector by systemd
      systemd: name=grafana_collector-{{ grafana_collector_port }}.service state=started enabled=no
      when: process_supervision == 'systemd'
      become: true

    - name: wait for grafana_collector up
      wait_for: |
        host={{ ansible_host }} port={{ grafana_collector_port }} state=present
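
    # Post-start provisioning: record the Grafana host, create an API key
    # (common_tasks/create_grafana_api_keys.yml), register the cluster's data
    # source through the Grafana HTTP API, then render dests.json locally and
    # run scripts/grafana-config-copy.py to import the dashboards listed below.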
    - set_fact:
        grafana_host: "{{ ansible_host }}"

    - include_tasks: "common_tasks/create_grafana_api_keys.yml"

    - name: import grafana data source
      shell: >
        chdir={{ grafana_data_dir }}
        warn=no
        curl -q -X POST -d @data_source.json --header 'Content-Type: application/json'
        "http://{{ grafana_admin_user }}:{{ grafana_admin_password }}@127.0.0.1:{{ grafana_port }}/api/datasources"

    - name: import grafana dashboards - prepare config
      delegate_to: localhost
      template: src=grafana.dest.json.j2 dest={{ playbook_dir }}/scripts/dests.json
      vars:
        - ansible_become: false
        - ansible_connection: local
        - grafana_dest_config:
            name: "{{ cluster_name | title }}"
            url: "http://{{ grafana_host }}:{{ grafana_port }}/"
            report_url: "http://{{ grafana_host }}:{{ grafana_collector_port }}/"
            user: "{{ grafana_admin_user }}"
            password: "{{ grafana_admin_password }}"
            apikey: "{{ lookup('file', grafana_api_keys_dir + '/grafana_apikey.key') }}"
            datasource: "{{ cluster_name }}"
            titles:
              node: "{{ cluster_name | title }}-Node_exporter"
              pd: "{{ cluster_name | title }}-PD"
              tidb: "{{ cluster_name | title }}-TiDB"
              tikv_summary: "{{ cluster_name | title }}-TiKV-Summary"
              tikv_details: "{{ cluster_name | title }}-TiKV-Details"
              tikv_trouble_shot: "{{ cluster_name | title }}-TiKV-Trouble-Shooting"
              binlog: "{{ cluster_name | title }}-Binlog"
              overview: "{{ cluster_name | title }}-Overview"
              disk_performance: "{{ cluster_name | title }}-Disk-Performance"
              blackbox_exporter: "{{ cluster_name | title }}-Blackbox_exporter"
              kafka_overview: "{{ cluster_name | title }}-Kafka-Overview"

    - name: import grafana dashboards - run import script
      delegate_to: localhost
      shell: >-
        chdir={{ playbook_dir }}/scripts
        ./grafana-config-copy.py
      vars:
        - ansible_become: false
        - ansible_connection: local