homework 5.5: add task 2-3
src/homework/05-virtualization/5.5/ansible/.gitignore
@@ -0,0 +1 @@
inventory
@@ -0,0 +1,7 @@
FROM prom/alertmanager:v0.15.3

COPY conf /etc/alertmanager/

ENTRYPOINT [ "/etc/alertmanager/docker-entrypoint.sh" ]
CMD [ "--config.file=/etc/alertmanager/alertmanager.yml", \
      "--storage.path=/alertmanager" ]
@@ -0,0 +1,11 @@
route:
  receiver: 'slack'

receivers:
- name: 'slack'
  slack_configs:
  - send_resolved: true
    text: "{{ .CommonAnnotations.description }}"
    #username: <user>#
    #channel: <channel>#
    #api_url: <url>#
@@ -0,0 +1,12 @@
#!/bin/sh -e

cat /etc/alertmanager/alertmanager.yml |\
    sed "s@#api_url: <url>#@api_url: '$SLACK_URL'@g" |\
    sed "s@#channel: <channel>#@channel: '#$SLACK_CHANNEL'@g" |\
    sed "s@#username: <user>#@username: '$SLACK_USER'@g" > /tmp/alertmanager.yml

mv /tmp/alertmanager.yml /etc/alertmanager/alertmanager.yml

set -- /bin/alertmanager "$@"

exec "$@"
@@ -0,0 +1,40 @@
:9090 {
    basicauth / {$ADMIN_USER} {$ADMIN_PASSWORD}
    proxy / prometheus:9090 {
        transparent
    }

    errors stderr
    tls off
}

:9093 {
    basicauth / {$ADMIN_USER} {$ADMIN_PASSWORD}
    proxy / alertmanager:9093 {
        transparent
    }

    errors stderr
    tls off
}

:9094 {
    basicauth / {$ADMIN_USER} {$ADMIN_PASSWORD}
    proxy / unsee:8080 {
        transparent
    }

    errors stderr
    tls off
}

:3000 {
    proxy / grafana:3000 {
        transparent
        websocket
    }

    errors stderr
    tls off
}
@@ -0,0 +1,206 @@
version: "3.3"

networks:
  net:
    driver: overlay
    attachable: true

volumes:
  prometheus: {}
  grafana: {}
  alertmanager: {}

configs:
  caddy_config:
    file: ./caddy/Caddyfile
  dockerd_config:
    file: ./dockerd-exporter/Caddyfile
  node_rules:
    file: ./prometheus/rules/swarm_node.rules.yml
  task_rules:
    file: ./prometheus/rules/swarm_task.rules.yml

services:
  dockerd-exporter:
    image: stefanprodan/caddy
    networks:
      - net
    environment:
      - DOCKER_GWBRIDGE_IP=172.18.0.1
    configs:
      - source: dockerd_config
        target: /etc/caddy/Caddyfile
    deploy:
      mode: global
      resources:
        limits:
          memory: 128M
        reservations:
          memory: 64M

  cadvisor:
    image: google/cadvisor
    networks:
      - net
    command: -logtostderr -docker_only
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /:/rootfs:ro
      - /var/run:/var/run
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
    deploy:
      mode: global
      resources:
        limits:
          memory: 128M
        reservations:
          memory: 64M

  grafana:
    image: stefanprodan/swarmprom-grafana:5.3.4
    networks:
      - net
    environment:
      - GF_SECURITY_ADMIN_USER=${ADMIN_USER:-admin}
      - GF_SECURITY_ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin}
      - GF_USERS_ALLOW_SIGN_UP=false
      #- GF_SERVER_ROOT_URL=${GF_SERVER_ROOT_URL:-localhost}
      #- GF_SMTP_ENABLED=${GF_SMTP_ENABLED:-false}
      #- GF_SMTP_FROM_ADDRESS=${GF_SMTP_FROM_ADDRESS:-grafana@test.com}
      #- GF_SMTP_FROM_NAME=${GF_SMTP_FROM_NAME:-Grafana}
      #- GF_SMTP_HOST=${GF_SMTP_HOST:-smtp:25}
      #- GF_SMTP_USER=${GF_SMTP_USER}
      #- GF_SMTP_PASSWORD=${GF_SMTP_PASSWORD}
    volumes:
      - grafana:/var/lib/grafana
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
      resources:
        limits:
          memory: 128M
        reservations:
          memory: 64M

  alertmanager:
    image: stefanprodan/swarmprom-alertmanager:v0.14.0
    networks:
      - net
    environment:
      - SLACK_URL=${SLACK_URL:-https://hooks.slack.com/services/TOKEN}
      - SLACK_CHANNEL=${SLACK_CHANNEL:-general}
      - SLACK_USER=${SLACK_USER:-alertmanager}
    command:
      - '--config.file=/etc/alertmanager/alertmanager.yml'
      - '--storage.path=/alertmanager'
    volumes:
      - alertmanager:/alertmanager
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
      resources:
        limits:
          memory: 128M
        reservations:
          memory: 64M

  unsee:
    image: cloudflare/unsee:v0.8.0
    networks:
      - net
    environment:
      - "ALERTMANAGER_URIS=default:http://alertmanager:9093"
    deploy:
      mode: replicated
      replicas: 1

  node-exporter:
    image: stefanprodan/swarmprom-node-exporter:v0.16.0
    networks:
      - net
    environment:
      - NODE_ID={{.Node.ID}}
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
      - /etc/hostname:/etc/nodename
    command:
      - '--path.sysfs=/host/sys'
      - '--path.procfs=/host/proc'
      - '--collector.textfile.directory=/etc/node-exporter/'
      - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
      - '--no-collector.ipvs'
    deploy:
      mode: global
      resources:
        limits:
          memory: 128M
        reservations:
          memory: 64M

  prometheus:
    image: stefanprodan/swarmprom-prometheus:v2.5.0
    networks:
      - net
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--storage.tsdb.retention=${PROMETHEUS_RETENTION:-24h}'
    volumes:
      - prometheus:/prometheus
    configs:
      - source: node_rules
        target: /etc/prometheus/swarm_node.rules.yml
      - source: task_rules
        target: /etc/prometheus/swarm_task.rules.yml
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
      resources:
        limits:
          memory: 2048M
        reservations:
          memory: 128M

  caddy:
    image: stefanprodan/caddy
    ports:
      - "3000:3000"
      - "9090:9090"
      - "9093:9093"
      - "9094:9094"
    networks:
      - net
    environment:
      - ADMIN_USER=${ADMIN_USER:-admin}
      - ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin}
    configs:
      - source: caddy_config
        target: /etc/caddy/Caddyfile
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
      resources:
        limits:
          memory: 128M
        reservations:
          memory: 64M
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000"]
      interval: 5s
      timeout: 1s
      retries: 5
@@ -0,0 +1,8 @@
:9323 {
    proxy / {$DOCKER_GWBRIDGE_IP}:9323 {
        transparent
    }

    errors stderr
    tls off
}
@@ -0,0 +1,10 @@
FROM grafana/grafana:5.3.4
# https://hub.docker.com/r/grafana/grafana/tags/

COPY datasources /etc/grafana/provisioning/datasources/
COPY swarmprom_dashboards.yml /etc/grafana/provisioning/dashboards/
COPY dashboards /etc/grafana/dashboards/

ENV GF_SECURITY_ADMIN_PASSWORD=admin \
    GF_SECURITY_ADMIN_USER=admin \
    GF_PATHS_PROVISIONING=/etc/grafana/provisioning/
3 file diffs suppressed because they are too large
@@ -0,0 +1,13 @@
apiVersion: 1

deleteDatasources:
  - name: Prometheus

datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    url: http://prometheus:9090
    isDefault: true
    version: 1
    editable: true
@@ -0,0 +1,11 @@
apiVersion: 1

providers:
  - name: 'default'
    orgId: 1
    folder: ''
    type: file
    disableDeletion: false
    editable: true
    options:
      path: /etc/grafana/dashboards
@@ -0,0 +1,10 @@
FROM prom/node-exporter:v0.16.0

ENV NODE_ID=none

USER root

COPY conf /etc/node-exporter/

ENTRYPOINT [ "/etc/node-exporter/docker-entrypoint.sh" ]
CMD [ "/bin/node_exporter" ]
@@ -0,0 +1,8 @@
#!/bin/sh -e

NODE_NAME=$(cat /etc/nodename)
echo "node_meta{node_id=\"$NODE_ID\", container_label_com_docker_swarm_node_id=\"$NODE_ID\", node_name=\"$NODE_NAME\"} 1" > /etc/node-exporter/node-meta.prom

set -- /bin/node_exporter "$@"

exec "$@"
@@ -0,0 +1,10 @@
FROM prom/prometheus:v2.5.0
# https://hub.docker.com/r/prom/prometheus/tags/

ENV WEAVE_TOKEN=none

COPY conf /etc/prometheus/

ENTRYPOINT [ "/etc/prometheus/docker-entrypoint.sh" ]
CMD [ "--config.file=/etc/prometheus/prometheus.yml", \
      "--storage.tsdb.path=/prometheus" ]
@@ -0,0 +1,48 @@
#!/bin/sh -e

cat /etc/prometheus/prometheus.yml > /tmp/prometheus.yml
cat /etc/prometheus/weave-cortex.yml | \
    sed "s@#password: <token>#@password: '$WEAVE_TOKEN'@g" > /tmp/weave-cortex.yml

#JOBS=mongo-exporter:9111 redis-exporter:9112

if [ ${JOBS+x} ]; then

  for job in $JOBS
  do
    echo "adding job $job"

    SERVICE=$(echo "$job" | cut -d":" -f1)
    PORT=$(echo "$job" | cut -d":" -f2)

    cat >>/tmp/prometheus.yml <<EOF

  - job_name: '${SERVICE}'
    dns_sd_configs:
    - names:
      - 'tasks.${SERVICE}'
      type: 'A'
      port: ${PORT}
EOF

    cat >>/tmp/weave-cortex.yml <<EOF

- job_name: '${SERVICE}'
  dns_sd_configs:
  - names:
    - 'tasks.${SERVICE}'
    type: 'A'
    port: ${PORT}
EOF

  done

fi

mv /tmp/prometheus.yml /etc/prometheus/prometheus.yml
mv /tmp/weave-cortex.yml /etc/prometheus/weave-cortex.yml

set -- /bin/prometheus "$@"

exec "$@"
@@ -0,0 +1,44 @@
global:
  scrape_interval: 15s
  evaluation_interval: 15s

  external_labels:
    monitor: 'promswarm'

rule_files:
  - "swarm_node.rules.yml"
  - "swarm_task.rules.yml"

alerting:
  alertmanagers:
  - static_configs:
    - targets:
      - alertmanager:9093

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'dockerd-exporter'
    dns_sd_configs:
    - names:
      - 'tasks.dockerd-exporter'
      type: 'A'
      port: 9323

  - job_name: 'cadvisor'
    dns_sd_configs:
    - names:
      - 'tasks.cadvisor'
      type: 'A'
      port: 8080

  - job_name: 'node-exporter'
    dns_sd_configs:
    - names:
      - 'tasks.node-exporter'
      type: 'A'
      port: 9100
@@ -0,0 +1,37 @@
remote_write:
  - url: https://cloud.weave.works/api/prom/push
    basic_auth:
      #password: <token>#

global:
  scrape_interval: 15s
  evaluation_interval: 15s

  external_labels:
    monitor: 'promswarm'

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'dockerd-exporter'
    dns_sd_configs:
    - names:
      - 'tasks.dockerd-exporter'
      type: 'A'
      port: 9323

  - job_name: 'cadvisor'
    dns_sd_configs:
    - names:
      - 'tasks.cadvisor'
      type: 'A'
      port: 8080

  - job_name: 'node-exporter'
    dns_sd_configs:
    - names:
      - 'tasks.node-exporter'
      type: 'A'
      port: 9100
@@ -0,0 +1,44 @@
groups:
- name: /1/store/projects/vagrant/docker-swarm-vagrant/apps/swarmprom/prometheus/rules/swarm_node.rules.yml
  rules:
  - alert: node_cpu_usage
    expr: 100 - (avg(irate(node_cpu_seconds_total{mode="idle"}[1m]) * ON(instance) GROUP_LEFT(node_name)
      node_meta * 100) BY (node_name)) > 50
    for: 1m
    labels:
      severity: warning
    annotations:
      description: Swarm node {{ $labels.node_name }} CPU usage is at {{ humanize
        $value}}%.
      summary: CPU alert for Swarm node '{{ $labels.node_name }}'
  - alert: node_memory_usage
    expr: sum(((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes)
      * ON(instance) GROUP_LEFT(node_name) node_meta * 100) BY (node_name) > 80
    for: 1m
    labels:
      severity: warning
    annotations:
      description: Swarm node {{ $labels.node_name }} memory usage is at {{ humanize
        $value}}%.
      summary: Memory alert for Swarm node '{{ $labels.node_name }}'
  - alert: node_disk_usage
    expr: ((node_filesystem_size_bytes{mountpoint="/rootfs"} - node_filesystem_free_bytes{mountpoint="/rootfs"})
      * 100 / node_filesystem_size_bytes{mountpoint="/rootfs"}) * ON(instance) GROUP_LEFT(node_name)
      node_meta > 85
    for: 1m
    labels:
      severity: warning
    annotations:
      description: Swarm node {{ $labels.node_name }} disk usage is at {{ humanize
        $value}}%.
      summary: Disk alert for Swarm node '{{ $labels.node_name }}'
  - alert: node_disk_fill_rate_6h
    expr: predict_linear(node_filesystem_free_bytes{mountpoint="/rootfs"}[1h], 6 * 3600) * ON(instance)
      GROUP_LEFT(node_name) node_meta < 0
    for: 1h
    labels:
      severity: critical
    annotations:
      description: Swarm node {{ $labels.node_name }} disk is going to fill up in
        6h.
      summary: Disk fill alert for Swarm node '{{ $labels.node_name }}'
@@ -0,0 +1,24 @@
groups:
- name: /1/store/projects/vagrant/docker-swarm-vagrant/apps/swarmprom/prometheus/rules/swarm_task.rules.yml
  rules:
  - alert: task_high_cpu_usage_50
    expr: sum(rate(container_cpu_usage_seconds_total{container_label_com_docker_swarm_task_name=~".+"}[1m]))
      BY (container_label_com_docker_swarm_task_name, container_label_com_docker_swarm_node_id)
      * 100 > 50
    for: 1m
    annotations:
      description: '{{ $labels.container_label_com_docker_swarm_task_name }} on ''{{
        $labels.container_label_com_docker_swarm_node_id }}'' CPU usage is at {{ humanize
        $value}}%.'
      summary: CPU alert for Swarm task '{{ $labels.container_label_com_docker_swarm_task_name
        }}' on '{{ $labels.container_label_com_docker_swarm_node_id }}'
  - alert: task_high_memory_usage_1g
    expr: sum(container_memory_rss{container_label_com_docker_swarm_task_name=~".+"})
      BY (container_label_com_docker_swarm_task_name, container_label_com_docker_swarm_node_id) > 1e+09
    for: 1m
    annotations:
      description: '{{ $labels.container_label_com_docker_swarm_task_name }} on ''{{
        $labels.container_label_com_docker_swarm_node_id }}'' memory usage is {{ humanize
        $value}}.'
      summary: Memory alert for Swarm task '{{ $labels.container_label_com_docker_swarm_task_name
        }}' on '{{ $labels.container_label_com_docker_swarm_node_id }}'
@@ -0,0 +1,9 @@
---
- name: Configure Hosts File
  lineinfile: >
    path=/etc/hosts
    regexp='.*{{ item }}$'
    line="{{ hostvars[item].ansible_default_ipv4.address }} {{item}}"
    state=present
  when: hostvars[item].ansible_default_ipv4.address is defined
  with_items: "{{ groups['nodes'] }}"
@@ -0,0 +1,19 @@
---
- name: Add docker repository
  command: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

- name: Installing docker package
  yum: >
    pkg={{ item }}
    state=present
    update_cache=yes
  with_items:
    - docker-ce
    - docker-ce-cli
    - containerd.io

- name: Enable docker daemon
  systemd:
    name: docker
    state: started
    enabled: yes
@@ -0,0 +1,4 @@
---
- name: Add Managers to the Swarm
  shell: "docker swarm join --token {{ hostvars['node01.netology.yc']['manager_token']['stdout'] }} {{ hostvars['node01.netology.yc']['ansible_default_ipv4']['address'] }}:2377"
  tags: swarm
@@ -0,0 +1,4 @@
---
- name: Add Workers to the Swarm
  shell: "docker swarm join --token {{ hostvars['node01.netology.yc']['worker_token']['stdout'] }} {{ hostvars['node01.netology.yc']['ansible_default_ipv4']['address'] }}:2377"
  tags: swarm
@@ -0,0 +1,14 @@
---
- name: Initialize Docker Swarm
  shell: "docker swarm init --advertise-addr={{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}:2377"
  tags: swarm

- name: Get the Manager join-token
  shell: docker swarm join-token --quiet manager
  register: manager_token
  tags: swarm

- name: Get the worker join-token
  shell: docker swarm join-token --quiet worker
  register: worker_token
  tags: swarm
@@ -0,0 +1,4 @@
---
- name: Nodes Leaving the Swarm
  shell: docker swarm leave -f
  ignore_errors: true
@@ -0,0 +1,14 @@
---
- name: Installing tools
  yum: >
    pkg={{ item }}
    state=present
    update_cache=yes
  with_items:
    - ntp
    - python
    - tcpdump
    - wget
    - openssl
    - curl
    - git
@@ -0,0 +1,41 @@
---
- name: Install Required Tools
  hosts: nodes
  become: yes
  become_user: root
  remote_user: centos
  roles:
    - install-tools
    - configure-hosts-file

- name: Install Docker Engine
  hosts: nodes
  become: yes
  become_user: root
  remote_user: centos
  roles:
    - docker-installation

- name: Initialize Docker Swarm Cluster
  hosts: active
  become: yes
  become_user: root
  remote_user: centos
  roles:
    - docker-swarm-init

- name: Add Managers to the Swarm Cluster
  hosts: standby
  become: yes
  become_user: root
  remote_user: centos
  roles:
    - docker-swarm-add-manager

- name: Add Workers to the Swarm Cluster
  hosts: workers
  become: yes
  become_user: root
  remote_user: centos
  roles:
    - docker-swarm-add-worker
@@ -0,0 +1,17 @@
---
- hosts: nodes
  become: yes
  become_user: root
  remote_user: centos

  tasks:
    - name: Check Current Leader
      shell: docker node ls | grep {{ ansible_hostname }}
      register: docker_info
      changed_when: false

    - name: Run deploy, if node is leader
      shell: docker stack deploy --compose-file /opt/monitoring/docker-compose.yml swarm_monitoring
      when:
        - "'Leader' in docker_info.stdout"
        - "'Active' in docker_info.stdout"
@@ -0,0 +1,15 @@
---
- hosts: nodes
  become: yes
  become_user: root
  remote_user: centos

  tasks:
    - name: Synchronization
      copy:
        src: monitoring/
        dest: "/opt/monitoring/"
        owner: root
        group: root
        mode: 0644
      become: true
@@ -29,4 +29,82 @@

> - What is an Overlay Network?

An `overlay network` is a special type of Docker network that links containers running on different nodes.
In effect, it lets traffic be routed to a specific container on a specific node by the container's name alone.
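
A minimal sketch of the mechanism (the network and service names here are illustrative, not part of this homework):

```shell
# Create an attachable overlay network spanning the swarm.
docker network create --driver overlay --attachable demo-net

# Two services on that network; swarm DNS resolves each by service name,
# regardless of which node its task lands on.
docker service create --name web --network demo-net nginx:alpine
docker service create --name probe --network demo-net alpine:3.16 sleep 1d

# From inside a probe task, `wget -qO- http://web` reaches the web task
# on whatever node it runs, via the overlay.
```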
### Task 2

> Create your first Docker Swarm cluster in Yandex.Cloud.
>
> To pass, provide a terminal (console) screenshot with the output of the command:
>
> ```shell
> docker node ls
> ```

First, build the OS image in the cloud by following the steps from homework [5.4](/src/homework/05-virtualization/5.4/readme.md#Задача 1).

Once the image is ready (the whole sequence is also collected into a single shell session below):
1. Copy the `terraform` secrets from [variables.tf.example](./terraform/variables.tf.example) into `variables.tf`.
2. Adjust the fields in the configuration.
3. Initialize the configuration: `terraform init` (does not work without a VPN; the data request returns a 403 status code).
4. Review the configuration: `terraform plan`.
5. Apply the configuration to the cloud: `terraform apply -auto-approve`.
6. Connect over ssh to the machine whose IP address is printed in the `external_ip_address_node01` output variable and run the required command:
```shell
ssh centos@51.250.64.218
sudo docker node ls
ID                            HOSTNAME             STATUS    AVAILABILITY   MANAGER STATUS   ENGINE VERSION
ttj5yee26pppcezlys0g0pzum *   node01.netology.yc   Ready     Active         Leader           20.10.16
a717bja2genbm7c6prdxailfy     node02.netology.yc   Ready     Active         Reachable        20.10.16
qijk98huwd1y1omhsphc28rjr     node03.netology.yc   Ready     Active         Reachable        20.10.16
pbmbmjeawqf7sst6yia40llwp     node04.netology.yc   Ready     Active                          20.10.16
y6g2mtvdcitnmyzxwnipklk82     node05.netology.yc   Ready     Active                          20.10.16
s7f1f34ef238lvd7qltftb6jt     node06.netology.yc   Ready     Active                          20.10.16
```
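
For convenience, the same steps as one shell session (a sketch: paths are assumed relative to this homework's directory, and the editor invocation is illustrative):

```shell
cd src/homework/05-virtualization/5.5/terraform
cp variables.tf.example variables.tf   # step 1: copy the secrets template
vim variables.tf                       # step 2: set cloud_id, folder_id, token, image ID
terraform init                         # step 3: may require a VPN (403 otherwise)
terraform plan                         # step 4: review the planned changes
terraform apply -auto-approve          # step 5: create the VMs; ansible then builds the cluster
```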

### Task 3

> Create your first production-ready monitoring cluster, built from a stack of microservices.
>
> To pass, provide a terminal (console) screenshot with the output of the command:
>
> ```shell
> docker service ls
> ```

The service stack was already deployed as part of [task 2](#task-2) when `terraform` ran.
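
For reference, a sketch of the equivalent manual deployment — the same command the `swarm-deploy-stack.yml` playbook runs on the leader node:

```shell
sudo docker stack deploy --compose-file /opt/monitoring/docker-compose.yml swarm_monitoring
```

The deployed services: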
```shell
sudo docker service ls
ID             NAME                                MODE         REPLICAS   IMAGE                                          PORTS
4iot28xmyl3w   swarm_monitoring_alertmanager       replicated   1/1        stefanprodan/swarmprom-alertmanager:v0.14.0
5akoz6vjp9a3   swarm_monitoring_caddy              replicated   1/1        stefanprodan/caddy:latest                      *:3000->3000/tcp, *:9090->9090/tcp, *:9093-9094->9093-9094/tcp
mf0c8h4vyuue   swarm_monitoring_cadvisor           global       6/6        google/cadvisor:latest
vbgaltbn2t17   swarm_monitoring_dockerd-exporter   global       6/6        stefanprodan/caddy:latest
ihmlsx3bmxs0   swarm_monitoring_grafana            replicated   1/1        stefanprodan/swarmprom-grafana:5.3.4
uju9p0ws4vwm   swarm_monitoring_node-exporter      global       6/6        stefanprodan/swarmprom-node-exporter:v0.16.0
8ipjzv0vax7m   swarm_monitoring_prometheus         replicated   1/1        stefanprodan/swarmprom-prometheus:v2.5.0
96xidxmifhco   swarm_monitoring_unsee              replicated   1/1        cloudflare/unsee:v0.8.0
```

To reach the `grafana` web UI:
1. Find out which node the service was scheduled on:
```shell
sudo docker service ps swarm_monitoring_grafana
ID             NAME                         IMAGE                                  NODE                 DESIRED STATE   CURRENT STATE           ERROR     PORTS
t97g9zhyggja   swarm_monitoring_grafana.1   stefanprodan/swarmprom-grafana:5.3.4   node02.netology.yc   Running         Running 7 minutes ago
```
2. Find out the IP address of that node. Either look it up in the [`ansible/inventory`](./ansible/inventory) file,
   or run:
```shell
sudo docker inspect node02.netology.yc --format '{{ .Status.Addr }}'
192.168.101.12
```
3. Open the resulting address on port `:3000`: `http://192.168.101.12:3000`. A quick reachability check is sketched below.
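
A minimal check that the panel answers (assuming the address obtained above):

```shell
curl -sI http://192.168.101.12:3000 | head -n1   # expect HTTP 200, or a 302 redirect to /login
```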

### Clean up

Tearing down the entire infrastructure:

1. Remove the VMs and networks: `terraform destroy -auto-approve`
2. Remove the OS image: `yc compute image delete --id {{ image_id }}`
src/homework/05-virtualization/5.5/terraform/.gitignore
@@ -0,0 +1,38 @@
variables.tf

# Local .terraform directories
**/.terraform/*

# .tfstate files
*.tfstate
*.tfstate.*
.terraform.lock.hcl

# Crash log files
crash.log
crash.*.log

# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# passwords, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
#
*.tfvars

# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json

# Include override files you do wish to add to version control using negated pattern
#
# !example_override.tf

# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*

# Ignore CLI configuration files
.terraformrc
terraform.rc
src/homework/05-virtualization/5.5/terraform/ansible.cfg
@@ -0,0 +1,6 @@
[defaults]
inventory=./inventory
deprecation_warnings=False
command_warnings=False
ansible_port=22
host_key_checking = False
src/homework/05-virtualization/5.5/terraform/ansible.tf
@@ -0,0 +1,39 @@
resource "null_resource" "wait" {
  provisioner "local-exec" {
    command = "sleep 100"
  }

  depends_on = [
    local_file.inventory
  ]
}

resource "null_resource" "cluster" {
  provisioner "local-exec" {
    command = "ANSIBLE_FORCE_COLOR=1 ansible-playbook -i ../ansible/inventory ../ansible/swarm-deploy-cluster.yml"
  }

  depends_on = [
    null_resource.wait
  ]
}

resource "null_resource" "sync" {
  provisioner "local-exec" {
    command = "ANSIBLE_FORCE_COLOR=1 ansible-playbook -i ../ansible/inventory ../ansible/swarm-deploy-sync.yml"
  }

  depends_on = [
    null_resource.cluster
  ]
}

resource "null_resource" "monitoring" {
  provisioner "local-exec" {
    command = "ANSIBLE_FORCE_COLOR=1 ansible-playbook -i ../ansible/inventory ../ansible/swarm-deploy-stack.yml --limit=managers"
  }

  depends_on = [
    null_resource.sync
  ]
}
src/homework/05-virtualization/5.5/terraform/inventory.tf
@@ -0,0 +1,36 @@
resource "local_file" "inventory" {
  content = <<-DOC
    # Ansible inventory containing variable values from Terraform.
    # Generated by Terraform.

    [nodes:children]
    managers
    workers

    [managers:children]
    active
    standby

    [active]
    node01.netology.yc ansible_host=${yandex_compute_instance.node01.network_interface.0.nat_ip_address}

    [standby]
    node02.netology.yc ansible_host=${yandex_compute_instance.node02.network_interface.0.nat_ip_address}
    node03.netology.yc ansible_host=${yandex_compute_instance.node03.network_interface.0.nat_ip_address}

    [workers]
    node04.netology.yc ansible_host=${yandex_compute_instance.node04.network_interface.0.nat_ip_address}
    node05.netology.yc ansible_host=${yandex_compute_instance.node05.network_interface.0.nat_ip_address}
    node06.netology.yc ansible_host=${yandex_compute_instance.node06.network_interface.0.nat_ip_address}
    DOC
  filename = "../ansible/inventory"

  depends_on = [
    yandex_compute_instance.node01,
    yandex_compute_instance.node02,
    yandex_compute_instance.node03,
    yandex_compute_instance.node04,
    yandex_compute_instance.node05,
    yandex_compute_instance.node06
  ]
}
src/homework/05-virtualization/5.5/terraform/network.tf
@@ -0,0 +1,11 @@
# Network
resource "yandex_vpc_network" "default" {
  name = "net"
}

resource "yandex_vpc_subnet" "default" {
  name           = "subnet"
  zone           = "ru-central1-a"
  network_id     = "${yandex_vpc_network.default.id}"
  v4_cidr_blocks = ["192.168.101.0/24"]
}
src/homework/05-virtualization/5.5/terraform/node01.tf
@@ -0,0 +1,30 @@
resource "yandex_compute_instance" "node01" {
  name                      = "node01"
  zone                      = "ru-central1-a"
  hostname                  = "node01.netology.yc"
  allow_stopping_for_update = true

  resources {
    cores  = 4
    memory = 8
  }

  boot_disk {
    initialize_params {
      image_id = "${var.centos-7-base}"
      name     = "root-node01"
      type     = "network-nvme"
      size     = "10"
    }
  }

  network_interface {
    subnet_id  = "${yandex_vpc_subnet.default.id}"
    nat        = true
    ip_address = "192.168.101.11"
  }

  metadata = {
    ssh-keys = "centos:${file("~/.ssh/id_rsa.pub")}"
  }
}
src/homework/05-virtualization/5.5/terraform/node02.tf
@@ -0,0 +1,30 @@
resource "yandex_compute_instance" "node02" {
  name                      = "node02"
  zone                      = "ru-central1-a"
  hostname                  = "node02.netology.yc"
  allow_stopping_for_update = true

  resources {
    cores  = 4
    memory = 8
  }

  boot_disk {
    initialize_params {
      image_id = "${var.centos-7-base}"
      name     = "root-node02"
      type     = "network-nvme"
      size     = "10"
    }
  }

  network_interface {
    subnet_id  = "${yandex_vpc_subnet.default.id}"
    nat        = true
    ip_address = "192.168.101.12"
  }

  metadata = {
    ssh-keys = "centos:${file("~/.ssh/id_rsa.pub")}"
  }
}
src/homework/05-virtualization/5.5/terraform/node03.tf
@@ -0,0 +1,30 @@
resource "yandex_compute_instance" "node03" {
  name                      = "node03"
  zone                      = "ru-central1-a"
  hostname                  = "node03.netology.yc"
  allow_stopping_for_update = true

  resources {
    cores  = 4
    memory = 8
  }

  boot_disk {
    initialize_params {
      image_id = "${var.centos-7-base}"
      name     = "root-node03"
      type     = "network-nvme"
      size     = "10"
    }
  }

  network_interface {
    subnet_id  = "${yandex_vpc_subnet.default.id}"
    nat        = true
    ip_address = "192.168.101.13"
  }

  metadata = {
    ssh-keys = "centos:${file("~/.ssh/id_rsa.pub")}"
  }
}
src/homework/05-virtualization/5.5/terraform/node04.tf
@@ -0,0 +1,30 @@
resource "yandex_compute_instance" "node04" {
  name                      = "node04"
  zone                      = "ru-central1-a"
  hostname                  = "node04.netology.yc"
  allow_stopping_for_update = true

  resources {
    cores  = 4
    memory = 8
  }

  boot_disk {
    initialize_params {
      image_id = "${var.centos-7-base}"
      name     = "root-node04"
      type     = "network-nvme"
      size     = "40"
    }
  }

  network_interface {
    subnet_id  = "${yandex_vpc_subnet.default.id}"
    nat        = true
    ip_address = "192.168.101.14"
  }

  metadata = {
    ssh-keys = "centos:${file("~/.ssh/id_rsa.pub")}"
  }
}
src/homework/05-virtualization/5.5/terraform/node05.tf
@@ -0,0 +1,30 @@
resource "yandex_compute_instance" "node05" {
  name                      = "node05"
  zone                      = "ru-central1-a"
  hostname                  = "node05.netology.yc"
  allow_stopping_for_update = true

  resources {
    cores  = 4
    memory = 8
  }

  boot_disk {
    initialize_params {
      image_id = "${var.centos-7-base}"
      name     = "root-node05"
      type     = "network-nvme"
      size     = "40"
    }
  }

  network_interface {
    subnet_id  = "${yandex_vpc_subnet.default.id}"
    nat        = true
    ip_address = "192.168.101.15"
  }

  metadata = {
    ssh-keys = "centos:${file("~/.ssh/id_rsa.pub")}"
  }
}
src/homework/05-virtualization/5.5/terraform/node06.tf
@@ -0,0 +1,30 @@
resource "yandex_compute_instance" "node06" {
  name                      = "node06"
  zone                      = "ru-central1-a"
  hostname                  = "node06.netology.yc"
  allow_stopping_for_update = true

  resources {
    cores  = 4
    memory = 8
  }

  boot_disk {
    initialize_params {
      image_id = "${var.centos-7-base}"
      name     = "root-node06"
      type     = "network-nvme"
      size     = "40"
    }
  }

  network_interface {
    subnet_id  = "${yandex_vpc_subnet.default.id}"
    nat        = true
    ip_address = "192.168.101.16"
  }

  metadata = {
    ssh-keys = "centos:${file("~/.ssh/id_rsa.pub")}"
  }
}
src/homework/05-virtualization/5.5/terraform/output.tf
@@ -0,0 +1,47 @@
output "internal_ip_address_node01" {
  value = "${yandex_compute_instance.node01.network_interface.0.ip_address}"
}

output "external_ip_address_node01" {
  value = "${yandex_compute_instance.node01.network_interface.0.nat_ip_address}"
}

output "internal_ip_address_node02" {
  value = "${yandex_compute_instance.node02.network_interface.0.ip_address}"
}

output "external_ip_address_node02" {
  value = "${yandex_compute_instance.node02.network_interface.0.nat_ip_address}"
}

output "internal_ip_address_node03" {
  value = "${yandex_compute_instance.node03.network_interface.0.ip_address}"
}

output "external_ip_address_node03" {
  value = "${yandex_compute_instance.node03.network_interface.0.nat_ip_address}"
}

output "internal_ip_address_node04" {
  value = "${yandex_compute_instance.node04.network_interface.0.ip_address}"
}

output "external_ip_address_node04" {
  value = "${yandex_compute_instance.node04.network_interface.0.nat_ip_address}"
}

output "internal_ip_address_node05" {
  value = "${yandex_compute_instance.node05.network_interface.0.ip_address}"
}

output "external_ip_address_node05" {
  value = "${yandex_compute_instance.node05.network_interface.0.nat_ip_address}"
}

output "internal_ip_address_node06" {
  value = "${yandex_compute_instance.node06.network_interface.0.ip_address}"
}

output "external_ip_address_node06" {
  value = "${yandex_compute_instance.node06.network_interface.0.nat_ip_address}"
}
src/homework/05-virtualization/5.5/terraform/provider.tf
@@ -0,0 +1,14 @@
# Provider
terraform {
  required_providers {
    yandex = {
      source = "yandex-cloud/yandex"
    }
  }
}

provider "yandex" {
  token     = var.yandex_cloud_token
  cloud_id  = var.yandex_cloud_id
  folder_id = var.yandex_folder_id
}
@@ -0,0 +1,22 @@
# Replace with the ID of your own cloud
# https://console.cloud.yandex.ru/cloud?section=overview
variable "yandex_cloud_id" {
  default = "b1gu1gt5nqi6lqgu3t7s"
}

# Replace with the folder of your own cloud
# https://console.cloud.yandex.ru/cloud?section=overview
variable "yandex_folder_id" {
  default = "b1gaec42k169jqpo02f7"
}

# OAuth token used by the yc utility. It was used at the packer stage.
variable "yandex_cloud_token" {
  default = ""
}

# Replace with the ID of your own image
# The ID can be obtained with the command: yc compute image list
variable "centos-7-base" {
  default = "fd8ft6norj68lo29qlpi"
}