Mirror of https://github.com/Dannecron/netology-devops.git, synced 2025-12-25 23:32:37 +03:00

homework 12.4: complete task 1: create kubespray inventory example
also create tf config for task 2

src/homework/12-kubernetes/12.4/Makefile (new file)
@@ -0,0 +1,6 @@
tf-init:
	cd ./terraform \
	&& env $$(cat .env) terraform init
tf-plan:
	cd ./terraform \
	&& env $$(cat .env) terraform plan
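
Assuming a terraform/.env file has been created from the .env.example shown further below, these targets can be invoked from the 12.4 directory:

    cp terraform/.env.example terraform/.env   # then fill in the real values
    make tf-init
    make tf-plan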

src/homework/12-kubernetes/12.4/kubespray/inventory.example.ini (new file)
@@ -0,0 +1,28 @@
# ## Configure 'ip' variable to bind kubernetes services on a
# ## different ip than the default iface
# ## We should set etcd_member_name for the etcd cluster. A node that is not an etcd member does not need to set the value, or can set an empty string.
[all]
control ansible_host=95.54.0.12 # ip=10.3.0.1 etcd_member_name=etcd1
node1 ansible_host=95.54.0.13 # ip=10.3.0.2 etcd_member_name=etcd2
node2 ansible_host=95.54.0.14 # ip=10.3.0.3 etcd_member_name=etcd3
node3 ansible_host=95.54.0.15 # ip=10.3.0.4 etcd_member_name=etcd4
node4 ansible_host=95.54.0.16 # ip=10.3.0.5 etcd_member_name=etcd5

[kube_control_plane]
control

[etcd]
control

[kube_node]
node1
node2
node3
node4

[calico_rr]

[k8s_cluster:children]
kube_control_plane
kube_node
calico_rr

src/homework/12-kubernetes/12.4/readme.md (new file)
@@ -0,0 +1,29 @@
Solution for the [homework assignment](https://github.com/netology-code/devkub-homeworks/blob/main/12-kubernetes-04-install-part-2.md)
on the topic "12.4. Deploying a cluster on your own servers, lecture 2".

## Q/A

> New projects are coming in a steady stream. Each project needs several clusters of its own: for testing and for production.
> Doing everything by hand is not an option, so preparation of new clusters should be automated.

### Task 1

> Prepare a kubespray inventory.
>
> The new test clusters need typical, simple settings. Prepare an inventory and check that it works. Inventory requirements:
> * a cluster of 5 nodes: 1 master and 4 worker nodes;
> * containerd as the CRI;
> * etcd runs on the master.

An example inventory file for running `kubespray`: [`inventory.example.ini`](./kubespray/inventory.example.ini).
In this configuration only the IP addresses of the virtual machines need to be changed.
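
A run against this inventory could then look roughly as follows (a sketch assuming kubespray's standard repository layout; `cluster.yml` and the `container_manager` variable are kubespray's own, the destination path is only an example):

    cp kubespray/inventory.example.ini /path/to/kubespray/inventory/mycluster/inventory.ini
    cd /path/to/kubespray
    ansible-playbook -i inventory/mycluster/inventory.ini --become --become-user=root \
        -e container_manager=containerd cluster.yml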

### Task 2

> Prepare and check an inventory for a cluster in yandex.cloud.
>
> Some of the new projects want to run on yandex.cloud infrastructure. The requirements are similar:
> * deploy 5 nodes: 1 master and 4 worker nodes;
> * they must run on the smallest allowed virtual machines.

// todo

src/homework/12-kubernetes/12.4/terraform/.env.example (new file)
@@ -0,0 +1,4 @@
YC_TOKEN=OAuthToken
YC_CLOUD_ID=cloudId
YC_FOLDER_ID=folderId
YC_ZONE=ru-central1-a
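
These are the variables the Makefile exports to terraform. Assuming the yc CLI is installed and initialized, the real values can be looked up like this (a sketch; copying them into terraform/.env is left to the user):

    yc iam create-token       # value for YC_TOKEN
    yc config get cloud-id    # value for YC_CLOUD_ID
    yc config get folder-id   # value for YC_FOLDER_ID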

src/homework/12-kubernetes/12.4/terraform/.gitignore (new file, vendored)
@@ -0,0 +1,37 @@
.env

# Local .terraform directories
**/.terraform/*

# .tfstate files
*.tfstate
*.tfstate.*

# Crash log files
crash.log
crash.*.log

# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# passwords, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
#
*.tfvars

# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json

# Include override files you do wish to add to version control using negated pattern
#
# !example_override.tf

# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*

# Ignore CLI configuration files
.terraformrc
terraform.rc

src/homework/12-kubernetes/12.4/terraform/.terraform.lock.hcl (new file, generated)
@@ -0,0 +1,9 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.

provider "registry.terraform.io/yandex-cloud/yandex" {
  version = "0.82.0"
  hashes = [
    "h1:TV2paiynRTtVYsUbOgsHR+g6GVbCnXTgvrb4JDmNVzs=",
  ]
}

src/homework/12-kubernetes/12.4/terraform/main.tf (new file)
@@ -0,0 +1,90 @@
terraform {
  required_providers {
    yandex = {
      source = "yandex-cloud/yandex"
    }
  }
  required_version = ">= 0.13"
}

provider "yandex" {
  # Placeholder credentials; the provider can also pick these up from the
  # YC_TOKEN, YC_CLOUD_ID, YC_FOLDER_ID and YC_ZONE environment variables
  # (see ./.env.example and the Makefile one level up).
  token     = "auth_token_here"
  cloud_id  = "cloud_id_here"
  folder_id = "folder_id_here"
  zone      = "ru-central1-a"
}

resource "yandex_vpc_network" "network-1" {
  name = "network1"
}

resource "yandex_vpc_subnet" "subnet-1" {
  name           = "subnet1"
  zone           = "ru-central1-a"
  network_id     = yandex_vpc_network.network-1.id
  v4_cidr_blocks = ["192.168.10.0/24"]
}

resource "yandex_compute_instance" "k8s-control" {
  name = "test-vm-1"

  resources {
    cores  = 2
    memory = 2
  }

  boot_disk {
    initialize_params {
      image_id = "fd81hgrcv6lsnkremf32" # ubuntu-20-04-lts-v20210908
    }
  }

  network_interface {
    subnet_id = yandex_vpc_subnet.subnet-1.id
    nat       = true
  }

  metadata = {
    ssh-keys = "ubuntu:${file("~/.ssh/id_rsa.pub")}"
  }
}

resource "yandex_compute_instance" "k8s-node" {
  for_each = toset(["node01", "node02", "node03", "node04"])

  name = each.key

  resources {
    cores  = 1
    memory = 1
  }

  boot_disk {
    initialize_params {
      image_id = "fd81hgrcv6lsnkremf32" # ubuntu-20-04-lts-v20210908
    }
  }

  network_interface {
    subnet_id = yandex_vpc_subnet.subnet-1.id
    nat       = true
  }

  metadata = {
    ssh-keys = "ubuntu:${file("~/.ssh/id_rsa.pub")}"
  }
}
output "control_ips" {
|
||||
value = {
|
||||
external = yandex_compute_instance.k8s-control.network_interface.0.ip_address
|
||||
internal = yandex_compute_instance.k8s-control.network_interface.0.nat_ip_address
|
||||
}
|
||||
}
|
||||
|
||||
output "node_ips" {
|
||||
value = {
|
||||
external = values(yandex_compute_instance.k8s-node)[*].network_interface.0.ip_address
|
||||
internal = values(yandex_compute_instance.k8s-node)[*].network_interface.0.nat_ip_address
|
||||
}
|
||||
}
|
||||
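
After terraform apply, the addresses needed for the kubespray inventory can be read from these outputs, for example (the apply step mirrors the existing tf-init/tf-plan targets; there is no tf-apply target in the Makefile yet):

    cd terraform
    env $(cat .env) terraform apply
    terraform output control_ips
    terraform output -json node_ips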