homework 7.3: add task 2

2022-06-23 15:16:53 +07:00
parent 55f07be3d0
commit bb13a1061e
5 changed files with 483 additions and 9 deletions

Makefile

@@ -10,3 +10,13 @@ tf-init:
tf-plan:
	cd ./terraform \
	&& env $(cat ../.env) terraform plan
tf-ws-prod:
	cd ./terraform \
	&& terraform workspace select prod
tf-ws-stage:
	cd ./terraform \
	&& terraform workspace select stage
tf-ws-current:
	cd ./terraform \
	&& terraform workspace list

README.md

@@ -96,4 +96,401 @@ cd ./terraform \
> * Output of the `terraform workspace list` command.
> * Output of the `terraform plan` command for the `prod` workspace.
To create the workspaces, two commands are needed, one `terraform workspace new` per environment:
```shell
cd ./terraform
terraform workspace new stage
Created and switched to workspace "stage"!
<...>
terraform workspace new prod
Created and switched to workspace "prod"!
<...>
terraform workspace list
default
* prod
stage
```
Next, we need variables that define how many virtual machines to create in each environment.
Add the following values to [variables.tf](./terraform/variables.tf):
```terraform
locals {
  vm_count = {
    stage = 1
    prod  = 2
  }
}
```
Then, in the `vm-1` block of [main.tf](./terraform/main.tf), add a new `count = local.vm_count[terraform.workspace]` argument.
With this in place, `terraform plan` in the `prod` workspace reports 4 resources to create,
while in the `stage` workspace only 3 resources are planned.
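For reference, the relevant fragment of the `vm-1` resource after this change looks roughly like this (heavily trimmed; the full definition is in [main.tf](./terraform/main.tf)):
```terraform
resource "yandex_compute_instance" "vm-1" {
  name  = "test-vm-1"
  count = local.vm_count[terraform.workspace] # 1 instance in stage, 2 in prod

  resources {
    cores  = 2
    memory = 2
  }

  # boot_disk, network_interface and metadata are omitted here for brevity
}
```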
Next, let's add a new group of virtual machines that are created with the [`for_each`](https://www.terraform.io/language/meta-arguments/for_each) meta-argument instead of `count`.
First, add the configuration to [variables.tf](./terraform/variables.tf):
```terraform
locals {
  vm_2_config = {
    "balancer" = {
      cores = {
        stage = 1
        prod  = 2
      }
      memory = {
        stage = 1
        prod  = 2
      }
    }
    "application" = {
      cores = {
        stage = 1
        prod  = 2
      }
      memory = {
        stage = 1
        prod  = 2
      }
    }
  }
}
```
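To make the nesting explicit: each value is looked up first by machine role and then by workspace. A hypothetical helper output (not part of the repository, shown only to illustrate the lookup) would be:
```terraform
output "balancer_prod_cores" {
  # pattern: local.vm_2_config["<role>"].<resource>["<workspace>"]
  value = local.vm_2_config["balancer"].cores["prod"] # evaluates to 2
}
```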
This provisions two machines, each with its own resource settings, and those settings depend on the current `workspace`.
The resource definitions live in [for_each.tf](./terraform/for_each.tf).
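This commit does not add outputs for the `vm-2` machines; if they were needed, a sketch along these lines would map each role to its address (the output names `internal_ip_address_vm_2` and `external_ip_address_vm_2` are assumptions, not part of the repository):
```terraform
output "internal_ip_address_vm_2" {
  value = {
    for role, vm in yandex_compute_instance.vm-2 : role => vm.network_interface.0.ip_address
  }
}

output "external_ip_address_vm_2" {
  value = {
    for role, vm in yandex_compute_instance.vm-2 : role => vm.network_interface.0.nat_ip_address
  }
}
```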
With that in place, the `terraform plan` output for `workspace=prod` looks as follows:
```shell
make tf-plan
Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
# yandex_compute_instance.vm-1[0] will be created
+ resource "yandex_compute_instance" "vm-1" {
+ created_at = (known after apply)
+ folder_id = (known after apply)
+ fqdn = (known after apply)
+ hostname = (known after apply)
+ id = (known after apply)
+ metadata = {
+ "ssh-keys" = <<-EOT
ubuntu:ssh-rsa AAA
EOT
}
+ name = "test-vm-1"
+ network_acceleration_type = "standard"
+ platform_id = "standard-v1"
+ service_account_id = (known after apply)
+ status = (known after apply)
+ zone = (known after apply)
+ boot_disk {
+ auto_delete = true
+ device_name = (known after apply)
+ disk_id = (known after apply)
+ mode = (known after apply)
+ initialize_params {
+ block_size = (known after apply)
+ description = (known after apply)
+ image_id = "fd81hgrcv6lsnkremf32"
+ name = (known after apply)
+ size = (known after apply)
+ snapshot_id = (known after apply)
+ type = "network-hdd"
}
}
+ network_interface {
+ index = (known after apply)
+ ip_address = (known after apply)
+ ipv4 = true
+ ipv6 = (known after apply)
+ ipv6_address = (known after apply)
+ mac_address = (known after apply)
+ nat = true
+ nat_ip_address = (known after apply)
+ nat_ip_version = (known after apply)
+ security_group_ids = (known after apply)
+ subnet_id = (known after apply)
}
+ placement_policy {
+ host_affinity_rules = (known after apply)
+ placement_group_id = (known after apply)
}
+ resources {
+ core_fraction = 100
+ cores = 2
+ memory = 2
}
+ scheduling_policy {
+ preemptible = (known after apply)
}
}
# yandex_compute_instance.vm-1[1] will be created
+ resource "yandex_compute_instance" "vm-1" {
+ created_at = (known after apply)
+ folder_id = (known after apply)
+ fqdn = (known after apply)
+ hostname = (known after apply)
+ id = (known after apply)
+ metadata = {
+ "ssh-keys" = <<-EOT
ubuntu:ssh-rsa AAA
EOT
}
+ name = "test-vm-1"
+ network_acceleration_type = "standard"
+ platform_id = "standard-v1"
+ service_account_id = (known after apply)
+ status = (known after apply)
+ zone = (known after apply)
+ boot_disk {
+ auto_delete = true
+ device_name = (known after apply)
+ disk_id = (known after apply)
+ mode = (known after apply)
+ initialize_params {
+ block_size = (known after apply)
+ description = (known after apply)
+ image_id = "fd81hgrcv6lsnkremf32"
+ name = (known after apply)
+ size = (known after apply)
+ snapshot_id = (known after apply)
+ type = "network-hdd"
}
}
+ network_interface {
+ index = (known after apply)
+ ip_address = (known after apply)
+ ipv4 = true
+ ipv6 = (known after apply)
+ ipv6_address = (known after apply)
+ mac_address = (known after apply)
+ nat = true
+ nat_ip_address = (known after apply)
+ nat_ip_version = (known after apply)
+ security_group_ids = (known after apply)
+ subnet_id = (known after apply)
}
+ placement_policy {
+ host_affinity_rules = (known after apply)
+ placement_group_id = (known after apply)
}
+ resources {
+ core_fraction = 100
+ cores = 2
+ memory = 2
}
+ scheduling_policy {
+ preemptible = (known after apply)
}
}
# yandex_compute_instance.vm-2["application"] will be created
+ resource "yandex_compute_instance" "vm-2" {
+ created_at = (known after apply)
+ folder_id = (known after apply)
+ fqdn = (known after apply)
+ hostname = (known after apply)
+ id = (known after apply)
+ metadata = {
+ "ssh-keys" = <<-EOT
ubuntu:ssh-rsa AAA
EOT
}
+ name = "test-vm-2"
+ network_acceleration_type = "standard"
+ platform_id = "standard-v1"
+ service_account_id = (known after apply)
+ status = (known after apply)
+ zone = (known after apply)
+ boot_disk {
+ auto_delete = true
+ device_name = (known after apply)
+ disk_id = (known after apply)
+ mode = (known after apply)
+ initialize_params {
+ block_size = (known after apply)
+ description = (known after apply)
+ image_id = "fd81hgrcv6lsnkremf32"
+ name = (known after apply)
+ size = (known after apply)
+ snapshot_id = (known after apply)
+ type = "network-hdd"
}
}
+ network_interface {
+ index = (known after apply)
+ ip_address = (known after apply)
+ ipv4 = true
+ ipv6 = (known after apply)
+ ipv6_address = (known after apply)
+ mac_address = (known after apply)
+ nat = true
+ nat_ip_address = (known after apply)
+ nat_ip_version = (known after apply)
+ security_group_ids = (known after apply)
+ subnet_id = (known after apply)
}
+ placement_policy {
+ host_affinity_rules = (known after apply)
+ placement_group_id = (known after apply)
}
+ resources {
+ core_fraction = 100
+ cores = 2
+ memory = 2
}
+ scheduling_policy {
+ preemptible = (known after apply)
}
}
# yandex_compute_instance.vm-2["balancer"] will be created
+ resource "yandex_compute_instance" "vm-2" {
+ created_at = (known after apply)
+ folder_id = (known after apply)
+ fqdn = (known after apply)
+ hostname = (known after apply)
+ id = (known after apply)
+ metadata = {
+ "ssh-keys" = <<-EOT
ubuntu:ssh-rsa AAA
EOT
}
+ name = "test-vm-2"
+ network_acceleration_type = "standard"
+ platform_id = "standard-v1"
+ service_account_id = (known after apply)
+ status = (known after apply)
+ zone = (known after apply)
+ boot_disk {
+ auto_delete = true
+ device_name = (known after apply)
+ disk_id = (known after apply)
+ mode = (known after apply)
+ initialize_params {
+ block_size = (known after apply)
+ description = (known after apply)
+ image_id = "fd81hgrcv6lsnkremf32"
+ name = (known after apply)
+ size = (known after apply)
+ snapshot_id = (known after apply)
+ type = "network-hdd"
}
}
+ network_interface {
+ index = (known after apply)
+ ip_address = (known after apply)
+ ipv4 = true
+ ipv6 = (known after apply)
+ ipv6_address = (known after apply)
+ mac_address = (known after apply)
+ nat = true
+ nat_ip_address = (known after apply)
+ nat_ip_version = (known after apply)
+ security_group_ids = (known after apply)
+ subnet_id = (known after apply)
}
+ placement_policy {
+ host_affinity_rules = (known after apply)
+ placement_group_id = (known after apply)
}
+ resources {
+ core_fraction = 100
+ cores = 2
+ memory = 2
}
+ scheduling_policy {
+ preemptible = (known after apply)
}
}
# yandex_vpc_network.network-1 will be created
+ resource "yandex_vpc_network" "network-1" {
+ created_at = (known after apply)
+ default_security_group_id = (known after apply)
+ folder_id = (known after apply)
+ id = (known after apply)
+ labels = (known after apply)
+ name = "network1"
+ subnet_ids = (known after apply)
}
# yandex_vpc_subnet.subnet-1 will be created
+ resource "yandex_vpc_subnet" "subnet-1" {
+ created_at = (known after apply)
+ folder_id = (known after apply)
+ id = (known after apply)
+ labels = (known after apply)
+ name = "subnet1"
+ network_id = (known after apply)
+ v4_cidr_blocks = [
+ "192.168.10.0/24",
]
+ v6_cidr_blocks = (known after apply)
+ zone = "ru-central1-a"
}
# yandex_vpc_subnet.subnet-2 will be created
+ resource "yandex_vpc_subnet" "subnet-2" {
+ created_at = (known after apply)
+ folder_id = (known after apply)
+ id = (known after apply)
+ labels = (known after apply)
+ name = "subnet2"
+ network_id = (known after apply)
+ v4_cidr_blocks = [
+ "192.168.11.0/24",
]
+ v6_cidr_blocks = (known after apply)
+ zone = "ru-central1-a"
}
Plan: 7 to add, 0 to change, 0 to destroy.
Changes to Outputs:
+ external_ip_address_vm_1 = [
+ (known after apply),
+ (known after apply),
]
+ internal_ip_address_vm_1 = [
+ (known after apply),
+ (known after apply),
]
───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
```
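Note that the original single-instance outputs stop working once `count` is set, so in [main.tf](./terraform/main.tf) they are rewritten as `for` expressions over all `vm-1` instances. An equivalent form using Terraform's splat syntax (not what this repository uses, shown only as an alternative) would be:
```terraform
output "external_ip_address_vm_1" {
  value = yandex_compute_instance.vm-1[*].network_interface[0].nat_ip_address
}
```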

terraform/for_each.tf

@@ -0,0 +1,32 @@
resource "yandex_vpc_subnet" "subnet-2" {
name = "subnet2"
zone = "ru-central1-a"
network_id = yandex_vpc_network.network-1.id
v4_cidr_blocks = ["192.168.11.0/24"]
}
resource "yandex_compute_instance" "vm-2" {
for_each = toset(["balancer", "application"])
name = "test-vm-2"
resources {
cores = local.vm_2_config[each.key].cores[terraform.workspace]
memory = local.vm_2_config[each.key].memory[terraform.workspace]
}
boot_disk {
initialize_params {
image_id = "fd81hgrcv6lsnkremf32" # ubuntu-20-04-lts-v20210908
}
}
network_interface {
subnet_id = yandex_vpc_subnet.subnet-1.id
nat = true
}
metadata = {
ssh-keys = "ubuntu:${file("~/.ssh/id_rsa.pub")}"
}
}

terraform/main.tf

@@ -9,16 +9,9 @@ resource "yandex_vpc_subnet" "subnet-1" {
   v4_cidr_blocks = ["192.168.10.0/24"]
 }
-output "internal_ip_address_vm_1" {
-  value = yandex_compute_instance.vm-1.network_interface.0.ip_address
-}
-output "external_ip_address_vm_1" {
-  value = yandex_compute_instance.vm-1.network_interface.0.nat_ip_address
-}
 resource "yandex_compute_instance" "vm-1" {
   name = "test-vm-1"
+  count = local.vm_count[terraform.workspace]
   resources {
     cores = 2
@@ -40,3 +33,15 @@ resource "yandex_compute_instance" "vm-1" {
     ssh-keys = "ubuntu:${file("~/.ssh/id_rsa.pub")}"
   }
 }
+output "internal_ip_address_vm_1" {
+  value = [
+    for vm in yandex_compute_instance.vm-1 : vm.network_interface.0.ip_address
+  ]
+}
+output "external_ip_address_vm_1" {
+  value = [
+    for vm in yandex_compute_instance.vm-1 : vm.network_interface.0.nat_ip_address
+  ]
+}

terraform/variables.tf

@@ -0,0 +1,30 @@
locals {
  vm_count = {
    stage = 1
    prod  = 2
  }

  vm_2_config = {
    "balancer" = {
      cores = {
        stage = 1
        prod  = 2
      }
      memory = {
        stage = 1
        prod  = 2
      }
    }
    "application" = {
      cores = {
        stage = 1
        prod  = 2
      }
      memory = {
        stage = 1
        prod  = 2
      }
    }
  }
}