# nomad-cluster.tf
variable "enable_nomad" {
description = "Nomad builder fleet is used for CCIE v2. It is enabled by default."
default = "1"
}
variable "nomad_client_instance_type" {
description = "instance type for the nomad clients. We recommend a XYZ instance"
default = "m4.xlarge"
}
variable "max_clients_count" {
description = "max number of nomad clients"
default = "2"
}
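
# Illustrative example (not part of the module itself): these variables are
# typically overridden from the root module or a terraform.tfvars file, e.g.
# a larger instance type and a higher client ceiling. Setting
# enable_nomad = "0" removes the fleet entirely.
#
#   enable_nomad               = "1"
#   nomad_client_instance_type = "m4.2xlarge"
#   max_clients_count          = "4"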
resource "aws_security_group" "nomad_sg" {
count = "${var.enable_nomad}"
name = "${var.prefix}_nomad_sg"
description = "SG for CircleCI nomad server/client"
vpc_id = "${var.aws_vpc_id}"
ingress {
from_port = 4646
to_port = 4648
protocol = "tcp"
cidr_blocks = ["${data.aws_subnet.subnet.cidr_block}"]
}
# For SSHing into 2.0 build
ingress {
from_port = 64535
to_port = 65535
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_security_group" "ssh_sg" {
count = "${var.enable_nomad}"
name = "${var.prefix}_ssh_sg"
description = "SG for SSH access"
vpc_id = "${var.aws_vpc_id}"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
# data "template_file" "nomad_client_config" {
# template = "${file("templates/nomad-client.hcl.tpl")}"
# vars {
# nomad_server = "${aws_instance.services.private_ip}"
# }
# }
data "template_file" "nomad_user_data"{
template = "${file("templates/nomad_user_data.tpl")}"
vars {
nomad_server = "${aws_instance.services.private_ip}"
http_proxy = "${var.http_proxy}"
https_proxy = "${var.https_proxy}"
no_proxy = "${var.no_proxy}"
}
}
resource "aws_launch_configuration" "clients_lc" {
count = "${var.enable_nomad}"
instance_type = "${var.nomad_client_instance_type}"
image_id = "${lookup(var.ubuntu_ami, var.aws_region)}"
key_name = "${var.aws_ssh_key_name}"
root_block_device = {
volume_type = "gp2"
volume_size = "200"
}
security_groups = ["${aws_security_group.nomad_sg.id}", "${aws_security_group.ssh_sg.id}"]
user_data = "${data.template_file.nomad_user_data.rendered}"
lifecycle {
create_before_destroy = true
}
}
resource "aws_autoscaling_group" "clients_asg" {
count = "${var.enable_nomad}"
name = "${var.prefix}_nomad_clients_asg"
vpc_zone_identifier = ["${var.aws_subnet_id}"]
launch_configuration = "${aws_launch_configuration.clients_lc.name}"
max_size = "${var.max_clients_count}"
min_size = 0
desired_capacity = 1
force_delete = true
tag {
key = "Name"
value = "${var.prefix}-nomad-client"
propagate_at_launch = "true"
}
}
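
# Illustrative sketch (not part of the upstream module): exposing the ASG name
# as an output for a wrapping module. The splat plus join handles the counted
# resource, returning "" when enable_nomad is "0".
#
# output "nomad_clients_asg_name" {
#   value = "${join("", aws_autoscaling_group.clients_asg.*.name)}"
# }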