summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authormokha <mokha@cisco.com>2018-12-03 17:06:26 -0700
committermokha <mokha@cisco.com>2018-12-03 17:06:26 -0700
commit8240aa4ab1c6b975b5b06c6ca43bd18bacf890c0 (patch)
treefbdd5b2449aeb99c3a8323d40eeb85cdb95b7fad
parent97a7d1fa6db20233db1adf29d016db5990d762e0 (diff)
create eks cluster.
-rw-r--r--.gitignore2
-rw-r--r--eks/main.tf346
-rw-r--r--eks/variables.tf9
3 files changed, 356 insertions, 1 deletion
diff --git a/.gitignore b/.gitignore
index 79bf838..3d5026c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,2 @@
.terraform*
-terraform.tfvars
+*.tfvars
diff --git a/eks/main.tf b/eks/main.tf
new file mode 100644
index 0000000..c8c404d
--- /dev/null
+++ b/eks/main.tf
@@ -0,0 +1,346 @@
+provider "aws" {
+ access_key = "${var.access_key}"
+ secret_key = "${var.secret_key}"
+ region = "${var.region}"
+}
+
+data "aws_availability_zones" "available" {}
+
+resource "aws_vpc" "voltron" {
+ cidr_block = "10.0.0.0/16"
+
+ tags = "${
+ map(
+ "Name", "terraform-eks-voltron-node",
+ "kubernetes.io/cluster/${var.cluster-name}", "shared",
+ )
+ }"
+}
+
+resource "aws_subnet" "voltron" {
+ count = 2
+
+ availability_zone = "${data.aws_availability_zones.available.names[count.index]}"
+ cidr_block = "10.0.${count.index}.0/24"
+ vpc_id = "${aws_vpc.voltron.id}"
+
+ tags = "${
+ map(
+ "Name", "terraform-eks-voltron-node",
+ "kubernetes.io/cluster/${var.cluster-name}", "shared",
+ )
+ }"
+}
+
+resource "aws_internet_gateway" "voltron" {
+ vpc_id = "${aws_vpc.voltron.id}"
+
+ tags {
+ Name = "terraform-eks-voltron"
+ }
+}
+
+resource "aws_route_table" "voltron" {
+ vpc_id = "${aws_vpc.voltron.id}"
+
+ route {
+ cidr_block = "0.0.0.0/0"
+ gateway_id = "${aws_internet_gateway.voltron.id}"
+ }
+}
+
+resource "aws_route_table_association" "voltron" {
+ count = 2
+
+ subnet_id = "${aws_subnet.voltron.*.id[count.index]}"
+ route_table_id = "${aws_route_table.voltron.id}"
+}
+
+resource "aws_iam_role" "voltron-cluster" {
+ name = "terraform-eks-voltron-cluster"
+
+ assume_role_policy = <<POLICY
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "eks.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
+POLICY
+}
+
+resource "aws_iam_role_policy_attachment" "voltron-cluster-AmazonEKSClusterPolicy" {
+ policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
+ role = "${aws_iam_role.voltron-cluster.name}"
+}
+
+resource "aws_iam_role_policy_attachment" "voltron-cluster-AmazonEKSServicePolicy" {
+ policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
+ role = "${aws_iam_role.voltron-cluster.name}"
+}
+
+resource "aws_security_group" "voltron-cluster" {
+ name = "terraform-eks-voltron-cluster"
+ description = "Cluster communication with worker nodes"
+ vpc_id = "${aws_vpc.voltron.id}"
+
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ tags {
+ Name = "terraform-eks-voltron"
+ }
+}
+
+# OPTIONAL: Allow inbound traffic from your local workstation external IP
+# to the Kubernetes. You will need to replace A.B.C.D below with
+# your real IP. Services like icanhazip.com can help you find this.
+# resource "aws_security_group_rule" "voltron-cluster-ingress-workstation-https" {
+# cidr_blocks = ["A.B.C.D/32"]
+# description = "Allow workstation to communicate with the cluster API Server"
+# from_port = 443
+# protocol = "tcp"
+# security_group_id = "${aws_security_group.voltron-cluster.id}"
+# to_port = 443
+# type = "ingress"
+#}
+
+
+resource "aws_eks_cluster" "voltron" {
+ name = "${var.cluster-name}"
+ role_arn = "${aws_iam_role.voltron-cluster.arn}"
+
+ vpc_config {
+ security_group_ids = ["${aws_security_group.voltron-cluster.id}"]
+ subnet_ids = ["${aws_subnet.voltron.*.id}"]
+ }
+
+ depends_on = [
+ "aws_iam_role_policy_attachment.voltron-cluster-AmazonEKSClusterPolicy",
+ "aws_iam_role_policy_attachment.voltron-cluster-AmazonEKSServicePolicy",
+ ]
+}
+
+
+locals {
+ kubeconfig = <<KUBECONFIG
+
+
+apiVersion: v1
+clusters:
+- cluster:
+ server: ${aws_eks_cluster.voltron.endpoint}
+ certificate-authority-data: ${aws_eks_cluster.voltron.certificate_authority.0.data}
+ name: kubernetes
+contexts:
+- context:
+ cluster: kubernetes
+ user: aws
+ name: aws
+current-context: aws
+kind: Config
+preferences: {}
+users:
+- name: aws
+ user:
+ exec:
+ apiVersion: client.authentication.k8s.io/v1alpha1
+ command: aws-iam-authenticator
+ args:
+ - "token"
+ - "-i"
+ - "${var.cluster-name}"
+KUBECONFIG
+}
+
+output "kubeconfig" {
+ value = "${local.kubeconfig}"
+}
+
+resource "aws_iam_role" "voltron-node" {
+ name = "terraform-eks-voltron-node"
+
+ assume_role_policy = <<POLICY
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "ec2.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
+POLICY
+}
+
+resource "aws_iam_role_policy_attachment" "voltron-node-AmazonEKSWorkerNodePolicy" {
+ policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
+ role = "${aws_iam_role.voltron-node.name}"
+}
+
+resource "aws_iam_role_policy_attachment" "voltron-node-AmazonEKS_CNI_Policy" {
+ policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
+ role = "${aws_iam_role.voltron-node.name}"
+}
+
+resource "aws_iam_role_policy_attachment" "voltron-node-AmazonEC2ContainerRegistryReadOnly" {
+ policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
+ role = "${aws_iam_role.voltron-node.name}"
+}
+
+resource "aws_iam_instance_profile" "voltron-node" {
+ name = "terraform-eks-voltron"
+ role = "${aws_iam_role.voltron-node.name}"
+}
+
+
+resource "aws_security_group" "voltron-node" {
+ name = "terraform-eks-voltron-node"
+ description = "Security group for all nodes in the cluster"
+ vpc_id = "${aws_vpc.voltron.id}"
+
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ tags = "${
+ map(
+ "Name", "terraform-eks-voltron-node",
+ "kubernetes.io/cluster/${var.cluster-name}", "owned",
+ )
+ }"
+}
+
+resource "aws_security_group_rule" "voltron-node-ingress-self" {
+ description = "Allow node to communicate with each other"
+ from_port = 0
+ protocol = "-1"
+ security_group_id = "${aws_security_group.voltron-node.id}"
+ source_security_group_id = "${aws_security_group.voltron-node.id}"
+ to_port = 65535
+ type = "ingress"
+}
+
+resource "aws_security_group_rule" "voltron-node-ingress-cluster" {
+ description = "Allow worker Kubelets and pods to receive communication from the cluster control plane"
+ from_port = 1025
+ protocol = "tcp"
+ security_group_id = "${aws_security_group.voltron-node.id}"
+ source_security_group_id = "${aws_security_group.voltron-cluster.id}"
+ to_port = 65535
+ type = "ingress"
+}
+
+resource "aws_security_group_rule" "voltron-cluster-ingress-node-https" {
+ description = "Allow pods to communicate with the cluster API Server"
+ from_port = 443
+ protocol = "tcp"
+ security_group_id = "${aws_security_group.voltron-cluster.id}"
+ source_security_group_id = "${aws_security_group.voltron-node.id}"
+ to_port = 443
+ type = "ingress"
+}
+
+data "aws_ami" "eks-worker" {
+ filter {
+ name = "name"
+ values = ["amazon-eks-node-v*"]
+ }
+
+ most_recent = true
+ owners = ["602401143452"] # Amazon EKS AMI Account ID
+}
+
+
+# This data source is included for ease of sample architecture deployment
+# and can be swapped out as necessary.
+data "aws_region" "current" {}
+
+# EKS currently documents this required userdata for EKS worker nodes to
+# properly configure Kubernetes applications on the EC2 instance.
+# We utilize a Terraform local here to simplify Base64 encoding this
+# information into the AutoScaling Launch Configuration.
+# More information: https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html
+locals {
+ voltron-node-userdata = <<USERDATA
+#!/bin/bash
+set -o xtrace
+/etc/eks/bootstrap.sh --apiserver-endpoint '${aws_eks_cluster.voltron.endpoint}' --b64-cluster-ca '${aws_eks_cluster.voltron.certificate_authority.0.data}' '${var.cluster-name}'
+USERDATA
+}
+
+resource "aws_launch_configuration" "voltron" {
+ associate_public_ip_address = true
+ iam_instance_profile = "${aws_iam_instance_profile.voltron-node.name}"
+ image_id = "${data.aws_ami.eks-worker.id}"
+ instance_type = "m4.large"
+ name_prefix = "terraform-eks-voltron"
+ security_groups = ["${aws_security_group.voltron-node.id}"]
+ user_data_base64 = "${base64encode(local.voltron-node-userdata)}"
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
+
+resource "aws_autoscaling_group" "voltron" {
+ desired_capacity = 2
+ launch_configuration = "${aws_launch_configuration.voltron.id}"
+ max_size = 2
+ min_size = 1
+ name = "terraform-eks-voltron"
+ vpc_zone_identifier = ["${aws_subnet.voltron.*.id}"]
+
+ tag {
+ key = "Name"
+ value = "terraform-eks-voltron"
+ propagate_at_launch = true
+ }
+
+ tag {
+ key = "kubernetes.io/cluster/${var.cluster-name}"
+ value = "owned"
+ propagate_at_launch = true
+ }
+}
+
+
+locals {
+ config_map_aws_auth = <<CONFIGMAPAWSAUTH
+
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: aws-auth
+ namespace: kube-system
+data:
+ mapRoles: |
+ - rolearn: ${aws_iam_role.voltron-node.arn}
+ username: system:node:{{EC2PrivateDNSName}}
+ groups:
+ - system:bootstrappers
+ - system:nodes
+CONFIGMAPAWSAUTH
+}
+
+output "config_map_aws_auth" {
+ value = "${local.config_map_aws_auth}"
+}
+
diff --git a/eks/variables.tf b/eks/variables.tf
new file mode 100644
index 0000000..4bd66a8
--- /dev/null
+++ b/eks/variables.tf
@@ -0,0 +1,9 @@
+variable "access_key" {}
+variable "secret_key" {}
+variable "region" {
+ default = "us-east-1"
+}
+variable "cluster-name" {
+ default = "terraform-eks-demo"
+ type = "string"
+}