#######################################################
#### THIS IS NOT HOW YOU DEPLOY K3S IN PROD
#### THIS DOES NOT USE CERTS FOR INTERNAL COMMUNICATION
#### USE THE SECURE SCRIPT FOR ACTUAL DEPLOYMENT
####
#### By Sagnik Bhattacharya, 2024
####
#######################################################
# Installing dependencies (required providers)
terraform {
  required_version = ">= 0.14.0"
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = ">= 2.0.0"
    }
    tls = {
      source  = "hashicorp/tls"
      version = ">= 3.1.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.0"
    }
  }
}
provider "openstack" {
  auth_url    = var.auth_url
  region      = var.region
  tenant_name = var.tenant_name
  user_name   = var.user_name
  password    = var.password
  domain_name = var.domain_name
  insecure    = true # DANGER
}
variable "auth_url" {
  description = "OpenStack authentication URL"
  type        = string
}

variable "region" {
  description = "OpenStack region"
  type        = string
}

variable "tenant_name" {
  description = "OpenStack tenant name"
  type        = string
}

variable "user_name" {
  description = "OpenStack username"
  type        = string
}

variable "password" {
  description = "OpenStack password"
  type        = string
  sensitive   = true
}

variable "domain_name" {
  description = "OpenStack domain name"
  type        = string
}
# Broken for some reason; cause unknown
# variable "ssh_public_key" {
#   description = "Path to the SSH public key"
#   type        = string
# }
variable "num_worker_nodes" {
  description = "Number of worker nodes to create"
  type        = number
}

variable "master_flavor" {
  description = "Flavor for the master node"
  type        = string
}

variable "worker_flavor" {
  description = "Flavor for the worker nodes"
  type        = string
}

variable "os_image" {
  description = "OS image to use for instances"
  type        = string
}

variable "volume_size" {
  description = "Size of the volumes to create for nodes"
  type        = number
}

variable "dns_servers" {
  description = "List of DNS servers for the instances"
  type        = list(string)
}

variable "floating_ip_pool" {
  description = "Name of the floating IP pool for the instances"
  type        = string
}
variable "delay_seconds" {
  description = "The delay in seconds before creating the worker nodes"
  type        = number
  default     = 120
  ## This wait exists because we had no reliable way to detect when the cluster is up.
  ## We could have polled for the token file instead, but K3s creates that file as soon
  ## as the process starts, so its existence alone proves nothing.
  ## Hence the fixed 120 s timer; the right value depends on cluster load and network speed.
}
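
# None of the variables above (except delay_seconds) have defaults, so they must be
# supplied at plan/apply time. A minimal terraform.tfvars sketch is shown below purely
# for illustration; every value is a placeholder and must be replaced with the details
# of your own OpenStack project.
#
#   auth_url         = "https://openstack.example.com:5000/v3"   # hypothetical endpoint
#   region           = "RegionOne"
#   tenant_name      = "my-project"
#   user_name        = "my-user"
#   password         = "change-me"
#   domain_name      = "Default"
#   num_worker_nodes = 2
#   master_flavor    = "m1.medium"
#   worker_flavor    = "m1.small"
#   os_image         = "Ubuntu 22.04"
#   volume_size      = 20
#   dns_servers      = ["1.1.1.1", "8.8.8.8"]
#   floating_ip_pool = "public"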
# Delay resource for master
resource "null_resource" "delay_master" {
  provisioner "local-exec" {
    command = "sleep ${var.delay_seconds}"
  }
  triggers = {
    instance_id_master = openstack_compute_instance_v2.k3s_master.id
  }
}

# Delay resource for workers
resource "null_resource" "delay_workers" {
  provisioner "local-exec" {
    command = "sleep ${var.delay_seconds}"
  }
  triggers = {
    instance_id_workers = join(",", openstack_compute_instance_v2.k3s_workers.*.id)
  }
}
# Define the network
resource "openstack_networking_network_v2" "network" {
  name           = "k3s-network"
  admin_state_up = "true"
}

# Define the subnet
resource "openstack_networking_subnet_v2" "subnet" {
  name            = "k3s-subnet"
  network_id      = openstack_networking_network_v2.network.id
  cidr            = "192.168.1.0/24"
  ip_version      = 4
  dns_nameservers = var.dns_servers
}
# Look up the external network that backs the floating IP pool
data "openstack_networking_network_v2" "floating_ip" {
  name = var.floating_ip_pool
}

# Define the router
resource "openstack_networking_router_v2" "router" {
  name                = "k3s-router"
  admin_state_up      = "true"
  external_network_id = data.openstack_networking_network_v2.floating_ip.id
}

# Connect the router to the subnet
resource "openstack_networking_router_interface_v2" "router_interface" {
  router_id = openstack_networking_router_v2.router.id
  subnet_id = openstack_networking_subnet_v2.subnet.id
}
# Adding a floating IP to the master ## DEPRECATED
resource "openstack_networking_floatingip_v2" "fip" {
  pool = var.floating_ip_pool
}

resource "openstack_compute_floatingip_associate_v2" "fip_assoc" {
  floating_ip = openstack_networking_floatingip_v2.fip.address
  instance_id = openstack_compute_instance_v2.k3s_master.id
}
# Creating SSH keys
resource "tls_private_key" "ssh" {
  algorithm   = "ECDSA"
  ecdsa_curve = "P256"
}

# Saving the key locally
resource "local_file" "private_key" {
  content  = tls_private_key.ssh.private_key_pem
  filename = "${path.module}/id_rsa"
}
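
# Note: local_file writes the PEM with the provider's default permissions, so the key
# can end up readable by other users on the machine running Terraform. Even for this
# deliberately insecure variant, the hashicorp/local provider supports a file_permission
# argument; a possible tweak (not applied here, shown only as a sketch) would be:
#
#   resource "local_file" "private_key" {
#     content         = tls_private_key.ssh.private_key_pem
#     filename        = "${path.module}/id_rsa"
#     file_permission = "0600"
#   }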
# Define the keypair for SSH
resource "openstack_compute_keypair_v2" "default" {
  name = "k3s-key"
  # public_key = file(var.ssh_public_key)
  public_key = tls_private_key.ssh.public_key_openssh
}

# Create a new security group
resource "openstack_networking_secgroup_v2" "secgroup" {
  name        = "k3s-secgroup"
  description = "Security group for k3s"
}

# # Allow SSH traffic
# resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_ssh" {
#   direction         = "ingress"
#   ethertype         = "IPv4"
#   protocol          = "tcp"
#   port_range_min    = 22
#   port_range_max    = 22
#   remote_ip_prefix  = "0.0.0.0/0"
#   security_group_id = openstack_networking_secgroup_v2.secgroup.id
# }
########### DON'T DO THIS, IT'S VERY BAD ########################
# Allow all inbound traffic
resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_all_inbound" {
  direction         = "ingress"
  ethertype         = "IPv4"
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.secgroup.id
}
#################################################################

# Allow all outbound traffic
resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_all_outbound" {
  direction         = "egress"
  ethertype         = "IPv4"
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.secgroup.id
}
# Define the master node
resource "openstack_compute_instance_v2" "k3s_master" {
  name            = "kube-master"
  image_name      = var.os_image
  flavor_name     = var.master_flavor
  key_pair        = openstack_compute_keypair_v2.default.name
  security_groups = ["default", openstack_networking_secgroup_v2.secgroup.name]

  network {
    uuid = openstack_networking_network_v2.network.id
  }

  # This thing does all the magic, a glorified bash script XD
  user_data = <<-EOT
    #!/bin/bash
    apt-get update
    apt-get install -y curl
    echo "Before snap"
    snap install helm --classic
    # Install kubectl
    curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
    install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
    kubectl version --client
    echo "before K3S"
    # Install K3s with a NoExecute taint if there are worker nodes
    if [ ${var.num_worker_nodes} -gt 0 ]; then
      curl -sfL https://get.k3s.io | sh -s - --node-taint key=value:NoExecute --disable traefik --disable-agent --tls-san 127.0.0.1
    else
      # Install K3s without the taint, allowing the master to schedule pods
      curl -sfL https://get.k3s.io | sh -s - --disable traefik --disable-agent --tls-san 127.0.0.1
    fi
    # Wait for the node token, then copy it into a world-readable file
    while [ ! -f /var/lib/rancher/k3s/server/node-token ]; do
      sleep 5
    done
    mkdir -p /var/lib/rancher/k3s/server/
    echo $(cat /var/lib/rancher/k3s/server/node-token) > /var/lib/rancher/k3s/server/token
    chmod 777 /var/lib/rancher/k3s/server/token
    ls -ltr /var/lib/rancher/k3s/server/token
    # Format and mount the attached volume at /mnt/data
    mkdir -p /mnt/data
    mkfs.ext4 /dev/vdb
    echo '/dev/vdb /mnt/data ext4 defaults 0 0' >> /etc/fstab
    mount -a
    # Make the kubeconfig readable and export it for login shells
    chmod 644 /etc/rancher/k3s/k3s.yaml
    echo "export KUBECONFIG=/etc/rancher/k3s/k3s.yaml" >> /etc/profile
  EOT

  metadata = {
    instance_role = "master"
  }
}
# Define the volume for the master node
resource "openstack_blockstorage_volume_v3" "k3s_master_volume" {
  name = "k3s-master-volume"
  size = var.volume_size
}

# Attach the volume to the master node
resource "openstack_compute_volume_attach_v2" "k3s_master_volume_attach" {
  instance_id = openstack_compute_instance_v2.k3s_master.id
  volume_id   = openstack_blockstorage_volume_v3.k3s_master_volume.id
}
# Define the worker nodes
resource "openstack_compute_instance_v2" "k3s_workers" {
  count           = var.num_worker_nodes
  name            = "kubeworker-${count.index}"
  image_name      = var.os_image
  flavor_name     = var.worker_flavor
  key_pair        = openstack_compute_keypair_v2.default.name
  security_groups = ["default", openstack_networking_secgroup_v2.secgroup.name]

  depends_on = [
    openstack_compute_instance_v2.k3s_master,
    null_resource.delay_master
  ]

  network {
    uuid = openstack_networking_network_v2.network.id
  }

  # This script installs the necessary software, prepares the mount point,
  # fetches the join token from the master over SSH, and joins the cluster
  user_data = <<-EOT
    #!/bin/bash
    echo "hello"
    apt-get update
    apt-get install -y curl
    # Create a mount point for the attached volume
    mkdir -p /mnt/data
    mkfs.ext4 /dev/vdb
    echo '/dev/vdb /mnt/data ext4 defaults 0 0' >> /etc/fstab
    mount -a
    # Save the private key
    echo '${tls_private_key.ssh.private_key_pem}' > /home/ubuntu/.ssh/id_rsa
    chmod 600 /home/ubuntu/.ssh/id_rsa
    # Poll the master until the join token is readable
    while [ -z "$TOKEN" ]; do
      TOKEN=$(ssh -o StrictHostKeyChecking=no -i /home/ubuntu/.ssh/id_rsa ubuntu@${openstack_compute_instance_v2.k3s_master.network.0.fixed_ip_v4} 'sudo cat /var/lib/rancher/k3s/server/token')
      sleep 5
    done
    # Join the cluster as an agent
    curl -sfL https://get.k3s.io | K3S_URL=https://${openstack_compute_instance_v2.k3s_master.network.0.fixed_ip_v4}:6443 K3S_TOKEN=$TOKEN sh -
  EOT

  # Earlier remote-exec approach, kept for reference
  # provisioner "remote-exec" {
  #   inline = [
  #     "TOKEN=$(ssh -o StrictHostKeyChecking=no -l ubuntu ${openstack_compute_instance_v2.k3s_master.network.0.fixed_ip_v4} 'cat /var/lib/rancher/k3s/server/token')",
  #     "curl -sfL https://get.k3s.io | K3S_URL=http://${openstack_compute_instance_v2.k3s_master.network.0.fixed_ip_v4}:6443 K3S_TOKEN=$TOKEN sh -"
  #   ]
  # }

  # Connection settings; only used by provisioners, which are commented out above
  connection {
    type        = "ssh"
    user        = "ubuntu"
    private_key = tls_private_key.ssh.private_key_pem
    host        = self.access_ip_v4
  }

  metadata = {
    instance_role = "worker"
  }
}
# Define the volumes for the worker nodes
resource "openstack_blockstorage_volume_v3" "k3s_worker_volumes" {
  count = var.num_worker_nodes
  name  = "k3s-worker-volume-${count.index}"
  size  = var.volume_size
}

# Attach the volumes to the worker nodes
resource "openstack_compute_volume_attach_v2" "k3s_worker_volume_attach" {
  count       = var.num_worker_nodes
  instance_id = element(openstack_compute_instance_v2.k3s_workers.*.id, count.index)
  volume_id   = element(openstack_blockstorage_volume_v3.k3s_worker_volumes.*.id, count.index)
  # Ensure attachment only happens after instance and volume creation
  depends_on = [
    openstack_compute_instance_v2.k3s_workers,
    openstack_blockstorage_volume_v3.k3s_worker_volumes
  ]
}
## Works up to here
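
# No provider "kubernetes" block is declared in this file, so the kubernetes_*
# resources below rely on credentials coming from the environment (for example
# KUBE_CONFIG_PATH). A minimal sketch of an explicit provider block is left commented
# out; the kubeconfig path is an assumption and would have to be a local copy of
# /etc/rancher/k3s/k3s.yaml fetched from the master with its server address rewritten
# to the floating IP.
#
#   provider "kubernetes" {
#     config_path = "${path.module}/k3s.yaml"   # hypothetical local copy of the kubeconfig
#   }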
data "kubernetes_namespace" "existing" {
  metadata {
    name = "kube-system"
  }
}

resource "kubernetes_namespace" "default" {
  count      = data.kubernetes_namespace.existing.id != null ? 0 : 1
  depends_on = [null_resource.delay_workers]
  metadata {
    name = "kube-system"
  }
}
resource "kubernetes_deployment" "traefik" {
  metadata {
    name      = "traefik"
    namespace = "kube-system"
    labels = {
      app = "traefik"
    }
  }
  spec {
    replicas = 1
    selector {
      match_labels = {
        app = "traefik"
      }
    }
    template {
      metadata {
        labels = {
          app = "traefik"
        }
      }
      spec {
        container {
          name  = "traefik"
          image = "traefik:v2.4"
          args  = ["--providers.kubernetescrd", "--entrypoints.web.Address=:80", "--entrypoints.websecure.Address=:443"]
          port {
            name           = "web"
            container_port = 80
          }
          port {
            name           = "websecure"
            container_port = 443
          }
        }
      }
    }
  }
}
resource "kubernetes_service" "traefik" {
  metadata {
    name      = "traefik"
    namespace = "kube-system"
    labels = {
      app = "traefik"
    }
  }
  spec {
    selector = {
      app = "traefik"
    }
    type = "LoadBalancer"
    port {
      name        = "web"
      port        = 80
      target_port = 80
    }
    port {
      name        = "websecure"
      port        = 443
      target_port = 443
    }
  }
}
output "traefik_lb_ip" {
  value = flatten([for s in kubernetes_service.traefik.status : [for i in s.load_balancer.ingress : i.ip]])
}
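
# The floating IP associated with the master earlier is never surfaced, which makes it
# awkward to SSH in or fetch the kubeconfig after apply. A small addition along these
# lines exposes it (the output name is chosen here purely for illustration):

output "master_floating_ip" {
  description = "Floating IP of the k3s master node"
  value       = openstack_networking_floatingip_v2.fip.address
}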