1.) Multi-Tier AWS Infrastructure with EKS, EC2, and Containerized Nginx Application
This Cloudscript example deploys a multi-tier architecture on AWS, combining Amazon EKS for Kubernetes orchestration, an EC2 instance in a traditional server role, and a containerized Nginx application for scalable web serving. The configuration establishes a VPC with two public subnets in distinct availability zones for high availability and network segmentation, plus a security group that allows SSH ingress and all outbound traffic. An IAM role with the AmazonEKSClusterPolicy attachment is created for the EKS cluster, which is provisioned across the two subnets. An EC2 instance is configured through Ansible-like tasks that install Nginx and Docker, write the Nginx configuration, and verify that Nginx is serving content. On the Kubernetes side, a Deployment defines the Nginx application with a readiness probe, resource requests and limits, an emptyDir volume, and a LoadBalancer service annotated to use an AWS Network Load Balancer (NLB). An auto-scaling policy adjusts the replica count between 2 and 10 based on CPU utilization. The example shows how Cloudscript combines infrastructure provisioning, configuration management, and container orchestration in a single definition.
providers {aws { provider ="aws" region ="us-east-1" version ="~> 4.0" }}service "webapp" { provider ="aws"infrastructure {# VPC Definitionnetwork "vpc" { cidr_block ="10.0.0.0/16" enable_dns_hostnames =true enable_dns_support =true tags = { Name ="main-vpc" } resource_type ="aws_vpc" }# Subnet Definitionsnetwork "subnet1" { vpc_id ="${infrastructure.network.vpc.id}" cidr_block ="10.0.1.0/24" availability_zone ="us-east-1a" map_public_ip_on_launch =true resource_type ="aws_subnet" }network "subnet2" { vpc_id ="${infrastructure.network.vpc.id}" cidr_block ="10.0.2.0/24" availability_zone ="us-east-1b" map_public_ip_on_launch =true resource_type ="aws_subnet" }# Security Group for SSH Accessnetwork "allow_ssh" { name ="allow_ssh" vpc_id ="${infrastructure.network.vpc.id}" ingress = [ { description ="Allow SSH access" from_port =22 to_port =22 protocol ="tcp" cidr_blocks = ["0.0.0.0/0"] ipv6_cidr_blocks = [] prefix_list_ids = [] security_groups = [] self =false } ] egress = [ { description ="Allow all outbound traffic" from_port =0 to_port =0 protocol ="-1" cidr_blocks = ["0.0.0.0/0"] ipv6_cidr_blocks = [] prefix_list_ids = [] security_groups = [] self =false } ] resource_type ="aws_security_group" }# IAM Role for EKS Clusteriam "eks_cluster_iam" { name ="eks-cluster" assume_role_policy =file("role.json") resource_type ="aws_iam_role" }# IAM Policy Attachment for EKS Clusteriam "eks_cluster_policy_attachment" { role ="${infrastructure.iam.eks_cluster_iam.name}" policy_arn ="arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" resource_type ="aws_iam_role_policy_attachment" }# EKS Cluster Definitioncompute "eks_cluster_main" { name ="main-cluster" role_arn ="${infrastructure.iam.eks_cluster_iam.arn}" vpc_config = { subnet_ids = ["${infrastructure.network.subnet1.id}","${infrastructure.network.subnet2.id}" ] } depends_on = ["infrastructure.iam.eks_cluster_policy_attachment"] resource_type ="aws_eks_cluster" }# EC2 Instance for Web Servercompute "web_server" { instance_type ="t2.micro" ami ="your-ami" subnet_id ="${infrastructure.network.subnet1.id}" vpc_security_group_ids = ["${infrastructure.network.allow_ssh.id}"] tags = { Name ="main_web_server" } depends_on = ["infrastructure.network.vpc"] resource_type ="aws_instance" } }configuration {play "webapp" { name ="Configure webapp" hosts ="{{ target_servers | default('all') }}" become =true vars = { target_web_servers ="web_servers" target_db_servers ="db_servers" }# Packages tasks blocktask { name ="Packages tasks"block {task { name ="Install required packages"package { name ="{{ item }}" state ="present" update_cache =true } loop = ["nginx","docker"] check_mode =false# Ensures packages are installed even in check mode } } }# Other tasks blocktask { name ="Other tasks"block {task { name ="Create/modify /etc/nginx/nginx.conf"copy { dest ="/etc/nginx/nginx.conf" content =file("nginx.conf") mode ="0644" owner ="root" group ="root" } notify = ["restart nginx"] when ="ansible_distribution == 'Ubuntu'" }task { name ="Ensure nginx is started"service { name ="nginx" state ="started" enabled ="yes" } register ="nginx_started_result" retries =3 delay =5 failed_when ="nginx_started_result is failed" changed_when ="nginx_started_result is changed" when ="ansible_distribution == 'Ubuntu'" }task { name ="Verify nginx is serving content"uri { url ="http://localhost" status_code =200 } register ="nginx_response" retries =3 delay =5 until ="nginx_response.status == 200" } } }# Handlershandler { name ="restart nginx"service { name ="nginx" state ="restarted" } } } 
}containers {app "web_app" { image ="nginx:latest" type ="Deployment" replicas =3 command = ["/bin/sh"] args = ["-c","nginx -g 'daemon off;'"] working_dir ="/usr/share/nginx/html" readiness_probe = { http_get = { path ="/healthz" port =80 } initial_delay_seconds =5 period_seconds =10 } resources = { limits = { cpu ="500m" memory ="512Mi" } requests = { cpu ="250m" memory ="256Mi" } } empty_dir_volumes = [ { name ="cache" size_limit ="1Gi" } ] volume_mounts = [ { name ="cache" mountPath ="/cache" } ] ports = [ { container_port =80 service_port =80 } ] service = { type ="LoadBalancer" annotations = {"service.beta.kubernetes.io/aws-load-balancer-type"="nlb" } } node_selector = {"kubernetes.io/os"="linux""node-type"="web" } auto_scaling = { min_replicas =2 max_replicas =10 target_cpu_utilization_percentage =80 } } }deployment {"infrastructure.compute.web_server" maps_to "configuration.play.webapp" }}
2.) Multi-Tier GCP Infrastructure with GKE Cluster, Compute Engine MySQL Server, and Containerized Admin Tools
This Cloudscript example deploys a multi-tier architecture on Google Cloud Platform, combining Google Kubernetes Engine (GKE) for container orchestration, a Compute Engine instance hosting a dedicated MySQL database, and containerized administrative tools. The configuration provisions a GKE cluster with an initial node count and a separate node pool in the specified zone, then creates a Compute Engine instance to serve as the MySQL server with an appropriate machine type, boot disk image, and network interface. Ansible-like tasks install MySQL and set the root password on the database host. A CronJob container runs a daily mysqldump backup, and an administrative tool is deployed as a Kubernetes Deployment with auto-scaling between 1 and 5 replicas, exposed internally through a ClusterIP service. The example shows how Cloudscript combines infrastructure provisioning, configuration management, and container orchestration on GCP to deliver a database environment with supporting administrative tooling.
providers {google { provider ="google" project ="your-project-name" region ="us-central1" zone ="us-central1-c" version ="~> 4.0" }}service "database" { provider ="google"infrastructure {compute "gke_cluster" { name ="primary-cluster" resource_type ="google_container_cluster" initial_node_count =1 location ="us-central1-c" }compute "gke_node_pool" { name ="default-pool" cluster ="${infrastructure.compute.gke_cluster.name}" node_count =2 resource_type ="google_container_node_pool" }compute "mysql_server" { name ="mysql-server-instance" machine_type ="n1-standard-1" resource_type ="google_compute_instance" tags = ["db-server"] boot_disk = [ {"initialize_params": {"image":"debian-cloud/debian-11" } } ] network_interface = [ {"network":"default","access_config": {} } ] } }configuration {play "db_setup" { name ="Configure MySQL Database" hosts ="{{ target_servers | default('all') }}" become =truetask { name ="Install MySQL"package { name ="mysql-server" state ="present" update_cache =true } }task { name ="Configure MySQL root password"shell { cmd ="mysqladmin -u root password 'password'" } when ="ansible_os_family == 'Debian'" } } }containers {job "db-backup" { image ="mysql:5.7" type ="CronJob" schedule ="0 2 * * *" command = ["/usr/bin/mysqldump"] args = ["-u","root","-psecretpass","mydb"] restartPolicy ="OnFailure" }app "admin-tool" { image ="nginx:alpine" type ="Deployment" replicas =1auto_scaling { min_replicas =1 max_replicas =5 target_cpu_utilization_percentage =70 }service { type ="ClusterIP" ports = [ { container_port =80 service_port =80 } ] } } }deployment {"infrastructure.compute.mysql_server" maps_to "configuration.play.db_setup" }}
3.) Multi-Tier AWS Infrastructure with Application Load Balancer, EC2 Apache Servers, and Kubernetes Nginx Frontend
This Cloudscript example deploys a multi-tier architecture on AWS that combines an Application Load Balancer (ALB), EC2 instances running Apache, and a Kubernetes-based Nginx frontend. The setup creates a dedicated VPC with a 10.10.0.0/16 CIDR block and two public subnets in different availability zones (us-west-2a and us-west-2b) for fault tolerance and high availability. An ALB spans both subnets to distribute incoming traffic to the backend EC2 instances, which are provisioned in the first public subnet from the specified AMI and configured with Apache (httpd) through Ansible-like tasks that install and start the web server. A Kubernetes Deployment runs the Nginx frontend with two replicas, auto-scales between 2 and 10 replicas based on CPU utilization, and is exposed through a Network Load Balancer (NLB) service. The example again combines infrastructure provisioning, configuration management, and container orchestration in a single Cloudscript definition.
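The Cloudscript source for this example is not included above. The following is a minimal sketch of what it could look like, following the conventions of examples 1 and 2 and the architecture described; the resource names, the AMI placeholder, the aws_lb field names, and the omission of listeners, target groups, and security groups are illustrative assumptions rather than part of the original example.

providers {
  aws {
    provider = "aws"
    region   = "us-west-2"
    version  = "~> 4.0"
  }
}

service "frontend" {
  provider = "aws"
  infrastructure {
    # Dedicated VPC for the frontend tier (names here are illustrative)
    network "vpc" {
      cidr_block           = "10.10.0.0/16"
      enable_dns_hostnames = true
      enable_dns_support   = true
      tags                 = { Name = "frontend-vpc" }
      resource_type        = "aws_vpc"
    }
    # Public subnets in two availability zones
    network "public1" {
      vpc_id                  = "${infrastructure.network.vpc.id}"
      cidr_block              = "10.10.1.0/24"
      availability_zone       = "us-west-2a"
      map_public_ip_on_launch = true
      resource_type           = "aws_subnet"
    }
    network "public2" {
      vpc_id                  = "${infrastructure.network.vpc.id}"
      cidr_block              = "10.10.2.0/24"
      availability_zone       = "us-west-2b"
      map_public_ip_on_launch = true
      resource_type           = "aws_subnet"
    }
    # Application Load Balancer spanning both subnets (listeners and target groups omitted)
    network "alb" {
      name               = "frontend-alb"
      load_balancer_type = "application"
      subnets            = ["${infrastructure.network.public1.id}", "${infrastructure.network.public2.id}"]
      resource_type      = "aws_lb"
    }
    # EC2 instance running Apache in the first public subnet
    compute "web_server" {
      instance_type = "t2.micro"
      ami           = "your-ami"
      subnet_id     = "${infrastructure.network.public1.id}"
      tags          = { Name = "apache-web-server" }
      resource_type = "aws_instance"
    }
  }

  configuration {
    play "apache_setup" {
      name   = "Configure Apache web servers"
      hosts  = "{{ target_servers | default('all') }}"
      become = true

      task {
        name = "Install Apache"
        package {
          name         = "httpd"
          state        = "present"
          update_cache = true
        }
      }
      task {
        name = "Ensure Apache is started"
        service {
          name    = "httpd"
          state   = "started"
          enabled = "yes"
        }
      }
    }
  }

  containers {
    app "nginx_frontend" {
      image    = "nginx:latest"
      type     = "Deployment"
      replicas = 2
      ports    = [ { container_port = 80 service_port = 80 } ]
      service = {
        type        = "LoadBalancer"
        annotations = { "service.beta.kubernetes.io/aws-load-balancer-type" = "nlb" }
      }
      auto_scaling = {
        min_replicas                      = 2
        max_replicas                      = 10
        target_cpu_utilization_percentage = 80
      }
    }
  }

  deployment {
    "infrastructure.compute.web_server" maps_to "configuration.play.apache_setup"
  }
}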