This content originally appeared on DEV Community and was authored by Rodrigo Burgos
Amazon Web Services (AWS) provides a robust infrastructure for deploying scalable applications. In this tutorial, we will walk through setting up an Amazon ElastiCache Redis cluster and an ECS (Fargate) service using Terraform, wiring up a WebSocket gateway in the NestJS API, and automating deployment with GitHub Actions.
This setup includes:
A Redis cluster for caching.
An ECS service running an API container.
Security groups to control network access.
CloudWatch logging for monitoring.
Prerequisites
Before you begin, ensure you have the following installed:
Terraform (latest version)
AWS CLI (configured with credentials)
A VPC with available subnets
An ECR repository for your application image
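The Terraform snippets below reference a few input variables (var.vpc_id, var.subnet_id_a, var.subnet_id_b). A minimal sketch of those declarations, plus the AWS provider, might look like the following; the region matches the us-east-1 used later in the log configuration, and the descriptions are assumptions:

provider "aws" {
  region = "us-east-1"
}

variable "vpc_id" {
  description = "ID of the VPC that hosts the ECS tasks and the Redis cluster"
  type        = string
}

variable "subnet_id_a" {
  description = "First subnet used by the Redis subnet group and the ECS service"
  type        = string
}

variable "subnet_id_b" {
  description = "Second subnet used by the Redis subnet group and the ECS service"
  type        = string
}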
1. Configuring the Redis Cluster
First, create an ElastiCache parameter group to customize Redis settings:
resource "aws_elasticache_parameter_group" "custom_redis_parameter_group_dev" {
name = "custom-redis-parameter-group-dev"
family = "redis7"
parameter {
name = "maxmemory-policy"
value = "allkeys-lru"
}
parameter {
name = "timeout"
value = "3600"
}
}
Now, define a subnet group for Redis:
resource "aws_elasticache_subnet_group" "redis_public_subnet_group_dev" {
name = "redis-public-subnet-group-dev"
subnet_ids = [var.subnet_id_a, var.subnet_id_b]
tags = {
Name = "redis-public-subnet-group-dev"
}
}
Next, create the Redis cluster:
resource "aws_elasticache_cluster" "redis_cluster_dev" {
cluster_id = "redis-cluster-dev"
engine = "redis"
node_type = "cache.t2.micro"
num_cache_nodes = 1
parameter_group_name = aws_elasticache_parameter_group.custom_redis_parameter_group_dev.name
port = 6379
security_group_ids = [aws_security_group.redis_dev_sg.id]
subnet_group_name = aws_elasticache_subnet_group.redis_public_subnet_group_dev.name
}
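If you want the Redis endpoint visible after terraform apply (for example, to configure the API locally), you can expose it as an output. This is a small sketch using the cache_nodes attribute of the single-node cluster:

output "redis_endpoint" {
  description = "Address of the single Redis cache node"
  value       = aws_elasticache_cluster.redis_cluster_dev.cache_nodes[0].address
}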
2. Configuring the ECS Service
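The task definition below pulls its image from aws_ecr_repository.game_api_ecr_dev. If the repository listed in the prerequisites is managed in the same Terraform configuration, it might be declared like this (the repository name is an assumption; adjust it to your existing repository):

resource "aws_ecr_repository" "game_api_ecr_dev" {
  name = "game-api-dev" # assumed name

  image_scanning_configuration {
    scan_on_push = true
  }
}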
Define the ECS Task Definition:
resource "aws_ecs_task_definition" "game_api_task" {
family = "game-api-task-dev"
network_mode = "awsvpc"
requires_compatibilities = ["FARGATE"]
cpu = "256"
memory = "512"
execution_role_arn = aws_iam_role.ecs_task_execution_role.arn
task_role_arn = aws_iam_role.ecs_task_role.arn
container_definitions = jsonencode([
{
name = "game_api_container_dev"
image = "${aws_ecr_repository.game_api_ecr_dev.repository_url}:latest"
cpu = 256
memory = 512
essential = true
portMappings = [
{
containerPort = 3000
hostPort = 3000
protocol = "tcp"
}
]
logConfiguration = {
logDriver = "awslogs"
options = {
awslogs-group = aws_cloudwatch_log_group.game_api_dev_log_group.name
awslogs-region = "us-east-1"
awslogs-stream-prefix = "ecs"
}
}
}
])
}
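The task definition references two IAM roles that are not shown in this article. A minimal sketch of what they could look like (role names are assumptions) follows; the execution role lets ECS pull the image from ECR and write logs to CloudWatch, while the task role is assumed by the application itself:

data "aws_iam_policy_document" "ecs_tasks_assume_role" {
  statement {
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["ecs-tasks.amazonaws.com"]
    }
  }
}

# Used by ECS to pull the container image and push logs
resource "aws_iam_role" "ecs_task_execution_role" {
  name               = "ecs-task-execution-role-dev"
  assume_role_policy = data.aws_iam_policy_document.ecs_tasks_assume_role.json
}

resource "aws_iam_role_policy_attachment" "ecs_task_execution_role_policy" {
  role       = aws_iam_role.ecs_task_execution_role.name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"
}

# Assumed by the running application container
resource "aws_iam_role" "ecs_task_role" {
  name               = "ecs-task-role-dev"
  assume_role_policy = data.aws_iam_policy_document.ecs_tasks_assume_role.json
}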
Create the CloudWatch log group referenced in the task definition's log configuration:
resource "aws_cloudwatch_log_group" "game_api_dev_log_group" {
name = "/ecs/game-api-task-dev"
retention_in_days = 7
}
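To actually run the task, the setup also needs an ECS cluster and a Fargate service. This is a sketch only: the resource names, desired count, and public IP assignment are assumptions, and the tasks reuse the API security group defined in the next section.

resource "aws_ecs_cluster" "game_api_cluster_dev" {
  name = "game-api-cluster-dev"
}

resource "aws_ecs_service" "game_api_service_dev" {
  name            = "game-api-service-dev"
  cluster         = aws_ecs_cluster.game_api_cluster_dev.id
  task_definition = aws_ecs_task_definition.game_api_task.arn
  desired_count   = 1
  launch_type     = "FARGATE"

  network_configuration {
    subnets          = [var.subnet_id_a, var.subnet_id_b]
    security_groups  = [aws_security_group.api_load_balancer_dev_sg.id] # defined in section 3
    assign_public_ip = true # needed to pull from ECR when the subnets have no NAT gateway
  }
}

If you place the service behind a load balancer, add a load_balancer block here pointing at the target group.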
3. Configuring Security Groups
Define the security group for the API Load Balancer:
resource "aws_security_group" "api_load_balancer_dev_sg" {
name = "api-sg-dev"
description = "Allow HTTP traffic"
vpc_id = var.vpc_id
ingress {
description = "Allow HTTP traffic"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "Allow HTTPS traffic"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "Allow API traffic"
from_port = 3000
to_port = 3000
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
description = "Allow all outbound traffic"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
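The security group above is written for an Application Load Balancer in front of the API, although the article does not show the load balancer itself. If you want one, a sketch could look like this (all resource names are assumptions):

resource "aws_lb" "api_load_balancer_dev" {
  name               = "api-load-balancer-dev"
  load_balancer_type = "application"
  security_groups    = [aws_security_group.api_load_balancer_dev_sg.id]
  subnets            = [var.subnet_id_a, var.subnet_id_b]
}

resource "aws_lb_target_group" "api_target_group_dev" {
  name        = "api-target-group-dev"
  port        = 3000
  protocol    = "HTTP"
  target_type = "ip" # required for awsvpc/Fargate tasks
  vpc_id      = var.vpc_id
}

resource "aws_lb_listener" "api_http_listener_dev" {
  load_balancer_arn = aws_lb.api_load_balancer_dev.arn
  port              = 80
  protocol          = "HTTP"

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.api_target_group_dev.arn
  }
}

The ECS service from section 2 would then register its tasks in this target group through a load_balancer block.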
Define the security group for Redis, allowing port 6379 only from the security group attached to the ECS tasks:
resource "aws_security_group" "redis_dev_sg" {
  name        = "redis-sg-dev"
  description = "Security group for Redis cluster"
  vpc_id      = var.vpc_id

  ingress {
    description     = "Allow Redis access from ECS"
    from_port       = 6379
    to_port         = 6379
    protocol        = "tcp"
    security_groups = [aws_security_group.api_load_balancer_dev_sg.id] # assumes the ECS tasks use the API security group defined above
  }

  egress {
    description = "Allow all outbound traffic"
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
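With network access in place, the API container still needs to know where to reach Redis. One option (a sketch; REDIS_HOST and REDIS_PORT are assumed variable names that your application reads) is to inject the endpoint through an environment block inside the container definition from section 2:

# Added inside the container definition object of aws_ecs_task_definition.game_api_task:
environment = [
  { name = "REDIS_HOST", value = aws_elasticache_cluster.redis_cluster_dev.cache_nodes[0].address },
  { name = "REDIS_PORT", value = "6379" }
]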
Implementing a WebSocket Gateway in NestJS
In real-time applications, WebSockets enable bidirectional communication between clients and servers. NestJS provides a built-in WebSocket module to facilitate this. Below, we define a WebSocket gateway using the @nestjs/websockets package.
Setting Up the WebSocket Gateway
The MatchGateway class listens for specific messages and manages client connections.
import {
  SubscribeMessage,
  WebSocketGateway,
  WebSocketServer,
  MessageBody,
  ConnectedSocket,
  OnGatewayConnection
} from "@nestjs/websockets";
import { Server, Socket } from "socket.io";
import { PlayerConnectionDto } from "./dto/player.dto";
import { SocketMessagesEnum } from "@/domain/enum/socket-messages";
import { AddPlayerToRoundUseCase } from "@/use-cases/player/add-player-to-round";

@WebSocketGateway({
  cors: {
    origin: "*",
    methods: ["GET", "POST"]
  },
  transports: ["websocket"]
})
export class MatchGateway implements OnGatewayConnection {
  @WebSocketServer() server: Server;

  constructor(
    private readonly addPlayerToRoundUseCase: AddPlayerToRoundUseCase
  ) {}

  handleConnection(client: Socket): void {
    console.log("Client connected:", client.id);
  }

  @SubscribeMessage(SocketMessagesEnum.PLAYER_CONNECTION)
  async handlePlayerOn(
    @MessageBody() player: PlayerConnectionDto,
    @ConnectedSocket() client: Socket
  ): Promise<void> {
    try {
      // The injected addPlayerToRoundUseCase would typically be invoked here to register the player in the current round
      const playerData = { id: player.id };
      client.emit(SocketMessagesEnum.PLAYER_CONNECTION, playerData);
    } catch (error) {
      console.error("Error in WebSocket handler:", error);
      client.emit(SocketMessagesEnum.ERROR, {
        message: `Error processing player connection: ${error.message}`
      });
    }
  }

  @SubscribeMessage(SocketMessagesEnum.CHART_DATA)
  async handleChartData(): Promise<void> {
    try {
      // Handle real-time chart data
    } catch (error) {
      console.error("Error in chart data handler:", error);
    }
  }
}
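The gateway imports PlayerConnectionDto and SocketMessagesEnum, which are not shown in the article. A minimal sketch consistent with how they are used (the string values and any fields beyond id are assumptions) could be:

// dto/player.dto.ts
export class PlayerConnectionDto {
  id: string;
}

// domain/enum/socket-messages.ts (event names are assumed)
export enum SocketMessagesEnum {
  PLAYER_CONNECTION = "player_connection",
  CHART_DATA = "chart_data",
  ERROR = "error"
}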
Setting Up the WebSocket Adapter
To run the Socket.IO server inside the NestJS application, register the IoAdapter in the bootstrap function:
import { IoAdapter } from "@nestjs/platform-socket.io";
import { NestFactory } from "@nestjs/core";
import { AppModule } from "./modules/app.module";

async function bootstrap() {
  const app = await NestFactory.create(AppModule);
  app.useWebSocketAdapter(new IoAdapter(app));
  app.enableCors({ origin: "*", methods: ["GET", "POST"] });

  const port = process.env.PORT || 3000;
  await app.listen(port);
  console.log(`🚀 Server running on port ${port}`);
}
bootstrap();
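On the client side, connecting with socket.io-client and the same websocket transport might look like the sketch below; the URL, player id, and event strings are assumptions that mirror the gateway configuration and the enum sketch above.

import { io } from "socket.io-client";

// Assumes the API is reachable on port 3000 and the gateway uses the default namespace
const socket = io("http://localhost:3000", { transports: ["websocket"] });

socket.on("connect", () => {
  // Announce the player; the payload shape matches PlayerConnectionDto
  socket.emit("player_connection", { id: "player-123" });
});

socket.on("player_connection", (data) => {
  console.log("Player acknowledged:", data);
});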
Automating Deployment with GitHub Actions
To automate deployments, create a GitHub Actions workflow (.github/workflows/deploy.yml) with the following:
name: Deploy API to ECS

on:
  push:
    branches:
      - dev

jobs:
  deploy:
    runs-on: ubuntu-latest
    env:
      # Adjust these to match your ECR repository, ECS cluster, and ECS service names
      ECR_REPOSITORY: game-api-dev
      ECS_CLUSTER: game-api-cluster-dev
      ECS_SERVICE: game-api-service-dev
    steps:
      - name: Checkout the repository
        uses: actions/checkout@v3

      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v3
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: "us-east-1"

      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v1

      - name: Build, tag, and push the Docker image to Amazon ECR
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          IMAGE_TAG: ${{ github.sha }}
        run: |
          # The task definition pins :latest, so push both the commit SHA and the latest tag
          docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -t $ECR_REGISTRY/$ECR_REPOSITORY:latest .
          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
          docker push $ECR_REGISTRY/$ECR_REPOSITORY:latest

      - name: Deploy to ECS
        run: |
          aws ecs update-service --cluster $ECS_CLUSTER --service $ECS_SERVICE --force-new-deployment
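This workflow expects AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to be stored as repository secrets. Because the task definition pins the :latest tag, a plain --force-new-deployment is enough to roll the service onto the freshly pushed image without registering a new task definition revision.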