- AWS CLI installed and configured with proper IAM permissions.
- kubectl installed.
- eksctl installed (to simplify EKS cluster creation).
- AWS IAM Authenticator installed (usually bundled with kubectl nowadays).
- helm installed (optional, if you want to install the AWS Load Balancer Controller via Helm).
# Create an EKS cluster with a managed node group (2-4 t3.medium workers).
eksctl create cluster \
  --name k8s-demo-cluster \
  --version 1.31 \
  --region us-west-2 \
  --nodegroup-name standard-workers \
  --node-type t3.medium \
  --nodes 2 \
  --nodes-min 2 \
  --nodes-max 4 \
  --managed

This creates an EKS cluster named k8s-demo-cluster with a managed node group.
eksctl automatically updates your kubeconfig. Confirm by running:
kubectl get nodes

You should see the worker nodes listed.
# Create a dedicated namespace and make it the default for the current context.
kubectl create namespace k8s-demo
kubectl config set-context --current --namespace=k8s-demo

Apply the MongoDB StatefulSet manifest:
cat << EOF | kubectl apply -f -
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mongo
  namespace: k8s-demo
spec:
  # Must match the headless Service name so pods get stable DNS names
  # (mongo-0.mongo, mongo-1.mongo, mongo-2.mongo).
  serviceName: mongo
  replicas: 3
  selector:
    matchLabels:
      role: db
  template:
    metadata:
      labels:
        role: db
        env: demo
        replicaset: rs0.main
    spec:
      affinity:
        # Prefer to spread replica-set members across different nodes.
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: replicaset
                      operator: In
                      values:
                        - rs0.main
                topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
        - name: mongo
          image: mongo:4.2
          command:
            - "numactl"
            - "--interleave=all"
            - "mongod"
            - "--wiredTigerCacheSizeGB"
            - "0.1"
            - "--bind_ip"
            - "0.0.0.0"
            - "--replSet"
            - "rs0"
          ports:
            - containerPort: 27017
          volumeMounts:
            - name: mongodb-persistent-storage-claim
              mountPath: /data/db
  volumeClaimTemplates:
    - metadata:
        name: mongodb-persistent-storage-claim
      spec:
        accessModes:
          - ReadWriteOnce
        storageClassName: gp2
        resources:
          requests:
            # EBS gp2 volumes have a 1 GiB minimum; 0.5Gi cannot be provisioned.
            storage: 1Gi
EOF

cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: mongo
  namespace: k8s-demo
  labels:
    role: db
    env: demo
spec:
  ports:
    - port: 27017
      targetPort: 27017
  # clusterIP: None makes this a headless Service, required for the
  # StatefulSet pods to get stable per-pod DNS names.
  clusterIP: None
  selector:
    role: db
EOF

cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: mongodb-secret
  namespace: k8s-demo
type: Opaque
data:
  # NOTE: base64 is encoding, not encryption — do not commit real credentials.
  username: YWRtaW4= # base64 for "admin"
  password: cGFzc3dvcmQ= # base64 for "password"
EOF

cat << EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: api
  namespace: k8s-demo
  labels:
    role: api
    env: demo
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 25%
  selector:
    matchLabels:
      role: api
  template:
    metadata:
      labels:
        role: api
    spec:
      containers:
        - name: api
          image: cloudacademydevops/api:v3
          imagePullPolicy: Always
          env:
            # Hosts without an explicit port default to 27017 in a MongoDB URI.
            - name: MONGO_CONN_STR
              value: mongodb://mongo-0.mongo,mongo-1.mongo,mongo-2.mongo:27017/langdb?replicaSet=rs0
            - name: MONGO_USERNAME
              valueFrom:
                secretKeyRef:
                  name: mongodb-secret
                  key: username
            - name: MONGO_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mongodb-secret
                  key: password
          ports:
            - containerPort: 8080
          livenessProbe:
            httpGet:
              path: /ok
              port: 8080
            initialDelaySeconds: 2
            periodSeconds: 5
          readinessProbe:
            httpGet:
              path: /ok
              port: 8080
            initialDelaySeconds: 5
            periodSeconds: 5
            successThreshold: 1
EOF

Note: the frontend env variable REACT_APP_APIHOSTPORT requires the API load balancer DNS (we will create it next).
For now, deploy the frontend with a placeholder:
cat << EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend
  namespace: k8s-demo
  labels:
    role: frontend
    env: demo
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 25%
  selector:
    matchLabels:
      role: frontend
  template:
    metadata:
      labels:
        role: frontend
    spec:
      containers:
        - name: frontend
          image: cloudacademydevops/frontend:v11
          imagePullPolicy: Always
          env:
            # Placeholder — updated later with the real ALB DNS name.
            - name: REACT_APP_APIHOSTPORT
              value: "http://api.k8s-demo.local:8080"
          ports:
            - containerPort: 8080
          livenessProbe:
            httpGet:
              path: /ok
              port: 8080
            initialDelaySeconds: 2
            periodSeconds: 5
          readinessProbe:
            httpGet:
              path: /ok
              port: 8080
            initialDelaySeconds: 5
            periodSeconds: 5
            successThreshold: 1
EOF

# Service for API
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: api-service
  namespace: k8s-demo
spec:
  selector:
    role: api
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 8080
  type: NodePort
EOF
# Service for Frontend
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
name: frontend-service
namespace: k8s-demo
spec:
selector:
role: frontend
ports:
- protocol: TCP
port: 8080
targetPort: 8080
type: NodePort
EOFTo expose your frontend with an AWS Application Load Balancer (ALB), use the official AWS Load Balancer Controller.
# Associate an IAM OIDC provider so IAM roles can be bound to service accounts.
eksctl utils associate-iam-oidc-provider \
  --region us-west-2 \
  --cluster k8s-demo-cluster \
  --approve

Download the policy:
curl -o iam_policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/main/docs/install/iam_policy.jsonCreate the IAM policy:
aws iam create-policy \
  --policy-name AWSLoadBalancerControllerIAMPolicy \
  --policy-document file://iam_policy.json

# Create a Kubernetes service account bound to the IAM policy (IRSA).
# Replace <YOUR_ACCOUNT_ID> with your AWS account ID.
eksctl create iamserviceaccount \
  --cluster k8s-demo-cluster \
  --namespace kube-system \
  --name aws-load-balancer-controller \
  --attach-policy-arn arn:aws:iam::<YOUR_ACCOUNT_ID>:policy/AWSLoadBalancerControllerIAMPolicy \
  --approve

helm repo add eks https://aws.github.io/eks-charts
helm repo update
# serviceAccount.create=false: reuse the IRSA-bound account created above.
helm install aws-load-balancer-controller eks/aws-load-balancer-controller \
  -n kube-system \
  --set clusterName=k8s-demo-cluster \
  --set serviceAccount.create=false \
  --set serviceAccount.name=aws-load-balancer-controller \
  --set region=us-west-2 \
  --set vpcId=$(aws eks describe-cluster --name k8s-demo-cluster --query "cluster.resourcesVpcConfig.vpcId" --output text)

cat << EOF | kubectl apply -f -
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: frontend-ingress
  namespace: k8s-demo
  annotations:
    alb.ingress.kubernetes.io/scheme: internet-facing
    alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}]'
spec:
  # spec.ingressClassName replaces the deprecated
  # kubernetes.io/ingress.class annotation (ALB controller v2+).
  ingressClassName: alb
  rules:
    - http:
        paths:
          - path: /*
            pathType: ImplementationSpecific
            backend:
              service:
                name: frontend-service
                port:
                  number: 8080
EOF

Once the ingress is created, get the ALB DNS name:
kubectl get ingress -n k8s-demoYou will see the EXTERNAL-IP as the ALB DNS.
Edit the frontend deployment:
kubectl -n k8s-demo set env deployment/frontend REACT_APP_APIHOSTPORT=http://<ALB_DNS>:8080Connect to one of the mongo pods to initialize replica set:
kubectl exec -it mongo-0 -n k8s-demo -- mongo
# In the mongo shell, run:
rs.initiate(
  {
    _id: "rs0",
    members: [
      { _id: 0, host: "mongo-0.mongo:27017" },
      { _id: 1, host: "mongo-1.mongo:27017" },
      { _id: 2, host: "mongo-2.mongo:27017" }
    ]
  }
)
exit

- Check all pods are running:
kubectl get pods -n k8s-demo

- Check services:

kubectl get svc -n k8s-demo

- Check ingress and ALB status:

kubectl get ingress -n k8s-demo

- Access the frontend app via the ALB DNS URL in your browser.
- Cluster created with eksctl
- MongoDB StatefulSet + Headless Service + Secret deployed
- API and frontend deployments + services deployed
- AWS Load Balancer Controller installed for ALB ingress
- Ingress created for frontend service to expose it via ALB
- MongoDB replica set initialized manually