Automate, deploy, and scale applications for Toronto's fast-paced tech environment
Duration: 14-18 weeks (self-paced)
Level: Intermediate to Advanced
Prerequisites: Linux/Unix knowledge, basic programming, Git version control
# Dockerfile for Toronto e-commerce app
# Multi-stage build: full dependency install for the build stage,
# pruned runtime dependencies in the production image.
FROM node:18-alpine AS builder

# Set working directory
WORKDIR /app

# Copy package manifests first so the dependency layer stays cached
COPY package*.json ./

# Install ALL dependencies — the build step needs devDependencies.
# (The previous `npm ci --only=production` would make `npm run build`
# fail whenever build tooling lives in devDependencies; the flag is
# also deprecated in favour of --omit=dev.)
RUN npm ci

# Copy source code
COPY . .

# Build the application
RUN npm run build

# Drop dev dependencies so only runtime packages are carried forward
RUN npm prune --omit=dev

# Production stage
FROM node:18-alpine AS production

# Create non-root user for security
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nextjs -u 1001

# Set working directory
WORKDIR /app

# Copy built application and production-only node_modules
COPY --from=builder --chown=nextjs:nodejs /app/dist ./dist
COPY --from=builder --chown=nextjs:nodejs /app/node_modules ./node_modules
COPY --from=builder --chown=nextjs:nodejs /app/package*.json ./

# Expose port
EXPOSE 3000

# Switch to non-root user
USER nextjs

# Health check for load balancers.
# node:18-alpine does not ship curl — use BusyBox wget instead,
# otherwise every health check fails and the container flaps unhealthy.
HEALTHCHECK --interval=30s --timeout=3s --start-period=30s --retries=3 \
    CMD wget -q --spider http://localhost:3000/health || exit 1

# Start the application
CMD ["node", "dist/server.js"]
# docker-compose.yml for Toronto tech startup
version: '3.8'

services:
  # Frontend React application
  frontend:
    build:
      context: ./frontend
      dockerfile: Dockerfile
    ports:
      - "3000:3000"
    environment:
      - NODE_ENV=production
      - REACT_APP_API_URL=http://backend:5000
      - REACT_APP_ENVIRONMENT=toronto-production
    depends_on:
      - backend
    networks:
      - toronto-network
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.frontend.rule=Host(`app.torontotech.ca`)"

  # Backend API service
  backend:
    build:
      context: ./backend
      dockerfile: Dockerfile
    ports:
      - "5000:5000"
    environment:
      # DB password is overridable via .env; the default only preserves
      # the previous dev behaviour — set POSTGRES_PASSWORD in real deployments.
      - DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD:-password}@postgres:5432/torontoapp
      - REDIS_URL=redis://redis:6379
      - JWT_SECRET=${JWT_SECRET}
      - STRIPE_SECRET_KEY=${STRIPE_SECRET_KEY}  # Canadian payments
      - CANADA_POST_API_KEY=${CANADA_POST_API_KEY}
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_started
    networks:
      - toronto-network
    volumes:
      - ./logs:/app/logs

  # PostgreSQL database
  postgres:
    image: postgres:15-alpine
    environment:
      - POSTGRES_DB=torontoapp
      - POSTGRES_USER=postgres
      # Same override as the backend's DATABASE_URL — keep the two in sync.
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-password}
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./database/init.sql:/docker-entrypoint-initdb.d/init.sql
    networks:
      - toronto-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Redis for caching and sessions
  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
    networks:
      - toronto-network
    command: redis-server --appendonly yes

  # Nginx reverse proxy
  nginx:
    image: nginx:alpine
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/ssl:/etc/nginx/ssl:ro
      - ./logs/nginx:/var/log/nginx
    depends_on:
      - frontend
      - backend
    networks:
      - toronto-network

  # Monitoring with Prometheus
  prometheus:
    image: prom/prometheus:latest
    ports:
      - "9090:9090"
    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
    networks:
      - toronto-network

  # Grafana for dashboards
  grafana:
    image: grafana/grafana:latest
    ports:
      - "3001:3000"
    environment:
      # Overridable admin password; default preserves previous behaviour.
      - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD:-torontotech2024}
    volumes:
      - grafana_data:/var/lib/grafana
      - ./monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards
      - ./monitoring/grafana/datasources:/etc/grafana/provisioning/datasources
    networks:
      - toronto-network

volumes:
  postgres_data:
  redis_data:
  prometheus_data:
  grafana_data:

networks:
  toronto-network:
    driver: bridge
Toronto Food Delivery Platform Containerization - Containerize a complete food delivery system with microservices for restaurants, delivery, payments, and notifications.
# toronto-app-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: toronto-app
  namespace: production
  labels:
    app: toronto-app
    version: "v1.0.0"
    environment: production
    region: toronto
spec:
  replicas: 5
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 2
      maxUnavailable: 1
  selector:
    matchLabels:
      app: toronto-app
  template:
    metadata:
      labels:
        app: toronto-app
        version: "v1.0.0"
    spec:
      containers:
        - name: toronto-app
          image: torontoregistry.azurecr.io/toronto-app:v1.0.0
          ports:
            - containerPort: 3000
          env:
            - name: NODE_ENV
              value: "production"
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: toronto-app-secrets
                  key: database-url
            - name: REDIS_URL
              valueFrom:
                configMapKeyRef:
                  name: toronto-app-config
                  key: redis-url
            - name: CANADA_POST_API_KEY
              valueFrom:
                secretKeyRef:
                  name: toronto-app-secrets
                  key: canada-post-api-key
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /health
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /ready
              port: 3000
            initialDelaySeconds: 5
            periodSeconds: 5
          volumeMounts:
            - name: app-logs
              mountPath: /app/logs
            - name: app-config
              mountPath: /app/config
      volumes:
        # NOTE(review): this PVC is mounted by 5 replicas spread across
        # nodes — the backing storage class must support ReadWriteMany;
        # confirm against the PVC definition.
        - name: app-logs
          persistentVolumeClaim:
            claimName: toronto-app-logs-pvc
        - name: app-config
          configMap:
            name: toronto-app-config
      nodeSelector:
        kubernetes.io/os: linux
        node.kubernetes.io/instance-type: Standard_B2s
      affinity:
        # Prefer spreading replicas across nodes for availability
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - toronto-app
                topologyKey: kubernetes.io/hostname
---
# Service for the Toronto app
apiVersion: v1
kind: Service
metadata:
  name: toronto-app-service
  namespace: production
  labels:
    app: toronto-app
spec:
  selector:
    app: toronto-app
  ports:
    - name: http
      port: 80
      targetPort: 3000
      protocol: TCP
  type: ClusterIP
---
# Horizontal Pod Autoscaler
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: toronto-app-hpa
  namespace: production
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: toronto-app
  minReplicas: 3
  maxReplicas: 20
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: 80
  behavior:
    # Scale up fast (double every 15s after a 60s window) …
    scaleUp:
      stabilizationWindowSeconds: 60
      policies:
        - type: Percent
          value: 100
          periodSeconds: 15
    # … but scale down conservatively (10%/min after 5 min stable)
    scaleDown:
      stabilizationWindowSeconds: 300
      policies:
        - type: Percent
          value: 10
          periodSeconds: 60
---
# Ingress for external access
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: toronto-app-ingress
  namespace: production
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/rate-limit: "100"
    nginx.ingress.kubernetes.io/rate-limit-window: "1m"
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
  # `kubernetes.io/ingress.class` annotation is deprecated since v1.18;
  # the ingressClassName field is the supported replacement.
  ingressClassName: nginx
  tls:
    - hosts:
        - app.torontotech.ca
        - api.torontotech.ca
      secretName: toronto-app-tls
  rules:
    - host: app.torontotech.ca
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: toronto-app-service
                port:
                  number: 80
    - host: api.torontotech.ca
      http:
        paths:
          - path: /api
            pathType: Prefix
            backend:
              service:
                name: toronto-api-service
                port:
                  number: 80
# toronto-app-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: toronto-app-config
  namespace: production
data:
  redis-url: "redis://redis-service:6379"
  environment: "toronto-production"
  timezone: "America/Toronto"
  currency: "CAD"
  language: "en-CA"
  # JSON payloads are literal block scalars so consumers receive them verbatim
  tax-rates: |
    {
      "ontario": {
        "hst": 0.13,
        "gst": 0.05,
        "pst": 0.08
      }
    }
  features: |
    {
      "canada_post_integration": true,
      "interac_payments": true,
      "french_language": true,
      "tax_calculation": true
    }
---
# Secrets (base64 encoded)
# WARNING(review): base64 is an encoding, not encryption — real secret
# material must never be committed to version control. Source these values
# from a secret store (Azure Key Vault / Sealed Secrets / SOPS) instead;
# the values below are placeholders.
apiVersion: v1
kind: Secret
metadata:
  name: toronto-app-secrets
  namespace: production
type: Opaque
data:
  database-url: cG9zdGdyZXNxbDovL3VzZXI6cGFzc3dvcmRAcG9zdGdyZXMtc2VydmljZTo1NDMyL3Rvcm9udG9hcHA=
  jwt-secret: dG9yb250by1qd3Qtc2VjcmV0LWtleS0yMDI0
  canada-post-api-key: Y2FuYWRhLXBvc3QtYXBpLWtleS1zZWNyZXQ=
  stripe-secret-key: c2tfdGVzdF9zdHJpcGVfc2VjcmV0X2tleQ==
  redis-password: cmVkaXNwYXNzd29yZDEyMw==
Toronto Banking Application K8s Cluster - Deploy a secure, scalable banking application on Kubernetes with high availability, auto-scaling, and disaster recovery.
# .github/workflows/toronto-app-deploy.yml
name: Toronto App CI/CD Pipeline

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main]

env:
  REGISTRY: torontoregistry.azurecr.io
  IMAGE_NAME: toronto-app
  AZURE_RESOURCE_GROUP: rg-toronto-tech
  AKS_CLUSTER_NAME: aks-toronto-prod
  NAMESPACE: production

jobs:
  # Code quality and security checks
  quality-checks:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0  # full history, required by SonarCloud blame data

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '18'
          cache: 'npm'

      - name: Install dependencies
        run: npm ci

      - name: Run ESLint
        run: npm run lint

      - name: Run Prettier check
        run: npm run format:check

      - name: Type checking
        run: npm run type-check

      - name: Security audit
        run: npm audit --audit-level moderate

      - name: OWASP Dependency Check
        uses: dependency-check/Dependency-Check_Action@main
        with:
          project: 'toronto-app'
          path: '.'
          format: 'ALL'

      - name: SonarCloud Scan
        uses: SonarSource/sonarcloud-github-action@master
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}

  # Run automated tests
  test:
    runs-on: ubuntu-latest
    needs: quality-checks
    services:
      postgres:
        image: postgres:15
        env:
          POSTGRES_PASSWORD: testpass
          POSTGRES_USER: testuser
          POSTGRES_DB: testdb
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432
      redis:
        image: redis:7-alpine
        options: >-
          --health-cmd "redis-cli ping"
          --health-interval 10s
          --health-timeout 3s
          --health-retries 5
        ports:
          - 6379:6379
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '18'
          cache: 'npm'

      - name: Install dependencies
        run: npm ci

      - name: Run unit tests
        run: npm run test:unit
        env:
          DATABASE_URL: postgresql://testuser:testpass@localhost:5432/testdb
          REDIS_URL: redis://localhost:6379

      - name: Run integration tests
        run: npm run test:integration
        env:
          DATABASE_URL: postgresql://testuser:testpass@localhost:5432/testdb
          REDIS_URL: redis://localhost:6379

      - name: E2E tests with Playwright
        run: |
          npx playwright install --with-deps
          npm run test:e2e

      - name: Upload test results
        # v3 of upload-artifact is deprecated and being shut down by GitHub
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: test-results
          path: |
            coverage/
            test-results/
            playwright-report/

  # Build and push Docker image
  build:
    runs-on: ubuntu-latest
    needs: test
    if: github.event_name == 'push'
    outputs:
      # A single immutable digest reference. The metadata action's `tags`
      # output is a multi-line list, which would corrupt the downstream
      # `sed` substitution in the deploy jobs.
      image-tag: ${{ steps.imageref.outputs.ref }}
      image-digest: ${{ steps.build.outputs.digest }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Login to Azure Container Registry
        uses: azure/docker-login@v1
        with:
          login-server: ${{ env.REGISTRY }}
          username: ${{ secrets.REGISTRY_USERNAME }}
          password: ${{ secrets.REGISTRY_PASSWORD }}

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=sha,prefix=sha-
            type=raw,value=latest,enable={{is_default_branch}}

      - name: Build and push Docker image
        id: build
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          build-args: |
            NODE_ENV=production
            BUILD_DATE=${{ github.event.head_commit.timestamp }}
            VCS_REF=${{ github.sha }}

      - name: Compute immutable image reference
        id: imageref
        run: echo "ref=${REGISTRY}/${IMAGE_NAME}@${DIGEST}" >> "$GITHUB_OUTPUT"
        env:
          DIGEST: ${{ steps.build.outputs.digest }}

      - name: Install Cosign
        uses: sigstore/cosign-installer@v3
        with:
          cosign-release: 'v2.0.0'

      - name: Sign container image
        run: |
          cosign sign --yes ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}@${{ steps.build.outputs.digest }}

  # Deploy to staging environment
  deploy-staging:
    runs-on: ubuntu-latest
    needs: build
    if: github.ref == 'refs/heads/develop'
    environment: staging
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Azure Login
        uses: azure/login@v1
        with:
          creds: ${{ secrets.AZURE_CREDENTIALS }}

      - name: Set up kubectl
        uses: azure/setup-kubectl@v3
        with:
          version: 'v1.28.0'

      - name: Get AKS credentials
        run: |
          az aks get-credentials --resource-group ${{ env.AZURE_RESOURCE_GROUP }} --name ${{ env.AKS_CLUSTER_NAME }}

      - name: Deploy to staging
        run: |
          # Update image in deployment manifest (single digest ref — sed-safe)
          sed -i 's|IMAGE_TAG|${{ needs.build.outputs.image-tag }}|g' k8s/staging/deployment.yaml
          # Apply Kubernetes manifests
          kubectl apply -f k8s/staging/ --namespace=staging
          # Wait for rollout
          kubectl rollout status deployment/toronto-app --namespace=staging --timeout=300s

      - name: Run smoke tests
        run: |
          sleep 30
          curl -f https://staging.torontotech.ca/health || exit 1

  # Deploy to production (manual approval required via the environment)
  deploy-production:
    runs-on: ubuntu-latest
    needs: build
    if: github.ref == 'refs/heads/main'
    environment: production
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Azure Login
        uses: azure/login@v1
        with:
          creds: ${{ secrets.AZURE_CREDENTIALS }}

      - name: Set up kubectl
        uses: azure/setup-kubectl@v3

      - name: Get AKS credentials
        run: |
          az aks get-credentials --resource-group ${{ env.AZURE_RESOURCE_GROUP }} --name ${{ env.AKS_CLUSTER_NAME }}

      - name: Blue-Green deployment to production
        run: |
          # Update image tag (single digest ref — sed-safe)
          sed -i 's|IMAGE_TAG|${{ needs.build.outputs.image-tag }}|g' k8s/production/deployment.yaml
          # Deploy to green environment
          kubectl apply -f k8s/production/ --namespace=production
          # Wait for green deployment
          kubectl rollout status deployment/toronto-app-green --namespace=production --timeout=600s
          # Health check green environment
          kubectl wait --for=condition=ready pod -l app=toronto-app-green --namespace=production --timeout=300s
          # Switch traffic to green (update service selector)
          kubectl patch service toronto-app-service --namespace=production -p '{"spec":{"selector":{"version":"green"}}}'
          # Wait before cleaning up blue
          sleep 120
          # Scale down blue deployment
          kubectl scale deployment toronto-app-blue --replicas=0 --namespace=production

      - name: Production smoke tests
        run: |
          sleep 60
          curl -f https://app.torontotech.ca/health || exit 1
          curl -f https://api.torontotech.ca/health || exit 1

      - name: Notify deployment success
        uses: 8398a7/action-slack@v3
        with:
          status: success
          text: "🚀 Toronto App successfully deployed to production!"
          webhook_url: ${{ secrets.SLACK_WEBHOOK }}

  # Cleanup on failure
  cleanup-on-failure:
    runs-on: ubuntu-latest
    needs: [deploy-staging, deploy-production]
    # `failure()` alone never fires here: exactly one of the two deploy jobs
    # is always skipped (branch-gated), and a skipped dependency prevents the
    # default status check from matching. Run whenever any needed job failed.
    if: always() && contains(needs.*.result, 'failure')
    steps:
      - name: Rollback on failure
        run: |
          echo "Deployment failed, rolling back..."
          # Add rollback commands here

      - name: Notify failure
        uses: 8398a7/action-slack@v3
        with:
          status: failure
          text: "❌ Toronto App deployment failed and was rolled back"
          webhook_url: ${{ secrets.SLACK_WEBHOOK }}
Toronto Fintech CI/CD Pipeline - Create a comprehensive CI/CD pipeline for a Toronto fintech application with security scanning, compliance checks, and multi-region deployments.
# main.tf - Toronto Tech Infrastructure
# Terraform and provider version constraints for the Toronto platform.
terraform {
  required_version = ">= 1.0"

  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "~> 3.80"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.23"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "~> 2.11"
    }
  }

  # Remote state lives in Azure Storage so the team shares one state file.
  backend "azurerm" {
    resource_group_name  = "rg-terraform-state"
    storage_account_name = "torontotechterraform"
    container_name       = "tfstate"
    key                  = "toronto-infrastructure.tfstate"
  }
}
# Azure provider configuration.
provider "azurerm" {
  features {
    resource_group {
      prevent_deletion_if_contains_resources = false
    }
    key_vault {
      purge_soft_delete_on_destroy    = true
      recover_soft_deleted_key_vaults = true
    }
  }
}
# Shared values for the Toronto deployment.
locals {
  environment = var.environment
  location    = "Canada Central" # Azure's Toronto-area region

  # Tags applied to every resource for cost tracking and compliance audits.
  common_tags = {
    Environment   = var.environment
    Project       = "Toronto Tech Platform"
    ManagedBy     = "Terraform"
    Department    = "Engineering"
    CostCenter    = "Technology"
    DataResidency = "Canada"
    Compliance    = "PIPEDA"
  }

  # Toronto-specific configuration
  toronto_config = {
    timezone            = "America/Toronto"
    currency            = "CAD"
    tax_jurisdiction    = "Ontario"
    data_classification = "Protected B"
  }
}
# Resource Group for Toronto Tech Platform
resource "azurerm_resource_group" "toronto_tech" {
  name     = "rg-toronto-tech-${var.environment}"
  location = local.location
  tags     = local.common_tags
}

# Virtual Network for Toronto region
resource "azurerm_virtual_network" "toronto_vnet" {
  name                = "vnet-toronto-tech-${var.environment}"
  address_space       = ["10.1.0.0/16"]
  location            = azurerm_resource_group.toronto_tech.location
  resource_group_name = azurerm_resource_group.toronto_tech.name
  tags                = local.common_tags
}

# Subnet hosting the AKS node pools
resource "azurerm_subnet" "aks_subnet" {
  name                 = "snet-aks-${var.environment}"
  resource_group_name  = azurerm_resource_group.toronto_tech.name
  virtual_network_name = azurerm_virtual_network.toronto_vnet.name
  address_prefixes     = ["10.1.1.0/24"]
}

# Subnet delegated to PostgreSQL Flexible Server (required for VNet injection)
resource "azurerm_subnet" "database_subnet" {
  name                 = "snet-database-${var.environment}"
  resource_group_name  = azurerm_resource_group.toronto_tech.name
  virtual_network_name = azurerm_virtual_network.toronto_vnet.name
  address_prefixes     = ["10.1.2.0/24"]

  delegation {
    name = "database-delegation"

    service_delegation {
      name = "Microsoft.DBforPostgreSQL/flexibleServers"
      actions = [
        "Microsoft.Network/virtualNetworks/subnets/join/action",
      ]
    }
  }
}
# Azure Kubernetes Service
resource "azurerm_kubernetes_cluster" "toronto_aks" {
  name                = "aks-toronto-tech-${var.environment}"
  location            = azurerm_resource_group.toronto_tech.location
  resource_group_name = azurerm_resource_group.toronto_tech.name
  dns_prefix          = "torontotech${var.environment}"
  kubernetes_version  = var.kubernetes_version

  default_node_pool {
    name                = "default"
    node_count          = var.node_count
    vm_size             = var.node_vm_size
    vnet_subnet_id      = azurerm_subnet.aks_subnet.id
    zones               = ["1", "2", "3"]
    enable_auto_scaling = true
    min_count           = 2
    max_count           = 10

    # Toronto-specific node configuration
    node_labels = {
      "region"      = "toronto"
      "environment" = var.environment
      "compliance"  = "pipeda"
    }

    tags = local.common_tags
  }

  identity {
    type = "SystemAssigned"
  }

  # `docker_bridge_cidr` is intentionally omitted: it was deprecated
  # (ignored by AKS) in azurerm 3.x and removed in 4.x.
  network_profile {
    network_plugin = "azure"
    network_policy = "calico"
    dns_service_ip = "10.2.0.10"
    service_cidr   = "10.2.0.0/24"
  }

  # Azure AD integration for Toronto team
  azure_active_directory_role_based_access_control {
    managed                = true
    admin_group_object_ids = [var.azure_ad_admin_group_id]
  }

  # Monitoring and logging
  oms_agent {
    log_analytics_workspace_id = azurerm_log_analytics_workspace.toronto_logs.id
  }

  tags = local.common_tags
}
# PostgreSQL Flexible Server for Toronto data
resource "azurerm_postgresql_flexible_server" "toronto_db" {
  name                = "psql-toronto-tech-${var.environment}"
  resource_group_name = azurerm_resource_group.toronto_tech.name
  location            = azurerm_resource_group.toronto_tech.location
  version             = "15"

  # Injected into the delegated database subnet (no public endpoint)
  delegated_subnet_id    = azurerm_subnet.database_subnet.id
  administrator_login    = var.db_admin_username
  administrator_password = var.db_admin_password
  zone                   = "1"

  storage_mb   = var.db_storage_mb
  storage_tier = "P30"
  sku_name     = var.db_sku_name

  backup_retention_days        = 35
  geo_redundant_backup_enabled = true

  # Canadian compliance settings
  tags = merge(local.common_tags, {
    "DataClassification" = "Protected B"
    "BackupRetention"    = "35days"
    "Encryption"         = "TDE"
  })
}
# Redis Cache for Toronto application
resource "azurerm_redis_cache" "toronto_redis" {
  name                = "redis-toronto-tech-${var.environment}"
  location            = azurerm_resource_group.toronto_tech.location
  resource_group_name = azurerm_resource_group.toronto_tech.name
  capacity            = var.redis_capacity
  family              = var.redis_family
  sku_name            = var.redis_sku

  # TLS-only access
  enable_non_ssl_port = false
  minimum_tls_version = "1.2"

  # Zone-redundant within the Canadian region
  zones = ["1", "2"]

  redis_configuration {
    enable_authentication         = true
    maxmemory_reserved            = 50
    maxmemory_delta               = 50
    maxmemory_policy              = "allkeys-lru"
    notify_keyspace_events        = "Ex"
    rdb_backup_enabled            = true
    rdb_backup_frequency          = 60
    rdb_backup_max_snapshot_count = 1
    rdb_storage_connection_string = azurerm_storage_account.toronto_storage.primary_blob_connection_string
  }

  tags = local.common_tags
}
# Storage Account for Toronto application data
resource "azurerm_storage_account" "toronto_storage" {
  name                     = "storontotech${random_string.storage_suffix.result}"
  resource_group_name      = azurerm_resource_group.toronto_tech.name
  location                 = azurerm_resource_group.toronto_tech.location
  account_tier             = "Standard"
  account_replication_type = "GRS" # Geo-redundant for disaster recovery

  # Security settings for Canadian compliance
  min_tls_version                 = "TLS1_2"
  enable_https_traffic_only       = true
  allow_nested_items_to_be_public = false

  blob_properties {
    delete_retention_policy {
      days = 30
    }
    container_delete_retention_policy {
      days = 30
    }
    versioning_enabled = true
  }

  tags = merge(local.common_tags, {
    "DataClassification" = "Protected B"
    "Encryption"         = "Microsoft-managed"
  })
}
# Log Analytics Workspace for monitoring
resource "azurerm_log_analytics_workspace" "toronto_logs" {
  name                = "log-toronto-tech-${var.environment}"
  location            = azurerm_resource_group.toronto_tech.location
  resource_group_name = azurerm_resource_group.toronto_tech.name
  sku                 = "PerGB2018"
  retention_in_days   = 90
  tags                = local.common_tags
}

# Application Insights for Toronto app monitoring (workspace-based)
resource "azurerm_application_insights" "toronto_insights" {
  name                = "appi-toronto-tech-${var.environment}"
  location            = azurerm_resource_group.toronto_tech.location
  resource_group_name = azurerm_resource_group.toronto_tech.name
  workspace_id        = azurerm_log_analytics_workspace.toronto_logs.id
  application_type    = "web"
  tags                = local.common_tags
}
# Key Vault for secrets management
resource "azurerm_key_vault" "toronto_kv" {
  name                = "kv-toronto-tech-${random_string.kv_suffix.result}"
  location            = azurerm_resource_group.toronto_tech.location
  resource_group_name = azurerm_resource_group.toronto_tech.name
  tenant_id           = data.azurerm_client_config.current.tenant_id
  sku_name            = "standard"

  # Canadian compliance settings.
  # NOTE(review): with purge protection on, the provider's
  # purge_soft_delete_on_destroy feature flag cannot purge this vault on
  # destroy — confirm that is the intended retention behaviour.
  purge_protection_enabled   = true
  soft_delete_retention_days = 90

  # Full secret management for the identity running Terraform
  access_policy {
    tenant_id = data.azurerm_client_config.current.tenant_id
    object_id = data.azurerm_client_config.current.object_id

    secret_permissions = [
      "Get", "List", "Set", "Delete", "Recover", "Backup", "Restore"
    ]
  }

  # Read-only access for the AKS kubelet identity
  access_policy {
    tenant_id = data.azurerm_client_config.current.tenant_id
    object_id = azurerm_kubernetes_cluster.toronto_aks.kubelet_identity[0].object_id

    secret_permissions = [
      "Get", "List"
    ]
  }

  tags = local.common_tags
}
# Random suffixes to keep globally-unique resource names collision-free
resource "random_string" "storage_suffix" {
  length  = 4
  special = false
  upper   = false
}

resource "random_string" "kv_suffix" {
  length  = 4
  special = false
  upper   = false
}

# Identity of the principal running Terraform (tenant/object IDs)
data "azurerm_client_config" "current" {}
# Outputs consumed by downstream modules and CI
output "aks_cluster_name" {
  description = "Name of the AKS cluster"
  value       = azurerm_kubernetes_cluster.toronto_aks.name
}

output "database_fqdn" {
  description = "FQDN of the PostgreSQL server"
  value       = azurerm_postgresql_flexible_server.toronto_db.fqdn
  sensitive   = true
}

output "redis_hostname" {
  description = "Hostname of the Redis cache"
  value       = azurerm_redis_cache.toronto_redis.hostname
}

output "key_vault_uri" {
  description = "URI of the Key Vault"
  value       = azurerm_key_vault.toronto_kv.vault_uri
}
Toronto Multi-Region Infrastructure - Design and deploy a complete multi-region infrastructure for a Toronto-based SaaS platform with disaster recovery and compliance requirements.
GitHub Showcase: Demonstrate infrastructure-as-code, CI/CD pipelines, monitoring dashboards, and automation scripts with real Toronto business scenarios.
Toronto DevOps Community: DevOps Toronto Meetup, Toronto Kubernetes Meetup, AWS User Group Toronto, and SRE Toronto.
Join Toronto's thriving DevOps community and build the infrastructure that powers Canada's digital transformation.