From 94b4f31102913f40afb97956fe969d9f44cfd813 Mon Sep 17 00:00:00 2001 From: Christian van Dijk Date: Mon, 23 Feb 2026 09:47:16 +0100 Subject: [PATCH] =?UTF-8?q?=F0=9F=8E=89=20initial=20commit?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .env.example | 24 + .github/workflows/ci.yml | 144 ++++ .gitignore | 2 + Makefile | 9 + Readme.md | 0 devices-api/Dockerfile | 16 + devices-api/Makefile | 39 + devices-api/app-standalone.lua | 724 ++++++++++++++++++ devices-api/db.lua | 124 +++ devices-api/log.lua | 16 + devices-api/migrations/001_create_devices.sql | 69 ++ devices-worker/Dockerfile | 13 + devices-worker/Makefile | 4 + devices-worker/db.lua | 33 + devices-worker/handlers/device_handler.lua | 44 ++ devices-worker/handlers/rating_handler.lua | 45 ++ devices-worker/handlers/review_handler.lua | 42 + devices-worker/log.lua | 15 + devices-worker/worker.lua | 111 +++ docker-compose.yml | 109 +++ frontend/.dockerignore | 8 + frontend/.idea/.gitignore | 10 + .../.idea/copilot.data.migration.agent.xml | 6 + frontend/.idea/copilot.data.migration.ask.xml | 6 + .../copilot.data.migration.ask2agent.xml | 6 + .../.idea/copilot.data.migration.edit.xml | 6 + frontend/.idea/encodings.xml | 4 + frontend/.idea/frontend.iml | 8 + frontend/.idea/modules.xml | 8 + frontend/Dockerfile | 28 + frontend/Makefile | 7 + frontend/alpinejs/.vscode/launch.json | 35 + frontend/alpinejs/.vscode/settings.json | 5 + frontend/alpinejs/config.json | 4 + frontend/alpinejs/deno.json | 8 + frontend/alpinejs/deno.lock | 120 +++ frontend/alpinejs/main.ts | 68 ++ frontend/alpinejs/public/alpine.min.js | 5 + frontend/alpinejs/public/config.js | 1 + frontend/alpinejs/public/favicon.ico | Bin 0 -> 699 bytes frontend/alpinejs/public/index.html | 14 + frontend/alpinejs/public/main.js | 449 +++++++++++ frontend/alpinejs/public/style.css | 432 +++++++++++ k8s/handheld-devices/Chart.yaml | 6 + .../templates/api-deployment.yaml | 61 ++ 
k8s/handheld-devices/templates/configmap.yaml | 14 + .../templates/frontend-deployment.yaml | 57 ++ k8s/handheld-devices/templates/ingress.yaml | 31 + k8s/handheld-devices/templates/postgres.yaml | 73 ++ k8s/handheld-devices/templates/redis.yaml | 37 + k8s/handheld-devices/templates/secret.yaml | 9 + .../templates/worker-deployment.yaml | 31 + k8s/handheld-devices/values.yaml | 80 ++ 53 files changed, 3220 insertions(+) create mode 100644 .env.example create mode 100644 .github/workflows/ci.yml create mode 100644 .gitignore create mode 100644 Makefile create mode 100644 Readme.md create mode 100644 devices-api/Dockerfile create mode 100644 devices-api/Makefile create mode 100644 devices-api/app-standalone.lua create mode 100644 devices-api/db.lua create mode 100644 devices-api/log.lua create mode 100644 devices-api/migrations/001_create_devices.sql create mode 100644 devices-worker/Dockerfile create mode 100644 devices-worker/Makefile create mode 100644 devices-worker/db.lua create mode 100644 devices-worker/handlers/device_handler.lua create mode 100644 devices-worker/handlers/rating_handler.lua create mode 100644 devices-worker/handlers/review_handler.lua create mode 100644 devices-worker/log.lua create mode 100644 devices-worker/worker.lua create mode 100644 docker-compose.yml create mode 100644 frontend/.dockerignore create mode 100644 frontend/.idea/.gitignore create mode 100644 frontend/.idea/copilot.data.migration.agent.xml create mode 100644 frontend/.idea/copilot.data.migration.ask.xml create mode 100644 frontend/.idea/copilot.data.migration.ask2agent.xml create mode 100644 frontend/.idea/copilot.data.migration.edit.xml create mode 100644 frontend/.idea/encodings.xml create mode 100644 frontend/.idea/frontend.iml create mode 100644 frontend/.idea/modules.xml create mode 100644 frontend/Dockerfile create mode 100644 frontend/Makefile create mode 100644 frontend/alpinejs/.vscode/launch.json create mode 100644 frontend/alpinejs/.vscode/settings.json create 
mode 100644 frontend/alpinejs/config.json create mode 100644 frontend/alpinejs/deno.json create mode 100644 frontend/alpinejs/deno.lock create mode 100644 frontend/alpinejs/main.ts create mode 100644 frontend/alpinejs/public/alpine.min.js create mode 100644 frontend/alpinejs/public/config.js create mode 100644 frontend/alpinejs/public/favicon.ico create mode 100644 frontend/alpinejs/public/index.html create mode 100644 frontend/alpinejs/public/main.js create mode 100644 frontend/alpinejs/public/style.css create mode 100644 k8s/handheld-devices/Chart.yaml create mode 100644 k8s/handheld-devices/templates/api-deployment.yaml create mode 100644 k8s/handheld-devices/templates/configmap.yaml create mode 100644 k8s/handheld-devices/templates/frontend-deployment.yaml create mode 100644 k8s/handheld-devices/templates/ingress.yaml create mode 100644 k8s/handheld-devices/templates/postgres.yaml create mode 100644 k8s/handheld-devices/templates/redis.yaml create mode 100644 k8s/handheld-devices/templates/secret.yaml create mode 100644 k8s/handheld-devices/templates/worker-deployment.yaml create mode 100644 k8s/handheld-devices/values.yaml diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..fe81c9d --- /dev/null +++ b/.env.example @@ -0,0 +1,24 @@ +# Copy this file to .env and fill in values. Never commit .env to version control. 
+# Used by: docker-compose (when env_file is set), local development + +# API +API_PORT=8080 + +# PostgreSQL (used by API and worker) +DB_HOST=postgres +DB_PORT=5432 +DB_NAME=handheld_devices +DB_USER=devices_user +DB_PASSWORD=your_db_password_here + +# Redis +REDIS_HOST=redis +REDIS_PORT=6379 + +# Frontend (browser connects to API) +API_URL=http://localhost:8080 + +# Optional: DB pool and timeouts +# DB_POOL_SIZE=10 +# DB_CONNECT_TIMEOUT_MS=5000 +# DB_QUERY_TIMEOUT_MS=10000 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..feaf2ef --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,144 @@ +name: CI + +on: + push: + branches: [main, master] + pull_request: + branches: [main, master] + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Setup Lua + uses: leafo/gh-actions-luarocks@v11 + with: + lua-version: "5.4" + + - name: Install luacheck + run: luarocks install luacheck + + - name: Luacheck devices-api + run: luacheck devices-api --codes + continue-on-error: true + + - name: Luacheck devices-worker + run: luacheck devices-worker --codes + continue-on-error: true + + - name: Setup Deno + uses: denoland/setup-deno@v2 + with: + deno-version: v2 + + - name: Deno lint frontend + run: cd frontend/alpinejs && deno lint main.ts + continue-on-error: true + + build: + name: Build + runs-on: ubuntu-latest + needs: lint + steps: + - uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build API image + uses: docker/build-push-action@v6 + with: + context: ./devices-api + push: false + load: true + tags: handheld-devices-api:${{ github.sha }} + + - name: Build worker image + uses: docker/build-push-action@v6 + with: + context: ./devices-worker + push: false + load: true + tags: handheld-devices-worker:${{ github.sha }} + + - name: Build frontend image + uses: docker/build-push-action@v6 + with: + context: ./frontend 
+ push: false + load: true + tags: handheld-devices-frontend:${{ github.sha }} + + test: + name: Test + runs-on: ubuntu-latest + needs: build + services: + postgres: + image: postgres:15-alpine + env: + POSTGRES_DB: handheld_devices + POSTGRES_USER: devices_user + POSTGRES_PASSWORD: devices_password + ports: + - 5432:5432 + options: >- + --health-cmd "pg_isready -U devices_user -d handheld_devices" + --health-interval 5s + --health-timeout 5s + --health-retries 5 + + redis: + image: redis:7-alpine + ports: + - 6379:6379 + options: >- + --health-cmd "redis-cli ping" + --health-interval 5s + --health-timeout 5s + --health-retries 5 + + steps: + - uses: actions/checkout@v4 + + - name: Set up Lua + uses: leafo/gh-actions-luarocks@v11 + with: + lua-version: "5.4" + + - name: Install dependencies + run: | + luarocks install lua-cjson + luarocks install luasocket + luarocks install pgmoon + luarocks install redis-lua + luarocks install luaossl + + - name: Run API + run: | + cd devices-api + export DB_HOST=localhost DB_PORT=5432 DB_NAME=handheld_devices DB_USER=devices_user DB_PASSWORD=devices_password + export REDIS_HOST=localhost REDIS_PORT=6379 + lua app-standalone.lua & + API_PID=$! 
+ sleep 15 + curl -f http://localhost:8080/health/ready || (kill $API_PID 2>/dev/null; exit 1) + curl -f http://localhost:8080/health/live || (kill $API_PID 2>/dev/null; exit 1) + kill $API_PID 2>/dev/null || true + + helm-lint: + name: Helm Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Helm + uses: azure/setup-helm@v4 + with: + version: "v3.14.0" + + - name: Helm lint + run: helm lint k8s/handheld-devices diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e408f6c --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +.env +postgres_data/ diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..706d609 --- /dev/null +++ b/Makefile @@ -0,0 +1,9 @@ +.PHONY: generate + +install-requirements: + brew install protobuf lua luarocks && luarocks install protobuf && luarocks install grpc + +verify-install: + protoc --version + lua -v + luarocks list | grep protobuf diff --git a/Readme.md b/Readme.md new file mode 100644 index 0000000..e69de29 diff --git a/devices-api/Dockerfile b/devices-api/Dockerfile new file mode 100644 index 0000000..2ec124b --- /dev/null +++ b/devices-api/Dockerfile @@ -0,0 +1,16 @@ +FROM nickblah/lua:5.4-luarocks-alpine + +RUN apk add --no-cache gcc musl-dev make libpq git curl wget linux-headers pkgconfig + +RUN luarocks install lua-cjson +RUN luarocks install luasocket +RUN luarocks install pgmoon +RUN luarocks install redis-lua +RUN luarocks install luaossl + +WORKDIR /app +COPY . 
/app + +EXPOSE 8080 + +CMD ["lua", "app-standalone.lua"] diff --git a/devices-api/Makefile b/devices-api/Makefile new file mode 100644 index 0000000..4df2da1 --- /dev/null +++ b/devices-api/Makefile @@ -0,0 +1,39 @@ +.PHONY: init-db dev build clean logs down + +init-db: + docker-compose exec postgres psql -U devices_user -d handheld_devices -f /docker-entrypoint-initdb.d/001_create_devices.sql + +build: + docker-compose build + +dev: + docker-compose up -d + +logs: + docker-compose logs -f api + +logs-worker: + docker-compose logs -f worker + +logs-all: + docker-compose logs -f + +down: + docker-compose down + +down-volumes: + docker-compose down -v + +shell-postgres: + docker-compose exec postgres psql -U devices_user -d handheld_devices + +shell-api: + docker-compose exec api sh + +shell-worker: + docker-compose exec worker sh + +clean: down-volumes + +status: + docker-compose ps diff --git a/devices-api/app-standalone.lua b/devices-api/app-standalone.lua new file mode 100644 index 0000000..df29572 --- /dev/null +++ b/devices-api/app-standalone.lua @@ -0,0 +1,724 @@ +#!/usr/bin/env lua + +local socket = require("socket") +local cjson = require("cjson") +local db = require("db") +local log = require("log") + +-- Optional dependencies +local redis +pcall(function() redis = require("redis") end) + +local digest +pcall(function() digest = require("openssl.digest") end) + +local app = {} +app.port = tonumber(os.getenv("API_PORT")) or 8080 +app.host = "0.0.0.0" + +-- Database configuration (from db.lua) +local DB_HOST = os.getenv("DB_HOST") or "localhost" +local DB_PORT = tonumber(os.getenv("DB_PORT")) or 5432 +local DB_NAME = os.getenv("DB_NAME") or "handheld_devices" + +-- Redis configuration +local REDIS_HOST = os.getenv("REDIS_HOST") or "127.0.0.1" +local REDIS_PORT = tonumber(os.getenv("REDIS_PORT")) or 6379 + +-- Redis client with retry +local function get_redis_connection() + if not redis then return nil end + local attempts = 3 + for i = 1, attempts do + local 
ok, red = pcall(redis.connect, REDIS_HOST, REDIS_PORT) + if ok and red then return red end + if i < attempts then + socket.sleep(math.min(2 ^ i * 0.1, 2)) + end + end + return nil +end + +-- Redis ping for health +local function redis_ping() + local red = get_redis_connection() + if not red then return false end + local ok, res = pcall(red.ping, red) + if ok and res == "PONG" then return true end + return false +end + +-- Ensure tables exist +local function init_db() + local ok, err = db.with_retry(function() + return db.with_connection(function(conn) + conn:query([[ + CREATE TABLE IF NOT EXISTS devices ( + id SERIAL PRIMARY KEY, + name VARCHAR(255) NOT NULL, + manufacturer VARCHAR(255) NOT NULL, + release_year INTEGER, + cpu VARCHAR(255), + ram_mb INTEGER, + storage_mb INTEGER, + display_size VARCHAR(50), + battery_hours REAL, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ); + + CREATE TABLE IF NOT EXISTS ratings ( + id SERIAL PRIMARY KEY, + device_id INTEGER NOT NULL REFERENCES devices(id) ON DELETE CASCADE, + user_id VARCHAR(255) NOT NULL, + score INTEGER CHECK (score >= 1 AND score <= 5), + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ); + + CREATE TABLE IF NOT EXISTS reviews ( + id SERIAL PRIMARY KEY, + device_id INTEGER NOT NULL REFERENCES devices(id) ON DELETE CASCADE, + user_id VARCHAR(255) NOT NULL, + content TEXT NOT NULL, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ); + ]]) + return true + end) + end) + if not ok then + error("Failed to init DB: " .. 
tostring(err)) + end +end + +-- Seed initial data +local function seed_db() + db.with_connection(function(conn) + local res, err = conn:query("SELECT COUNT(*) as total FROM devices") + local count = 0 + if res and res[1] then + count = tonumber(res[1].total) or 0 + end + + if count == 0 then + log.info("Seeding initial devices", { component = "seed" }) + local devices = { + { + name = "Steam Deck", + manufacturer = "Valve", + release_year = 2022, + cpu = "AMD Zen 2", + ram_mb = 16384, + storage_mb = 524288, + display_size = "7-inch", + battery_hours = 4.0 + }, + { + name = "Nintendo Switch", + manufacturer = "Nintendo", + release_year = 2017, + cpu = "Nvidia Tegra X1", + ram_mb = 4096, + storage_mb = 32768, + display_size = "6.2-inch", + battery_hours = 5.5 + }, + { + name = "ROG Ally", + manufacturer = "ASUS", + release_year = 2023, + cpu = "AMD Ryzen Z1 Extreme", + ram_mb = 16384, + storage_mb = 524288, + display_size = "7-inch", + battery_hours = 3.5 + } + } + + for _, device in ipairs(devices) do + conn:query( + "INSERT INTO devices (name, manufacturer, release_year, cpu, ram_mb, storage_mb, display_size, battery_hours) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)", + device.name, + device.manufacturer, + device.release_year, + device.cpu, + device.ram_mb, + device.storage_mb, + device.display_size, + device.battery_hours + ) + end + log.info("Seeding completed", { component = "seed" }) + end + end) +end + +-- Publish to Redis if available (with retry) +local function publish_event(event_type, data, request_id) + for attempt = 1, 3 do + local red = get_redis_connection() + if red then + local event = { + event_type = event_type, + timestamp = os.time(), + request_id = request_id, + } + for k, v in pairs(data) do + event[k] = v + end + + local event_json = cjson.encode(event) + + -- Push to queue (worker will consume from this using reliable pattern) + red:lpush("devices:events:queue", event_json) + + -- Also publish to Pub/Sub for immediate processing (WebSockets) + 
local count = red:publish("devices:events", event_json) + + log.info("Event published", { event_type = event_type, subscribers = count, request_id = request_id }) + red:quit() + return + end + if attempt < 3 then socket.sleep(math.min(2 ^ attempt * 0.1, 2)) end + end + log.warn("Failed to publish event after retries", { event_type = event_type, request_id = request_id }) +end + + +-- Helper to check if a value is JSON null +local function is_json_null(val) + return val == nil or val == cjson.null +end + +-- WebSocket Utils +local function sha1(data) + if digest then + return digest.new("sha1"):final(data) + end + -- Fallback: if luaossl is not available, we can't do a proper handshake + -- In a production app, we should ensure it is available. + -- NOTE: This fallback doesn't actually produce a SHA1 hash, it just returns data. + -- The WebSocket handshake will fail if digest is not available. + print("[WS] Warning: openssl.digest not available, SHA1 handshake will fail") + return data +end + +local function b64(data) + -- Minimal Base64 + local b='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' + return ((data:gsub('.', function(x) + local r,b='',x:byte() + for i=8,1,-1 do r=r..(b%2^i-b%2^(i-1)>0 and '1' or '0') end + return r; + end)..'0000'):gsub('%d%d%d?%d?%d?%d?', function(x) + if (#x < 6) then return '' end + local c=0 + for i=1,6 do c=c+(x:sub(i,i)=='1' and 2^(6-i) or 0) end + return b:sub(c+1,c+1) + end)..({ '', '==', '=' })[#data%3+1]) +end + +local function encode_ws_frame(payload) + local header = string.char(0x81) -- FIN + Opcode 1 (text) + local len = #payload + if len <= 125 then + header = header .. string.char(len) + elseif len <= 65535 then + header = header .. string.char(126) .. string.char(math.floor(len / 256)) .. string.char(len % 256) + else + -- 64-bit length not implemented for simplicity + header = header .. string.char(127) .. string.rep(string.char(0), 4) .. + string.char(math.floor(len / 16777216) % 256) .. 
+ string.char(math.floor(len / 65536) % 256) .. + string.char(math.floor(len / 256) % 256) .. + string.char(len % 256) + end + return header .. payload +end + +-- Device Model +local Device = {} + +function Device.all(limit, offset) + limit = limit or 10 + offset = offset or 0 + + local res = db.with_connection(function(conn) + return conn:query("SELECT * FROM devices ORDER BY id DESC LIMIT $1 OFFSET $2", limit, offset) + end) + return res or {} +end + +function Device.find(id) + local cache_key = "device:" .. id + local red = get_redis_connection() + + if red then + local cached = red:get(cache_key) + if cached then + return cjson.decode(cached) + end + end + + local res = db.with_connection(function(conn) + return conn:query("SELECT * FROM devices WHERE id = $1", tonumber(id)) + end) + local row = res and res[1] or nil + + if row and red then + red:setex(cache_key, 300, cjson.encode(row)) + end + + return row +end + +function Device.create(data, request_id) + local res = db.with_connection(function(conn) + return conn:query( + "INSERT INTO devices (name, manufacturer, release_year, cpu, ram_mb, storage_mb, display_size, battery_hours) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING id", + data.name, + data.manufacturer, + (not is_json_null(data.release_year)) and tonumber(data.release_year) or nil, + (not is_json_null(data.cpu)) and data.cpu or nil, + (not is_json_null(data.ram_mb)) and tonumber(data.ram_mb) or nil, + (not is_json_null(data.storage_mb)) and tonumber(data.storage_mb) or nil, + (not is_json_null(data.display_size)) and data.display_size or nil, + (not is_json_null(data.battery_hours)) and tonumber(data.battery_hours) or nil + ) + end) + local row = res and res[1] + local device_id = row and tonumber(row.id) or nil + + if device_id then + publish_event("DevicePublished", { device_id = device_id, device_name = data.name }, request_id) + return Device.find(device_id) + end + + return nil +end + +function Device.update(id, data, request_id) + local 
updated = false + db.with_connection(function(conn) + local updates = {} + local pg = conn + if not is_json_null(data.name) then table.insert(updates, "name = " .. pg:escape_literal(data.name)) end + if not is_json_null(data.manufacturer) then table.insert(updates, "manufacturer = " .. pg:escape_literal(data.manufacturer)) end + if not is_json_null(data.release_year) then table.insert(updates, "release_year = " .. tonumber(data.release_year)) end + if not is_json_null(data.cpu) then table.insert(updates, "cpu = " .. pg:escape_literal(data.cpu)) end + if not is_json_null(data.ram_mb) then table.insert(updates, "ram_mb = " .. tonumber(data.ram_mb)) end + if not is_json_null(data.storage_mb) then table.insert(updates, "storage_mb = " .. tonumber(data.storage_mb)) end + if not is_json_null(data.display_size) then table.insert(updates, "display_size = " .. pg:escape_literal(data.display_size)) end + if not is_json_null(data.battery_hours) then table.insert(updates, "battery_hours = " .. tonumber(data.battery_hours)) end + + if #updates > 0 then + table.insert(updates, "updated_at = CURRENT_TIMESTAMP") + conn:query("UPDATE devices SET " .. table.concat(updates, ", ") .. " WHERE id = $1", tonumber(id)) + updated = true + end + end) + + local red = get_redis_connection() + if red then + red:del("device:" .. id) + end + + local device = Device.find(id) + if device and updated then + publish_event("DeviceUpdated", { device_id = tonumber(id), device_name = device.name }, request_id) + end + return device +end + +function Device.delete(id, request_id) + local device = Device.find(id) + db.with_connection(function(conn) + conn:query("DELETE FROM devices WHERE id = $1", tonumber(id)) + end) + + local red = get_redis_connection() + if red then + red:del("device:" .. 
id) + end + + if device then + publish_event("DeviceDeleted", { device_id = tonumber(id), device_name = device.name }, request_id) + end + + return true +end + +function Device.get_count() + local res = db.with_connection(function(conn) + return conn:query("SELECT COUNT(*) as total FROM devices") + end) + if res and res[1] then + return tonumber(res[1].total) or 0 + end + return 0 +end + +-- HTTP Request Parser +local function parse_request(request_line) + local method, path, version = request_line:match("^(%w+)%s+(%S+)%s+(%S+)") + return method, path, version +end + +local function parse_headers(client) + local headers = {} + while true do + -- Use a small timeout for individual header lines to handle slow clients + -- or cases where headers are partially sent. + client:settimeout(0.1) + local line, err = client:receive("*l") + if not line or line == "" then break end + local key, value = line:match("^([^:]+):%s*(.*)$") + if key then + headers[key:lower()] = value + end + end + client:settimeout(0) -- Back to non-blocking + return headers +end + +local function parse_query_string(query_str) + local params = {} + if query_str and query_str ~= "" then + for key, value in query_str:gmatch("([^=&]+)=([^&]*)") do + params[key] = value + end + end + return params +end + +local function parse_path(full_path) + local path, query = full_path:match("^([^?]+)%??(.*)$") + return path or full_path, query or "" +end + +local function generate_request_id() + return string.format("%x-%x-%x", math.random(0, 0xffff), math.random(0, 0xffff), os.time()) +end + +-- HTTP Response Builder +local function build_response(status, body, content_type, request_id) + content_type = content_type or "application/json" + local extra_headers = "" + if request_id then + extra_headers = "X-Request-ID: " .. request_id .. "\r\n" + end + local response = string.format( + "HTTP/1.1 %s\r\n" .. + "Content-Type: %s\r\n" .. + "Content-Length: %d\r\n" .. + "%s" .. + "Access-Control-Allow-Origin: *\r\n" .. 
+ "Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS\r\n" .. + "Access-Control-Allow-Headers: Content-Type, X-Request-ID\r\n" .. + "Connection: close\r\n" .. + "\r\n" .. + "%s", + status, content_type, #body, extra_headers, body + ) + return response +end + +-- Route Handlers +local function handle_get_devices(query_params) + local page = tonumber(query_params.page) or 1 + local per_page = 10 + local offset = (page - 1) * per_page + + local devices = Device.all(per_page, offset) + local total = Device.get_count() + + return "200 OK", cjson.encode({ + data = devices, + total = total, + page = page, + per_page = per_page + }) +end + +local function handle_post_devices(json_data, request_id) + if not json_data.name or not json_data.manufacturer then + return "400 Bad Request", cjson.encode({ error = "Missing required fields: name, manufacturer" }) + end + + local device = Device.create(json_data, request_id) + if device then + return "201 Created", cjson.encode(device) + else + return "500 Internal Server Error", cjson.encode({ error = "Failed to create device" }) + end +end + +local function handle_get_device(id) + local device = Device.find(id) + if device then + return "200 OK", cjson.encode(device) + else + return "404 Not Found", cjson.encode({ error = "Device not found" }) + end +end + +local function handle_put_device(id, json_data, request_id) + local device = Device.update(id, json_data, request_id) + if device then + return "200 OK", cjson.encode(device) + else + return "404 Not Found", cjson.encode({ error = "Device not found" }) + end +end + +local function handle_delete_device(id, request_id) + Device.delete(id, request_id) + return "200 OK", cjson.encode({ success = true }) +end + +-- Health: liveness (process alive) +local function handle_health_live() + return "200 OK", cjson.encode({ status = "ok" }) +end + +-- Health: readiness (DB + Redis OK) +local function handle_health_ready() + local db_ok = db.ping() + local redis_ok = redis_ping() 
+ if db_ok and redis_ok then + return "200 OK", cjson.encode({ status = "ok", db = "ok", redis = "ok" }) + end + local details = { db = db_ok and "ok" or "fail", redis = redis_ok and "ok" or "fail" } + return "503 Service Unavailable", cjson.encode({ status = "degraded", details = details }) +end + +-- Main Request Handler +local function handle_request_with_headers(client, request_line, headers) + local method, full_path = parse_request(request_line) + local path, query_str = parse_path(full_path) + local query_params = parse_query_string(query_str) + + local request_id = headers["x-request-id"] or generate_request_id() + + -- Read body if POST/PUT + local body = "" + if method == "POST" or method == "PUT" then + local content_length = tonumber(headers["content-length"]) or 0 + if content_length > 0 then + body = client:receive(content_length) + end + end + + -- Parse JSON + local json_data = {} + if body ~= "" then + local ok, data = pcall(cjson.decode, body) + if ok then json_data = data end + end + + -- Handle CORS preflight + if method == "OPTIONS" then + client:send(build_response("200 OK", "", nil, request_id)) + client:close() + return + end + + -- Route handling + local status, response_body = "404 Not Found", cjson.encode({ error = "Not found" }) + + if method == "GET" and path == "/devices" then + status, response_body = handle_get_devices(query_params) + elseif method == "POST" and path == "/devices" then + status, response_body = handle_post_devices(json_data, request_id) + elseif method == "GET" and path:match("^/devices/%d+$") then + local id = path:match("/devices/(%d+)") + status, response_body = handle_get_device(id) + elseif method == "PUT" and path:match("^/devices/%d+$") then + local id = path:match("/devices/(%d+)") + status, response_body = handle_put_device(id, json_data, request_id) + elseif method == "DELETE" and path:match("^/devices/%d+$") then + local id = path:match("/devices/(%d+)") + status, response_body = handle_delete_device(id, 
request_id) + elseif method == "GET" and path == "/health" then + status, response_body = "200 OK", cjson.encode({ status = "ok" }) + elseif method == "GET" and path == "/health/live" then + status, response_body = handle_health_live() + elseif method == "GET" and path == "/health/ready" then + status, response_body = handle_health_ready() + end + + log.info("Request", { + method = method, + path = path, + status = status:match("^(%d+)") or "?", + request_id = request_id, + }) + + -- Send response + local response = build_response(status, response_body, nil, request_id) + client:send(response) + client:close() +end + +-- Start server +function app.start() + log.info("Handheld Devices API Server starting", { + component = "api", + port = app.port, + db = DB_NAME .. "@" .. DB_HOST .. ":" .. DB_PORT, + redis = redis and "enabled" or "disabled", + }) + + init_db() + seed_db() + + local server = socket.bind(app.host, app.port) + if not server then + error("Failed to bind to " .. app.host .. ":" .. app.port) + end + server:settimeout(0) -- Non-blocking + + log.info("Server started successfully", { component = "api" }) + + local clients = {} + local ws_clients = {} + + -- Background Redis subscriber + local function connect_subscriber() + local red = get_redis_connection() + if red then + -- Note: redis-lua's subscribe() sets the connection into a subscription state. 
+ local ok, err = pcall(function() return red:subscribe("devices:events") end) + if ok then + -- Set the underlying socket to a very small timeout for non-blocking feel + pcall(function() + if red.network and red.network.socket then + red.network.socket:settimeout(0.001) + end + end) + log.info("Redis subscriber connected", { component = "redis" }) + else + log.error("Redis subscriber failed", { component = "redis", err = tostring(err) }) + red = nil + end + end + return red + end + + local red_sub = connect_subscriber() + local last_sub_reconnect = os.time() + + while true do + local client = server:accept() + if client then + client:settimeout(0) + table.insert(clients, { socket = client }) + end + + -- Reconnect subscriber if lost + if not red_sub and os.time() - last_sub_reconnect > 5 then + red_sub = connect_subscriber() + last_sub_reconnect = os.time() + end + + -- Handle HTTP clients + local to_remove = {} + for i, c in ipairs(clients) do + local line, err = c.socket:receive("*l") + if line then + local method, full_path = parse_request(line) + local headers = parse_headers(c.socket) + + if headers["upgrade"] == "websocket" then + -- Handle WebSocket Handshake + local key = headers["sec-websocket-key"] + if key then + local sha1_key = sha1(key .. "258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + local accept = b64(sha1_key) + local response = "HTTP/1.1 101 Switching Protocols\r\n" .. + "Upgrade: websocket\r\n" .. + "Connection: Upgrade\r\n" .. + "Sec-WebSocket-Accept: " .. accept .. 
"\r\n\r\n" + c.socket:send(response) + table.insert(ws_clients, c.socket) + log.info("WebSocket client connected", { component = "ws", total = #ws_clients }) + else + c.socket:close() + end + table.insert(to_remove, i) + else + handle_request_with_headers(c.socket, line, headers) + table.insert(to_remove, i) + end + elseif err == "closed" then + table.insert(to_remove, i) + end + end + for i = #to_remove, 1, -1 do table.remove(clients, to_remove[i]) end + + -- Handle Redis Messages -> Send to WS + -- redis-lua has no read_reply; pub/sub messages come as multibulk: [kind, channel, payload] + if red_sub then + local ok, err = pcall(function() + local sock = red_sub.network.socket + if not sock then return end + sock:settimeout(0.001) + local line, serr = sock:receive("*l") + if not line then + if serr ~= "timeout" then + error(serr or "socket read failed") + end + return + end + -- Parse RESP: *3\r\n means 3-element array + local prefix = line:sub(1, 1) + if prefix == "*" then + local count = tonumber(line:sub(2)) + if count and count >= 3 then + local parts = {} + for i = 1, count do + local bline, berr = sock:receive("*l") + if not bline then error(berr or "incomplete") end + if bline:sub(1, 1) == "$" then + local len = tonumber(bline:sub(2)) + local bulk = len > 0 and sock:receive(len + 2) or "" + if bulk then + parts[i] = len > 0 and bulk:sub(1, -3) or "" + end + end + end + -- Pub/sub message: ["message", channel, payload] + if parts[1] == "message" and parts[3] then + local payload = parts[3] + log.info("WebSocket broadcast", { component = "ws", payload_len = #tostring(payload) }) + local frame = encode_ws_frame(payload) + local closed_ws = {} + for i, ws in ipairs(ws_clients) do + local _, send_err = ws:send(frame) + if send_err then table.insert(closed_ws, i) end + end + for i = #closed_ws, 1, -1 do table.remove(ws_clients, closed_ws[i]) end + end + end + end + end) + + if not ok then + local err_str = tostring(err) + if not string.find(err_str, "timeout") 
-- PostgreSQL connection pool built on pgmoon (luasocket transport).
--
-- Exposes:
--   get_connection / release_connection - manual pool checkout/checkin
--   with_connection(fn)                 - run fn(conn), auto-release on return or error
--   with_retry(fn, max_attempts)        - exponential backoff on transient errors
--   ping()                              - health probe (SELECT 1)
local pgmoon = require("pgmoon")
local cjson = require("cjson") -- NOTE(review): appears unused in this module; confirm before removing

-- Configuration from environment, with local-development defaults that match
-- docker-compose.yml.
local DB_HOST = os.getenv("DB_HOST") or "localhost"
local DB_PORT = tonumber(os.getenv("DB_PORT")) or 5432
local DB_NAME = os.getenv("DB_NAME") or "handheld_devices"
local DB_USER = os.getenv("DB_USER") or "devices_user"
local DB_PASSWORD = os.getenv("DB_PASSWORD") or "devices_password"
local DB_POOL_SIZE = tonumber(os.getenv("DB_POOL_SIZE")) or 10
local DB_CONNECT_TIMEOUT_MS = tonumber(os.getenv("DB_CONNECT_TIMEOUT_MS")) or 5000
local DB_QUERY_TIMEOUT_MS = tonumber(os.getenv("DB_QUERY_TIMEOUT_MS")) or 10000

local config = {
    host = DB_HOST,
    port = tostring(DB_PORT),
    database = DB_NAME,
    user = DB_USER,
    password = DB_PASSWORD,
    socket_type = "luasocket",
}

-- Simple bounded pool: connections move between `available` and `in_use`.
-- Not thread-safe; fine for this single-threaded luasocket server.
local pool = {
    available = {},
    in_use = {},
    max_size = DB_POOL_SIZE,
}

-- Open a fresh pgmoon connection. Returns conn or nil, err.
-- A shorter timeout is used for the handshake, then relaxed for queries.
local function create_connection()
    local pg = pgmoon.new(config)
    pg:settimeout(DB_CONNECT_TIMEOUT_MS)
    local ok, err = pg:connect()
    if not ok then
        return nil, err
    end
    pg:settimeout(DB_QUERY_TIMEOUT_MS)
    return pg
end

-- Check a connection out of the pool, creating one if under max_size.
-- Returns conn or nil, err ("connection pool exhausted" when at capacity).
local function get_connection()
    local conn = table.remove(pool.available)
    if conn then
        table.insert(pool.in_use, conn)
        return conn
    end
    if #pool.in_use >= pool.max_size then
        return nil, "connection pool exhausted"
    end
    local pg, err = create_connection()
    if not pg then
        return nil, err
    end
    table.insert(pool.in_use, pg)
    return pg
end

-- Return a connection to the pool; disconnects it if the pool is full.
-- Unknown connections (not in in_use) are ignored.
local function release_connection(conn)
    for i, c in ipairs(pool.in_use) do
        if c == conn then
            table.remove(pool.in_use, i)
            if #pool.available < pool.max_size then
                table.insert(pool.available, conn)
            else
                pcall(function() conn:disconnect() end)
            end
            return
        end
    end
end

-- Execute fn(conn) with a pooled connection; the connection is released even
-- when fn raises. Returns fn's (result, err), or nil, err on failure.
local function with_connection(fn)
    local conn, err = get_connection()
    if not conn then
        return nil, err
    end
    local ok, result, result_err = pcall(function()
        return fn(conn)
    end)
    release_connection(conn)
    if not ok then
        -- pcall failed: `result` holds the raised error value.
        return nil, result
    end
    return result, result_err
end

-- Retry fn (which returns result, err) with exponential backoff, but only for
-- transient failures whose message mentions "connection" or "timeout".
-- Backoff: min(2^attempt * 100ms, 5s). Defaults to 3 attempts.
local function with_retry(fn, max_attempts)
    max_attempts = max_attempts or 3
    local last_err
    for attempt = 1, max_attempts do
        local result, err = fn()
        -- Fix: pgmoon (and with_connection's pcall path) can surface
        -- non-string error values; calling :match() on those would raise.
        -- Normalize with tostring before classifying the error.
        local err_str = err ~= nil and tostring(err) or nil
        if result ~= nil or (err_str and not (err_str:match("connection") or err_str:match("timeout"))) then
            return result, err
        end
        last_err = err
        if attempt < max_attempts then
            local delay_ms = math.min(2 ^ attempt * 100, 5000)
            require("socket").sleep(delay_ms / 1000)
        end
    end
    return nil, last_err
end

-- Liveness probe: issues SELECT 1 on a pooled connection.
-- Returns true, or nil, err.
local function ping()
    return with_connection(function(conn)
        local res, err = conn:query("SELECT 1")
        if res and type(res) == "table" and (res[1] or #res >= 1) then
            return true
        end
        return nil, err or "ping failed"
    end)
end

return {
    config = config,
    get_connection = get_connection,
    release_connection = release_connection,
    with_connection = with_connection,
    with_retry = with_retry,
    ping = ping,
}
-- Migration 001: base schema for the handheld-devices service.
-- Applied automatically by the postgres container via /docker-entrypoint-initdb.d
-- (see docker-compose.yml). All statements are idempotent (IF NOT EXISTS).

-- Create extensions
-- NOTE(review): no uuid columns are defined below — confirm uuid-ossp is needed.
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";

-- Create devices table: one row per handheld device model.
CREATE TABLE IF NOT EXISTS devices (
    id SERIAL PRIMARY KEY,
    name VARCHAR(255) NOT NULL,
    manufacturer VARCHAR(255) NOT NULL,
    release_year INTEGER,
    cpu VARCHAR(255),
    ram_mb INTEGER,
    storage_mb INTEGER,
    display_size VARCHAR(50),
    battery_hours REAL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    -- NOTE(review): nothing here keeps updated_at current; the application
    -- must set it on UPDATE (no trigger is defined).
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Create ratings table: 1-5 star scores, cascaded away with their device.
CREATE TABLE IF NOT EXISTS ratings (
    id SERIAL PRIMARY KEY,
    device_id INTEGER NOT NULL REFERENCES devices(id) ON DELETE CASCADE,
    user_id VARCHAR(255) NOT NULL,
    score INTEGER CHECK (score >= 1 AND score <= 5),
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Create reviews table: free-text reviews, cascaded away with their device.
CREATE TABLE IF NOT EXISTS reviews (
    id SERIAL PRIMARY KEY,
    device_id INTEGER NOT NULL REFERENCES devices(id) ON DELETE CASCADE,
    user_id VARCHAR(255) NOT NULL,
    content TEXT NOT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Create device_events table for worker logging
-- (append-only audit rows written by devices-worker; deliberately no FK so
-- events survive device deletion).
CREATE TABLE IF NOT EXISTS device_events (
    id SERIAL PRIMARY KEY,
    device_id INTEGER,
    device_name VARCHAR(255),
    event_type VARCHAR(100),
    processed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Create rating_events table (worker audit log for rating events).
CREATE TABLE IF NOT EXISTS rating_events (
    id SERIAL PRIMARY KEY,
    device_id INTEGER,
    user_id VARCHAR(255),
    score INTEGER,
    processed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Create review_events table (worker audit log for review events).
CREATE TABLE IF NOT EXISTS review_events (
    id SERIAL PRIMARY KEY,
    device_id INTEGER,
    user_id VARCHAR(255),
    processed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Create indexes for better performance
CREATE INDEX IF NOT EXISTS idx_devices_name ON devices(name);
CREATE INDEX IF NOT EXISTS idx_devices_manufacturer ON devices(manufacturer);
CREATE INDEX IF NOT EXISTS idx_ratings_device_id ON ratings(device_id);
CREATE INDEX IF NOT EXISTS idx_ratings_user_id ON ratings(user_id);
CREATE INDEX IF NOT EXISTS idx_reviews_device_id ON reviews(device_id);
CREATE INDEX IF NOT EXISTS idx_reviews_user_id ON reviews(user_id);
-- Handles device lifecycle events (DevicePublished/DeviceUpdated/DeviceDeleted)
-- by appending an audit row to device_events.
local db = require("db")
local log = require("log")

local DeviceHandler = {}

-- Process one decoded device event.
-- Raises on any DB failure so the worker's retry/DLQ logic in
-- process_event() can take over; returns nothing on success.
function DeviceHandler.handle(event)
    local conn, err = db.get_connection()
    if not conn then
        error("Database connection failed: " .. tostring(err))
    end

    local ok, handler_err = pcall(function()
        -- Defensive: migration 001 normally creates this table, but the
        -- worker may start against a fresh database.
        conn:query([[
            CREATE TABLE IF NOT EXISTS device_events (
                id SERIAL PRIMARY KEY,
                device_id INTEGER,
                device_name VARCHAR(255),
                event_type VARCHAR(100),
                processed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ]])

        -- Fix: pgmoon's documented query() API takes a single SQL string and
        -- has no $n positional binding, so the original extra arguments were
        -- ignored and the literal "$1, $2, $3" reached the server. Build the
        -- statement with escape_literal instead (verify against the pinned
        -- pgmoon version).
        local device_id = tonumber(event.device_id) or 0
        local insert_sql = string.format(
            "INSERT INTO device_events (device_id, device_name, event_type) VALUES (%s, %s, %s)",
            device_id,
            conn:escape_literal(tostring(event.device_name or "")),
            conn:escape_literal(tostring(event.event_type or ""))
        )
        -- pgmoon returns nil, err on failure; the original ignored it.
        local res, qerr = conn:query(insert_sql)
        if not res then
            error("insert into device_events failed: " .. tostring(qerr))
        end
    end)

    -- Always close the connection, success or not.
    conn:disconnect()

    if not ok then
        error(handler_err)
    end

    log.info("Device event logged", {
        component = "device_handler",
        device_name = event.device_name,
        request_id = event.request_id,
    })
end

return DeviceHandler
-- Handles ReviewPublished events by appending an audit row to review_events.
local db = require("db")
local log = require("log")

local ReviewHandler = {}

-- Process one decoded review event.
-- Raises on any DB failure so the worker's retry/DLQ logic in
-- process_event() can take over; returns nothing on success.
function ReviewHandler.handle(event)
    local conn, err = db.get_connection()
    if not conn then
        error("Database connection failed: " .. tostring(err))
    end

    local ok, handler_err = pcall(function()
        -- Defensive: migration 001 normally creates this table, but the
        -- worker may start against a fresh database.
        conn:query([[
            CREATE TABLE IF NOT EXISTS review_events (
                id SERIAL PRIMARY KEY,
                device_id INTEGER,
                user_id VARCHAR(255),
                processed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ]])

        -- Fix: pgmoon's documented query() API takes a single SQL string and
        -- has no $n positional binding, so the original extra arguments were
        -- ignored and the literal "$1, $2" reached the server. Build the
        -- statement with escape_literal instead (verify against the pinned
        -- pgmoon version).
        local device_id = tonumber(event.device_id) or 0
        local insert_sql = string.format(
            "INSERT INTO review_events (device_id, user_id) VALUES (%s, %s)",
            device_id,
            conn:escape_literal(tostring(event.user_id or ""))
        )
        -- pgmoon returns nil, err on failure; the original ignored it.
        local res, qerr = conn:query(insert_sql)
        if not res then
            error("insert into review_events failed: " .. tostring(qerr))
        end
    end)

    -- Always close the connection, success or not.
    conn:disconnect()

    if not ok then
        error(handler_err)
    end

    log.info("Review event logged", {
        component = "review_handler",
        device_id = event.device_id,
        request_id = event.request_id,
    })
end

return ReviewHandler
device_handler.handle, + DeviceDeleted = device_handler.handle, + DeviceUpdated = device_handler.handle, + RatingPublished = rating_handler.handle, + ReviewPublished = review_handler.handle, + UserCreated = function(event) + log.info("User created", { component = "worker", user_id = event.user_id }) + end, +} + +local function move_to_dlq(red, event_json, reason) + red:lpush(DLQ_KEY, cjson.encode({ + event = event_json, + reason = reason, + failed_at = os.time(), + })) + log.warn("Event moved to DLQ", { reason = reason, dlq_key = DLQ_KEY }) +end + +local function process_event(red, event_json) + local ok_decode, event = pcall(cjson.decode, event_json) + if not ok_decode or not event then + return false, "decode_failed" + end + + local handler = handlers[event.event_type] + if not handler then + return true, nil -- Acknowledge unknown event types + end + + local attempt = 0 + local last_err + + while attempt < MAX_RETRIES do + attempt = attempt + 1 + local success, handler_err = pcall(handler, event) + if success then + return true, nil + end + last_err = tostring(handler_err) + if attempt < MAX_RETRIES then + local delay = math.min(2 ^ attempt * 0.5, 10) + log.warn("Handler failed, retrying", { + component = "worker", + attempt = attempt, + max_retries = MAX_RETRIES, + delay = delay, + err = last_err, + }) + socket.sleep(delay) + end + end + + return false, last_err +end + +local function run_worker() + while true do + local ok, err = pcall(function() + log.info("Connecting to Redis", { component = "worker" }) + + local red = redis.connect(REDIS_HOST, REDIS_PORT) + log.info("Connected to Redis, waiting for events", { component = "worker" }) + + while true do + local event_json = red:brpoplpush("devices:events:queue", "devices:events:processing", 0) + + if event_json then + local success, reason = process_event(red, event_json) + + if success then + red:lrem("devices:events:processing", 1, event_json) + log.info("Event processed", { component = "worker" }) + else + 
# Local development stack: Lua API + Postgres + Redis + Deno frontend + Lua worker.
services:
  # HTTP API (devices-api, Lua). Source is bind-mounted over the image's /app
  # so edits on the host take effect without a rebuild.
  api:
    build:
      context: ./devices-api
      dockerfile: Dockerfile
    container_name: handheld-api
    ports:
      - "8080:8080"
    volumes:
      - ./devices-api:/app
    env_file:
      - .env
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    # These override any matching keys from .env.
    environment:
      - DB_HOST=postgres
      - DB_PORT=5432
      - DB_NAME=handheld_devices
      - DB_USER=devices_user
      - DB_PASSWORD=devices_password
      - REDIS_HOST=redis
      - REDIS_PORT=6379
    healthcheck:
      test: ["CMD-SHELL", "wget -q -O - http://localhost:8080/health/ready || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 10s

  postgres:
    image: postgres:15-alpine
    container_name: handheld-postgres
    ports:
      - "5432:5432"
    environment:
      POSTGRES_DB: handheld_devices
      POSTGRES_USER: devices_user
      POSTGRES_PASSWORD: devices_password
    volumes:
      # Host bind mount keeps data across `docker compose down`.
      - ./postgres_data:/var/lib/postgresql/data
      # Schema migration runs automatically on first initialization only.
      - ./devices-api/migrations/001_create_devices.sql:/docker-entrypoint-initdb.d/001_create_devices.sql
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U devices_user -d handheld_devices"]
      interval: 5s
      timeout: 3s
      retries: 10

  redis:
    image: redis:7-alpine
    container_name: handheld-redis
    ports:
      - "6379:6379"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 5

  # Deno/Alpine.js frontend; serves the browser app on 8090.
  frontend:
    build:
      context: ./frontend
      dockerfile: Dockerfile
    container_name: handheld-frontend
    ports:
      - "8090:8090"
    env_file:
      - .env
    environment:
      # URL the BROWSER uses to reach the API (host-published port),
      # not a container-to-container address.
      - API_URL=http://localhost:8080
    depends_on:
      api:
        condition: service_healthy
    healthcheck:
      test: ["CMD-SHELL", "wget -q -O - http://localhost:8090/health || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 5s

  # Background event consumer (devices-worker, Lua). No ports; it only talks
  # to Redis and Postgres. Source bind-mounted like the API.
  worker:
    build:
      context: ./devices-worker
      dockerfile: Dockerfile
    container_name: handheld-worker
    volumes:
      - ./devices-worker:/app
    env_file:
      - .env
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    environment:
      - DB_HOST=postgres
      - DB_PORT=5432
      - DB_NAME=handheld_devices
      - DB_USER=devices_user
      - DB_PASSWORD=devices_password
      - REDIS_HOST=redis
      - REDIS_PORT=6379
    # Process-liveness check only (no HTTP endpoint to probe).
    healthcheck:
      test: ["CMD-SHELL", "pgrep -f 'worker.lua' || exit 1"]
      interval: 10s
      timeout: 3s
      retries: 3
FROM denoland/deno:alpine

# wget is required by the compose/k8s healthcheck that probes /health.
RUN apk add --no-cache wget

# The port the application will listen on
ARG PORT=8090
ENV PORT=${PORT}

WORKDIR /app

# Copy the frontend files
# Since the Dockerfile is in the root and files are in alpinejs/, we copy from there
COPY alpinejs/ .

# Ensure the deno user owns the directory so it can write public/config.js
RUN chown -R deno:deno /app

USER deno

# Cache dependencies (after USER deno so the cache lands in that user's home)
RUN deno cache main.ts

# Expose the port
EXPOSE ${PORT}

# Run the application
# We include --allow-write because main.ts writes public/config.js on startup
# NOTE(review): CMD omits "deno" — this relies on the base image's ENTRYPOINT
# invoking deno; confirm against the denoland/deno image docs.
CMD ["run", "--allow-net", "--allow-read", "--allow-write", "--allow-env", "--allow-run", "main.ts"]
"http://localhost:8080", + "port": 8090 +} diff --git a/frontend/alpinejs/deno.json b/frontend/alpinejs/deno.json new file mode 100644 index 0000000..b34891c --- /dev/null +++ b/frontend/alpinejs/deno.json @@ -0,0 +1,8 @@ +{ + "tasks": { + "start": "deno run --allow-net --allow-read --allow-write --allow-env --allow-run --watch main.ts" + }, + "imports": { + "@dx/alpine-server": "jsr:@dx/alpine-server" + } +} diff --git a/frontend/alpinejs/deno.lock b/frontend/alpinejs/deno.lock new file mode 100644 index 0000000..42d6197 --- /dev/null +++ b/frontend/alpinejs/deno.lock @@ -0,0 +1,120 @@ +{ + "version": "5", + "specifiers": { + "jsr:@b-fuze/deno-dom@~0.1.56": "0.1.56", + "jsr:@dx/alpine-server@*": "0.1.13", + "jsr:@oak/commons@1": "1.0.1", + "jsr:@oak/oak@^17.2.0": "17.2.0", + "jsr:@std/assert@1": "1.0.16", + "jsr:@std/bytes@1": "1.0.6", + "jsr:@std/crypto@1": "1.0.5", + "jsr:@std/encoding@1": "1.0.10", + "jsr:@std/encoding@^1.0.10": "1.0.10", + "jsr:@std/fmt@^1.0.5": "1.0.9", + "jsr:@std/fmt@^1.0.9": "1.0.9", + "jsr:@std/fs@^1.0.11": "1.0.22", + "jsr:@std/http@1": "1.0.24", + "jsr:@std/internal@^1.0.12": "1.0.12", + "jsr:@std/io@~0.225.2": "0.225.2", + "jsr:@std/log@~0.224.14": "0.224.14", + "jsr:@std/media-types@1": "1.1.0", + "jsr:@std/path@1": "1.1.4", + "jsr:@std/path@^1.1.4": "1.1.4", + "npm:path-to-regexp@^6.3.0": "6.3.0" + }, + "jsr": { + "@b-fuze/deno-dom@0.1.56": { + "integrity": "8030e2dc1d8750f1682b53462ab893d9c3470f2287feecbe22f44a88c54ab148" + }, + "@dx/alpine-server@0.1.13": { + "integrity": "64e294e2064b76f8ebd7f8e08f4351068c27915bb4fc8cd8136a63d8bb517774", + "dependencies": [ + "jsr:@b-fuze/deno-dom", + "jsr:@oak/oak", + "jsr:@std/fmt@^1.0.9", + "jsr:@std/log", + "jsr:@std/path@^1.1.4" + ] + }, + "@oak/commons@1.0.1": { + "integrity": "889ff210f0b4292591721be07244ecb1b5c118742f5273c70cf30d7cd4184d0c", + "dependencies": [ + "jsr:@std/assert", + "jsr:@std/bytes", + "jsr:@std/crypto", + "jsr:@std/encoding@1", + "jsr:@std/http", + "jsr:@std/media-types" 
+ ] + }, + "@oak/oak@17.2.0": { + "integrity": "938537a92fc7922a46a9984696c65fb189c9baad164416ac3e336768a9ff0cd1", + "dependencies": [ + "jsr:@oak/commons", + "jsr:@std/assert", + "jsr:@std/bytes", + "jsr:@std/http", + "jsr:@std/media-types", + "jsr:@std/path@1", + "npm:path-to-regexp" + ] + }, + "@std/assert@1.0.16": { + "integrity": "6a7272ed1eaa77defe76e5ff63ca705d9c495077e2d5fd0126d2b53fc5bd6532" + }, + "@std/bytes@1.0.6": { + "integrity": "f6ac6adbd8ccd99314045f5703e23af0a68d7f7e58364b47d2c7f408aeb5820a" + }, + "@std/crypto@1.0.5": { + "integrity": "0dcfbb319fe0bba1bd3af904ceb4f948cde1b92979ec1614528380ed308a3b40" + }, + "@std/encoding@1.0.10": { + "integrity": "8783c6384a2d13abd5e9e87a7ae0520a30e9f56aeeaa3bdf910a3eaaf5c811a1" + }, + "@std/fmt@1.0.9": { + "integrity": "2487343e8899fb2be5d0e3d35013e54477ada198854e52dd05ed0422eddcabe0" + }, + "@std/fs@1.0.22": { + "integrity": "de0f277a58a867147a8a01bc1b181d0dfa80bfddba8c9cf2bacd6747bcec9308" + }, + "@std/http@1.0.24": { + "integrity": "4dd59afd7cfd6e2e96e175b67a5a829b449ae55f08575721ec691e5d85d886d4", + "dependencies": [ + "jsr:@std/encoding@^1.0.10" + ] + }, + "@std/internal@1.0.12": { + "integrity": "972a634fd5bc34b242024402972cd5143eac68d8dffaca5eaa4dba30ce17b027" + }, + "@std/io@0.225.2": { + "integrity": "3c740cd4ee4c082e6cfc86458f47e2ab7cb353dc6234d5e9b1f91a2de5f4d6c7" + }, + "@std/log@0.224.14": { + "integrity": "257f7adceee3b53bb2bc86c7242e7d1bc59729e57d4981c4a7e5b876c808f05e", + "dependencies": [ + "jsr:@std/fmt@^1.0.5", + "jsr:@std/fs", + "jsr:@std/io" + ] + }, + "@std/media-types@1.1.0": { + "integrity": "c9d093f0c05c3512932b330e3cc1fe1d627b301db33a4c2c2185c02471d6eaa4" + }, + "@std/path@1.1.4": { + "integrity": "1d2d43f39efb1b42f0b1882a25486647cb851481862dc7313390b2bb044314b5", + "dependencies": [ + "jsr:@std/internal" + ] + } + }, + "npm": { + "path-to-regexp@6.3.0": { + "integrity": "sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==" + } + }, + 
"workspace": { + "dependencies": [ + "jsr:@dx/alpine-server@*" + ] + } +} diff --git a/frontend/alpinejs/main.ts b/frontend/alpinejs/main.ts new file mode 100644 index 0000000..794c13b --- /dev/null +++ b/frontend/alpinejs/main.ts @@ -0,0 +1,68 @@ +import { AlpineApp } from '@dx/alpine-server'; +import { Router } from 'jsr:@oak/oak'; + +let config = { + api_url: Deno.env.get('API_URL') || 'http://localhost:8080', + port: parseInt(Deno.env.get('PORT') || '8090') +}; + +try { + const configFile = await Deno.readTextFile('./config.json'); + const fileConfig = JSON.parse(configFile); + config = { ...config, ...fileConfig }; +} catch (e) { + console.warn('Could not read config.json, using defaults', e.message); +} + +const app = new AlpineApp({ + app: { + dev: true, + staticFilesPath: './public', + }, + oak: { + listenOptions: { port: config.port }, + }, +}); + +const healthRouter = new Router(); +healthRouter.get('/health', (ctx) => { + ctx.response.body = { status: 'ok' }; + ctx.response.status = 200; +}); +app.append(healthRouter); + +app.use(async (ctx, next) => { + await next(); + const contentType = ctx.response.headers.get('content-type') || ''; + if (contentType.includes('text/html')) { + let csp = ctx.response.headers.get('Content-Security-Policy'); + if (!csp) { + // securityHeaders might not have set it yet if it runs after us in the call stack + // but in run(), securityHeaders is app.use()ed BEFORE user middlewares. + // So when we are here (after await next()), securityHeaders has already finished its await next() + // and set the headers. 
+ csp = [ + "default-src 'self'", + "base-uri 'self'", + "object-src 'none'", + "frame-ancestors 'none'", + "script-src 'self' 'unsafe-eval'", + "style-src 'self'", + "img-src 'self' data:", + "font-src 'self'", + "connect-src 'self'", + "media-src 'self'", + ].join('; '); + } + const updatedCsp = csp.replace("connect-src 'self'", `connect-src 'self' ${config.api_url} ws:`); + ctx.response.headers.set('Content-Security-Policy', updatedCsp); + } +}); + +console.log(`URL: http://localhost:${config.port}`); +console.log(`API: ${config.api_url}`); + +// Create config.js file in public directory +await Deno.writeTextFile('./public/config.js', `window.APP_CONFIG = ${JSON.stringify(config)};`); + +await app.run(); diff --git a/frontend/alpinejs/public/alpine.min.js b/frontend/alpinejs/public/alpine.min.js new file mode 100644 index 0000000..8e339a2 --- /dev/null +++ b/frontend/alpinejs/public/alpine.min.js @@ -0,0 +1,5 @@ +(()=>{var ie=!1,oe=!1,Y=[],se=-1,ae=!1;function We(t){Bn(t)}function Ge(){ae=!0}function Je(){ae=!1,Xe()}function Bn(t){Y.includes(t)||Y.push(t),Xe()}function Ye(t){let e=Y.indexOf(t);e!==-1&&e>se&&Y.splice(e,1)}function Xe(){if(!oe&&!ie){if(ae)return;ie=!0,queueMicrotask(zn)}}function zn(){ie=!1,oe=!0;for(let t=0;tt.effect(e,{scheduler:r=>{ce?We(r):r()}}),le=t.raw}function ue(t){D=t}function tr(t){let e=()=>{};return[n=>{let i=D(n);return t._x_effects||(t._x_effects=new Set,t._x_runEffects=()=>{t._x_effects.forEach(o=>o())}),t._x_effects.add(i),e=()=>{i!==void 0&&(t._x_effects.delete(i),z(i))},i},()=>{e()}]}function Ot(t,e){let r=!0,n,i=D(()=>{let o=t();if(JSON.stringify(o),!r&&(typeof o=="object"||o!==n)){let s=n;queueMicrotask(()=>{e(o,s)})}n=o,r=!1});return()=>z(i)}async function er(t){Ge();try{await t(),await Promise.resolve()}finally{Je()}}var rr=[],nr=[],ir=[];function or(t){ir.push(t)}function it(t,e){typeof e=="function"?(t._x_cleanups||(t._x_cleanups=[]),t._x_cleanups.push(e)):(e=t,nr.push(e))}function Tt(t){rr.push(t)}function 
Rt(t,e,r){t._x_attributeCleanups||(t._x_attributeCleanups={}),t._x_attributeCleanups[e]||(t._x_attributeCleanups[e]=[]),t._x_attributeCleanups[e].push(r)}function fe(t,e){t._x_attributeCleanups&&Object.entries(t._x_attributeCleanups).forEach(([r,n])=>{(e===void 0||e.includes(r))&&(n.forEach(i=>i()),delete t._x_attributeCleanups[r])})}function sr(t){for(t._x_effects?.forEach(Ye);t._x_cleanups?.length;)t._x_cleanups.pop()()}var de=new MutationObserver(_e),pe=!1;function mt(){de.observe(document,{subtree:!0,childList:!0,attributes:!0,attributeOldValue:!0}),pe=!0}function me(){Hn(),de.disconnect(),pe=!1}var pt=[];function Hn(){let t=de.takeRecords();pt.push(()=>t.length>0&&_e(t));let e=pt.length;queueMicrotask(()=>{if(pt.length===e)for(;pt.length>0;)pt.shift()()})}function h(t){if(!pe)return t();me();let e=t();return mt(),e}var he=!1,Ct=[];function ar(){he=!0}function cr(){he=!1,_e(Ct),Ct=[]}function _e(t){if(he){Ct=Ct.concat(t);return}let e=[],r=new Set,n=new Map,i=new Map;for(let o=0;o{s.nodeType===1&&s._x_marker&&r.add(s)}),t[o].addedNodes.forEach(s=>{if(s.nodeType===1){if(r.has(s)){r.delete(s);return}s._x_marker||e.push(s)}})),t[o].type==="attributes")){let s=t[o].target,a=t[o].attributeName,c=t[o].oldValue,l=()=>{n.has(s)||n.set(s,[]),n.get(s).push({name:a,value:s.getAttribute(a)})},u=()=>{i.has(s)||i.set(s,[]),i.get(s).push(a)};s.hasAttribute(a)&&c===null?l():s.hasAttribute(a)?(u(),l()):u()}i.forEach((o,s)=>{fe(s,o)}),n.forEach((o,s)=>{rr.forEach(a=>a(s,o))});for(let o of r)e.some(s=>s.contains(o))||nr.forEach(s=>s(o));for(let o of e)o.isConnected&&ir.forEach(s=>s(o));e=null,r=null,n=null,i=null}function Mt(t){return I(H(t))}function P(t,e,r){return t._x_dataStack=[e,...H(r||t)],()=>{t._x_dataStack=t._x_dataStack.filter(n=>n!==e)}}function H(t){return t._x_dataStack?t._x_dataStack:typeof ShadowRoot=="function"&&t instanceof ShadowRoot?H(t.host):t.parentNode?H(t.parentNode):[]}function I(t){return new Proxy({objects:t},Kn)}var Kn={ownKeys({objects:t}){return 
Array.from(new Set(t.flatMap(e=>Object.keys(e))))},has({objects:t},e){return e==Symbol.unscopables?!1:t.some(r=>Object.prototype.hasOwnProperty.call(r,e)||Reflect.has(r,e))},get({objects:t},e,r){return e=="toJSON"?Vn:Reflect.get(t.find(n=>Reflect.has(n,e))||{},e,r)},set({objects:t},e,r,n){let i=t.find(s=>Object.prototype.hasOwnProperty.call(s,e))||t[t.length-1],o=Object.getOwnPropertyDescriptor(i,e);return o?.set&&o?.get?o.set.call(n,r)||!0:Reflect.set(i,e,r)}};function Vn(){return Reflect.ownKeys(this).reduce((e,r)=>(e[r]=Reflect.get(this,r),e),{})}function ot(t){let e=n=>typeof n=="object"&&!Array.isArray(n)&&n!==null,r=(n,i="")=>{Object.entries(Object.getOwnPropertyDescriptors(n)).forEach(([o,{value:s,enumerable:a}])=>{if(a===!1||s===void 0||typeof s=="object"&&s!==null&&s.__v_skip)return;let c=i===""?o:`${i}.${o}`;typeof s=="object"&&s!==null&&s._x_interceptor?n[o]=s.initialize(t,c,o):e(s)&&s!==n&&!(s instanceof Element)&&r(s,c)})};return r(t)}function Nt(t,e=()=>{}){let r={initialValue:void 0,_x_interceptor:!0,initialize(n,i,o){return t(this.initialValue,()=>Un(n,i),s=>ge(n,i,s),i,o)}};return e(r),n=>{if(typeof n=="object"&&n!==null&&n._x_interceptor){let i=r.initialize.bind(r);r.initialize=(o,s,a)=>{let c=n.initialize(o,s,a);return r.initialValue=c,i(o,s,a)}}else r.initialValue=n;return r}}function Un(t,e){return e.split(".").reduce((r,n)=>r[n],t)}function ge(t,e,r){if(typeof e=="string"&&(e=e.split(".")),e.length===1)t[e[0]]=r;else{if(e.length===0)throw error;return t[e[0]]||(t[e[0]]={}),ge(t[e[0]],e.slice(1),r)}}var lr={};function y(t,e){lr[t]=e}function U(t,e){let r=qn(e);return Object.entries(lr).forEach(([n,i])=>{Object.defineProperty(t,`$${n}`,{get(){return i(e,r)},enumerable:!1})}),t}function qn(t){let[e,r]=xe(t),n={interceptor:Nt,...e};return it(t,r),n}function ur(t,e,r,...n){try{return r(...n)}catch(i){st(i,t,e)}}function st(...t){return fr(...t)}var fr=Wn;function dr(t){fr=t}function Wn(t,e,r=void 0){t=Object.assign(t??{message:"No error message 
given."},{el:e,expression:r}),console.warn(`Alpine Expression Error: ${t.message} + +${r?'Expression: "'+r+`" + +`:""}`,e),setTimeout(()=>{throw t},0)}var at=!0;function kt(t){let e=at;at=!1;let r=t();return at=e,r}function N(t,e,r={}){let n;return x(t,e)(i=>n=i,r),n}function x(...t){return pr(...t)}var pr=be;function mr(t){pr=t}var hr;function _r(t){hr=t}function be(t,e){let r={};U(r,t);let n=[r,...H(t)],i=typeof e=="function"?Gn(n,e):Yn(n,e,t);return ur.bind(null,t,e,i)}function Gn(t,e){return(r=()=>{},{scope:n={},params:i=[],context:o}={})=>{if(!at){ht(r,e,I([n,...t]),i);return}let s=e.apply(I([n,...t]),i);ht(r,s)}}var ye={};function Jn(t,e){if(ye[t])return ye[t];let r=Object.getPrototypeOf(async function(){}).constructor,n=/^[\n\s]*if.*\(.*\)/.test(t.trim())||/^(let|const)\s/.test(t.trim())?`(async()=>{ ${t} })()`:t,o=(()=>{try{let s=new r(["__self","scope"],`with (scope) { __self.result = ${n} }; __self.finished = true; return __self.result;`);return Object.defineProperty(s,"name",{value:`[Alpine] ${t}`}),s}catch(s){return st(s,e,t),Promise.resolve()}})();return ye[t]=o,o}function Yn(t,e,r){let n=Jn(e,r);return(i=()=>{},{scope:o={},params:s=[],context:a}={})=>{n.result=void 0,n.finished=!1;let c=I([o,...t]);if(typeof n=="function"){let l=n.call(a,n,c).catch(u=>st(u,r,e));n.finished?(ht(i,n.result,c,s,r),n.result=void 0):l.then(u=>{ht(i,u,c,s,r)}).catch(u=>st(u,r,e)).finally(()=>n.result=void 0)}}}function ht(t,e,r,n,i){if(at&&typeof e=="function"){let o=e.apply(r,n);o instanceof Promise?o.then(s=>ht(t,s,r,n)).catch(s=>st(s,i,e)):t(o)}else typeof e=="object"&&e instanceof Promise?e.then(o=>t(o)):t(e)}function gr(...t){return hr(...t)}function xr(t,e,r={}){let n={};U(n,t);let i=[n,...H(t)],o=I([r.scope??{},...i]),s=r.params??[];if(e.includes("await")){let a=Object.getPrototypeOf(async function(){}).constructor,c=/^[\n\s]*if.*\(.*\)/.test(e.trim())||/^(let|const)\s/.test(e.trim())?`(async()=>{ ${e} })()`:e;return new a(["scope"],`with (scope) { let __result = 
${c}; return __result }`).call(r.context,o)}else{let a=/^[\n\s]*if.*\(.*\)/.test(e.trim())||/^(let|const)\s/.test(e.trim())?`(()=>{ ${e} })()`:e,l=new Function(["scope"],`with (scope) { let __result = ${a}; return __result }`).call(r.context,o);return typeof l=="function"&&at?l.apply(o,s):l}}var ve="x-";function T(t=""){return ve+t}function yr(t){ve=t}var Dt={};function d(t,e){return Dt[t]=e,{before(r){if(!Dt[r]){console.warn(String.raw`Cannot find directive \`${r}\`. \`${t}\` will use the default order of execution`);return}let n=X.indexOf(r);X.splice(n>=0?n:X.indexOf("DEFAULT"),0,t)}}}function br(t){return Object.keys(Dt).includes(t)}function gt(t,e,r){if(e=Array.from(e),t._x_virtualDirectives){let o=Object.entries(t._x_virtualDirectives).map(([a,c])=>({name:a,value:c})),s=Se(o);o=o.map(a=>s.find(c=>c.name===a.name)?{name:`x-bind:${a.name}`,value:`"${a.value}"`}:a),e=e.concat(o)}let n={};return e.map(vr((o,s)=>n[o]=s)).filter(Ar).map(Zn(n,r)).sort(Qn).map(o=>Xn(t,o))}function Se(t){return Array.from(t).map(vr()).filter(e=>!Ar(e))}var we=!1,_t=new Map,wr=Symbol();function Er(t){we=!0;let e=Symbol();wr=e,_t.set(e,[]);let r=()=>{for(;_t.get(e).length;)_t.get(e).shift()();_t.delete(e)},n=()=>{we=!1,r()};t(r),n()}function xe(t){let e=[],r=a=>e.push(a),[n,i]=tr(t);return e.push(i),[{Alpine:K,effect:n,cleanup:r,evaluateLater:x.bind(x,t),evaluate:N.bind(N,t)},()=>e.forEach(a=>a())]}function Xn(t,e){let r=()=>{},n=Dt[e.type]||r,[i,o]=xe(t);Rt(t,e.original,o);let s=()=>{t._x_ignore||t._x_ignoreSelf||(n.inline&&n.inline(t,e,i),n=n.bind(n,t,e,i),we?_t.get(wr).push(n):n())};return s.runCleanups=o,s}var Pt=(t,e)=>({name:r,value:n})=>(r.startsWith(t)&&(r=r.replace(t,e)),{name:r,value:n}),It=t=>t;function vr(t=()=>{}){return({name:e,value:r})=>{let{name:n,value:i}=Sr.reduce((o,s)=>s(o),{name:e,value:r});return n!==e&&t(n,e),{name:n,value:i}}}var Sr=[];function ct(t){Sr.push(t)}function Ar({name:t}){return Or().test(t)}var Or=()=>new RegExp(`^${ve}([^:^.]+)\\b`);function 
Zn(t,e){return({name:r,value:n})=>{r===n&&(n="");let i=r.match(Or()),o=r.match(/:([a-zA-Z0-9\-_:]+)/),s=r.match(/\.[^.\]]+(?=[^\]]*$)/g)||[],a=e||t[r]||r;return{type:i?i[1]:null,value:o?o[1]:null,modifiers:s.map(c=>c.replace(".","")),expression:n,original:a}}}var Ee="DEFAULT",X=["ignore","ref","data","id","anchor","bind","init","for","model","modelable","transition","show","if",Ee,"teleport"];function Qn(t,e){let r=X.indexOf(t.type)===-1?Ee:t.type,n=X.indexOf(e.type)===-1?Ee:e.type;return X.indexOf(r)-X.indexOf(n)}function Z(t,e,r={}){t.dispatchEvent(new CustomEvent(e,{detail:r,bubbles:!0,composed:!0,cancelable:!0}))}function $(t,e){if(typeof ShadowRoot=="function"&&t instanceof ShadowRoot){Array.from(t.children).forEach(i=>$(i,e));return}let r=!1;if(e(t,()=>r=!0),r)return;let n=t.firstElementChild;for(;n;)$(n,e,!1),n=n.nextElementSibling}function w(t,...e){console.warn(`Alpine Warning: ${t}`,...e)}var Cr=!1;function Tr(){Cr&&w("Alpine has already been initialized on this page. Calling Alpine.start() more than once can cause problems."),Cr=!0,document.body||w("Unable to initialize. Trying to load Alpine before `` is available. Did you forget to add `defer` in Alpine's ` + + + + + + diff --git a/frontend/alpinejs/public/main.js b/frontend/alpinejs/public/main.js new file mode 100644 index 0000000..1084e0a --- /dev/null +++ b/frontend/alpinejs/public/main.js @@ -0,0 +1,449 @@ +// Inject the HTML before Alpine initializes its data components +if (!document.querySelector('.container')) { + document.body.innerHTML = ` +
+
+

🎮 Handheld Emulation Devices

+

Manage your collection of retro handheld devices

+
+ +
+ + +
+ + + + + +
+ + + + + + + + + + + + + + + + +
Device NameManufacturerYearCPURAMDisplayBatteryActions
+
+

No devices found. Try adding one!

+
+
+ + + + + +
+
+`; +} + +document.addEventListener('alpine:init', () => { + + Alpine.data('deviceManager', () => ({ + devices: [], + searchQuery: '', + showCreateForm: false, + showDetailsModal: false, + showToast: false, + toastMessage: '', + editingDevice: null, + selectedDevice: null, + + // Use config from server or default + apiBase: window.APP_CONFIG?.api_url || '', + + init() { + this.$watch('showCreateForm', value => { + const popover = document.getElementById('createFormPopover'); + if (value) { + try { popover.showPopover(); } catch(e) {} + } else { + try { popover.hidePopover(); } catch(e) {} + } + }); + this.$watch('showDetailsModal', value => { + const popover = document.getElementById('detailsModalPopover'); + if (value) { + try { popover.showPopover(); } catch(e) {} + } else { + try { popover.hidePopover(); } catch(e) {} + } + }); + this.$watch('showToast', value => { + const popover = document.getElementById('toastPopover'); + if (value) { + try { popover.showPopover(); } catch(e) {} + } else { + try { popover.hidePopover(); } catch(e) {} + } + }); + }, + + form: { + name: '', + manufacturer: '', + release_year: null, + cpu: '', + ram_mb: null, + storage_mb: null, + display_size: '', + battery_hours: null + }, + + get filteredDevices() { + return this.devices.filter(device => + device.name.toLowerCase().includes(this.searchQuery.toLowerCase()) || + device.manufacturer.toLowerCase().includes(this.searchQuery.toLowerCase()) + ); + }, + + async loadDevices() { + try { + const response = await fetch(this.apiBase + '/devices'); + const data = await response.json(); + console.log('Devices data:', data); + this.devices = Array.isArray(data.data) ? data.data : (Array.isArray(data) ? 
data : []); + } catch (error) { + this.showNotification('Failed to load devices'); + console.error(error); + } + }, + + connectWebSocket() { + const wsUrl = this.apiBase.replace('http', 'ws'); + console.log('Connecting to WebSocket:', wsUrl); + const socket = new WebSocket(wsUrl); + + socket.onmessage = (event) => { + const data = JSON.parse(event.data); + console.log('WS Message received:', data); + if (data.event_type === 'DevicePublished') { + this.showNotification(`New device: ${data.device_name}`); + this.loadDevices(); // Refresh list + } else if (data.event_type === 'DeviceUpdated') { + this.showNotification(`Device updated: ${data.device_name}`); + this.loadDevices(); // Refresh list + } else if (data.event_type === 'DeviceDeleted') { + this.showNotification(`Device removed: ${data.device_name}`); + // Optimistically remove from local list or just refresh + this.devices = this.devices.filter(d => d.id != data.device_id); + if (this.selectedDevice && this.selectedDevice.id == data.device_id) { + this.showDetailsModal = false; + } + } + }; + + socket.onclose = () => { + console.log('WS disconnected, retrying in 5s...'); + setTimeout(() => this.connectWebSocket(), 5000); + }; + + socket.onerror = (error) => { + console.error('WS Error:', error); + }; + }, + + async saveDevice() { + try { + const url = this.editingDevice + ? `${this.apiBase}/devices/${this.editingDevice.id}` + : `${this.apiBase}/devices`; + + const method = this.editingDevice ? 'PUT' : 'POST'; + + // Filter out null/undefined values and only send necessary data + const payload = { + name: this.form.name, + manufacturer: this.form.manufacturer, + release_year: this.form.release_year ? parseInt(this.form.release_year) : null, + cpu: this.form.cpu || null, + ram_mb: this.form.ram_mb ? parseInt(this.form.ram_mb) : null, + storage_mb: this.form.storage_mb ? parseInt(this.form.storage_mb) : null, + display_size: this.form.display_size || null, + battery_hours: this.form.battery_hours ? 
parseFloat(this.form.battery_hours) : null + }; + + const response = await fetch(url, { + method, + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(payload) + }); + + if (response.ok) { + await this.loadDevices(); + this.resetForm(); + this.showCreateForm = false; + this.showNotification( + this.editingDevice ? 'Device updated successfully!' : 'Device created successfully!' + ); + } else { + const errorData = await response.json().catch(() => ({})); + this.showNotification(`Failed to save device: ${errorData.error || response.statusText}`); + } + } catch (error) { + this.showNotification('Failed to save device: Network error'); + console.error(error); + } + }, + + editDevice(device) { + this.editingDevice = device; + this.form = { ...device }; + this.showCreateForm = true; + this.showDetailsModal = false; + }, + + async deleteDevice(id) { + if (confirm('Are you sure you want to delete this device?')) { + try { + const response = await fetch(`${this.apiBase}/devices/${id}`, { + method: 'DELETE' + }); + + if (response.ok) { + await this.loadDevices(); + this.showNotification('Device deleted successfully!'); + } else { + this.showNotification('Failed to delete device'); + } + } catch (error) { + this.showNotification('Failed to delete device'); + console.error(error); + } + } + }, + + viewDevice(device) { + this.selectedDevice = device; + this.showDetailsModal = true; + }, + + resetForm() { + this.form = { + name: '', + manufacturer: '', + release_year: null, + cpu: '', + ram_mb: null, + storage_mb: null, + display_size: '', + battery_hours: null + }; + this.editingDevice = null; + }, + + showNotification(message) { + this.toastMessage = message; + this.showToast = true; + setTimeout(() => { + this.showToast = false; + }, 3000); + }, + + addNewDevice() { + this.resetForm(); + this.showCreateForm = true; + } + })); +}); diff --git a/frontend/alpinejs/public/style.css b/frontend/alpinejs/public/style.css new file mode 100644 index 
0000000..9b3e68e --- /dev/null +++ b/frontend/alpinejs/public/style.css @@ -0,0 +1,432 @@ +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +:root { + --primary: #3b82f6; + --secondary: #6b7280; + --success: #10b981; + --danger: #ef4444; + --warning: #f59e0b; + --dark: #1f2937; + --light: #f3f4f6; + --border: #e5e7eb; +} + +body { + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif; + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); + min-height: 100vh; + padding: 20px; + color: var(--dark); +} + +.container { + max-width: 1200px; + margin: 0 auto; + background: white; + border-radius: 12px; + box-shadow: 0 20px 25px rgba(0, 0, 0, 0.1); + overflow: hidden; +} + +header { + background: linear-gradient(135deg, var(--primary) 0%, #667eea 100%); + color: white; + padding: 40px 20px; + text-align: center; +} + +header h1 { + font-size: 2.5em; + margin-bottom: 10px; +} + +.subtitle { + font-size: 1.1em; + opacity: 0.9; +} + +.controls { + display: flex; + gap: 15px; + padding: 20px; + background: var(--light); + border-bottom: 1px solid var(--border); + flex-wrap: wrap; +} + +.btn { + padding: 10px 20px; + border: none; + border-radius: 6px; + cursor: pointer; + font-size: 1em; + font-weight: 500; + transition: all 0.3s ease; +} + +.btn-primary { + background: var(--primary); + color: white; +} + +.btn-primary:hover { + background: #2563eb; + transform: translateY(-2px); + box-shadow: 0 4px 12px rgba(59, 130, 246, 0.4); +} + +.btn-success { + background: var(--success); + color: white; +} + +.btn-success:hover { + background: #059669; +} + +.btn-danger { + background: var(--danger); + color: white; +} + +.btn-danger:hover { + background: #dc2626; +} + +.btn-warning { + background: var(--warning); + color: white; +} + +.btn-warning:hover { + background: #d97706; +} + +.btn-secondary { + background: var(--secondary); + color: white; +} + +.btn-secondary:hover { + background: #4b5563; +} + 
+.btn-icon { + background: none; + border: none; + font-size: 1.2em; + cursor: pointer; + padding: 5px 10px; + transition: transform 0.2s; +} + +.btn-icon:hover { + transform: scale(1.2); +} + +.search-input { + padding: 10px 15px; + border: 1px solid var(--border); + border-radius: 6px; + font-size: 1em; + flex: 1; + min-width: 250px; + transition: border-color 0.3s; +} + +.search-input:focus { + outline: none; + border-color: var(--primary); + box-shadow: 0 0 0 3px rgba(59, 130, 246, 0.1); +} + +.table-container { + overflow-x: auto; + padding: 20px; +} + +.devices-table { + width: 100%; + border-collapse: collapse; +} + +.devices-table thead { + background: var(--light); + border-bottom: 2px solid var(--border); +} + +.devices-table th { + padding: 15px; + text-align: left; + font-weight: 600; + color: var(--dark); +} + +.devices-table tbody tr { + border-bottom: 1px solid var(--border); + transition: background 0.2s; +} + +.devices-table tbody tr:hover { + background: #f9fafb; +} + +.devices-table td { + padding: 15px; +} + +.device-name { + cursor: pointer; + color: var(--primary); + text-decoration: none; + font-weight: 500; +} + +.device-name:hover { + text-decoration: underline; +} + +.small { + font-size: 0.9em; + color: var(--secondary); +} + +.actions { + display: flex; + gap: 10px; +} + +.empty-state { + text-align: center; + padding: 60px 20px; + color: var(--secondary); + font-size: 1.1em; +} + +/* Modal Styles */ +.modal { + padding: 0; + border: none; + background: transparent; + max-width: 100vw; + max-height: 100vh; + width: 100%; + height: 100%; + overflow: visible; +} + +.modal::backdrop { + background: rgba(0, 0, 0, 0.5); +} + +.modal:popover-open { + display: flex; + align-items: center; + justify-content: center; +} + +.modal-content { + background: white; + border-radius: 12px; + width: 90%; + max-width: 600px; + max-height: 90vh; + overflow-y: auto; + box-shadow: 0 25px 50px -12px rgba(0, 0, 0, 0.25); +} + +@keyframes slideUp { + from { + 
transform: translateY(30px); + opacity: 0; + } + + to { + transform: translateY(0); + opacity: 1; + } +} + +.modal-header { + display: flex; + justify-content: space-between; + align-items: center; + padding: 20px; + border-bottom: 1px solid var(--border); + background: var(--light); +} + +.modal-header h2 { + font-size: 1.5em; +} + +.close-btn { + background: none; + border: none; + font-size: 2em; + cursor: pointer; + color: var(--secondary); + transition: color 0.2s; +} + +.close-btn:hover { + color: var(--dark); +} + +.form { + padding: 20px; +} + +.form-group { + margin-bottom: 20px; +} + +.form-row { + display: grid; + grid-template-columns: 1fr 1fr; + gap: 20px; +} + +.form-group label { + display: block; + margin-bottom: 8px; + font-weight: 500; + color: var(--dark); +} + +.form-group input { + width: 100%; + padding: 10px 12px; + border: 1px solid var(--border); + border-radius: 6px; + font-size: 1em; + transition: border-color 0.3s; +} + +.form-group input:focus { + outline: none; + border-color: var(--primary); + box-shadow: 0 0 0 3px rgba(59, 130, 246, 0.1); +} + +.form-actions { + display: flex; + gap: 10px; + justify-content: flex-end; + padding-top: 20px; + border-top: 1px solid var(--border); +} + +.device-details { + padding: 20px; +} + +.details-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); + gap: 20px; + margin-bottom: 30px; +} + +.detail-item { + padding: 15px; + background: var(--light); + border-radius: 8px; +} + +.detail-item .label { + display: block; + font-weight: 600; + color: var(--secondary); + margin-bottom: 5px; + font-size: 0.9em; +} + +.detail-item span:last-child { + font-size: 1.1em; + color: var(--dark); +} + +.actions-modal { + display: flex; + gap: 10px; + justify-content: flex-end; + padding-top: 20px; + border-top: 1px solid var(--border); +} + +/* Toast Notification */ +.toast { + margin: 0; + padding: 15px 20px; + border: none; + position: fixed; + bottom: 20px; + right: 20px; + 
background: var(--success); + color: white; + border-radius: 6px; + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15); + z-index: 2000; + max-width: 300px; +} + +.toast:popover-open { + display: block; + animation: toastIn 0.3s ease; +} + +@keyframes toastIn { + from { + transform: translateY(100px); + opacity: 0; + } + to { + transform: translateY(0); + opacity: 1; + } +} + +/* Responsive Design */ +@media (max-width: 768px) { + header h1 { + font-size: 1.8em; + } + + .controls { + flex-direction: column; + } + + .search-input { + min-width: auto; + } + + .form-row { + grid-template-columns: 1fr; + } + + .devices-table { + font-size: 0.9em; + } + + .devices-table th, + .devices-table td { + padding: 10px; + } + + .actions { + flex-direction: column; + } + + .details-grid { + grid-template-columns: 1fr; + } + + .modal-content { + width: 95%; + } +} diff --git a/k8s/handheld-devices/Chart.yaml b/k8s/handheld-devices/Chart.yaml new file mode 100644 index 0000000..6341bd9 --- /dev/null +++ b/k8s/handheld-devices/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: handheld-devices +description: Handheld Devices microservices - API, worker, frontend +type: application +version: 0.1.0 +appVersion: "0.1.0" diff --git a/k8s/handheld-devices/templates/api-deployment.yaml b/k8s/handheld-devices/templates/api-deployment.yaml new file mode 100644 index 0000000..85b1b34 --- /dev/null +++ b/k8s/handheld-devices/templates/api-deployment.yaml @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Service +metadata: + name: api + labels: + app: api +spec: + ports: + - port: {{ .Values.api.service.port }} + targetPort: {{ .Values.api.service.port }} + name: http + selector: + app: api +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: api + labels: + app: api +spec: + replicas: {{ .Values.api.replicaCount }} + selector: + matchLabels: + app: api + template: + metadata: + labels: + app: api + spec: + containers: + - name: api + image: "{{ .Values.api.image.repository }}:{{ .Values.api.image.tag 
}}" + imagePullPolicy: {{ .Values.api.image.pullPolicy }} + ports: + - containerPort: {{ .Values.api.service.port }} + envFrom: + - configMapRef: + name: handheld-devices-config + env: + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: handheld-devices-secrets + key: db-password + livenessProbe: + httpGet: + path: /health/live + port: {{ .Values.api.service.port }} + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /health/ready + port: {{ .Values.api.service.port }} + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 3 + resources: + {{- toYaml .Values.api.resources | nindent 12 }} diff --git a/k8s/handheld-devices/templates/configmap.yaml b/k8s/handheld-devices/templates/configmap.yaml new file mode 100644 index 0000000..c6a959b --- /dev/null +++ b/k8s/handheld-devices/templates/configmap.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: handheld-devices-config + labels: + app.kubernetes.io/name: handheld-devices + app.kubernetes.io/instance: {{ .Release.Name }} +data: + DB_HOST: "postgres" + DB_PORT: "5432" + DB_NAME: "handheld_devices" + DB_USER: "devices_user" + REDIS_HOST: "redis" + REDIS_PORT: "6379" diff --git a/k8s/handheld-devices/templates/frontend-deployment.yaml b/k8s/handheld-devices/templates/frontend-deployment.yaml new file mode 100644 index 0000000..1cfde39 --- /dev/null +++ b/k8s/handheld-devices/templates/frontend-deployment.yaml @@ -0,0 +1,57 @@ +apiVersion: v1 +kind: Service +metadata: + name: frontend + labels: + app: frontend +spec: + ports: + - port: {{ .Values.frontend.service.port }} + targetPort: {{ .Values.frontend.service.port }} + name: http + selector: + app: frontend +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: frontend + labels: + app: frontend +spec: + replicas: {{ .Values.frontend.replicaCount }} + selector: + matchLabels: + app: frontend + template: + metadata: + labels: + app: frontend + spec: + containers: + - name: 
frontend + image: "{{ .Values.frontend.image.repository }}:{{ .Values.frontend.image.tag }}" + imagePullPolicy: {{ .Values.frontend.image.pullPolicy }} + ports: + - containerPort: {{ .Values.frontend.service.port }} + env: + - name: API_URL + value: {{ .Values.frontend.apiUrl | quote }} + - name: PORT + value: "{{ .Values.frontend.service.port }}" + livenessProbe: + httpGet: + path: /health + port: {{ .Values.frontend.service.port }} + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: /health + port: {{ .Values.frontend.service.port }} + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 3 + resources: + {{- toYaml .Values.frontend.resources | nindent 12 }} diff --git a/k8s/handheld-devices/templates/ingress.yaml b/k8s/handheld-devices/templates/ingress.yaml new file mode 100644 index 0000000..e5fb59f --- /dev/null +++ b/k8s/handheld-devices/templates/ingress.yaml @@ -0,0 +1,31 @@ +{{- if .Values.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: handheld-devices-ingress + labels: + app.kubernetes.io/name: handheld-devices + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.className }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + pathType: {{ .pathType }} + backend: + service: + name: {{ .service }} + port: + number: {{ .port }} + {{- end }} + {{- end }} +{{- end }} diff --git a/k8s/handheld-devices/templates/postgres.yaml b/k8s/handheld-devices/templates/postgres.yaml new file mode 100644 index 0000000..5b8715c --- /dev/null +++ b/k8s/handheld-devices/templates/postgres.yaml @@ -0,0 +1,73 @@ +{{- if .Values.postgres.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: postgres + labels: + app: postgres +spec: + ports: + - port: 5432 + targetPort: 5432 + name: postgres + selector: + app: postgres +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres + labels: + app: postgres +spec: + serviceName: postgres + replicas: 1 + selector: + matchLabels: + app: postgres + podManagementPolicy: Parallel + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: {{ .Values.postgres.image.repository }}:{{ .Values.postgres.image.tag }} + env: + - name: POSTGRES_DB + value: handheld_devices + - name: POSTGRES_USER + value: devices_user + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: handheld-devices-secrets + key: db-password + ports: + - containerPort: 5432 + volumeMounts: + - name: postgres-data + mountPath: /var/lib/postgresql/data + volumes: + - name: postgres-data + {{- if .Values.postgres.persistence.enabled }} + persistentVolumeClaim: + claimName: postgres-pvc + {{- else }} + emptyDir: {} + {{- end }} +--- +{{- if .Values.postgres.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: postgres-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.postgres.persistence.size }} +{{- end 
}} +{{- end }} diff --git a/k8s/handheld-devices/templates/redis.yaml b/k8s/handheld-devices/templates/redis.yaml new file mode 100644 index 0000000..ba78b28 --- /dev/null +++ b/k8s/handheld-devices/templates/redis.yaml @@ -0,0 +1,37 @@ +{{- if .Values.redis.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: redis + labels: + app: redis +spec: + ports: + - port: 6379 + targetPort: 6379 + name: redis + selector: + app: redis +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis + labels: + app: redis +spec: + replicas: 1 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + spec: + containers: + - name: redis + image: {{ .Values.redis.image.repository }}:{{ .Values.redis.image.tag }} + ports: + - containerPort: 6379 +{{- end }} diff --git a/k8s/handheld-devices/templates/secret.yaml b/k8s/handheld-devices/templates/secret.yaml new file mode 100644 index 0000000..91f708a --- /dev/null +++ b/k8s/handheld-devices/templates/secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: handheld-devices-secrets + labels: + app.kubernetes.io/name: handheld-devices + app.kubernetes.io/instance: {{ .Release.Name }} +data: + db-password: {{ .Values.dbPassword | b64enc }} diff --git a/k8s/handheld-devices/templates/worker-deployment.yaml b/k8s/handheld-devices/templates/worker-deployment.yaml new file mode 100644 index 0000000..87e2a45 --- /dev/null +++ b/k8s/handheld-devices/templates/worker-deployment.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: worker + labels: + app: worker +spec: + replicas: {{ .Values.worker.replicaCount }} + selector: + matchLabels: + app: worker + template: + metadata: + labels: + app: worker + spec: + containers: + - name: worker + image: "{{ .Values.worker.image.repository }}:{{ .Values.worker.image.tag }}" + imagePullPolicy: {{ .Values.worker.image.pullPolicy }} + envFrom: + - configMapRef: + name: handheld-devices-config + env: + - name: 
DB_PASSWORD + valueFrom: + secretKeyRef: + name: handheld-devices-secrets + key: db-password + resources: + {{- toYaml .Values.worker.resources | nindent 12 }} diff --git a/k8s/handheld-devices/values.yaml b/k8s/handheld-devices/values.yaml new file mode 100644 index 0000000..6ff8930 --- /dev/null +++ b/k8s/handheld-devices/values.yaml @@ -0,0 +1,80 @@ +api: + replicaCount: 2 + image: + repository: handheld-devices-api + tag: latest + pullPolicy: IfNotPresent + service: + port: 8080 + resources: + limits: + cpu: 500m + memory: 256Mi + requests: + cpu: 100m + memory: 128Mi + +worker: + replicaCount: 1 + image: + repository: handheld-devices-worker + tag: latest + pullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 128Mi + requests: + cpu: 50m + memory: 64Mi + +frontend: + replicaCount: 1 + apiUrl: "http://localhost:8080" # Override for ingress: use external URL, e.g. https://handheld.example.com + image: + repository: handheld-devices-frontend + tag: latest + pullPolicy: IfNotPresent + service: + port: 8090 + resources: + limits: + cpu: 100m + memory: 64Mi + requests: + cpu: 50m + memory: 32Mi + +postgres: + enabled: true + image: + repository: postgres + tag: "15-alpine" + persistence: + enabled: true + size: 1Gi + +redis: + enabled: true + image: + repository: redis + tag: "7-alpine" + +ingress: + enabled: false + className: nginx + annotations: {} + hosts: + - host: handheld.example.com + paths: + - path: /api + pathType: Prefix + service: api + port: 8080 + - path: / + pathType: Prefix + service: frontend + port: 8090 + +# Secrets - override in production or use external secret manager +dbPassword: devices_password