Compare commits

..

20 Commits

Author SHA1 Message Date
7217b7491f more updates 2026-03-10 15:50:42 +00:00
575ff7d957 update message 2026-03-10 15:38:49 +00:00
e84f7a757f fix ex 2026-03-10 15:36:33 +00:00
d641e181ba fix 2026-03-10 15:34:22 +00:00
7635caa71d fix compression 2026-03-10 14:38:51 +00:00
3ffaf0cc4d check retention 2026-03-10 14:30:10 +00:00
7f0447f82c fix min filter 2026-03-10 14:20:50 +00:00
7fe564bb12 add statement 2026-03-10 14:12:50 +00:00
cc9ef27ba0 organize the recent checks 2026-03-10 14:10:34 +00:00
b8f097848f update trigger interval check 2026-03-10 14:01:42 +00:00
5d03ad2d45 Update README.md 2026-03-08 19:56:07 +00:00
ed6a38b8ea update jenkinfile 2026-03-07 17:36:57 +00:00
edf6a88a60 update 2026-03-07 06:08:22 +00:00
f9b82cd594 update repo 2026-03-07 06:03:25 +00:00
40a076e4e5 fix ssh key 2026-03-07 05:50:20 +00:00
4d1ade2b36 ssh creds 2026-03-07 05:42:47 +00:00
ee1b9692a3 fix ssh creds 2026-03-07 05:39:03 +00:00
129909ae13 fix reg2 2026-03-07 05:36:04 +00:00
aee168adbe fix registry 2026-03-07 05:33:12 +00:00
f97612d52e update gitignore 2026-03-07 05:32:15 +00:00
22 changed files with 811 additions and 84 deletions

View File

@@ -3,3 +3,15 @@
DOCKER_REGISTRY=docker.io
DOCKER_IMAGE=myorg/myapp
IMAGE_TAG=latest
# Required for authentication
SECRET_KEY=<random-32-byte-hex>
ADMIN_USER=admin # change this in production
ADMIN_PASSWORD=admin # change this in production
# Optional: check retention (limits DB growth)
# CHECK_RETENTION_COUNT=5000 # keep last N checks per service (default 5000)
# CHECK_RETENTION_DAYS=30 # also delete checks older than N days (0=disabled)
# Rollup: aggregate checks older than N hours into hourly buckets for 90+ day reporting
# ROLLUP_AGE_HOURS=24 # default 24; raw checks kept for this long, then rolled up

3
.gitignore vendored
View File

@@ -1,4 +1,5 @@
.env
*.pyc
__pycache__/
monitor.db
monitor.db
data/monitor.db

1
.venv/bin/python Symbolic link
View File

@@ -0,0 +1 @@
python3

1
.venv/bin/python3 Symbolic link
View File

@@ -0,0 +1 @@
/usr/bin/python3

1
.venv/bin/python3.12 Symbolic link
View File

@@ -0,0 +1 @@
python3

1
.venv/lib64 Symbolic link
View File

@@ -0,0 +1 @@
lib

5
.venv/pyvenv.cfg Normal file
View File

@@ -0,0 +1,5 @@
home = /usr/bin
include-system-site-packages = false
version = 3.12.3
executable = /usr/bin/python3.12
command = /usr/bin/python3 -m venv /home/ryanv/jenkins-docker-deploy-example/.venv

View File

@@ -13,6 +13,12 @@ COPY app/ app/
COPY templates/ templates/
COPY static/ static/
# Run as non-root user
RUN addgroup --system --gid 1000 appgroup && \
adduser --system --uid 1000 --gid 1000 --no-create-home appuser
RUN chown -R appuser:appgroup /app
USER appuser
EXPOSE 8080
CMD ["python", "-u", "app.py"]

9
Jenkinsfile vendored
View File

@@ -3,12 +3,12 @@ pipeline {
environment {
// Configure these in Jenkins or as pipeline parameters
DOCKER_REGISTRY = 'https://git.wrigglyt.xyz/'
DOCKER_REGISTRY = 'git.wrigglyt.xyz'
DOCKER_IMAGE = 'ryanv/myapp' // e.g., username/repo for Docker Hub
DEPLOY_HOST = '10.0.11.3'
DEPLOY_USER = 'ryanv'
DEPLOY_PATH = '/opt/myapp'
GIT_REPO_URL = 'https://github.com/myorg/jenkins-docker-deploy-example.git'
GIT_REPO_URL = 'https://git.wrigglyt.xyz/ryanv/jenkins-docker-deploy-example.git'
}
options {
@@ -47,6 +47,9 @@ pipeline {
stage('Deploy via SSH') {
steps {
script {
env.DEPLOY_BRANCH = env.BRANCH_NAME ?: 'main'
}
sshagent(credentials: ['deploy-ssh-key']) {
sh """
ssh -o StrictHostKeyChecking=no ${env.DEPLOY_USER}@${env.DEPLOY_HOST} << 'DEPLOY_EOF'
@@ -56,7 +59,7 @@ pipeline {
# Clone or pull the repo (contains docker-compose.yml)
if [ -d .git ]; then
git fetch origin
git reset --hard origin/${env.BRANCH_NAME}
git reset --hard origin/${env.DEPLOY_BRANCH}
else
git clone ${env.GIT_REPO_URL} .
fi

4
Makefile Normal file
View File

@@ -0,0 +1,4 @@
.PHONY: audit
audit:
pip install pip-audit
pip-audit

118
README.md
View File

@@ -1,6 +1,9 @@
# Status
[![Build Status](https://jenkins.wrigglyt.xyz/job/t2/badge/icon)](https://jenkins.wrigglyt.xyz/job/t2/)
# Jenkins Docker Deploy Example
A Statping-like status monitoring app that demonstrates a Jenkins pipeline for Docker build, push, and deploy. The app performs HTTP/HTTPS and TCP checks, stores history in SQLite, and provides a dashboard with reports.
A Statping-like status monitoring app that demonstrates a Jenkins pipeline for Docker build, push, and deploy. The app performs HTTP/HTTPS and TCP checks, stores history in SQLite, and provides a dashboard with reports. It can also serve as a practical, lightweight status monitor in its own right.
## App Features
@@ -8,6 +11,7 @@ A Statping-like status monitoring app that demonstrates a Jenkins pipeline for D
- **TCP checks** Verify connectivity to host:port
- **History storage** SQLite database persists check results
- **Reports** Uptime %, avg/min/max latency, recent check history
- **Authentication** Session-based login; multi-user with admin-managed accounts
## Repository Structure
@@ -31,20 +35,48 @@ A Statping-like status monitoring app that demonstrates a Jenkins pipeline for D
## Manual Test
```bash
# Build and run locally
# Build and run locally (set SECRET_KEY and ADMIN_* for auth)
docker build -t myapp:test .
docker run -p 8080:8080 -v $(pwd)/data:/app/data myapp:test
# Visit http://localhost:8080
docker run -p 8080:8080 -v $(pwd)/data:/app/data \
-e SECRET_KEY=dev-secret-change-in-production \
-e ADMIN_USER=admin -e ADMIN_PASSWORD=changeme \
myapp:test
# Visit http://localhost:8080 and log in
```
Add services from the dashboard (e.g. `https://example.com`, `google.com:443` for TCP) and view reports.
### Authentication
The app uses session-based authentication. On first run, if `ADMIN_USER` and `ADMIN_PASSWORD` are set and no users exist, an admin user is created. Admins can add more users at `/users`. Set `SECRET_KEY` to a random value (e.g. 32-byte hex) for production.
### Check Retention and Rollups
To limit database growth, the app **rolls up** old checks into hourly aggregates, then prunes raw data:
1. **Rollup** (every 15 min): Checks older than `ROLLUP_AGE_HOURS` are aggregated into hourly buckets (total, success count, latency stats) and stored in `uptime_rollups`. Raw checks in those hours are deleted.
2. **Prune**: Keeps last `CHECK_RETENTION_COUNT` raw checks per service; optionally deletes by age.
This lets you report accurate uptime over **90+ days** without storing millions of raw checks. Reports combine rollups (historical) + raw checks (recent).
| Env var | Default | Description |
|---------|---------|-------------|
| `ROLLUP_AGE_HOURS` | 24 | Aggregate checks older than N hours into hourly buckets |
| `CHECK_RETENTION_COUNT` | 5000 | Keep last N raw checks per service |
| `CHECK_RETENTION_DAYS` | 0 (disabled) | Also delete checks older than N days |
Example: keep 2000 raw checks per service and drop anything older than 30 days:
```bash
docker run -e CHECK_RETENTION_COUNT=2000 -e CHECK_RETENTION_DAYS=30 ...
```
## Jenkins Pipeline
The pipeline:
1. **Builds** a Docker image
2. **Pushes** the image to a container registry (Docker Hub, etc.)
2. **Pushes** the image to a container registry (Gitea, Docker Hub, etc.)
3. **SSHs** to a deployment machine
4. **Clones** (or pulls) this repo to get `docker-compose.yml`
5. **Deploys** with `docker compose up -d`
@@ -53,47 +85,75 @@ The pipeline:
**Jenkins**
- Docker installed and Jenkins user in `docker` group
- Pipeline and SSH Agent plugins
- Docker installed and Jenkins agent in `docker` group
- **Pipeline** and **SSH Agent** plugins
- Git for cloning
**Jenkins Credentials**
| ID | Type | Purpose |
|----|------|---------|
| `docker-registry-credentials` | Username/Password | Docker Hub or registry login |
| `deploy-ssh-key` | SSH Username with private key | SSH to deploy host |
**Deploy Host**
- Docker and Docker Compose installed
- SSH access for the deploy user
- If using a private registry: run `docker login` on the deploy host
### Configuration
### Adapting the Jenkinsfile for Your Own Repo
Edit the `environment` block in `Jenkinsfile`:
Edit the `environment` block in `Jenkinsfile` for your setup:
```groovy
environment {
DOCKER_REGISTRY = 'docker.io'
DOCKER_IMAGE = 'myorg/myapp'
DEPLOY_HOST = 'deploy-server.example.com'
DEPLOY_USER = 'deploy'
DEPLOY_PATH = '/opt/myapp'
GIT_REPO_URL = 'https://github.com/myorg/jenkins-docker-deploy-example.git'
}
```
| Variable | Description | Example |
|----------|-------------|---------|
| `DOCKER_REGISTRY` | Registry hostname (no `https://`) | `git.wrigglyt.xyz` or `docker.io` |
| `DOCKER_IMAGE` | Image path (org/repo) | `ryanv/myapp` |
| `DEPLOY_HOST` | Deploy server hostname or IP | `10.0.11.3` |
| `DEPLOY_USER` | SSH user on deploy host | `ryanv` |
| `DEPLOY_PATH` | Path on deploy host for this app | `/opt/myapp` |
| `GIT_REPO_URL` | Git repo URL (for deploy host to clone) | `https://git.wrigglyt.xyz/ryanv/myapp.git` |
**Credential IDs** Update these in the Jenkinsfile if you use different IDs:
| Credential ID | Type | Who sets it up |
|---------------|------|----------------|
| `gitea_credentials` | Username/Password | **Each user** your Gitea login for pushing images |
| `deploy-ssh-key` | SSH Username with private key | **Shared** one key for the deploy host, can be reused |
> **Shared deploy host:** If you share a deploy host (e.g. a home server), you can reuse the same `deploy-ssh-key` credential—no need to create your own. Each person **must** add their own Gitea credentials in Jenkins for their pipeline job (Manage Jenkins → Credentials → Add → Username with password, ID `gitea_credentials`). Use your Gitea username and an access token with package read/write for the registry.
### Jenkins Credentials Setup
1. **`deploy-ssh-key`** (shared for the deploy host)
- Kind: SSH Username with private key
- ID: `deploy-ssh-key`
- Username: matches `DEPLOY_USER`
- Private key: RSA key in PEM format (`ssh-keygen -t rsa -b 4096 -m PEM`)
- Public key must be in `~/.ssh/authorized_keys` on the deploy host
2. **`gitea_credentials`** (per user, for registry push)
- Kind: Username with password
- ID: `gitea_credentials` (or change `credentialsId` in the Jenkinsfile)
- Username: your Gitea username
- Password: your Gitea password or access token (token recommended)
### First-Time Deploy Host Setup
On the deploy host:
```bash
sudo mkdir -p /opt/myapp
sudo chown deploy:deploy /opt/myapp
sudo usermod -aG docker deploy
sudo chown ryanv:ryanv /opt/myapp
sudo usermod -aG docker ryanv
```
The `docker-compose.yml` mounts `./data:/app/data` for SQLite persistence. Ensure the deploy directory is writable.
If multiple users deploy to the same host, use separate paths (e.g. `/opt/myapp-alice`, `/opt/myapp-bob`) and update `docker-compose.yml` to use different ports for each app.
The `docker-compose.yml` mounts `./data:/app/data` for SQLite persistence. The container runs as UID 1000. Ensure the data directory is writable:
```bash
mkdir -p data
chown 1000:1000 data
```
### Dependency Audit
Before deploying, run `make audit` (or `pip-audit`) to check for known vulnerabilities in dependencies.
### Branch Behavior

View File

@@ -3,10 +3,42 @@ import os
from datetime import datetime, timedelta, timezone
from flask import Flask, redirect, render_template, request, url_for
from flask_login import LoginManager, current_user, login_required, login_user, logout_user
from app import models
def _is_safe_redirect_url(url: str | None) -> bool:
"""Check that redirect URL is relative to our app (prevents open redirect)."""
if not url:
return False
return url.startswith("/") and "//" not in url
class User:
    """Adapter exposing a users-table row dict through the Flask-Login user API."""

    def __init__(self, user_dict: dict):
        record = user_dict
        self.id = record["id"]
        self.username = record["username"]
        # Row may predate the is_admin column; default to non-admin.
        self.is_admin = bool(record.get("is_admin", 0))

    def get_id(self) -> str:
        # Flask-Login serializes this string into the session.
        return str(self.id)

    @property
    def is_authenticated(self) -> bool:
        # Instances are only built from verified rows, so always True.
        return True

    @property
    def is_active(self) -> bool:
        return True

    @property
    def is_anonymous(self) -> bool:
        return False
def _parse_report_dates(from_ts, to_ts, preset):
"""Parse from/to dates, applying preset if given. Returns (from_ts, to_ts, from_display, to_display)."""
now = datetime.now(timezone.utc)
@@ -19,6 +51,9 @@ def _parse_report_dates(from_ts, to_ts, preset):
elif preset == "30d":
to_ts = now.isoformat()
from_ts = (now - timedelta(days=30)).isoformat()
elif preset == "90d":
to_ts = now.isoformat()
from_ts = (now - timedelta(days=90)).isoformat()
if from_ts and len(from_ts) == 10:
from_ts = from_ts + "T00:00:00"
if to_ts and len(to_ts) == 10:
@@ -33,22 +68,86 @@ app = Flask(
template_folder=os.path.join(ROOT, "templates"),
static_folder=os.path.join(ROOT, "static"),
)
app.config["SECRET_KEY"] = os.environ.get("SECRET_KEY", "dev-secret-change-in-production")
VERSION = os.environ.get("VERSION", "dev")
login_manager = LoginManager(app)
login_manager.login_view = "login"
login_manager.login_message = "Please log in to access this page."
@login_manager.user_loader
def load_user(user_id: str):
    """Rehydrate the session user from the id Flask-Login stored in the cookie."""
    if not user_id.isdigit():
        return None
    record = models.get_user_by_id(int(user_id))
    return User(record) if record else None
@login_manager.unauthorized_handler
def unauthorized():
    """Send API clients a 401 JSON error; browsers get redirected to login."""
    wants_json = request.is_json or request.accept_mimetypes.best == "application/json"
    if wants_json:
        return {"error": "Authentication required"}, 401
    # Preserve the originally requested URL so login can bounce back.
    return redirect(url_for("login", next=request.url))
@app.route("/login", methods=["GET", "POST"])
def login():
    """Render the login form and establish a session on valid credentials."""
    if current_user.is_authenticated:
        return redirect(url_for("dashboard"))
    if request.method != "POST":
        return render_template("login.html", version=VERSION)
    username = (request.form.get("username") or "").strip()
    password = request.form.get("password") or ""
    user_dict = models.verify_user(username, password)
    if not user_dict:
        return render_template("login.html", error="Invalid username or password", version=VERSION)
    login_user(User(user_dict))
    # Honor ?next= only when it points back into this app (open-redirect guard).
    next_param = request.form.get("next") or request.args.get("next")
    target = next_param if _is_safe_redirect_url(next_param) else url_for("dashboard")
    return redirect(target)
@app.route("/logout")
def logout():
    """Terminate the current session and return to the login page."""
    logout_user()
    return redirect(url_for("login"))
@app.route("/users", methods=["GET", "POST"])
@login_required
def users():
    """Admin-only user management: list accounts and create new (non-admin) ones."""
    if not current_user.is_admin:
        return "Forbidden", 403
    if request.method == "POST":
        username = (request.form.get("username") or "").strip()
        password = request.form.get("password") or ""
        new_id = models.create_user(username, password, is_admin=False)
        if new_id:
            # Redirect-after-POST so a refresh doesn't resubmit the form.
            return redirect(url_for("users"))
        # create_user returns None on duplicate or empty username/password.
        return render_template(
            "users.html",
            users=models.list_users(),
            error="Username already exists or invalid",
            version=VERSION,
        )
    return render_template("users.html", users=models.list_users(), version=VERSION)
@app.route("/")
@login_required
def dashboard():
    """Home page: render the dashboard listing every configured service."""
    return render_template("dashboard.html", services=models.list_services(), version=VERSION)
@app.route("/api/services", methods=["GET"])
@login_required
def api_list_services():
    """Return all configured services as JSON."""
    return {"services": models.list_services()}
@app.route("/api/services", methods=["POST"])
@login_required
def api_add_service():
data = request.get_json(silent=True)
if data is None and request.form:
@@ -74,6 +173,7 @@ def api_add_service():
@app.route("/api/services/<int:service_id>", methods=["DELETE"])
@login_required
def api_delete_service(service_id):
if models.delete_service(service_id):
return {"deleted": service_id}
@@ -81,6 +181,7 @@ def api_delete_service(service_id):
@app.route("/api/services/<int:service_id>", methods=["PATCH"])
@login_required
def api_update_service(service_id):
svc = models.get_service(service_id)
if not svc:
@@ -113,6 +214,7 @@ def api_update_service(service_id):
@app.route("/api/services/<int:service_id>")
@login_required
def api_get_service(service_id):
svc = models.get_service(service_id)
if not svc:
@@ -122,6 +224,7 @@ def api_get_service(service_id):
@app.route("/api/services/<int:service_id>/edit")
@login_required
def edit_service(service_id):
svc = models.get_service(service_id)
if not svc:
@@ -130,6 +233,7 @@ def edit_service(service_id):
@app.route("/api/services/<int:service_id>/report")
@login_required
def report(service_id):
svc = models.get_service(service_id)
if not svc:
@@ -140,10 +244,22 @@ def report(service_id):
from_ts, to_ts, from_display, to_display = _parse_report_dates(from_ts, to_ts, preset)
status_filter = request.args.get("status")
search = request.args.get("search", "").strip() or None
page = max(1, int(request.args.get("page", 1)))
per_page = min(100, max(10, int(request.args.get("per_page", 10))))
stats = models.get_report_stats(service_id, from_ts=from_ts, to_ts=to_ts)
checks = models.get_checks(service_id, limit=100, from_ts=from_ts, to_ts=to_ts, status_filter=status_filter, search=search)
checks_total = models.get_checks_count(service_id, from_ts=from_ts, to_ts=to_ts, status_filter=status_filter, search=search)
checks = models.get_checks(
service_id,
limit=per_page,
offset=(page - 1) * per_page,
from_ts=from_ts,
to_ts=to_ts,
status_filter=status_filter,
search=search,
)
chart_checks = models.get_checks(service_id, limit=200, from_ts=from_ts, to_ts=to_ts)
period_label = _format_period_label(from_display, to_display) if (from_ts or to_ts) else None
total_pages = (checks_total + per_page - 1) // per_page if checks_total else 1
return render_template(
"report.html",
service=dict(svc),
@@ -157,6 +273,10 @@ def report(service_id):
preset=preset,
status_filter=status_filter or "",
search=search or "",
page=page,
per_page=per_page,
checks_total=checks_total,
total_pages=total_pages,
)
@@ -172,6 +292,7 @@ def _format_period_label(from_display, to_display):
@app.route("/api/services/<int:service_id>/history")
@login_required
def api_history(service_id):
svc = models.get_service(service_id)
if not svc:
@@ -188,6 +309,7 @@ def api_history(service_id):
@app.route("/api/services/<int:service_id>/stats")
@login_required
def api_report_stats(service_id):
"""JSON report stats with optional from/to query params for date range."""
svc = models.get_service(service_id)

View File

@@ -2,12 +2,21 @@
import os
import sqlite3
from contextlib import contextmanager
from datetime import datetime
from datetime import datetime, timedelta, timezone
from pathlib import Path
from werkzeug.security import check_password_hash, generate_password_hash
DATA_PATH = os.environ.get("DATA_PATH", "/app/data")
DB_PATH = Path(DATA_PATH) / "monitor.db"
# Retention: keep last N checks per service, and optionally drop checks older than N days
CHECK_RETENTION_COUNT = int(os.environ.get("CHECK_RETENTION_COUNT", "5000"))
CHECK_RETENTION_DAYS = int(os.environ.get("CHECK_RETENTION_DAYS", "0")) or None
# Rollup: aggregate checks older than N hours into hourly buckets for long-term reporting
ROLLUP_AGE_HOURS = int(os.environ.get("ROLLUP_AGE_HOURS", "24"))
def _ensure_data_dir():
Path(DATA_PATH).mkdir(parents=True, exist_ok=True)
@@ -22,6 +31,110 @@ def _migrate_add_status(conn):
conn.execute("UPDATE checks SET status = CASE WHEN success = 1 THEN 'OK' ELSE 'ERROR' END")
def _migrate_add_rollups(conn):
"""Create uptime_rollups table for aggregated hourly stats (long-term reporting)."""
conn.execute("""
CREATE TABLE IF NOT EXISTS uptime_rollups (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
period_start TEXT NOT NULL,
period_end TEXT NOT NULL,
total_checks INTEGER NOT NULL,
success_count INTEGER NOT NULL,
sum_response_ms REAL NOT NULL,
response_count INTEGER NOT NULL,
min_response_ms REAL,
max_response_ms REAL,
FOREIGN KEY (service_id) REFERENCES services(id),
UNIQUE(service_id, period_start)
)
""")
conn.execute("CREATE INDEX IF NOT EXISTS idx_rollups_service ON uptime_rollups(service_id)")
conn.execute("CREATE INDEX IF NOT EXISTS idx_rollups_period ON uptime_rollups(period_start)")
try:
conn.execute("SELECT response_count FROM uptime_rollups LIMIT 1")
except sqlite3.OperationalError:
conn.execute("ALTER TABLE uptime_rollups ADD COLUMN response_count INTEGER NOT NULL DEFAULT 0")
def _migrate_add_users(conn):
"""Create users table for authentication."""
conn.execute("""
CREATE TABLE IF NOT EXISTS users (
id INTEGER PRIMARY KEY AUTOINCREMENT,
username TEXT NOT NULL UNIQUE,
password_hash TEXT NOT NULL,
is_admin INTEGER NOT NULL DEFAULT 0,
created_at TEXT NOT NULL
)
""")
conn.execute("CREATE INDEX IF NOT EXISTS idx_users_username ON users(username)")
def _seed_admin_if_empty(conn):
    """Create the initial admin user from ADMIN_USER/ADMIN_PASSWORD env vars.

    Runs only when the users table is empty; does nothing if either env var
    is unset, so a fresh deployment without credentials starts with no users.
    """
    row = conn.execute("SELECT COUNT(*) FROM users").fetchone()
    if row[0] > 0:
        return
    admin_user = os.environ.get("ADMIN_USER")
    admin_password = os.environ.get("ADMIN_PASSWORD")
    if not admin_user or not admin_password:
        return
    password_hash = generate_password_hash(admin_password)
    conn.execute(
        "INSERT INTO users (username, password_hash, is_admin, created_at) VALUES (?, ?, 1, ?)",
        # datetime.utcnow() is deprecated and naive; use the timezone-aware
        # form the rest of this module already uses.
        (admin_user, password_hash, datetime.now(timezone.utc).isoformat()),
    )
def create_user(username: str, password: str, is_admin: bool = False) -> int | None:
    """Create a new user and return its id.

    Returns None when the username is empty, the password is empty, or the
    username already exists (UNIQUE constraint). The password is stored only
    as a werkzeug hash.
    """
    username = username.strip()
    if not username or not password:
        return None
    password_hash = generate_password_hash(password)
    with get_db() as conn:
        try:
            cur = conn.execute(
                "INSERT INTO users (username, password_hash, is_admin, created_at) VALUES (?, ?, ?, ?)",
                # Timezone-aware timestamp: datetime.utcnow() is deprecated
                # and inconsistent with the rest of this module.
                (username, password_hash, 1 if is_admin else 0, datetime.now(timezone.utc).isoformat()),
            )
            return cur.lastrowid
        except sqlite3.IntegrityError:
            # UNIQUE(username) violated — name already taken.
            return None
def get_user_by_id(user_id: int) -> dict | None:
    """Fetch a user row by primary key; None when no such user exists."""
    with get_db() as conn:
        found = conn.execute("SELECT * FROM users WHERE id = ?", (user_id,)).fetchone()
        return dict(found) if found else None
def get_user_by_username(username: str) -> dict | None:
    """Fetch a user row by (whitespace-stripped) username; None when absent."""
    with get_db() as conn:
        found = conn.execute("SELECT * FROM users WHERE username = ?", (username.strip(),)).fetchone()
        return dict(found) if found else None
def verify_user(username: str, password: str) -> dict | None:
    """Check credentials; return the matching user dict, or None on failure."""
    user = get_user_by_username(username)
    if user and check_password_hash(user["password_hash"], password):
        return user
    return None
def list_users():
    """Return every user as a dict (id, username, is_admin, created_at), ordered by name.

    Deliberately excludes password_hash so callers never see credentials.
    """
    query = "SELECT id, username, is_admin, created_at FROM users ORDER BY username"
    with get_db() as conn:
        return [dict(r) for r in conn.execute(query).fetchall()]
@contextmanager
def get_db():
_ensure_data_dir()
@@ -63,6 +176,9 @@ def init_db():
conn.execute("CREATE INDEX IF NOT EXISTS idx_checks_timestamp ON checks(timestamp)")
_migrate_add_status(conn)
conn.execute("CREATE INDEX IF NOT EXISTS idx_checks_status ON checks(status)")
_migrate_add_rollups(conn)
_migrate_add_users(conn)
_seed_admin_if_empty(conn)
def list_services():
@@ -137,55 +253,105 @@ def add_check(service_id: int, success: bool, response_time_ms: float | None, er
)
def get_checks(service_id: int, limit: int = 50, from_ts: str = None, to_ts: str = None, status_filter: str = None, search: str = None):
"""Get recent checks for a service, optionally filtered by timestamp, status (ok/error), and error search."""
def _checks_where_args(service_id: int, from_ts: str = None, to_ts: str = None, status_filter: str = None, search: str = None):
"""Build WHERE clause and args for checks queries."""
q = "WHERE service_id = ?"
args = [service_id]
if from_ts:
q += " AND timestamp >= ?"
args.append(from_ts)
if to_ts:
q += " AND timestamp <= ?"
args.append(to_ts)
if status_filter == "error":
q += " AND status = 'ERROR'"
elif status_filter == "ok":
q += " AND status = 'OK'"
if search:
q += " AND (error_message LIKE ? OR status LIKE ?)"
args.extend([f"%{search}%", f"%{search}%"])
return q, args
def get_checks_count(service_id: int, from_ts: str = None, to_ts: str = None, status_filter: str = None, search: str = None) -> int:
"""Count checks matching filters (for pagination)."""
where, args = _checks_where_args(service_id, from_ts, to_ts, status_filter, search)
with get_db() as conn:
q = "SELECT * FROM checks WHERE service_id = ?"
args = [service_id]
if from_ts:
q += " AND timestamp >= ?"
args.append(from_ts)
if to_ts:
q += " AND timestamp <= ?"
args.append(to_ts)
if status_filter == "error":
q += " AND status = 'ERROR'"
elif status_filter == "ok":
q += " AND status = 'OK'"
if search:
q += " AND (error_message LIKE ? OR status LIKE ?)"
args.extend([f"%{search}%", f"%{search}%"])
q += " ORDER BY timestamp DESC LIMIT ?"
args.append(limit)
rows = conn.execute(q, args).fetchall()
row = conn.execute(f"SELECT COUNT(*) FROM checks {where}", args).fetchone()
return row[0]
def get_checks(service_id: int, limit: int = 50, offset: int = 0, from_ts: str = None, to_ts: str = None, status_filter: str = None, search: str = None):
    """Fetch checks for a service, newest first, with optional filters and paging."""
    where, args = _checks_where_args(service_id, from_ts, to_ts, status_filter, search)
    query = f"SELECT * FROM checks {where} ORDER BY timestamp DESC LIMIT ? OFFSET ?"
    with get_db() as conn:
        rows = conn.execute(query, args + [limit, offset]).fetchall()
        return [dict(r) for r in rows]
def get_report_stats(service_id: int, from_ts: str = None, to_ts: str = None):
"""Compute uptime % and latency stats for a service, optionally over a time range."""
"""
Compute uptime % and latency stats for a service over a time range.
Uses hourly rollups for old data + raw checks for recent data (last ROLLUP_AGE_HOURS).
Supports accurate reporting over 90+ days.
"""
now = datetime.now(timezone.utc)
raw_cutoff = (now - timedelta(hours=ROLLUP_AGE_HOURS)).isoformat()
to_ts = to_ts or now.isoformat()
from_ts = from_ts or "1970-01-01T00:00:00"
total = 0
success_count = 0
sum_response_ms = 0.0
count_with_response = 0
min_ms = None
max_ms = None
with get_db() as conn:
q = "SELECT success, response_time_ms FROM checks WHERE service_id = ?"
args = [service_id]
if from_ts:
q += " AND timestamp >= ?"
args.append(from_ts)
if to_ts:
q += " AND timestamp <= ?"
args.append(to_ts)
q += " ORDER BY timestamp DESC LIMIT 10000"
rows = conn.execute(q, args).fetchall()
if not rows:
# 1. Rollups: hourly buckets that end before raw_cutoff
rollup_end = raw_cutoff if raw_cutoff < to_ts else from_ts
if from_ts < rollup_end:
q = """
SELECT total_checks, success_count, sum_response_ms, response_count, min_response_ms, max_response_ms
FROM uptime_rollups
WHERE service_id = ? AND period_start >= ? AND period_start < ?
"""
rollup_rows = conn.execute(q, (service_id, from_ts, rollup_end)).fetchall()
for r in rollup_rows:
total += r["total_checks"]
success_count += r["success_count"]
sum_response_ms += r["sum_response_ms"] or 0
count_with_response += r["response_count"] or 0
if r["min_response_ms"] is not None:
min_ms = r["min_response_ms"] if min_ms is None else min(min_ms, r["min_response_ms"])
if r["max_response_ms"] is not None:
max_ms = r["max_response_ms"] if max_ms is None else max(max_ms, r["max_response_ms"])
# 2. Raw checks: recent data (overlaps with rollup period if range is entirely recent)
raw_from = from_ts if from_ts >= raw_cutoff else raw_cutoff
if raw_from <= to_ts:
q = "SELECT success, response_time_ms FROM checks WHERE service_id = ? AND timestamp >= ? AND timestamp <= ?"
raw_rows = conn.execute(q, (service_id, raw_from, to_ts)).fetchall()
for r in raw_rows:
total += 1
success_count += 1 if r["success"] else 0
if r["response_time_ms"] is not None:
sum_response_ms += r["response_time_ms"]
count_with_response += 1
min_ms = r["response_time_ms"] if min_ms is None else min(min_ms, r["response_time_ms"])
max_ms = r["response_time_ms"] if max_ms is None else max(max_ms, r["response_time_ms"])
if total == 0:
return {"total": 0, "uptime_pct": 0, "avg_ms": None, "min_ms": None, "max_ms": None}
total = len(rows)
success_count = sum(1 for r in rows if r["success"])
uptime_pct = (success_count / total) * 100 if total else 0
response_times = [r["response_time_ms"] for r in rows if r["response_time_ms"] is not None]
uptime_pct = (success_count / total) * 100
avg_ms = round(sum_response_ms / count_with_response, 2) if count_with_response else None
return {
"total": total,
"uptime_pct": round(uptime_pct, 2),
"avg_ms": round(sum(response_times) / len(response_times), 2) if response_times else None,
"min_ms": min(response_times) if response_times else None,
"max_ms": max(response_times) if response_times else None,
"avg_ms": avg_ms,
"min_ms": round(min_ms, 2) if min_ms is not None else None,
"max_ms": round(max_ms, 2) if max_ms is not None else None,
}
@@ -210,6 +376,7 @@ def delete_service(service_id: int) -> bool:
"""Delete a service and its check history. Returns True if deleted."""
with get_db() as conn:
conn.execute("DELETE FROM checks WHERE service_id = ?", (service_id,))
conn.execute("DELETE FROM uptime_rollups WHERE service_id = ?", (service_id,))
cur = conn.execute("DELETE FROM services WHERE id = ?", (service_id,))
return cur.rowcount > 0
@@ -219,3 +386,108 @@ def get_all_services_for_scheduler():
with get_db() as conn:
rows = conn.execute("SELECT id, target, protocol, interval_seconds FROM services").fetchall()
return [dict(r) for r in rows]
def _hour_start(ts: str) -> str:
"""Return ISO timestamp truncated to hour boundary (e.g. 2026-03-10T14:00:00)."""
dt = datetime.fromisoformat(ts.replace("Z", "+00:00"))
return dt.replace(minute=0, second=0, microsecond=0).isoformat()
def rollup_old_checks() -> int:
    """
    Aggregate checks older than ROLLUP_AGE_HOURS into hourly buckets in
    uptime_rollups, then delete the raw rows that were folded in.

    Returns the number of raw check rows deleted.
    """
    cutoff = datetime.now(timezone.utc) - timedelta(hours=ROLLUP_AGE_HOURS)
    cutoff_ts = cutoff.isoformat()
    with get_db() as conn:
        # Aggregate stale checks per (service, hour bucket). GROUP_CONCAT
        # collects the raw row ids so exactly these rows are deleted below.
        rows = conn.execute(
            """
            SELECT service_id,
                   strftime('%Y-%m-%dT%H:00:00', timestamp) as period_start,
                   COUNT(*) as total_checks,
                   SUM(success) as success_count,
                   SUM(CASE WHEN response_time_ms IS NOT NULL THEN response_time_ms ELSE 0 END) as sum_response_ms,
                   SUM(CASE WHEN response_time_ms IS NOT NULL THEN 1 ELSE 0 END) as response_count,
                   MIN(CASE WHEN response_time_ms IS NOT NULL THEN response_time_ms END) as min_response_ms,
                   MAX(response_time_ms) as max_response_ms,
                   GROUP_CONCAT(id) as check_ids
            FROM checks
            WHERE timestamp < ?
            GROUP BY service_id, period_start
            """,
            (cutoff_ts,),
        ).fetchall()
        if not rows:
            return 0
        deleted = 0
        for r in rows:
            period_end = datetime.fromisoformat(r["period_start"].replace("Z", "+00:00")) + timedelta(hours=1)
            period_end_ts = period_end.isoformat()
            # On conflict, merge into the existing bucket. SQLite's scalar
            # MIN()/MAX() return NULL if ANY argument is NULL, so COALESCE
            # each side first; otherwise merging with a bucket whose min/max
            # is NULL (no latency samples) would wipe out the known value.
            conn.execute(
                """
                INSERT INTO uptime_rollups (service_id, period_start, period_end, total_checks, success_count, sum_response_ms, response_count, min_response_ms, max_response_ms)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
                ON CONFLICT(service_id, period_start) DO UPDATE SET
                    total_checks = total_checks + excluded.total_checks,
                    success_count = success_count + excluded.success_count,
                    sum_response_ms = sum_response_ms + excluded.sum_response_ms,
                    response_count = response_count + excluded.response_count,
                    min_response_ms = MIN(COALESCE(min_response_ms, excluded.min_response_ms),
                                          COALESCE(excluded.min_response_ms, min_response_ms)),
                    max_response_ms = MAX(COALESCE(max_response_ms, excluded.max_response_ms),
                                          COALESCE(excluded.max_response_ms, max_response_ms))
                """,
                (
                    r["service_id"],
                    r["period_start"],
                    period_end_ts,
                    r["total_checks"],
                    r["success_count"],
                    r["sum_response_ms"] or 0,
                    r["response_count"] or 0,
                    r["min_response_ms"],
                    r["max_response_ms"],
                ),
            )
            # Delete exactly the rolled-up rows by id.
            ids = [int(x) for x in (r["check_ids"] or "").split(",") if x]
            if ids:
                placeholders = ",".join("?" * len(ids))
                cur = conn.execute(f"DELETE FROM checks WHERE id IN ({placeholders})", ids)
                deleted += cur.rowcount
        return deleted
def prune_checks_retention() -> int:
    """
    Remove old checks to limit database growth.

    Two independent retention policies are applied:
      1. Age-based: if CHECK_RETENTION_DAYS is truthy, delete checks whose
         timestamp is older than that many days.
      2. Count-based: for each service, keep only the most recent
         CHECK_RETENTION_COUNT checks.

    Returns:
        int: total number of check rows deleted.
    """
    deleted = 0
    with get_db() as conn:
        # Policy 1: delete checks older than N days (if configured).
        if CHECK_RETENTION_DAYS:
            cutoff = (datetime.now(timezone.utc) - timedelta(days=CHECK_RETENTION_DAYS)).isoformat()
            cur = conn.execute("DELETE FROM checks WHERE timestamp < ?", (cutoff,))
            deleted += cur.rowcount
        # Policy 2: keep only the last N checks per service.
        # Use a correlated subquery instead of binding every kept id as a
        # separate parameter: the previous "id NOT IN (?,?,...)" form breaks
        # once CHECK_RETENTION_COUNT exceeds SQLite's bound-parameter limit
        # (SQLITE_MAX_VARIABLE_NUMBER, 999 in older builds).
        service_ids = [r[0] for r in conn.execute("SELECT id FROM services").fetchall()]
        for sid in service_ids:
            cur = conn.execute(
                """
                DELETE FROM checks
                WHERE service_id = ?
                  AND id NOT IN (
                      SELECT id FROM checks
                      WHERE service_id = ?
                      ORDER BY timestamp DESC
                      LIMIT ?
                  )
                """,
                (sid, sid, CHECK_RETENTION_COUNT),
            )
            deleted += cur.rowcount
    return deleted

View File

@@ -2,7 +2,7 @@
from apscheduler.schedulers.background import BackgroundScheduler
from app.checker import run_check
from app.models import get_all_services_for_scheduler
from app.models import get_all_services_for_scheduler, prune_checks_retention, rollup_old_checks
def _run_all_checks():
@@ -15,13 +15,28 @@ def _run_all_checks():
def start_scheduler():
"""Start the background scheduler. Uses interval jobs per service."""
scheduler = BackgroundScheduler()
_scheduled_ids = set()
def add_jobs():
def sync_jobs():
"""Only add/remove jobs when the service list changes."""
nonlocal _scheduled_ids
services = get_all_services_for_scheduler()
for svc in services:
job_id = f"service_{svc['id']}"
current_ids = {svc["id"] for svc in services}
svc_by_id = {svc["id"]: svc for svc in services}
# Remove jobs for deleted services
for sid in _scheduled_ids - current_ids:
job_id = f"service_{sid}"
if scheduler.get_job(job_id):
scheduler.remove_job(job_id)
_scheduled_ids.discard(sid)
# Add jobs only for services that don't have one yet
for sid in current_ids:
if sid in _scheduled_ids:
continue
svc = svc_by_id[sid]
job_id = f"service_{sid}"
interval = max(10, svc["interval_seconds"])
scheduler.add_job(
run_check,
@@ -30,12 +45,20 @@ def start_scheduler():
id=job_id,
args=[svc["id"], svc["target"], svc["protocol"]],
)
_scheduled_ids.add(sid)
# Run checks immediately on startup, then schedule
_run_all_checks()
add_jobs()
sync_jobs()
# Refresh job list every 60 seconds in case services were added
scheduler.add_job(add_jobs, "interval", seconds=60, id="refresh_jobs")
# Sync job list every 60 seconds (only adds/removes when services change)
scheduler.add_job(sync_jobs, "interval", seconds=60, id="sync_jobs")
# Roll up old checks into hourly buckets, then prune (every 15 min)
def rollup_and_prune():
rollup_old_checks()
prune_checks_retention()
scheduler.add_job(rollup_and_prune, "interval", minutes=15, id="prune_checks")
scheduler.start()

Binary file not shown.

View File

@@ -4,10 +4,15 @@ services:
app:
image: ${DOCKER_REGISTRY:-docker.io}/${DOCKER_IMAGE:-myapp}:${IMAGE_TAG:-latest}
container_name: jenkins-deploy-app
user: "1000:1000"
ports:
- "8080:8080"
volumes:
- ./data:/app/data
environment:
- VERSION=${IMAGE_TAG:-latest}
- SECRET_KEY=${SECRET_KEY}
- ADMIN_USER=${ADMIN_USER}
- ADMIN_PASSWORD=${ADMIN_PASSWORD}
# Optional: CHECK_RETENTION_COUNT=5000, CHECK_RETENTION_DAYS=30
restart: unless-stopped

View File

@@ -1,3 +1,4 @@
flask>=3.0
requests>=2.31
apscheduler>=3.10
flask>=3.0,<4
requests>=2.31,<3
apscheduler>=3.10,<4
flask-login>=0.6.3,<1

View File

@@ -56,6 +56,48 @@ header h1 a:hover {
font-size: 0.875rem;
}
.header-nav {
display: flex;
gap: 1rem;
margin-left: auto;
}
.header-nav a {
font-size: 0.875rem;
}
.error {
color: var(--down);
margin-bottom: 1rem;
}
.login-form {
display: flex;
flex-direction: column;
gap: 0.75rem;
max-width: 300px;
padding: 1rem;
background: var(--surface);
border-radius: 6px;
}
.login-form input {
padding: 0.5rem 0.75rem;
border: 1px solid var(--muted);
border-radius: 4px;
background: var(--bg);
color: var(--text);
}
.login-form button {
padding: 0.5rem 1rem;
background: var(--accent);
color: white;
border: none;
border-radius: 4px;
cursor: pointer;
}
main {
max-width: 900px;
width: 100%;
@@ -254,6 +296,55 @@ h2 {
text-overflow: ellipsis;
}
.pagination {
display: flex;
flex-wrap: wrap;
align-items: center;
justify-content: space-between;
gap: 1rem;
margin: 1rem 0;
padding: 0.75rem 0;
}
.pagination-info {
font-size: 0.875rem;
color: var(--muted);
}
.pagination-links {
display: flex;
flex-wrap: wrap;
align-items: center;
gap: 0.25rem;
}
.pagination-btn {
display: inline-block;
padding: 0.35rem 0.6rem;
font-size: 0.875rem;
background: var(--surface);
color: var(--text);
border: 1px solid var(--muted);
border-radius: 4px;
text-decoration: none;
}
.pagination-btn:hover {
border-color: var(--accent);
color: var(--accent);
}
.pagination-btn.pagination-current {
background: var(--accent);
border-color: var(--accent);
color: white;
}
.pagination-ellipsis {
padding: 0 0.25rem;
color: var(--muted);
}
.btn-delete {
padding: 0.25rem 0.5rem;
font-size: 0.8rem;

View File

@@ -10,6 +10,14 @@
<header>
<h1><a href="/">Status Monitor</a></h1>
<span class="version">v{{ version }}</span>
{% if current_user.is_authenticated %}
<nav class="header-nav">
{% if current_user.is_admin %}
<a href="{{ url_for('users') }}">Users</a>
{% endif %}
<a href="{{ url_for('logout') }}">Logout</a>
</nav>
{% endif %}
</header>
<main>
{% block content %}{% endblock %}

18
templates/login.html Normal file
View File

@@ -0,0 +1,18 @@
{% extends "base.html" %}
{% block title %}Login - Status Monitor{% endblock %}
{% block content %}
<h2>Login</h2>
{% if error %}
<p class="error">{{ error }}</p>
{% endif %}
<form method="post" action="{{ url_for('login') }}" class="login-form">
<input type="hidden" name="next" value="{{ request.args.get('next') or '' }}">
<label for="username">Username</label>
<input type="text" id="username" name="username" required autofocus>
<label for="password">Password</label>
<input type="password" id="password" name="password" required>
<button type="submit">Log in</button>
</form>
{% endblock %}

View File

@@ -16,11 +16,16 @@
<a href="{{ url_for('report', service_id=service.id, preset='24h') }}" class="preset-btn{% if preset == '24h' %} preset-active{% endif %}">Last 24h</a>
<a href="{{ url_for('report', service_id=service.id, preset='7d') }}" class="preset-btn{% if preset == '7d' %} preset-active{% endif %}">Last 7 days</a>
<a href="{{ url_for('report', service_id=service.id, preset='30d') }}" class="preset-btn{% if preset == '30d' %} preset-active{% endif %}">Last 30 days</a>
<a href="{{ url_for('report', service_id=service.id, preset='90d') }}" class="preset-btn{% if preset == '90d' %} preset-active{% endif %}">Last 90 days</a>
</div>
{% if period_label %}
<p class="period-label">Showing: {{ period_label }}</p>
{% endif %}
<form method="get" action="{{ url_for('report', service_id=service.id) }}" class="date-range-form">
<input type="hidden" name="preset" value="{{ preset or '' }}">
<input type="hidden" name="status" value="{{ status_filter or '' }}">
<input type="hidden" name="search" value="{{ search or '' }}">
<input type="hidden" name="per_page" value="{{ per_page }}">
<label>From</label>
<input type="datetime-local" name="from" value="{{ from_date }}" placeholder="Start (optional)">
<label>To</label>
@@ -71,12 +76,19 @@
<input type="hidden" name="preset" value="{{ preset or '' }}">
<input type="hidden" name="from" value="{{ from_date }}">
<input type="hidden" name="to" value="{{ to_date }}">
<input type="hidden" name="page" value="1">
<select name="status">
<option value="">All</option>
<option value="ok" {% if status_filter == 'ok' %}selected{% endif %}>OK only</option>
<option value="error" {% if status_filter == 'error' %}selected{% endif %}>Errors only</option>
</select>
<input type="text" name="search" value="{{ search }}" placeholder="Search error message...">
<select name="per_page">
<option value="10" {% if per_page == 10 %}selected{% endif %}>10 per page</option>
<option value="25" {% if per_page == 25 %}selected{% endif %}>25 per page</option>
<option value="50" {% if per_page == 50 %}selected{% endif %}>50 per page</option>
<option value="100" {% if per_page == 100 %}selected{% endif %}>100 per page</option>
</select>
<button type="submit">Filter</button>
</form>
<table class="checks-table">
@@ -110,6 +122,45 @@
</tbody>
</table>
{% if checks_total > 0 %}
<nav class="pagination">
<span class="pagination-info">
Showing {{ (page - 1) * per_page + 1 }}-{{ [page * per_page, checks_total] | min }} of {{ checks_total }}
</span>
<div class="pagination-links">
{% if page > 1 %}
<a href="{{ url_for('report', service_id=service.id, preset=preset or '', from=from_date, to=to_date, status=status_filter or '', search=search or '', per_page=per_page, page=page-1) }}" class="pagination-btn">Previous</a>
{% endif %}
{% if total_pages <= 7 %}
{% for p in range(1, total_pages + 1) %}
{% if p == page %}
<span class="pagination-btn pagination-current">{{ p }}</span>
{% else %}
<a href="{{ url_for('report', service_id=service.id, preset=preset or '', from=from_date, to=to_date, status=status_filter or '', search=search or '', per_page=per_page, page=p) }}" class="pagination-btn">{{ p }}</a>
{% endif %}
{% endfor %}
{% else %}
<a href="{{ url_for('report', service_id=service.id, preset=preset or '', from=from_date, to=to_date, status=status_filter or '', search=search or '', per_page=per_page, page=1) }}" class="pagination-btn">1</a>
            {% if page > 3 %}<span class="pagination-ellipsis">&hellip;</span>{% endif %}
{% for p in range([2, page - 1] | max, [total_pages, page + 1] | min + 1) %}
{% if p == page %}
<span class="pagination-btn pagination-current">{{ p }}</span>
{% else %}
<a href="{{ url_for('report', service_id=service.id, preset=preset or '', from=from_date, to=to_date, status=status_filter or '', search=search or '', per_page=per_page, page=p) }}" class="pagination-btn">{{ p }}</a>
{% endif %}
{% endfor %}
            {% if page < total_pages - 2 %}<span class="pagination-ellipsis">&hellip;</span>{% endif %}
{% if total_pages > 1 %}
<a href="{{ url_for('report', service_id=service.id, preset=preset or '', from=from_date, to=to_date, status=status_filter or '', search=search or '', per_page=per_page, page=total_pages) }}" class="pagination-btn">{{ total_pages }}</a>
{% endif %}
{% endif %}
{% if page < total_pages %}
<a href="{{ url_for('report', service_id=service.id, preset=preset or '', from=from_date, to=to_date, status=status_filter or '', search=search or '', per_page=per_page, page=page+1) }}" class="pagination-btn">Next</a>
{% endif %}
</div>
</nav>
{% endif %}
<p>
<a href="/">&larr; Back to Dashboard</a>
<span style="margin-left: 1rem;">

41
templates/users.html Normal file
View File

@@ -0,0 +1,41 @@
{% extends "base.html" %}
{% block title %}Users - Status Monitor{% endblock %}
{% block content %}
<h2>Users</h2>
{% if error %}
<p class="error">{{ error }}</p>
{% endif %}
<form method="post" action="{{ url_for('users') }}" class="add-form">
<input type="text" name="username" placeholder="Username" required>
<input type="password" name="password" placeholder="Password" required>
<button type="submit">Add User</button>
</form>
<table class="services-table">
<thead>
<tr>
<th>Username</th>
<th>Admin</th>
<th>Created</th>
</tr>
</thead>
<tbody>
{% for u in users %}
<tr>
<td>{{ u.username }}</td>
<td>{% if u.is_admin %}Yes{% else %}No{% endif %}</td>
<td>{{ u.created_at[:19] if u.created_at else '-' }}</td>
</tr>
{% else %}
<tr>
<td colspan="3">No users.</td>
</tr>
{% endfor %}
</tbody>
</table>
<p><a href="{{ url_for('dashboard') }}">&larr; Back to Dashboard</a></p>
{% endblock %}