Mirror of https://github.com/louislam/uptime-kuma.git
Synced 2025-09-12 22:47:00 +08:00

Compare commits (8 commits)
cb26b79498
dd5347b25e
733ce8ded5
16225acdbd
5dce44277f
c19eef9e04
9befdd70cc
8aebfd82d8
.github/workflows/auto-test.yml (vendored, 4 changes)
@@ -22,7 +22,7 @@ jobs:
strategy:
matrix:
os: [macos-latest, ubuntu-latest, windows-latest, ARM64]
node: [ 18, 20 ]
node: [ 18, 20, 22 ]
# See supported Node.js release schedule at https://nodejs.org/en/about/releases/

steps:
@@ -78,7 +78,7 @@ jobs:

e2e-test:
needs: [ ]
runs-on: ARM64
runs-on: e2e
steps:
- run: git config --global core.autocrlf false # Mainly for Windows
- uses: actions/checkout@v4
@@ -1,13 +0,0 @@
// Update info_json column to LONGTEXT mainly for MariaDB
exports.up = function (knex) {
    return knex.schema
        .alterTable("monitor_tls_info", function (table) {
            table.text("info_json", "longtext").alter();
        });
};

exports.down = function (knex) {
    return knex.schema.alterTable("monitor_tls_info", function (table) {
        table.text("info_json", "text").alter();
    });
};
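The removed file above is a standard Knex migration pair: up() widens info_json to LONGTEXT, down() reverts it to TEXT. A minimal sketch of applying such migrations programmatically, assuming a mysql2 connection with illustrative credentials; Uptime Kuma itself drives this via R.knex.migrate.latest inside Database.patch, as shown later in this diff:

// Minimal sketch, not Uptime Kuma's actual bootstrap; connection values are illustrative.
const knex = require("knex");

const db = knex({
    client: "mysql2",
    connection: { host: "127.0.0.1", user: "kuma", password: "secret", database: "kuma" },
});

async function applyMigrations() {
    // Runs every pending migration file in the directory, including
    // column alterations like the info_json LONGTEXT change above.
    await db.migrate.latest({ directory: "./db/knex_migrations" });
    await db.destroy();
}

applyMigrations().catch((err) => {
    console.error(err);
    process.exit(1);
});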
docker/debian-base.dockerfile

@@ -1,15 +1,5 @@
# Download Apprise deb package
FROM node:20-bookworm-slim AS download-apprise
WORKDIR /app
COPY ./extra/download-apprise.mjs ./download-apprise.mjs
RUN apt update && \
    apt --yes --no-install-recommends install curl && \
    npm install cheerio semver && \
    node ./download-apprise.mjs

# Base Image (Slim)
# If the image changed, the second stage image should be changed too
FROM node:20-bookworm-slim AS base2-slim
FROM node:22-bookworm-slim AS base2-slim
ARG TARGETPLATFORM

# Specify --no-install-recommends to skip unused dependencies, make the base much smaller!
@@ -37,9 +27,8 @@ RUN apt update && \
# apprise = for notifications (Install from the deb package, as the stable one is too old) (workaround for #4867)
# Switching to testing repo is no longer working, as the testing repo is not bookworm anymore.
# python3-paho-mqtt (#4859)
# TODO: no idea how to delete the deb file after installation as it becomes a layer already
COPY --from=download-apprise /app/apprise.deb ./apprise.deb
RUN apt update && \
RUN curl http://ftp.debian.org/debian/pool/main/a/apprise/apprise_1.8.0-2_all.deb --output apprise.deb && \
    apt update && \
    apt --yes --no-install-recommends install ./apprise.deb python3-paho-mqtt && \
    rm -rf /var/lib/apt/lists/* && \
    rm -f apprise.deb && \
docker/dockerfile

@@ -27,6 +27,7 @@ RUN mkdir ./data
# ⭐ Main Image
############################################
FROM $BASE_IMAGE AS release
USER node
WORKDIR /app

LABEL org.opencontainers.image.source="https://github.com/louislam/uptime-kuma"
@@ -45,7 +46,6 @@ CMD ["node", "server/server.js"]
# Rootless Image
############################################
FROM release AS rootless
USER node

############################################
# Mark as Nightly
extra/download-apprise.mjs

@@ -4,11 +4,8 @@
// Target file: the latest version of Apprise, which the format is apprise_{VERSION}_all.deb

import * as cheerio from "cheerio";
import semver from "semver";
import * as childProcess from "child_process";

const baseURL = "http://ftp.debian.org/debian/pool/main/a/apprise/";
const response = await fetch(baseURL);
const response = await fetch("http://ftp.debian.org/debian/pool/main/a/apprise/");

if (!response.ok) {
    throw new Error("Failed to fetch page of Apprise Debian repository.");
@@ -23,35 +20,13 @@ const linkElements = $("a");

// Filter the links which match apprise_{VERSION}_all.deb
const links = [];
const pattern = /apprise_(.*?)_all.deb/;

for (let i = 0; i < linkElements.length; i++) {
    const link = linkElements[i];
    if (link.attribs.href.match(pattern) && !link.attribs.href.includes("~")) {
        links.push({
            filename: link.attribs.href,
            version: link.attribs.href.match(pattern)[1],
        });
    if (link.attribs.href.match(/apprise_(.*?)_all.deb/) && !link.attribs.href.includes("~")) {
        links.push(link.attribs.href);
    }
}

console.log(links);

// semver compare and download
let latestLink = {
    filename: "",
    version: "0.0.0",
};

for (const link of links) {
    if (semver.gt(link.version, latestLink.version)) {
        latestLink = link;
    }
}

const downloadURL = baseURL + latestLink.filename;
console.log(`Downloading ${downloadURL}...`);
let result = childProcess.spawnSync("curl", [ downloadURL, "--output", "apprise.deb" ]);
console.log(result.stdout?.toString());
console.error(result.stderr?.toString());
process.exit(result.status !== null ? result.status : 1);
// TODO: semver compare and download?
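The removed logic above treats each Debian version string as semver and keeps the greatest one. A self-contained sketch of that selection step; the sample filenames are illustrative:

// Minimal sketch of the removed version-selection step.
import semver from "semver";

const links = [
    { filename: "apprise_1.7.6_all.deb", version: "1.7.6" },
    { filename: "apprise_1.8.0_all.deb", version: "1.8.0" },
];

let latestLink = { filename: "", version: "0.0.0" };
for (const link of links) {
    // Caveat: a Debian revision such as "1.8.0-2" parses as a semver
    // prerelease, so a plain "1.8.0" would rank above it.
    if (semver.gt(link.version, latestLink.version)) {
        latestLink = link;
    }
}

console.log(latestLink.filename); // apprise_1.8.0_all.deb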
extra/reset-migrate-aggregate-table-state.js (deleted)

@@ -1,24 +0,0 @@
const { R } = require("redbean-node");
const Database = require("../server/database");
const args = require("args-parser")(process.argv);
const { Settings } = require("../server/settings");

const main = async () => {
    console.log("Connecting the database");
    Database.initDataDir(args);
    await Database.connect(false, false, true);

    console.log("Deleting all data from aggregate tables");
    await R.exec("DELETE FROM stat_minutely");
    await R.exec("DELETE FROM stat_hourly");
    await R.exec("DELETE FROM stat_daily");

    console.log("Resetting the aggregate table state");
    await Settings.set("migrateAggregateTableState", "");

    await Database.close();
    console.log("Done");
};

main();
extra/test-backend.mjs (new file, 12 lines)
@@ -0,0 +1,12 @@
// If Node.js >= 22, run `npm run test-backend-node22`, otherwise run `npm run test-backend-node20`
import * as childProcess from "child_process";

const version = parseInt(process.version.slice(1));

console.log(`Node.js version: ${version}`);

if (version >= 22) {
    childProcess.execSync("npm run test-backend-node22", { stdio: "inherit" });
} else {
    childProcess.execSync("npm run test-backend-node20", { stdio: "inherit" });
}
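The version check relies on parseInt semantics: process.version is a string like v22.1.0, slice(1) strips the v, and parseInt stops reading at the first dot. A quick illustration; the version strings are just examples:

// parseInt consumes leading digits and ignores the remainder.
console.log(parseInt("v22.1.0".slice(1))); // 22
console.log(parseInt("v20.11.1".slice(1))); // 20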
package-lock.json (generated, 4236 changes)

File diff suppressed because it is too large.
package.json (15 changes)
@@ -27,7 +27,9 @@
"build": "vite build --config ./config/vite.config.js",
"test": "npm run test-backend && npm run test-e2e",
"test-with-build": "npm run build && npm test",
"test-backend": "cross-env TEST_BACKEND=1 node --test test/backend-test",
"test-backend": "node extra/test-backend.mjs",
"test-backend-node20": "cross-env TEST_BACKEND=1 node --test test/backend-test",
"test-backend-node22": "cross-env TEST_BACKEND=1 node --test test/backend-test/test-*.js && node --test test/backend-test/**/test-*.js",
"test-e2e": "playwright test --config ./config/playwright.config.js",
"test-e2e-ui": "playwright test --config ./config/playwright.config.js --ui --ui-port=51063",
"playwright-codegen": "playwright codegen localhost:3000 --save-storage=./private/e2e-auth.json",
@@ -38,8 +40,8 @@
"build-docker-base": "docker buildx build -f docker/debian-base.dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:base2 --target base2 . --push",
"build-docker-base-slim": "docker buildx build -f docker/debian-base.dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:base2-slim --target base2-slim . --push",
"build-docker-builder-go": "docker buildx build -f docker/builder-go.dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:builder-go . --push",
"build-docker-slim": "node ./extra/env2arg.js docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:next-slim -t louislam/uptime-kuma:2-slim -t louislam/uptime-kuma:$VERSION-slim --target release --build-arg BASE_IMAGE=louislam/uptime-kuma:base2-slim . --push",
"build-docker-full": "node ./extra/env2arg.js docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:next -t louislam/uptime-kuma:2 -t louislam/uptime-kuma:$VERSION --target release . --push",
"build-docker-slim": "node ./extra/env2arg.js docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:2-slim -t louislam/uptime-kuma:$VERSION-slim --target release --build-arg BASE_IMAGE=louislam/uptime-kuma:base2-slim . --push",
"build-docker-full": "node ./extra/env2arg.js docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:2 -t louislam/uptime-kuma:$VERSION --target release . --push",
"build-docker-nightly": "node ./extra/test-docker.js && npm run build && docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:nightly2 --target nightly . --push",
"build-docker-slim-rootless": "node ./extra/env2arg.js docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:2-slim-rootless -t louislam/uptime-kuma:$VERSION-slim-rootless --target rootless --build-arg BASE_IMAGE=louislam/uptime-kuma:base2-slim . --push",
"build-docker-full-rootless": "node ./extra/env2arg.js docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:2-rootless -t louislam/uptime-kuma:$VERSION-rootless --target rootless . --push",
@@ -68,8 +70,7 @@
"sort-contributors": "node extra/sort-contributors.js",
"quick-run-nightly": "docker run --rm --env NODE_ENV=development -p 3001:3001 louislam/uptime-kuma:nightly2",
"start-dev-container": "cd docker && docker-compose -f docker-compose-dev.yml up --force-recreate",
"rebase-pr-to-1.23.X": "node extra/rebase-pr.js 1.23.X",
"reset-migrate-aggregate-table-state": "node extra/reset-migrate-aggregate-table-state.js"
"rebase-pr-to-1.23.X": "node extra/rebase-pr.js 1.23.X"
},
"dependencies": {
"@grpc/grpc-js": "~1.8.22",
@@ -109,7 +110,7 @@
"jsonwebtoken": "~9.0.0",
"jwt-decode": "~3.1.2",
"kafkajs": "^2.2.4",
"knex": "~3.1.0",
"knex": "^2.4.2",
"limiter": "~2.1.0",
"liquidjs": "^10.7.0",
"marked": "^14.0.0",
@@ -117,7 +118,7 @@
"mongodb": "~4.17.1",
"mqtt": "~4.3.7",
"mssql": "~11.0.0",
"mysql2": "~3.11.3",
"mysql2": "~3.9.6",
"nanoid": "~3.3.4",
"net-snmp": "^3.11.2",
"node-cloudflared-tunnel": "~1.0.9",
server/database.js

@@ -6,11 +6,6 @@ const knex = require("knex");
const path = require("path");
const { EmbeddedMariaDB } = require("./embedded-mariadb");
const mysql = require("mysql2/promise");
const { Settings } = require("./settings");
const { UptimeCalculator } = require("./uptime-calculator");
const dayjs = require("dayjs");
const { SimpleMigrationServer } = require("./utils/simple-migration-server");
const KumaColumnCompiler = require("./utils/knex/lib/dialects/mysql2/schema/mysql2-columncompiler");

/**
 * Database & App Data Folder
@@ -199,14 +194,6 @@ class Database {
     * @returns {Promise<void>}
     */
    static async connect(testMode = false, autoloadModels = true, noLog = false) {
        // Patch "mysql2" knex client
        // Workaround: Tried extending the ColumnCompiler class, but it didn't work for unknown reasons, so I override the function via prototype
        const { getDialectByNameOrAlias } = require("knex/lib/dialects");
        const mysql2 = getDialectByNameOrAlias("mysql2");
        mysql2.prototype.columnCompiler = function () {
            return new KumaColumnCompiler(this, ...arguments);
        };

        const acquireConnectionTimeout = 120 * 1000;
        let dbConfig;
        try {
@@ -392,11 +379,9 @@ class Database {

    /**
     * Patch the database
     * @param {number} port Start the migration server for aggregate tables on this port if provided
     * @param {string} hostname Start the migration server for aggregate tables on this hostname if provided
     * @returns {Promise<void>}
     */
    static async patch(port = undefined, hostname = undefined) {
    static async patch() {
        // Still need to keep this for old versions of Uptime Kuma
        if (Database.dbConfig.type === "sqlite") {
            await this.patchSqlite();
@@ -406,23 +391,9 @@ class Database {
        // https://knexjs.org/guide/migrations.html
        // https://gist.github.com/NigelEarle/70db130cc040cc2868555b29a0278261
        try {
            // Disable foreign key check for SQLite
            // Known issue of knex: https://github.com/drizzle-team/drizzle-orm/issues/1813
            if (Database.dbConfig.type === "sqlite") {
                await R.exec("PRAGMA foreign_keys = OFF");
            }

            await R.knex.migrate.latest({
                directory: Database.knexMigrationsPath,
            });

            // Enable foreign key check for SQLite
            if (Database.dbConfig.type === "sqlite") {
                await R.exec("PRAGMA foreign_keys = ON");
            }

            await this.migrateAggregateTable(port, hostname);

        } catch (e) {
            // Allow missing patch files for downgrade or testing pr.
            if (e.message.includes("the following files are missing:")) {
@@ -740,173 +711,6 @@ class Database {
        }
    }

    /**
     * Migrate the old data in the heartbeat table to the new format (stat_daily, stat_hourly, stat_minutely)
     * It should be run once while upgrading V1 to V2
     *
     * Normally, it should be in transaction, but UptimeCalculator wasn't designed to be in transaction before that.
     * I don't want to heavily modify the UptimeCalculator, so it is not in transaction.
     * Run `npm run reset-migrate-aggregate-table-state` to reset, in case the migration is interrupted.
     * @param {number} port Start the migration server on this port if provided
     * @param {string} hostname Start the migration server on this hostname if provided
     * @returns {Promise<void>}
     */
    static async migrateAggregateTable(port, hostname = undefined) {
        log.debug("db", "Enter Migrate Aggregate Table function");

        // Add a setting for 2.0.0-dev users to skip this migration
        if (process.env.SET_MIGRATE_AGGREGATE_TABLE_TO_TRUE === "1") {
            log.warn("db", "SET_MIGRATE_AGGREGATE_TABLE_TO_TRUE is set to 1, skipping aggregate table migration forever (for 2.0.0-dev users)");
            await Settings.set("migrateAggregateTableState", "migrated");
        }

        let migrateState = await Settings.get("migrateAggregateTableState");

        // Skip if already migrated
        // If it is migrating, it possibly means the migration was interrupted, or the migration is in progress
        if (migrateState === "migrated") {
            log.debug("db", "Migrated aggregate table already, skip");
            return;
        } else if (migrateState === "migrating") {
            log.warn("db", "Aggregate table migration is already in progress, or it was interrupted");
            throw new Error("Aggregate table migration is already in progress");
        }

        /**
         * Start migration server for displaying the migration status
         * @type {SimpleMigrationServer}
         */
        let migrationServer;
        let msg;

        if (port) {
            migrationServer = new SimpleMigrationServer();
            await migrationServer.start(port, hostname);
        }

        log.info("db", "Migrating Aggregate Table");

        log.info("db", "Getting list of unique monitors");

        // Get a list of unique monitors from the heartbeat table, using raw sql
        let monitors = await R.getAll(`
            SELECT DISTINCT monitor_id
            FROM heartbeat
            ORDER BY monitor_id ASC
        `);

        // Stop if stat_* tables are not empty
        for (let table of [ "stat_minutely", "stat_hourly", "stat_daily" ]) {
            let countResult = await R.getRow(`SELECT COUNT(*) AS count FROM ${table}`);
            let count = countResult.count;
            if (count > 0) {
                log.warn("db", `Aggregate table ${table} is not empty, migration will not be started (Maybe you were using 2.0.0-dev?)`);
                await migrationServer?.stop();
                return;
            }
        }

        await Settings.set("migrateAggregateTableState", "migrating");

        let progressPercent = 0;
        let part = 100 / monitors.length;
        let i = 1;
        for (let monitor of monitors) {
            // Get a list of unique dates from the heartbeat table, using raw sql
            let dates = await R.getAll(`
                SELECT DISTINCT DATE(time) AS date
                FROM heartbeat
                WHERE monitor_id = ?
                ORDER BY date ASC
            `, [
                monitor.monitor_id
            ]);

            for (let date of dates) {
                // New Uptime Calculator
                let calculator = new UptimeCalculator();
                calculator.monitorID = monitor.monitor_id;
                calculator.setMigrationMode(true);

                // Get all the heartbeats for this monitor and date
                let heartbeats = await R.getAll(`
                    SELECT status, ping, time
                    FROM heartbeat
                    WHERE monitor_id = ?
                    AND DATE(time) = ?
                    ORDER BY time ASC
                `, [ monitor.monitor_id, date.date ]);

                if (heartbeats.length > 0) {
                    msg = `[DON'T STOP] Migrating monitor data ${monitor.monitor_id} - ${date.date} [${progressPercent.toFixed(2)}%][${i}/${monitors.length}]`;
                    log.info("db", msg);
                    migrationServer?.update(msg);
                }

                for (let heartbeat of heartbeats) {
                    await calculator.update(heartbeat.status, parseFloat(heartbeat.ping), dayjs(heartbeat.time));
                }

                progressPercent += (Math.round(part / dates.length * 100) / 100);

                // Lazy to fix the floating point issue, it is acceptable since it is just a progress bar
                if (progressPercent > 100) {
                    progressPercent = 100;
                }
            }

            i++;
        }

        msg = "Clearing non-important heartbeats";
        log.info("db", msg);
        migrationServer?.update(msg);

        await Database.clearHeartbeatData(true);
        await Settings.set("migrateAggregateTableState", "migrated");
        await migrationServer?.stop();

        if (monitors.length > 0) {
            log.info("db", "Aggregate Table Migration Completed");
        } else {
            log.info("db", "No data to migrate");
        }
    }

    /**
     * Remove all non-important heartbeats from heartbeat table, keep last 24-hour or {KEEP_LAST_ROWS} rows for each monitor
     * @param {boolean} detailedLog Log detailed information
     * @returns {Promise<void>}
     */
    static async clearHeartbeatData(detailedLog = false) {
        let monitors = await R.getAll("SELECT id FROM monitor");
        const sqlHourOffset = Database.sqlHourOffset();

        for (let monitor of monitors) {
            if (detailedLog) {
                log.info("db", "Deleting non-important heartbeats for monitor " + monitor.id);
            }
            await R.exec(`
                DELETE FROM heartbeat
                WHERE monitor_id = ?
                AND important = 0
                AND time < ${sqlHourOffset}
                AND id NOT IN (
                    SELECT id
                    FROM heartbeat
                    WHERE monitor_id = ?
                    ORDER BY time DESC
                    LIMIT ?
                )
            `, [
                monitor.id,
                -24,
                monitor.id,
                100,
            ]);
        }
    }

}

module.exports = Database;
server/jobs/clear-old-data.js

@@ -1,22 +1,21 @@
const { R } = require("redbean-node");
const { log } = require("../../src/util");
const { setSetting, setting } = require("../util-server");
const Database = require("../database");
const { Settings } = require("../settings");
const dayjs = require("dayjs");

const DEFAULT_KEEP_PERIOD = 365;
const DEFAULT_KEEP_PERIOD = 180;

/**
 * Clears old data from the heartbeat table and the stat_daily of the database.
 * Clears old data from the heartbeat table of the database.
 * @returns {Promise<void>} A promise that resolves when the data has been cleared.
 */

const clearOldData = async () => {
    await Database.clearHeartbeatData();
    let period = await Settings.get("keepDataPeriodDays");
    let period = await setting("keepDataPeriodDays");

    // Set Default Period
    if (period == null) {
        await Settings.set("keepDataPeriodDays", DEFAULT_KEEP_PERIOD, "general");
        await setSetting("keepDataPeriodDays", DEFAULT_KEEP_PERIOD, "general");
        period = DEFAULT_KEEP_PERIOD;
    }
@@ -26,28 +25,23 @@ const clearOldData = async () => {
        parsedPeriod = parseInt(period);
    } catch (_) {
        log.warn("clearOldData", "Failed to parse setting, resetting to default..");
        await Settings.set("keepDataPeriodDays", DEFAULT_KEEP_PERIOD, "general");
        await setSetting("keepDataPeriodDays", DEFAULT_KEEP_PERIOD, "general");
        parsedPeriod = DEFAULT_KEEP_PERIOD;
    }

    if (parsedPeriod < 1) {
        log.info("clearOldData", `Data deletion has been disabled as period is less than 1. Period is ${parsedPeriod} days.`);
    } else {

        log.debug("clearOldData", `Clearing Data older than ${parsedPeriod} days...`);

        const sqlHourOffset = Database.sqlHourOffset();

        try {
            // Heartbeat
            await R.exec("DELETE FROM heartbeat WHERE time < " + sqlHourOffset, [
                parsedPeriod * -24,
            ]);

            let timestamp = dayjs().subtract(parsedPeriod, "day").utc().startOf("day").unix();

            // stat_daily
            await R.exec("DELETE FROM stat_daily WHERE timestamp < ? ", [
                timestamp,
            ]);
            await R.exec(
                "DELETE FROM heartbeat WHERE time < " + sqlHourOffset,
                [ parsedPeriod * -24 ]
            );

            if (Database.dbConfig.type === "sqlite") {
                await R.exec("PRAGMA optimize;");
@@ -56,8 +50,6 @@ const clearOldData = async () => {
            log.error("clearOldData", `Failed to clear old data: ${e.message}`);
        }
    }

    log.debug("clearOldData", "Data cleared.");
};

module.exports = {
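One detail in the removed stat_daily cleanup: the cutoff is midnight UTC of the day parsedPeriod days ago, expressed as a unix timestamp. A standalone sketch of that computation; note the dayjs utc plugin must be loaded explicitly for .utc() to exist:

// Minimal sketch of the stat_daily cutoff shown above.
const dayjs = require("dayjs");
const utc = require("dayjs/plugin/utc");
dayjs.extend(utc);

const parsedPeriod = 365; // keep period, in days
const cutoff = dayjs().subtract(parsedPeriod, "day").utc().startOf("day").unix();

// Rows older than this boundary are the ones the removed query deleted:
// DELETE FROM stat_daily WHERE timestamp < ?
console.log(cutoff);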
server/server.js

@@ -1604,20 +1604,18 @@ let needSetup = false;

await server.start();

server.httpServer.listen(port, hostname, async () => {
server.httpServer.listen(port, hostname, () => {
    if (hostname) {
        log.info("server", `Listening on ${hostname}:${port}`);
    } else {
        log.info("server", `Listening on ${port}`);
    }
    await startMonitors();

    // Put this here. Start background jobs after the db and server is ready to prevent clear up during db migration.
    await initBackgroundJobs();

    startMonitors();
    checkVersion.startInterval();
});

await initBackgroundJobs();

// Start cloudflared at the end if configured
await cloudflaredAutoStart(cloudflaredToken);
@@ -1716,7 +1714,7 @@ async function initDatabase(testMode = false) {
    log.info("server", "Connected to the database");

    // Patch the database
    await Database.patch(port, hostname);
    await Database.patch();

    let jwtSecretBean = await R.findOne("setting", " `key` = ? ", [
        "jwtSecret",
@@ -1811,11 +1809,7 @@ async function startMonitors() {
    }

    for (let monitor of list) {
        try {
            await monitor.start(io);
        } catch (e) {
            log.error("monitor", e);
        }
        await monitor.start(io);
        // Give some delays, so all monitors won't make request at the same moment when just start the server.
        await sleep(getRandomInt(300, 1000));
    }
server/uptime-calculator.js

@@ -12,6 +12,7 @@ class UptimeCalculator {
     * @private
     * @type {{string:UptimeCalculator}}
     */

    static list = {};

    /**
@@ -54,15 +55,6 @@ class UptimeCalculator {
    lastHourlyStatBean = null;
    lastMinutelyStatBean = null;

    /**
     * For migration purposes.
     * @type {boolean}
     */
    migrationMode = false;

    statMinutelyKeepHour = 24;
    statHourlyKeepDay = 30;

    /**
     * Get the uptime calculator for a monitor
     * Initializes and returns the monitor if it does not exist
@@ -197,19 +189,16 @@ class UptimeCalculator {
    /**
     * @param {number} status status
     * @param {number} ping Ping
     * @param {dayjs.Dayjs} date Date (Only for migration)
     * @returns {dayjs.Dayjs} date
     * @throws {Error} Invalid status
     */
    async update(status, ping = 0, date) {
        if (!date) {
            date = this.getCurrentDate();
        }
    async update(status, ping = 0) {
        let date = this.getCurrentDate();

        let flatStatus = this.flatStatus(status);

        if (flatStatus === DOWN && ping > 0) {
            log.debug("uptime-calc", "The ping is not effective when the status is DOWN");
            log.warn("uptime-calc", "The ping is not effective when the status is DOWN");
        }

        let divisionKey = this.getMinutelyKey(date);
@@ -308,61 +297,47 @@ class UptimeCalculator {
}
await R.store(dailyStatBean);

let currentDate = this.getCurrentDate();

// For migration mode, we don't need to store old hourly and minutely data, but we need 30-day's hourly data
// Run anyway for non-migration mode
if (!this.migrationMode || date.isAfter(currentDate.subtract(this.statHourlyKeepDay, "day"))) {
let hourlyStatBean = await this.getHourlyStatBean(hourlyKey);
hourlyStatBean.up = hourlyData.up;
hourlyStatBean.down = hourlyData.down;
hourlyStatBean.ping = hourlyData.avgPing;
hourlyStatBean.pingMin = hourlyData.minPing;
hourlyStatBean.pingMax = hourlyData.maxPing;
{
// eslint-disable-next-line no-unused-vars
const { up, down, avgPing, minPing, maxPing, timestamp, ...extras } = hourlyData;
if (Object.keys(extras).length > 0) {
hourlyStatBean.extras = JSON.stringify(extras);
}
let hourlyStatBean = await this.getHourlyStatBean(hourlyKey);
hourlyStatBean.up = hourlyData.up;
hourlyStatBean.down = hourlyData.down;
hourlyStatBean.ping = hourlyData.avgPing;
hourlyStatBean.pingMin = hourlyData.minPing;
hourlyStatBean.pingMax = hourlyData.maxPing;
{
// eslint-disable-next-line no-unused-vars
const { up, down, avgPing, minPing, maxPing, timestamp, ...extras } = hourlyData;
if (Object.keys(extras).length > 0) {
hourlyStatBean.extras = JSON.stringify(extras);
}
await R.store(hourlyStatBean);
}
await R.store(hourlyStatBean);

// For migration mode, we don't need to store old hourly and minutely data, but we need 24-hour's minutely data
// Run anyway for non-migration mode
if (!this.migrationMode || date.isAfter(currentDate.subtract(this.statMinutelyKeepHour, "hour"))) {
let minutelyStatBean = await this.getMinutelyStatBean(divisionKey);
minutelyStatBean.up = minutelyData.up;
minutelyStatBean.down = minutelyData.down;
minutelyStatBean.ping = minutelyData.avgPing;
minutelyStatBean.pingMin = minutelyData.minPing;
minutelyStatBean.pingMax = minutelyData.maxPing;
{
// eslint-disable-next-line no-unused-vars
const { up, down, avgPing, minPing, maxPing, timestamp, ...extras } = minutelyData;
if (Object.keys(extras).length > 0) {
minutelyStatBean.extras = JSON.stringify(extras);
}
let minutelyStatBean = await this.getMinutelyStatBean(divisionKey);
minutelyStatBean.up = minutelyData.up;
minutelyStatBean.down = minutelyData.down;
minutelyStatBean.ping = minutelyData.avgPing;
minutelyStatBean.pingMin = minutelyData.minPing;
minutelyStatBean.pingMax = minutelyData.maxPing;
{
// eslint-disable-next-line no-unused-vars
const { up, down, avgPing, minPing, maxPing, timestamp, ...extras } = minutelyData;
if (Object.keys(extras).length > 0) {
minutelyStatBean.extras = JSON.stringify(extras);
}
await R.store(minutelyStatBean);
}
await R.store(minutelyStatBean);

// No need to remove old data in migration mode
if (!this.migrationMode) {
// Remove the old data
// TODO: Improvement: Convert it to a job?
log.debug("uptime-calc", "Remove old data");
await R.exec("DELETE FROM stat_minutely WHERE monitor_id = ? AND timestamp < ?", [
this.monitorID,
this.getMinutelyKey(currentDate.subtract(this.statMinutelyKeepHour, "hour")),
]);
// Remove the old data
log.debug("uptime-calc", "Remove old data");
await R.exec("DELETE FROM stat_minutely WHERE monitor_id = ? AND timestamp < ?", [
this.monitorID,
this.getMinutelyKey(date.subtract(24, "hour")),
]);

await R.exec("DELETE FROM stat_hourly WHERE monitor_id = ? AND timestamp < ?", [
this.monitorID,
this.getHourlyKey(currentDate.subtract(this.statHourlyKeepDay, "day")),
]);
}
await R.exec("DELETE FROM stat_hourly WHERE monitor_id = ? AND timestamp < ?", [
this.monitorID,
this.getHourlyKey(date.subtract(30, "day")),
]);

return date;
}
@@ -837,14 +812,6 @@ class UptimeCalculator {
        return dayjs.utc();
    }

    /**
     * For migration purposes.
     * @param {boolean} value Migration mode on/off
     * @returns {void}
     */
    setMigrationMode(value) {
        this.migrationMode = value;
    }
}

class UptimeDataResult {
server/utils/knex/lib/dialects/mysql2/schema/mysql2-columncompiler.js (deleted)

@@ -1,22 +0,0 @@
const ColumnCompilerMySQL = require("knex/lib/dialects/mysql/schema/mysql-columncompiler");
const { formatDefault } = require("knex/lib/formatter/formatterUtils");
const { log } = require("../../../../../../../src/util");

class KumaColumnCompiler extends ColumnCompilerMySQL {
    /**
     * Override defaultTo method to handle default value for TEXT fields
     * @param {any} value Value
     * @returns {string|void} Default value (Don't understand why it can return void or string, but it's the original code, lol)
     */
    defaultTo(value) {
        if (this.type === "text" && typeof value === "string") {
            log.debug("defaultTo", `${this.args[0]}: ${this.type} ${value} ${typeof value}`);
            // MySQL 8.0 is required and only if the value is written as an expression: https://dev.mysql.com/doc/refman/8.0/en/data-type-defaults.html
            // MariaDB 10.2 is required: https://mariadb.com/kb/en/text/
            return `default (${formatDefault(value, this.type, this.client)})`;
        }
        return super.defaultTo.apply(this, arguments);
    }
}

module.exports = KumaColumnCompiler;
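For context on what the removed override changes: MySQL does not accept a literal DEFAULT 'value' on TEXT columns, while MySQL 8.0+ and MariaDB 10.2+ accept the parenthesized expression form the override emits. A sketch of a schema definition that depends on it; the table and column names are illustrative:

// Minimal sketch: with the patched compiler active, this TEXT default
// compiles to `default ('{}')` rather than the rejected `default '{}'`.
async function createExampleTable(knexInstance) {
    await knexInstance.schema.createTable("example", (table) => {
        table.increments("id");
        table.text("info_json", "longtext").defaultTo("{}");
    });
}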
server/utils/simple-migration-server.js (deleted)

@@ -1,84 +0,0 @@
const express = require("express");
const http = require("node:http");
const { log } = require("../../src/util");

/**
 * SimpleMigrationServer
 * For displaying the migration status of the server
 * Also, it is used to let Docker healthcheck know the status of the server, as the main server is not started yet, healthcheck will think the server is down incorrectly.
 */
class SimpleMigrationServer {
    /**
     * Express app instance
     * @type {?Express}
     */
    app;

    /**
     * Server instance
     * @type {?Server}
     */
    server;

    /**
     * Response object
     * @type {?Response}
     */
    response;

    /**
     * Start the server
     * @param {number} port Port
     * @param {string} hostname Hostname
     * @returns {Promise<void>}
     */
    start(port, hostname) {
        this.app = express();
        this.server = http.createServer(this.app);

        this.app.get("/", (req, res) => {
            res.set("Content-Type", "text/plain");
            res.write("Migration is in progress, listening message...\n");
            if (this.response) {
                this.response.write("Disconnected\n");
                this.response.end();
            }
            this.response = res;
            // never ending response
        });

        return new Promise((resolve) => {
            this.server.listen(port, hostname, () => {
                if (hostname) {
                    log.info("migration", `Migration server is running on http://${hostname}:${port}`);
                } else {
                    log.info("migration", `Migration server is running on http://localhost:${port}`);
                }
                resolve();
            });
        });
    }

    /**
     * Update the message
     * @param {string} msg Message to update
     * @returns {void}
     */
    update(msg) {
        this.response?.write(msg + "\n");
    }

    /**
     * Stop the server
     * @returns {Promise<void>}
     */
    async stop() {
        this.response?.write("Finished, please refresh this page.\n");
        this.response?.end();
        await this.server?.close();
    }
}

module.exports = {
    SimpleMigrationServer,
};
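The class was driven from Database.migrateAggregateTable, shown earlier in this diff: start it before the long migration, stream progress lines to any connected browser, stop it when done. A minimal usage sketch; the require path, port, and messages are illustrative:

// Minimal sketch of how the removed class was driven during migration.
const { SimpleMigrationServer } = require("./server/utils/simple-migration-server");

async function demo() {
    const migrationServer = new SimpleMigrationServer();
    await migrationServer.start(3001);                // hostname is optional
    migrationServer.update("Migrating monitor 1...");  // streamed to any open tab
    migrationServer.update("Migrating monitor 2...");
    await migrationServer.stop();                      // asks the page to refresh
}

demo();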