Compare commits


6 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Louis Lam | 49940a9dad | Add jsonschema | 2023-10-11 03:18:29 +08:00 |
| Louis Lam | 5fa2fcb0d9 | Move port and hostname to the server object | 2023-10-10 03:15:10 +08:00 |
| Louis Lam | d4f9acee6a | Add api-spec.json5 | 2023-10-08 08:17:03 +08:00 |
| Louis Lam | 6d2f624242 | Update auth and skeleton of wrapper | 2023-10-08 05:33:08 +08:00 |
| Louis Lam | 5773eeb6df | Rename apiAuth to basicAuthMiddleware and it accepts API keys only | 2023-10-08 02:24:18 +08:00 |
| Louis Lam | 8dfe6c6ea9 | Bump socket.io from 4.6.X to 4.7.X and match the ws version with socket.io | 2023-10-07 21:12:55 +08:00 |
371 changed files with 17627 additions and 33692 deletions

.devcontainer/README.md

@@ -0,0 +1,28 @@
# Codespaces
You can modify Uptime Kuma in your browser without setting up a local development environment.
![image](https://github.com/louislam/uptime-kuma/assets/1336778/31d9f06d-dd0b-4405-8e0d-a96586ee4595)
1. Click `Code` -> `Create codespace on master`
2. Wait a few minutes until you see that two ports are exposed
3. Open the URL for port `3000` and check that it is working
![image](https://github.com/louislam/uptime-kuma/assets/1336778/909b2eb4-4c5e-44e4-ac26-6d20ed856e7f)
## Frontend
Since the frontend is using [Vite.js](https://vitejs.dev/), all changes in this area will be hot-reloaded.
You don't need to restart the frontend unless you add a new frontend dependency.
## Backend
The backend does not automatically hot-reload.
You will need to restart the backend after changing something using these steps:
1. Click `Terminal`
2. Click `Codespaces: server-dev` in the right panel
3. Press `Ctrl + C` to stop the server
4. Press `Up` to run `npm run start-server-dev`
![image](https://github.com/louislam/uptime-kuma/assets/1336778/e0c0a350-fe46-4588-9f37-e053c85834d1)


@@ -0,0 +1,23 @@
{
    "image": "mcr.microsoft.com/devcontainers/javascript-node:dev-18-bookworm",
    "features": {
        "ghcr.io/devcontainers/features/github-cli:1": {}
    },
    "updateContentCommand": "npm ci",
    "postCreateCommand": "",
    "postAttachCommand": {
        "frontend-dev": "npm run start-frontend-devcontainer",
        "server-dev": "npm run start-server-dev",
        "open-port": "gh codespace ports visibility 3001:public -c $CODESPACE_NAME"
    },
    "customizations": {
        "vscode": {
            "extensions": [
                "streetsidesoftware.code-spell-checker",
                "dbaeumer.vscode-eslint",
                "GitHub.copilot-chat"
            ]
        }
    },
    "forwardPorts": [3000, 3001]
}


@@ -1,6 +1,7 @@
/.idea
/node_modules
/data*
/cypress
/out
/test
/kubernetes
@@ -17,6 +18,7 @@ README.md
.vscode
.eslint*
.stylelint*
/.devcontainer
/.github
yarn.lock
app.json
@@ -28,6 +30,7 @@ SECURITY.md
tsconfig.json
.env
/tmp
/babel.config.js
/ecosystem.config.js
/extra/healthcheck.exe
/extra/healthcheck
@@ -35,10 +38,6 @@ tsconfig.json
/extra/push-examples
/extra/uptime-kuma-push
# Comment the following line if you want to rebuild the healthcheck binary
/extra/healthcheck-armv7
### .gitignore content (commented rules are duplicated)
#node_modules


@@ -1,7 +1,8 @@
module.exports = {
ignorePatterns: [
"test/*.js",
"server/modules/*",
"test/cypress",
"server/modules/apicache/*",
"src/util.js"
],
root: true,
@@ -18,13 +19,12 @@ module.exports = {
],
parser: "vue-eslint-parser",
parserOptions: {
parser: "@typescript-eslint/parser",
parser: "@babel/eslint-parser",
sourceType: "module",
requireConfigFile: false,
},
plugins: [
"jsdoc",
"@typescript-eslint",
"jsdoc"
],
rules: {
"yoda": "error",
@@ -83,7 +83,7 @@ module.exports = {
"checkLoops": false,
}],
"space-before-blocks": "warn",
//"no-console": "warn",
//'no-console': 'warn',
"no-extra-boolean-cast": "off",
"no-multiple-empty-lines": [ "warn", {
"max": 1,
@@ -95,8 +95,7 @@ module.exports = {
"no-unneeded-ternary": "error",
"array-bracket-newline": [ "error", "consistent" ],
"eol-last": [ "error", "always" ],
//"prefer-template": "error",
"template-curly-spacing": [ "warn", "never" ],
//'prefer-template': 'error',
"comma-dangle": [ "warn", "only-multiline" ],
"no-empty": [ "error", {
"allowEmptyCatch": true
@@ -149,20 +148,21 @@ module.exports = {
}
},
// Override for TypeScript
// Override for jest puppeteer
{
"files": [
"**/*.ts",
"**/*.spec.js",
"**/*.spec.jsx"
],
extends: [
"plugin:@typescript-eslint/recommended",
],
"rules": {
"jsdoc/require-returns-type": "off",
"jsdoc/require-param-type": "off",
"@typescript-eslint/no-explicit-any": "off",
"prefer-const": "off",
}
env: {
jest: true,
},
globals: {
page: true,
browser: true,
context: true,
jestPuppeteer: true,
},
}
]
};


@@ -6,7 +6,7 @@ body:
- type: checkboxes
id: no-duplicate-issues
attributes:
label: "⚠️ Please verify that this question has NOT been raised before."
label: "⚠️ Please verify that this bug has NOT been raised before."
description: "Search in the issues sections by clicking [HERE](https://github.com/louislam/uptime-kuma/issues?q=)"
options:
- label: "I checked and didn't find similar issue"
@@ -24,7 +24,7 @@ body:
required: true
attributes:
label: "📝 Describe your problem"
description: "Please walk us through it step by step. Include all important details and add screenshots where appropriate"
description: "Please walk us through it step by step."
placeholder: "Describe what are you asking for..."
- type: textarea
id: error-msg
@@ -56,20 +56,19 @@ body:
placeholder: "Ex. Google Chrome 95.0.4638.69"
validations:
required: true
- type: textarea
id: deployment-info
- type: input
id: docker-version
attributes:
label: "🖥️ Deployment Environment"
description: |
examples:
- **Runtime**: Docker 20.10.9 / nodejs 14.18.0 / K8S via ... v1.3.3 / ..
- **Database**: sqlite/embedded mariadb/external mariadb
- **Filesystem used to store the database on**: Windows/ZFS/btrfs/NFSv3 on a SSD/HDD/eMMC
- **number of monitors**: 42
value: |
- Runtime:
- Database:
- Filesystem used to store the database on:
- number of monitors:
label: "🐋 Docker Version"
description: "If running with Docker, which version are you running?"
placeholder: "Ex. Docker 20.10.9 / K8S / Podman"
validations:
required: true
required: false
- type: input
id: nodejs-version
attributes:
label: "🟩 NodeJS Version"
description: "If running with Node.js? which version are you running?"
placeholder: "Ex. 14.18.0"
validations:
required: false


@@ -3,14 +3,14 @@ description: "Submit a bug report to help us improve"
#title: "[Bug] "
labels: [bug]
body:
- type: textarea
id: related-issues
validations:
required: true
- type: checkboxes
id: no-duplicate-issues
attributes:
label: "📑 I have found these related issues/pull requests"
description: "Search related issues by clicking [HERE](https://github.com/louislam/uptime-kuma/issues?q=) and explain what the difference between them or explain that you are unable to find any related issues"
placeholder: "Related to #1 by also touching the ... system. They should not be merged because ..."
label: "⚠️ Please verify that this bug has NOT been raised before."
description: "Search in the issues sections by clicking [HERE](https://github.com/louislam/uptime-kuma/issues?q=)"
options:
- label: "I checked and didn't find similar issue"
required: true
- type: checkboxes
attributes:
label: "🛡️ Security Policy"
@@ -31,7 +31,7 @@ body:
required: true
attributes:
label: "👟 Reproduction steps"
description: "How do you trigger this bug? Please walk us through it step by step. Include all important details and add screenshots where appropriate"
description: "How do you trigger this bug? Please walk us through it step by step."
placeholder: "..."
- type: textarea
id: expected-behavior
@@ -73,23 +73,22 @@ body:
placeholder: "Ex. Google Chrome 95.0.4638.69"
validations:
required: true
- type: textarea
id: deployment-info
- type: input
id: docker-version
attributes:
label: "🖥️ Deployment Environment"
description: |
examples:
- **Runtime**: Docker 20.10.9 / nodejs 18.17.1 / K8S via ... v1.3.3 / ..
- **Database**: sqlite/embedded mariadb/external mariadb
- **Filesystem used to store the database on**: Windows/ZFS/btrfs/NFSv3 on a SSD/HDD/eMMC
- **number of monitors**: 42
value: |
- Runtime:
- Database:
- Filesystem used to store the database on:
- number of monitors:
label: "🐋 Docker Version"
description: "If running with Docker, which version are you running?"
placeholder: "Ex. Docker 20.10.9 / K8S / Podman"
validations:
required: true
required: false
- type: input
id: nodejs-version
attributes:
label: "🟩 NodeJS Version"
description: "If running with Node.js? which version are you running?"
placeholder: "Ex. 14.18.0"
validations:
required: false
- type: textarea
id: logs
attributes:


@@ -3,14 +3,14 @@ description: "Submit a proposal for a new feature"
#title: "[Feature] "
labels: [feature-request]
body:
- type: textarea
id: related-issues
validations:
required: true
- type: checkboxes
id: no-duplicate-issues
attributes:
label: "📑 I have found these related issues/pull requests"
description: "Search related issues by clicking [HERE](https://github.com/louislam/uptime-kuma/issues?q=) and explain what the difference between them or explain that you are unable to find any related issues"
placeholder: "Related to #1 by also touching the ... system. They should not be merged because ..."
label: "⚠️ Please verify that this feature request has NOT been suggested before."
description: "Search in the issues sections by clicking [HERE](https://github.com/louislam/uptime-kuma/issues?q=)"
options:
- label: "I checked and didn't find similar feature request"
required: true
- type: dropdown
id: feature-area
attributes:
@@ -18,17 +18,10 @@ body:
description: "What kind of feature request is this?"
multiple: true
options:
- API / automation options
- New notification-provider
- Change to existing notification-provider
- New monitor
- Change to existing monitor
- Dashboard
- Status-page
- Maintenance
- Deployment
- Certificate expiry
- Settings
- API
- New Notification
- New Monitor
- UI Feature
- Other
validations:
required: true


@@ -15,7 +15,7 @@ Please delete any options that are not relevant.
- Bug fix (non-breaking change which fixes an issue)
- User interface (UI)
- New feature (non-breaking change which adds functionality)
- Breaking change (a fix or feature that would cause existing functionality to not work as expected)
- Breaking change (fix or feature that would cause existing functionality to not work as expected)
- Other
- This change requires a documentation update
@@ -24,8 +24,9 @@ Please delete any options that are not relevant.
- [ ] My code follows the style guidelines of this project
- [ ] I ran ESLint and other linters for modified files
- [ ] I have performed a self-review of my own code and tested it
- [ ] I have commented my code, particularly in hard-to-understand areas (including JSDoc for methods)
- [ ] My changes generates no new warnings
- [ ] I have commented my code, particularly in hard-to-understand areas
(including JSDoc for methods)
- [ ] My changes generate no new warnings
- [ ] My code needed automated testing. I have added them (this is an optional task)
## Screenshots (if any)


@@ -5,11 +5,11 @@ name: Auto Test
on:
push:
branches: [ master, 1.23.X ]
branches: [ master ]
paths-ignore:
- '*.md'
pull_request:
branches: [ master, 1.23.X ]
branches: [ master, 2.0.X ]
paths-ignore:
- '*.md'
@@ -22,44 +22,46 @@ jobs:
strategy:
matrix:
os: [macos-latest, ubuntu-latest, windows-latest, ARM64]
node: [ 18, 20 ]
node: [ 14, 20.5 ]
# See supported Node.js release schedule at https://nodejs.org/en/about/releases/
steps:
- run: git config --global core.autocrlf false # Mainly for Windows
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Use Node.js ${{ matrix.node }}
uses: actions/setup-node@v4
uses: actions/setup-node@v3
with:
node-version: ${{ matrix.node }}
- run: npm install npm@9 -g
- run: npm install
- run: npm run build
- run: npm run test-backend
- run: npm test
env:
HEADLESS_TEST: 1
JUST_FOR_TEST: ${{ secrets.JUST_FOR_TEST }}
# As a lot of dev dependencies are not supported on ARMv7, we have to test it separately and just test if `npm ci --production` works
armv7-simple-test:
needs: [ ]
needs: [ check-linters ]
runs-on: ${{ matrix.os }}
timeout-minutes: 15
if: ${{ github.repository == 'louislam/uptime-kuma' }}
strategy:
matrix:
os: [ ARMv7 ]
node: [ 18, 20 ]
node: [ 14, 20 ]
# See supported Node.js release schedule at https://nodejs.org/en/about/releases/
steps:
- run: git config --global core.autocrlf false # Mainly for Windows
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Use Node.js ${{ matrix.node }}
uses: actions/setup-node@v4
uses: actions/setup-node@v3
with:
node-version: ${{ matrix.node }}
- run: npm install npm@9 -g
- run: npm ci --production
check-linters:
@@ -67,27 +69,42 @@ jobs:
steps:
- run: git config --global core.autocrlf false # Mainly for Windows
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Use Node.js 20
uses: actions/setup-node@v4
uses: actions/setup-node@v3
with:
node-version: 20
- run: npm install
- run: npm run lint:prod
- run: npm run lint
e2e-test:
needs: [ ]
runs-on: ARM64
# TODO: Temporarily disabled, as it cannot pass the tests in 2.0.0 yet
# e2e-tests:
# needs: [ check-linters ]
# runs-on: ubuntu-latest
# steps:
# - run: git config --global core.autocrlf false # Mainly for Windows
# - uses: actions/checkout@v3
#
# - name: Use Node.js 14
# uses: actions/setup-node@v3
# with:
# node-version: 14
# - run: npm install
# - run: npm run build
# - run: npm run cy:test
frontend-unit-tests:
needs: [ check-linters ]
runs-on: ubuntu-latest
steps:
- run: git config --global core.autocrlf false # Mainly for Windows
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Use Node.js 20
uses: actions/setup-node@v4
- name: Use Node.js 14
uses: actions/setup-node@v3
with:
node-version: 20
node-version: 14
- run: npm install
- run: npx playwright install
- run: npm run build
- run: npm run test-e2e
- run: npm run cy:run:unit


@@ -11,13 +11,13 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest]
node-version: [18]
node-version: [16]
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v4
uses: actions/setup-node@v3
with:
node-version: ${{ matrix.node-version }}
cache: 'npm'


@@ -1,43 +0,0 @@
name: "CodeQL"
on:
push:
branches: [ "master", "1.23.X"]
pull_request:
branches: [ "master", "1.23.X"]
schedule:
- cron: '16 22 * * 0'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
timeout-minutes: 360
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go', 'javascript-typescript' ]
steps:
- name: Checkout repository
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@v2
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
with:
category: "/language:${{matrix.language}}"


@@ -1,25 +0,0 @@
name: Merge Conflict Labeler

on:
  push:
    branches:
      - master
  pull_request_target:
    branches:
      - master
    types: [synchronize]

jobs:
  label:
    name: Labeling
    runs-on: ubuntu-latest
    if: ${{ github.repository == 'louislam/uptime-kuma' }}
    permissions:
      contents: read
      pull-requests: write
    steps:
      - name: Apply label
        uses: eps1lon/actions-label-merge-conflict@v3
        with:
          dirtyLabel: 'needs:resolve-merge-conflict'
          repoToken: '${{ secrets.GITHUB_TOKEN }}'


@@ -0,0 +1,27 @@
name: json-yaml-validate

on:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master
      - 2.0.X
  workflow_dispatch:

permissions:
  contents: read
  pull-requests: write # enable write permissions for pull request comments

jobs:
  json-yaml-validate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: json-yaml-validate
        id: json-yaml-validate
        uses: GrantBirki/json-yaml-validate@v1.3.0
        with:
          comment: "true" # enable comment mode
          exclude_file: ".github/config/exclude.txt" # gitignore style file for exclusions


@@ -1,4 +1,4 @@
name: 'Automatically close stale issues'
name: 'Automatically close stale issues and PRs'
on:
workflow_dispatch:
schedule:
@@ -9,34 +9,14 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v9
- uses: actions/stale@v7
with:
stale-issue-message: |-
We are clearing up our old `help`-issues and your issue has been open for 60 days with no activity.
If no comment is made and the stale label is not removed, this issue will be closed in 7 days.
days-before-stale: 60
days-before-close: 7
days-before-pr-stale: -1
days-before-pr-close: -1
stale-issue-message: 'We are clearing up our old issues and your ticket has been open for 3 months with no activity. Remove stale label or comment or this will be closed in 2 days.'
close-issue-message: 'This issue was closed because it has been stalled for 2 days with no activity.'
days-before-stale: 90
days-before-close: 2
days-before-pr-stale: 999999999
days-before-pr-close: 1
exempt-issue-labels: 'News,Medium,High,discussion,bug,doc,feature-request'
exempt-issue-assignees: 'louislam'
operations-per-run: 200
- uses: actions/stale@v9
with:
stale-issue-message: |-
This issue was marked as `cannot-reproduce` by a maintainer.
If an issue is non-reproducible, we cannot fix it, as we do not know what the underlying issue is.
If you have any ideas how we can reproduce this issue, we would love to hear them.
We don't have a good way to deal with truly unreproducible issues and are going to close this issue in a month.
If you think there might be other differences in our environment or in how we tried to reproduce this, we would appreciate any ideas.
close-issue-message: |-
This issue will be closed as no way to reproduce it has been found.
If you or somebody else finds a way to (semi-reliably) reproduce this, we can reopen the issue. ^^
days-before-stale: 180
days-before-close: 30
days-before-pr-stale: -1
days-before-pr-close: -1
any-of-issue-labels: 'cannot-reproduce'
operations-per-run: 200


@@ -1,43 +0,0 @@
name: validate

on:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master
      - 1.23.X
  workflow_dispatch:

permissions:
  contents: read
  pull-requests: write # enable write permissions for pull request comments

jobs:
  json-yaml-validate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: json-yaml-validate
        id: json-yaml-validate
        uses: GrantBirki/json-yaml-validate@v2.4.0
        with:
          comment: "true" # enable comment mode
          exclude_file: ".github/config/exclude.txt" # gitignore style file for exclusions

  # General validations
  validate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Use Node.js 20
        uses: actions/setup-node@v4
        with:
          node-version: 20

      - name: Validate language JSON files
        run: node ./extra/check-lang-json.js

      - name: Validate knex migrations filename
        run: node ./extra/check-knex-filenames.mjs

.gitignore

@@ -15,6 +15,9 @@ dist-ssr
/tmp
.env
cypress/videos
cypress/screenshots
/extra/healthcheck.exe
/extra/healthcheck
/extra/healthcheck-armv7


@@ -1,216 +1,94 @@
# Project Info
First of all, I want to thank everyone who has submitted issues or shared pull requests for Uptime Kuma.
I never thought the GitHub community would be so nice!
Because of this, I also never thought that other people would actually read and edit my code.
Parts of the code are not very well-structured or commented, sorry about that.
First of all, I want to thank everyone who made pull requests for Uptime Kuma. I never thought the GitHub Community would be so nice! Because of this, I also never thought that other people would actually read and edit my code. It is not very well structured or commented, sorry about that.
The project was created with `vite.js` and is written in `vue3`.
Our backend lives in the `server`-directory and mostly communicates via websockets.
Both frontend and backend share the same `package.json`.
The project was created with vite.js (vue3). Then I created a subdirectory called "server" for the server part. Both frontend and backend share the same package.json.
For production, the frontend is built into the `dist`-directory and the server (`express.js`) exposes the `dist` directory as the root of the endpoint.
For development, we run vite in development mode on another port.
The frontend code builds into "dist" directory. The server (express.js) exposes the "dist" directory as the root of the endpoint. This is how production is working.
## Key Technical Skills
- Node.js (You should know about promises, async/await, arrow functions, etc.)
- Socket.io
- SCSS
- Vue.js
- Bootstrap
- SQLite
## Directories
- `config` (dev config files)
- `data` (App data)
- `db` (Base database and migration scripts)
- `dist` (Frontend build)
- `docker` (Dockerfiles)
- `extra` (Extra useful scripts)
- `public` (Frontend resources for dev only)
- `server` (Server source code)
- `src` (Frontend source code)
- `test` (unit test)
- config (dev config files)
- data (App data)
- db (Base database and migration scripts)
- dist (Frontend build)
- docker (Dockerfiles)
- extra (Extra useful scripts)
- public (Frontend resources for dev only)
- server (Server source code)
- src (Frontend source code)
- test (unit test)
## Can I create a pull request for Uptime Kuma?
Yes or no, it depends on what you are trying to do.
Your time and our maintainers' time are both precious, and we don't want to waste either.
Yes or no, it depends on what you will try to do. Since I don't want to waste your time, be sure to **create an empty draft pull request or open an issue, so we can have a discussion first**. Especially for a large pull request or you don't know if it will be merged or not.
If any part of a process is unclear to you, you are likely not alone => please ask ^^
Here are some references:
Different guidelines exist for different types of pull requests (PRs):
- <details><summary><b>security fixes</b></summary>
<p>
Submitting security fixes is something that may put the community at risk.
Please read through our [security policy](SECURITY.md) and submit vulnerabilities via an [advisory](https://github.com/louislam/uptime-kuma/security/advisories/new) + [issue](https://github.com/louislam/uptime-kuma/issues/new?assignees=&labels=help&template=security.md) instead.
We encourage you to include a proposed fix if you know one, but this is not required.
Following the security policy allows us to properly test and fix the bug, and to notice whether changes to unrelated parts such as the documentation are necessary.
[**PLEASE SEE OUR SECURITY POLICY.**](SECURITY.md)
</p>
</details>
- <details><summary><b>small, non-breaking bug fixes</b></summary>
<p>
If you come across a bug and think you can solve it, we appreciate your work.
Please make sure that you follow these rules:
- keep the PR as small as possible, fix only one thing at a time => keeping it reviewable
- test that your code does what you claim it does.
<sub>Because maintainer time is precious, junior maintainers may merge uncontroversial PRs in this area.</sub>
</p>
</details>
- <details><summary><b>translations / internationalisation (i18n)</b></summary>
<p>
We use weblate to localise this project into many languages.
If you are unhappy with a translation, weblate is the best place to start.
On how to translate using weblate, please see [these instructions](https://github.com/louislam/uptime-kuma/blob/master/src/lang/README.md).
There are two cases in which a change cannot be done in weblate and requires a PR:
- A text may not be currently localisable. In this case, **adding a new language key** via `$t("languageKey")` might be necessary
- language keys need to be **added to `en.json`** to be visible in weblate. If this has not happened, a PR is appreciated.
- **Adding a new language** requires a new file, see [these instructions](https://github.com/louislam/uptime-kuma/blob/master/src/lang/README.md)
<sub>Because maintainer time is precious, junior maintainers may merge uncontroversial PRs in this area.</sub>
</p>
</details>
- <details><summary><b>new notification providers</b></summary>
<p>
To set up a new notification provider, these files need to be modified/created (a minimal sketch follows this list):
- `server/notification-providers/PROVIDER_NAME.js` is where the heart of the notification provider lives.
- Both `monitorJSON` and `heartbeatJSON` can be `null` for some events.
If both are `null`, this is a general testing message, but if just `heartbeatJSON` is `null`, this is a certificate-expiry notification.
- Please wrap the axios call into a
```js
try {
let result = await axios.post(...);
if (result.status === ...) ...
} catch (error) {
this.throwGeneralAxiosError(error);
}
```
- `server/notification.js` is where the backend of the notification provider needs to be registered.
*If you have an idea how we can skip this step, we would love to hear about it ^^*
- `src/components/NotificationDialog.vue` you need to decide if the provider is a regional or a global one and add it with a name to the respective list
- `src/components/notifications/PROVIDER_NAME.vue` is where the frontend of each provider lives.
Please make sure that you have:
- used `HiddenInput` for secret credentials
- included all the necessary helptexts/placeholders/.. to make sure the notification provider is simple to set up for new users.
- included all translations (`{{ $t("Translation key") }}`, [`i18n-t keypath="Translation key">`](https://vue-i18n.intlify.dev/guide/advanced/component.html)) in `src/lang/en.json` to enable our translators to translate this
- `src/components/notifications/index.js` is where the frontend of the provider needs to be registered.
*If you have an idea how we can skip this step, we would love to hear about it ^^*
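Putting the files above together, a minimal provider sketch (not an authoritative implementation) could look like the following. The provider name, the `exampleChatWebhookURL` field and the request body are hypothetical; only the `send(...)` shape and `throwGeneralAxiosError` follow the conventions described above:
```js
const NotificationProvider = require("./notification-provider");
const axios = require("axios");

class ExampleChat extends NotificationProvider {
    // Hypothetical provider name; it must match the name registered in the frontend list.
    name = "example-chat";

    /**
     * Send a notification message to the (hypothetical) Example Chat webhook.
     */
    async send(notification, msg, monitorJSON = null, heartbeatJSON = null) {
        try {
            // `exampleChatWebhookURL` is a made-up field that would be configured
            // in the provider's .vue form; axios throws on non-2xx responses.
            await axios.post(notification.exampleChatWebhookURL, {
                text: msg,
            });
            return "Sent Successfully.";
        } catch (error) {
            this.throwGeneralAxiosError(error);
        }
    }
}

module.exports = ExampleChat;
```
The same class then still needs to be registered in `server/notification.js` and given a form in `src/components/notifications/`, as listed above.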
### ✅ Usually accepted
Offering notifications is close to the core of what we are as an uptime monitor.
Therefore, making sure that they work is also really important.
Because testing notification providers is quite time-intensive, we mostly offload this onto the person contributing a notification provider.
To make sure you have tested the notification provider, please include screenshots of the following events in the pull-request description:
- `UP`/`DOWN`
- Certificate Expiry via https://expired.badssl.com/
- Testing (the test button on the notification provider setup page)
Using the following format is encouraged:
```md
| Event | Before | After |
|--------|--------|-------|
| `UP` | paste-image-here | paste-image-here |
| `DOWN` | paste-image-here | paste-image-here |
| Certificate-expiry | paste-image-here | paste-image-here |
| Testing | paste-image-here | paste-image-here |
```
- Bug fix
- Security fix
- Adding notification providers
- Adding new language files (see [these instructions](https://github.com/louislam/uptime-kuma/blob/master/src/lang/README.md))
- Adding new language keys: `$t("...")`
<sub>Because maintainer time is precious, junior maintainers may merge uncontroversial PRs in this area.</sub>
</p>
</details>
- <details><summary><b>new monitoring types</b></summary>
<p>
### ⚠️ Discussion required
To set up a new monitor type, these files need to be modified/created (a minimal sketch follows this list):
- `server/monitor-types/MONITORING_TYPE.js` is the core of each monitor.
the `async check(...)`-function should:
- throw an error with an actionable message for each fault that is detected
- in the happy path, set `heartbeat.msg` to a success message and set `heartbeat.status = UP`
- `server/uptime-kuma-server.js` is where the monitoring backend needs to be registered.
*If you have an idea how we can skip this step, we would love to hear about it ^^*
- `src/pages/EditMonitor.vue` is the shared frontend users interact with.
Please make sure that you have:
- used `HiddenInput` for secret credentials
- included all the necessary helptexts/placeholders/.. to make sure the monitor type is simple to set up for new users.
- included all translations (`{{ $t("Translation key") }}`, [`i18n-t keypath="Translation key">`](https://vue-i18n.intlify.dev/guide/advanced/component.html)) in `src/lang/en.json` to enable our translators to translate this
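To give a feel for the shape described above, here is a minimal, hypothetical sketch. The type name and the `probe` helper are made up; the `MonitorType` base class, the thrown-error contract and the `UP` constant follow the description above:
```js
const { MonitorType } = require("./monitor-type");
const { UP } = require("../../src/util");
const net = require("net");

class ExampleMonitorType extends MonitorType {
    // Hypothetical type name; it must match the option added in src/pages/EditMonitor.vue.
    name = "example";

    async check(monitor, heartbeat, _server) {
        const reachable = await probe(monitor.hostname, monitor.port);
        if (!reachable) {
            // Throwing marks the fault; keep the message actionable.
            throw new Error(`Cannot reach ${monitor.hostname}:${monitor.port}, please check the hostname and firewall rules`);
        }
        // Happy path: report success.
        heartbeat.msg = "OK";
        heartbeat.status = UP;
    }
}

/**
 * Hypothetical helper: resolves true if a TCP connection succeeds within 5 seconds.
 */
function probe(hostname, port) {
    return new Promise((resolve) => {
        const socket = net.createConnection({ host: hostname, port, timeout: 5000 });
        socket.on("connect", () => {
            socket.end();
            resolve(true);
        });
        socket.on("error", () => resolve(false));
        socket.on("timeout", () => {
            socket.destroy();
            resolve(false);
        });
    });
}

module.exports = { ExampleMonitorType };
```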
- Large pull requests
- New features
### ❌ Won't be merged
<sub>Because maintainer time is precious, junior maintainers may merge uncontroversial PRs in this area.</sub>
</p>
</details>
- <details><summary><b>new features/ major changes / breaking bugfixes</b></summary>
<p>
be sure to **create an empty draft pull request or open an issue, so we can have a discussion first**.
This is especially important for a large pull request or when you don't know if it will be merged or not.
<sub>Because of the large impact of this work, only senior maintainers may merge PRs in this area.</sub>
</p>
</details>
- A dedicated PR for translating existing languages (see [these instructions](https://github.com/louislam/uptime-kuma/blob/master/src/lang/README.md))
- Do not pass the auto-test
- Any breaking changes
- Duplicated pull requests
- Buggy
- UI/UX is not close to Uptime Kuma
- Modifications or deletions of existing logic without a valid reason.
- Adding functions that is completely out of scope
- Converting existing code into other programming languages
- Unnecessarily large code changes that are hard to review and cause conflicts with other PRs.
The following rules are essential for making your PR mergeable:
- Addressing multiple issues in one huge PR makes review more difficult and causes conflicts with other PRs. Please
- (if possible) **create one PR for one issue** or
- (if not possible) **explain which issues a PR addresses and why this PR should not be broken apart**
- Make sure your **PR passes our continuous integration**.
PRs will not be merged unless all CI-Checks are green.
- **Breaking changes** will not be merged, or will not be merged quickly, unless there is a good reason that was discussed beforehand.
Such changes require a major version release.
- **Test your code** before submitting a PR.
Buggy PRs will not be merged.
- Make sure the **UI/UX is close to Uptime Kuma**.
- **Think about the maintainability**:
Don't add functionality that is completely **out of scope**.
Keep in mind that we need to be able to maintain the functionality.
- Don't modify or delete existing logic without a valid reason.
- Don't convert existing code into other programming languages for no reason.
The above cases may not cover all possible situations.
I ([@louislam](https://github.com/louislam)) have the final say.
If your pull request does not meet my expectations, I will reject it, no matter how much time you spent on it.
Therefore, it is essential to have a discussion beforehand.
I (@louislam) have the final say. If your pull request does not meet my expectations, I will reject it, no matter how much time you spend on it. Therefore, it is essential to have a discussion beforehand.
I will assign your pull request to a [milestone](https://github.com/louislam/uptime-kuma/milestones) if I plan to review and merge it.
Please don't rush or ask for an ETA.
We have to understand the pull request, make sure it has no breaking changes and stick to the vision of this project, especially for large pull requests.
## I'd like to work on an issue. How do I do that?
We have found that assigning people to issues is management overhead that we don't need.
A short comment saying that you want to try your hand at this issue is appreciated, as it saves other devs time.
If you come across any problems during development, feel free to leave a comment describing what you are stuck on.
Also, please don't rush or ask for an ETA, because I have to understand the pull request, make sure it is no breaking changes and stick to my vision of this project, especially for large pull requests.
### Recommended Pull Request Guideline
Before diving deep into coding, it is preferred to have a discussion first by creating an empty pull request.
The rationale behind this is that we can align the direction and scope of the feature, eliminate conflicts with existing and planned work, and point out potential pitfalls early.
Before deep into coding, discussion first is preferred. Creating an empty pull request for discussion would be recommended.
1. Fork the project
2. Clone your fork repo to local
3. Create a new branch
4. Create an empty commit: `git commit -m "<YOUR TASK NAME>" --allow-empty`
5. Push to your fork repo
6. Prepare a pull request: https://github.com/louislam/uptime-kuma/compare
7. Write a proper description. You can mention @louislam in it, so @louislam will get the notification.
8. Create your pull request as a Draft
9. Wait for the discussion
1. Clone your fork repo to local
1. Create a new branch
1. Create an empty commit
`git commit -m "[empty commit] pull request for <YOUR TASK NAME>" --allow-empty`
1. Push to your fork repo
1. Create a pull request: https://github.com/louislam/uptime-kuma/compare
1. Write a proper description
1. Click "Change to draft"
1. Discussion
## Project Styles
I personally do not like something that requires a lot of configuration before you can finally start the app.
The goal is to make the Uptime Kuma installation as easy as installing a mobile app.
I personally do not like something that requires so many configurations before you can finally start the app. I hope Uptime Kuma installation will be as easy as like installing a mobile app.
- Easy to install for non-Docker users
- no native build dependency is needed (for `x86_64`/`armv7`/`arm64`)
- no extra configuration and
- no extra effort required to get it running
- Single container for Docker users
- no complex docker-compose file
- mapping the volume and exposing the port should be the only requirements
- Easy to install for non-Docker users, no native build dependency is needed (for x86_64/armv7/arm64), no extra config, and no extra effort required to get it running
- Single container for Docker users, no very complex docker-compose file. Just map the volume and expose the port, then good to go
- Settings should be configurable in the frontend. Environment variables are discouraged, unless it is related to startup such as `DATA_DIR`
- Easy to use
- The web UI styling should be consistent and nice
@@ -230,12 +108,18 @@ The goal is to make the Uptime Kuma installation as easy as installing a mobile
## Tools
- [`Node.js`](https://nodejs.org/) >= 18
- [`npm`](https://www.npmjs.com/) >= 9.3
- [`Node.js`](https://nodejs.org/) >= 14
- [`npm`](https://www.npmjs.com/) >= 8.5
- [`git`](https://git-scm.com/)
- IDE that supports [`ESLint`](https://eslint.org/) and EditorConfig (I am using [`IntelliJ IDEA`](https://www.jetbrains.com/idea/))
- A SQLite GUI tool (f.ex. [`SQLite Expert Personal`](https://www.sqliteexpert.com/download.html) or [`DBeaver Community`](https://dbeaver.io/download/))
### GitHub Codespace
If you don't want to set up a local environment, you can now develop on GitHub Codespaces; read more:
https://github.com/louislam/uptime-kuma/tree/master/.devcontainer
## Git Branches
- `master`: 2.X.X development. If you want to add a new feature, your pull request should base on this.
@@ -260,7 +144,7 @@ Port `3000` and port `3001` will be used.
npm run dev
```
But sometimes you may want to restart the server without restarting the frontend. In that case, you can run these commands in two terminals:
But sometimes, you would like to restart the server, but not the frontend, you can run these commands in two terminals:
```bash
npm run start-frontend-dev
@@ -271,25 +155,25 @@ npm run start-server-dev
It binds to `0.0.0.0:3001` by default.
The backend is an `express.js` server with `socket.io` integrated.
It uses `socket.io` to communicate with clients, and most server logic is encapsulated in the `socket.io` handlers.
`express.js` is also used to serve:
It is mainly a socket.io app + express.js.
- as an entry point for redirecting to a status page or the dashboard
- the frontend built files (`index.html`, `*.js`, `*.css`, etc.)
- internal APIs of the status page
express.js is used for:
### Structure in `/server/`
- entry point such as redirecting to a status page or the dashboard
- serving the frontend built files (index.html, .js and .css etc.)
- serving internal APIs of the status page
- `jobs/` (Jobs that are running in another process)
- `model/` (Object model, auto-mapping to the database table name)
- `modules/` (Modified 3rd-party modules)
- `monitor_types/` (Monitor Types)
- `notification-providers/` (individual notification logic)
- `routers/` (Express Routers)
- `socket-handler/` (Socket.io Handlers)
- `server.js` (Server entry point)
- `uptime-kuma-server.js` (UptimeKumaServer class, main logic should be here, but some still in `server.js`)
### Structure in /server/
- jobs/ (Jobs that are running in another process)
- model/ (Object model, auto-mapping to the database table name)
- modules/ (Modified 3rd-party modules)
- monitor_types (Monitor Types)
- notification-providers/ (individual notification logic)
- routers/ (Express Routers)
- socket-handler (Socket.io Handlers)
- server.js (Server entry point)
- uptime-kuma-server.js (UptimeKumaServer class, main logic should be here, but some still in `server.js`)
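Most of the handlers in `socket-handler/` follow the plain socket.io pattern of registering events with an acknowledgement callback. A hypothetical sketch (the file name, event name, payload and response shape are assumptions, not the project's actual API):
```js
// server/socket-handler/example-socket-handler.js (hypothetical file)
module.exports.exampleSocketHandler = (socket) => {
    socket.on("exampleAction", async (payload, callback) => {
        try {
            // ... perform the action with `payload` ...
            callback({
                ok: true,
                msg: "Done",
            });
        } catch (e) {
            callback({
                ok: false,
                msg: e.message,
            });
        }
    });
};
```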
## Frontend Dev Server
@@ -328,15 +212,14 @@ npm test
## Dependencies
Both frontend and backend share the same `package.json`.
However, the frontend dependencies are not used in the production environment, because the frontend is baked into the `dist` files. So:
Both frontend and backend share the same package.json. However, the frontend dependencies are eventually not used in the production environment, because it is usually also baked into dist files. So:
- Frontend dependencies = "devDependencies"
- Examples: `vue`, `chart.js`
- Examples: vue, chart.js
- Backend dependencies = "dependencies"
- Examples: `socket.io`, `sqlite3`
- Examples: socket.io, sqlite3
- Development dependencies = "devDependencies"
- Examples: `eslint`, `sass`
- Examples: eslint, sass
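In `package.json` terms, the split above looks roughly like this (the package versions are illustrative only):
```json
{
    "dependencies": {
        "socket.io": "~4.7.0",
        "sqlite3": "~5.1.0"
    },
    "devDependencies": {
        "vue": "~3.3.0",
        "chart.js": "~4.2.0",
        "eslint": "~8.0.0",
        "sass": "~1.60.0"
    }
}
```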
### Update Dependencies
@@ -348,9 +231,9 @@ If for security / bug / other reasons, a library must be updated, breaking chang
## Translations
Please add **all** the strings which are translatable to `src/lang/en.json` (if translation keys are omitted, they cannot be translated).
Please add **all** the strings which are translatable to `src/lang/en.json` (If translation keys are omitted, they can not be translated).
**Don't include any other languages in your initial pull request** (even if this is your mother tongue), to avoid merge-conflicts between weblate and `master`.
**Don't include any other languages in your initial Pull-Request** (even if this is your mother tongue), to avoid merge-conflicts between weblate and `master`.
The translations can then (after merging a PR into `master`) be translated by awesome people donating their language skills.
If you want to help by translating Uptime Kuma into your language, please visit the [instructions on how to translate using weblate](https://github.com/louislam/uptime-kuma/blob/master/src/lang/README.md).
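As a tiny sketch of what that means in practice (the key name here is hypothetical): a string becomes translatable once it is referenced via a translation key and that key exists in `src/lang/en.json`:
```js
// src/lang/en.json must contain the English source string, e.g.:
// "Example setting": "Example setting"
//
// In a component's template: {{ $t("Example setting") }}
// In script code, vue-i18n resolves the same key:
import { useI18n } from "vue-i18n";

export default {
    setup() {
        const { t } = useI18n();
        // Resolves to the en.json value unless the user's language overrides it.
        return { exampleLabel: t("Example setting") };
    },
};
```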
@@ -362,7 +245,7 @@ My mother language is not English and my grammar is not that great.
## Wiki
Since there is no way to make a pull request to the wiki, I have set up another repo to do that.
Since there is no way to make a pull request to wiki's repo, I have set up another repo to do that.
https://github.com/louislam/uptime-kuma-wiki
@@ -407,106 +290,54 @@ https://github.com/louislam/uptime-kuma-wiki
Check the latest issues and pull requests:
https://github.com/louislam/uptime-kuma/issues?q=sort%3Aupdated-desc
### What is a maintainer and what are their roles?
### Release Procedures
This project has multiple maintainers who specialise in different areas.
Currently, there are 3 maintainers:
1. Draft a release note
2. Make sure the repo is cleared
3. If the healthcheck is updated, remember to re-compile it: `npm run build-docker-builder-go`
4. `npm run release-final` with env vars: `VERSION` and `GITHUB_TOKEN`
5. Wait until the `Press any key to continue`
6. `git push`
7. Publish the release note as 1.X.X
8. Press any key to continue
9. Deploy to the demo server: `npm run deploy-demo-server`
| Person | Role | Main Area |
|-------------------|-------------------|------------------|
| `@louislam` | senior maintainer | major features |
| `@chakflying` | junior maintainer | fixing bugs |
| `@commanderstorm` | junior maintainer | issue-management |
Checking:
### Procedures
- Check all tags is fine on https://hub.docker.com/r/louislam/uptime-kuma/tags
- Try the Docker image with tag 1.X.X (Clean install / amd64 / arm64 / armv7)
- Try clean installation with Node.js
We have a few procedures we follow. These are documented here:
- <details><summary>Set up a Docker Builder</summary>
<p>
### Release Beta Procedures
- amd64 and armv7 are built locally.
- arm64 is built on a remote arm64 CPU, as the emulator is too slow and can no longer get through `npm ci`.
1. Add the public key to the remote server.
2. Add the remote context. The remote machine must be arm64 and have Docker CE installed.
```
docker context create oracle-arm64-jp --docker "host=ssh://root@100.107.174.88"
```
3. Create a new builder.
```
docker buildx create --name kuma-builder --platform linux/amd64,linux/arm/v7
docker buildx use kuma-builder
docker buildx inspect --bootstrap
```
4. Append the remote context to the builder.
```
docker buildx create --append --name kuma-builder --platform linux/arm64 oracle-arm64-jp
```
5. Verify the builder and check that it is using `kuma-builder`.
```
docker buildx inspect kuma-builder
docker buildx ls
```
</p>
</details>
- <details><summary>Release</summary>
<p>
1. Draft a release note, check "This is a pre-release"
2. Make sure the repo is cleared
3. `npm run release-beta` with env vars: `VERSION` and `GITHUB_TOKEN`
4. Wait until the `Press any key to continue`
5. Publish the release note as 1.X.X-beta.X
6. Press any key to continue
1. Draft a release note
2. Make sure the repo is cleared
3. If the healthcheck is updated, remember to re-compile it: `npm run build-docker-builder-go`
4. `npm run release-final` with env vars: `VERSION` and `GITHUB_TOKEN`
5. Wait until the `Press any key to continue`
6. `git push`
7. Publish the release note as `1.X.X`
8. Press any key to continue
9. Deploy to the demo server: `npm run deploy-demo-server`
### Release Wiki
These items need to be checked:
#### Setup Repo
- [ ] Check that all tags are fine on https://hub.docker.com/r/louislam/uptime-kuma/tags
- [ ] Try the Docker image with tag 1.X.X (Clean install / amd64 / arm64 / armv7)
- [ ] Try clean installation with Node.js
</p>
</details>
- <details><summary>Release Beta</summary>
<p>
```bash
git clone https://github.com/louislam/uptime-kuma-wiki.git
cd uptime-kuma-wiki
git remote add production https://github.com/louislam/uptime-kuma.wiki.git
```
1. Draft a release note, check `This is a pre-release`
2. Make sure the repo is cleared
3. `npm run release-beta` with env vars: `VERSION` and `GITHUB_TOKEN`
4. Wait until the `Press any key to continue`
5. Publish the release note as `1.X.X-beta.X`
6. Press any key to continue
</p>
</details>
- <details><summary>Release Wiki</summary>
<p>
#### Push to Production Wiki
**Setup Repo**
```bash
git clone https://github.com/louislam/uptime-kuma-wiki.git
cd uptime-kuma-wiki
git remote add production https://github.com/louislam/uptime-kuma.wiki.git
```
**Push to Production Wiki**
```bash
git pull
git push production master
```
</p>
</details>
- <details><summary>Change the base of a pull request such as <code>master</code> to <code>1.23.X</code></summary>
<p>
```bash
git rebase --onto <new parent> <old parent>
```
</p>
</details>
```bash
git pull
git push production master
```
## Useful Commands
Change the base of a pull request such as `master` to `1.23.X`
```bash
git rebase --onto <new parent> <old parent>
```


@@ -6,7 +6,7 @@
Uptime Kuma is an easy-to-use self-hosted monitoring tool.
<a target="_blank" href="https://github.com/louislam/uptime-kuma"><img src="https://img.shields.io/github/stars/louislam/uptime-kuma?style=flat" /></a> <a target="_blank" href="https://hub.docker.com/r/louislam/uptime-kuma"><img src="https://img.shields.io/docker/pulls/louislam/uptime-kuma" /></a> <a target="_blank" href="https://hub.docker.com/r/louislam/uptime-kuma"><img src="https://img.shields.io/docker/v/louislam/uptime-kuma/latest?label=docker%20image%20ver." /></a> <a target="_blank" href="https://github.com/louislam/uptime-kuma"><img src="https://img.shields.io/github/last-commit/louislam/uptime-kuma" /></a> <a target="_blank" href="https://opencollective.com/uptime-kuma"><img src="https://opencollective.com/uptime-kuma/total/badge.svg?label=Open%20Collective%20Backers&color=brightgreen" /></a>
<a target="_blank" href="https://github.com/louislam/uptime-kuma"><img src="https://img.shields.io/github/stars/louislam/uptime-kuma" /></a> <a target="_blank" href="https://hub.docker.com/r/louislam/uptime-kuma"><img src="https://img.shields.io/docker/pulls/louislam/uptime-kuma" /></a> <a target="_blank" href="https://hub.docker.com/r/louislam/uptime-kuma"><img src="https://img.shields.io/docker/v/louislam/uptime-kuma/latest?label=docker%20image%20ver." /></a> <a target="_blank" href="https://github.com/louislam/uptime-kuma"><img src="https://img.shields.io/github/last-commit/louislam/uptime-kuma" /></a> <a target="_blank" href="https://opencollective.com/uptime-kuma"><img src="https://opencollective.com/uptime-kuma/total/badge.svg?label=Open%20Collective%20Backers&color=brightgreen" /></a>
[![GitHub Sponsors](https://img.shields.io/github/sponsors/louislam?label=GitHub%20Sponsors)](https://github.com/sponsors/louislam) <a href="https://weblate.kuma.pet/projects/uptime-kuma/uptime-kuma/">
<img src="https://weblate.kuma.pet/widgets/uptime-kuma/-/svg-badge.svg" alt="Translation status" />
</a>
@@ -17,9 +17,9 @@ Uptime Kuma is an easy-to-use self-hosted monitoring tool.
Try it!
Demo Server (Location: Frankfurt - Germany): https://demo.kuma.pet/start-demo
- Tokyo Demo Server: https://demo.uptime.kuma.pet (Sponsored by [Uptime Kuma Sponsors](https://github.com/louislam/uptime-kuma#%EF%B8%8F-sponsors))
It is a temporary live demo; all data will be deleted after 10 minutes. Sponsored by [Uptime Kuma Sponsors](https://github.com/louislam/uptime-kuma#%EF%B8%8F-sponsors).
It is a temporary live demo, all data will be deleted after 10 minutes. Use the one that is closer to you, but I suggest that you should install and try it out for the best demo experience.
## ⭐ Features
@@ -43,17 +43,9 @@ It is a temporary live demo, all data will be deleted after 10 minutes. Sponsore
docker run -d --restart=always -p 3001:3001 -v uptime-kuma:/app/data --name uptime-kuma louislam/uptime-kuma:1
```
Uptime Kuma is now running on <http://0.0.0.0:3001>.
⚠️ Please use a **local volume** only. Other types such as NFS are not supported.
> [!WARNING]
> File Systems like **NFS** (Network File System) are **NOT** supported. Please map to a local directory or volume.
> [!NOTE]
> If you want to limit exposure to localhost (without exposing port for other users or to use a [reverse proxy](https://github.com/louislam/uptime-kuma/wiki/Reverse-Proxy)), you can expose the port like this:
>
> ```bash
> docker run -d --restart=always -p 127.0.0.1:3001:3001 -v uptime-kuma:/app/data --name uptime-kuma louislam/uptime-kuma:1
> ```
Uptime Kuma is now running on http://localhost:3001
### 💪🏻 Non-Docker
@@ -63,12 +55,15 @@ Requirements:
- ✅ Major Linux distros such as Debian, Ubuntu, CentOS, Fedora and ArchLinux etc.
- ✅ Windows 10 (x64), Windows Server 2012 R2 (x64) or higher
- ❌ Replit / Heroku
- [Node.js](https://nodejs.org/en/download/) 18 / 20.4
- [npm](https://docs.npmjs.com/cli/) 9
- [Node.js](https://nodejs.org/en/download/) 14 / 16 / 18 / 20.4
- [npm](https://docs.npmjs.com/cli/) >= 7
- [Git](https://git-scm.com/downloads)
- [pm2](https://pm2.keymetrics.io/) - For running Uptime Kuma in the background
```bash
# Update your npm
npm install npm@9 -g
git clone https://github.com/louislam/uptime-kuma.git
cd uptime-kuma
npm run setup
@@ -96,6 +91,10 @@ pm2 monit
pm2 save && pm2 startup
```
### Windows Portable (x64)
https://github.com/louislam/uptime-kuma/releases/download/1.23.1/uptime-kuma-windows-x64-portable-1.23.1-2.zip
### Advanced Installation
If you need more options or need to browse via a reverse proxy, please read:
@@ -114,6 +113,10 @@ I will assign requests/issues to the next milestone.
https://github.com/louislam/uptime-kuma/milestones
Project Plan:
https://github.com/users/louislam/projects/4/views/1
## ❤️ Sponsors
Thank you so much! (GitHub Sponsors will be updated manually. OpenCollective sponsors will be updated automatically; the list is cached by GitHub, so it may need some time to update.)
@@ -140,33 +143,28 @@ Telegram Notification Sample:
## Motivation
- I was looking for a self-hosted monitoring tool like "Uptime Robot", but it is hard to find a suitable one. One of the closest ones is statping. Unfortunately, it is not stable and no longer maintained.
- Wanted to build a fancy UI.
- I was looking for a self-hosted monitoring tool like "Uptime Robot", but it is hard to find a suitable one. One of the close ones is statping. Unfortunately, it is not stable and no longer maintained.
- Want to build a fancy UI.
- Learn Vue 3 and vite.js.
- Show the power of Bootstrap 5.
- Try to use WebSocket with SPA instead of a REST API.
- Try to use WebSocket with SPA instead of REST API.
- Deploy my first Docker image to Docker Hub.
If you love this project, please consider giving it a ⭐.
If you love this project, please consider giving me a ⭐.
## 🗣️ Discussion / Ask for Help
⚠️ For any general or technical questions, please don't send me an email, as I am unable to provide support in that manner. I will not respond to questions asked via email.
⚠️ For any general or technical questions, please don't send me an email, as I am unable to provide support in that manner. I will not respond if you asked such questions.
I recommend using Google, GitHub Issues, or Uptime Kuma's subreddit for finding answers to your question. If you cannot find the information you need, feel free to ask:
I recommend using Google, GitHub Issues, or Uptime Kuma's Subreddit for finding answers to your question. If you cannot find the information you need, feel free to ask:
- [GitHub Issues](https://github.com/louislam/uptime-kuma/issues)
- [Subreddit (r/UptimeKuma)](https://www.reddit.com/r/UptimeKuma/)
- [Subreddit r/Uptime kuma](https://www.reddit.com/r/UptimeKuma/)
My Reddit account: [u/louislamlam](https://reddit.com/u/louislamlam)
You can mention me if you ask a question on the subreddit.
My Reddit account: [u/louislamlam](https://reddit.com/u/louislamlam).
You can mention me if you ask a question on Reddit.
## Contributions
### Create Pull Requests
We DO NOT accept all types of pull requests and do not want to waste your time. Please be sure that you have read and followed the pull request rules:
[CONTRIBUTING.md#can-i-create-a-pull-request-for-uptime-kuma](https://github.com/louislam/uptime-kuma/blob/master/CONTRIBUTING.md#can-i-create-a-pull-request-for-uptime-kuma)
## Contribute
### Test Pull Requests
@@ -190,6 +188,8 @@ If you want to translate Uptime Kuma into your language, please visit [Weblate R
### Spelling & Grammar
Feel free to correct the grammar in the documentation or code.
My mother language is not English and my grammar is not that great.
My mother language is not english and my grammar is not that great.
### Create Pull Requests
If you want to modify Uptime Kuma, please read this guide and follow the rules here: https://github.com/louislam/uptime-kuma/blob/master/CONTRIBUTING.md


@@ -3,7 +3,7 @@
## Reporting a Vulnerability
1. Please report security issues to https://github.com/louislam/uptime-kuma/security/advisories/new.
2. Please also create an empty security issue to alert me, as GitHub Advisories do not send a notification, I probably will miss it without this. https://github.com/louislam/uptime-kuma/issues/new?assignees=&labels=help&template=security.md
1. Please also create an empty security issue to alert me, as GitHub Advisories do not send a notification, I probably will miss it without this. https://github.com/louislam/uptime-kuma/issues/new?assignees=&labels=help&template=security.md
Do not use the public issue tracker or discuss it in public as it will cause more damage.
@@ -19,12 +19,12 @@ You should use or upgrade to the latest version of Uptime Kuma. All `1.X.X` vers
### Upgradable Docker Tags
| Tag | Supported |
|-|-|
| Tag | Supported |
| ------- | ------------------ |
| 1 | :white_check_mark: |
| 1-debian | :white_check_mark: |
| latest | :white_check_mark: |
| debian | :white_check_mark: |
| 1-alpine | ⚠️ Deprecated |
| alpine | ⚠️ Deprecated |
| All other tags | ❌ |
| All other tags | ❌ |

babel.config.js

@@ -0,0 +1,7 @@
const config = {};

if (process.env.TEST_FRONTEND) {
    config.presets = [ "@babel/preset-env" ];
}

module.exports = config;


@@ -1,9 +0,0 @@
services:
  uptime-kuma:
    image: louislam/uptime-kuma:1
    volumes:
      - ./data:/app/data
    ports:
      # <Host Port>:<Container Port>
      - 3001:3001
    restart: unless-stopped

config/cypress.config.js

@@ -0,0 +1,28 @@
const { defineConfig } = require("cypress");

module.exports = defineConfig({
    projectId: "vyjuem",
    e2e: {
        experimentalStudio: true,
        setupNodeEvents(on, config) {
        },
        fixturesFolder: "test/cypress/fixtures",
        screenshotsFolder: "test/cypress/screenshots",
        videosFolder: "test/cypress/videos",
        downloadsFolder: "test/cypress/downloads",
        supportFile: "test/cypress/support/e2e.js",
        baseUrl: "http://localhost:3002",
        defaultCommandTimeout: 10000,
        pageLoadTimeout: 60000,
        viewportWidth: 1920,
        viewportHeight: 1080,
        specPattern: [
            "test/cypress/e2e/setup.cy.js",
            "test/cypress/e2e/**/*.js"
        ],
    },
    env: {
        baseUrl: "http://localhost:3002",
    },
});


@@ -0,0 +1,10 @@
const { defineConfig } = require("cypress");

module.exports = defineConfig({
    e2e: {
        supportFile: false,
        specPattern: [
            "test/cypress/unit/**/*.js"
        ],
    }
});


@@ -1,66 +0,0 @@
import { defineConfig, devices } from "@playwright/test";

const port = 30001;

export const url = `http://localhost:${port}`;

export default defineConfig({
    // Look for test files in the "tests" directory, relative to this configuration file.
    testDir: "../test/e2e/specs",
    outputDir: "../private/playwright-test-results",
    fullyParallel: false,
    locale: "en-US",

    // Fail the build on CI if you accidentally left test.only in the source code.
    forbidOnly: !!process.env.CI,

    // Retry on CI only.
    retries: process.env.CI ? 2 : 0,

    // Opt out of parallel tests on CI.
    workers: 1,

    // Reporter to use
    reporter: [
        [
            "html", {
                outputFolder: "../private/playwright-report",
                open: "never",
            }
        ],
    ],

    use: {
        // Base URL to use in actions like `await page.goto('/')`.
        baseURL: url,

        // Collect trace when retrying the failed test.
        trace: "on-first-retry",
    },

    // Configure projects for major browsers.
    projects: [
        {
            name: "run-once setup",
            testMatch: /setup-process\.once\.js/,
            use: { ...devices["Desktop Chrome"] },
        },
        {
            name: "specs",
            use: { ...devices["Desktop Chrome"] },
            dependencies: [ "run-once setup" ],
        },
        /*
        {
            name: "firefox",
            use: { browserName: "firefox" }
        },*/
    ],

    // Run your local dev server before starting the tests.
    webServer: {
        command: `node extra/remove-playwright-test-data.js && cross-env NODE_ENV=development node server/server.js --port=${port} --data-dir=./data/playwright-test`,
        url,
        reuseExistingServer: false,
        cwd: "../",
    },
});


@@ -1,8 +1,9 @@
import legacy from "@vitejs/plugin-legacy";
import vue from "@vitejs/plugin-vue";
import { defineConfig } from "vite";
import visualizer from "rollup-plugin-visualizer";
import viteCompression from "vite-plugin-compression";
import VueDevTools from "vite-plugin-vue-devtools";
import commonjs from "vite-plugin-commonjs";
const postCssScss = require("postcss-scss");
const postcssRTLCSS = require("postcss-rtlcss");
@@ -16,10 +17,16 @@ export default defineConfig({
},
define: {
"FRONTEND_VERSION": JSON.stringify(process.env.npm_package_version),
"process.env": {},
"DEVCONTAINER": JSON.stringify(process.env.DEVCONTAINER),
"GITHUB_CODESPACES_PORT_FORWARDING_DOMAIN": JSON.stringify(process.env.GITHUB_CODESPACES_PORT_FORWARDING_DOMAIN),
"CODESPACE_NAME": JSON.stringify(process.env.CODESPACE_NAME),
},
plugins: [
commonjs(),
vue(),
legacy({
targets: [ "since 2015" ],
}),
visualizer({
filename: "tmp/dist-stats.html"
}),
@@ -31,7 +38,6 @@ export default defineConfig({
algorithm: "brotliCompress",
filter: viteCompressionFilter,
}),
VueDevTools(),
],
css: {
postcss: {


@@ -275,7 +275,7 @@ async function createTables() {
table.boolean("active").notNullable().defaultTo(true);
table.integer("user_id").unsigned();
table.boolean("is_default").notNullable().defaultTo(false);
table.text("config", "longtext");
table.text("config");
});
// monitor_notification
@@ -318,10 +318,7 @@ async function createTables() {
// monitor_tls_info
await knex.schema.createTable("monitor_tls_info", (table) => {
table.increments("id");
table.integer("monitor_id").unsigned().notNullable()
.references("id").inTable("monitor")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.integer("monitor_id").unsigned().notNullable(); //TODO: no fk ?
table.text("info_json");
});
@@ -496,11 +493,8 @@ ALTER TABLE monitor
await knex.schema.table("monitor", function (table) {
table.string("kafka_producer_topic", 255);
table.text("kafka_producer_brokers");
// patch-fix-kafka-producer-booleans.sql
table.boolean("kafka_producer_ssl").defaultTo(0).notNullable();
table.boolean("kafka_producer_allow_auto_topic_creation").defaultTo(0).notNullable();
table.integer("kafka_producer_ssl");
table.string("kafka_producer_allow_auto_topic_creation", 255);
table.text("kafka_producer_sasl_options");
table.text("kafka_producer_message");
});

View File

@@ -1,15 +0,0 @@
exports.up = function (knex) {
// Add new column heartbeat.retries
return knex.schema
.alterTable("heartbeat", function (table) {
table.integer("retries").notNullable().defaultTo(0);
});
};
exports.down = function (knex) {
return knex.schema
.alterTable("heartbeat", function (table) {
table.dropColumn("retries");
});
};

View File

@@ -1,16 +0,0 @@
exports.up = function (knex) {
// Add new column monitor.mqtt_check_type
return knex.schema
.alterTable("monitor", function (table) {
table.string("mqtt_check_type", 255).notNullable().defaultTo("keyword");
});
};
exports.down = function (knex) {
// Drop column monitor.mqtt_check_type
return knex.schema
.alterTable("monitor", function (table) {
table.dropColumn("mqtt_check_type");
});
};

View File

@@ -1,14 +0,0 @@
exports.up = function (knex) {
// update monitor.push_token to 32 length
return knex.schema
.alterTable("monitor", function (table) {
table.string("push_token", 32).alter();
});
};
exports.down = function (knex) {
return knex.schema
.alterTable("monitor", function (table) {
table.string("push_token", 20).alter();
});
};

View File

@@ -1,21 +0,0 @@
exports.up = function (knex) {
return knex.schema
.createTable("remote_browser", function (table) {
table.increments("id");
table.string("name", 255).notNullable();
table.string("url", 255).notNullable();
table.integer("user_id").unsigned();
}).alterTable("monitor", function (table) {
// Add new column monitor.remote_browser
table.integer("remote_browser").nullable().defaultTo(null).unsigned()
.index()
.references("id")
.inTable("remote_browser");
});
};
exports.down = function (knex) {
return knex.schema.dropTable("remote_browser").alterTable("monitor", function (table) {
table.dropColumn("remote_browser");
});
};

View File

@@ -1,12 +0,0 @@
exports.up = function (knex) {
return knex.schema
.alterTable("status_page", function (table) {
table.integer("auto_refresh_interval").defaultTo(300).unsigned();
});
};
exports.down = function (knex) {
return knex.schema.alterTable("status_page", function (table) {
table.dropColumn("auto_refresh_interval");
});
};

View File

@@ -1,24 +0,0 @@
exports.up = function (knex) {
return knex.schema
.alterTable("stat_daily", function (table) {
table.float("ping_min").notNullable().defaultTo(0).comment("Minimum ping during this period in milliseconds");
table.float("ping_max").notNullable().defaultTo(0).comment("Maximum ping during this period in milliseconds");
})
.alterTable("stat_minutely", function (table) {
table.float("ping_min").notNullable().defaultTo(0).comment("Minimum ping during this period in milliseconds");
table.float("ping_max").notNullable().defaultTo(0).comment("Maximum ping during this period in milliseconds");
});
};
exports.down = function (knex) {
return knex.schema
.alterTable("stat_daily", function (table) {
table.dropColumn("ping_min");
table.dropColumn("ping_max");
})
.alterTable("stat_minutely", function (table) {
table.dropColumn("ping_min");
table.dropColumn("ping_max");
});
};

View File

@@ -1,26 +0,0 @@
exports.up = function (knex) {
return knex.schema
.createTable("stat_hourly", function (table) {
table.increments("id");
table.comment("This table contains the hourly aggregate statistics for each monitor");
table.integer("monitor_id").unsigned().notNullable()
.references("id").inTable("monitor")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.integer("timestamp")
.notNullable()
.comment("Unix timestamp rounded down to the nearest hour");
table.float("ping").notNullable().comment("Average ping in milliseconds");
table.float("ping_min").notNullable().defaultTo(0).comment("Minimum ping during this period in milliseconds");
table.float("ping_max").notNullable().defaultTo(0).comment("Maximum ping during this period in milliseconds");
table.smallint("up").notNullable();
table.smallint("down").notNullable();
table.unique([ "monitor_id", "timestamp" ]);
});
};
exports.down = function (knex) {
return knex.schema
.dropTable("stat_hourly");
};

View File

@@ -1,26 +0,0 @@
exports.up = function (knex) {
return knex.schema
.alterTable("stat_daily", function (table) {
table.text("extras").defaultTo(null).comment("Extra statistics during this time period");
})
.alterTable("stat_minutely", function (table) {
table.text("extras").defaultTo(null).comment("Extra statistics during this time period");
})
.alterTable("stat_hourly", function (table) {
table.text("extras").defaultTo(null).comment("Extra statistics during this time period");
});
};
exports.down = function (knex) {
return knex.schema
.alterTable("stat_daily", function (table) {
table.dropColumn("extras");
})
.alterTable("stat_minutely", function (table) {
table.dropColumn("extras");
})
.alterTable("stat_hourly", function (table) {
table.dropColumn("extras");
});
};

View File

@@ -1,16 +0,0 @@
exports.up = function (knex) {
return knex.schema
.alterTable("monitor", function (table) {
table.string("snmp_oid").defaultTo(null);
table.enum("snmp_version", [ "1", "2c", "3" ]).defaultTo("2c");
table.string("json_path_operator").defaultTo(null);
});
};
exports.down = function (knex) {
return knex.schema.alterTable("monitor", function (table) {
table.dropColumn("snmp_oid");
table.dropColumn("snmp_version");
table.dropColumn("json_path_operator");
});
};

View File

@@ -1,13 +0,0 @@
exports.up = function (knex) {
return knex.schema
.alterTable("monitor", function (table) {
table.boolean("cache_bust").notNullable().defaultTo(false);
});
};
exports.down = function (knex) {
return knex.schema
.alterTable("monitor", function (table) {
table.dropColumn("cache_bust");
});
};

View File

@@ -1,12 +0,0 @@
exports.up = function (knex) {
return knex.schema
.alterTable("monitor", function (table) {
table.text("conditions").notNullable().defaultTo("[]");
});
};
exports.down = function (knex) {
return knex.schema.alterTable("monitor", function (table) {
table.dropColumn("conditions");
});
};

View File

@@ -1,17 +0,0 @@
exports.up = function (knex) {
return knex.schema.alterTable("monitor", function (table) {
table.text("rabbitmq_nodes");
table.string("rabbitmq_username");
table.string("rabbitmq_password");
});
};
exports.down = function (knex) {
return knex.schema.alterTable("monitor", function (table) {
table.dropColumn("rabbitmq_nodes");
table.dropColumn("rabbitmq_username");
table.dropColumn("rabbitmq_password");
});
};

View File

@@ -1,13 +0,0 @@
// Update info_json column to LONGTEXT mainly for MariaDB
exports.up = function (knex) {
return knex.schema
.alterTable("monitor_tls_info", function (table) {
table.text("info_json", "longtext").alter();
});
};
exports.down = function (knex) {
return knex.schema.alterTable("monitor_tls_info", function (table) {
table.text("info_json", "text").alter();
});
};

View File

@@ -1,34 +0,0 @@
-- You should not modify this file once it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
-- Rename the COLUMNs to temporary ones (suffixed by `_old`)
ALTER TABLE monitor
RENAME COLUMN kafka_producer_ssl TO kafka_producer_ssl_old;
ALTER TABLE monitor
RENAME COLUMN kafka_producer_allow_auto_topic_creation TO kafka_producer_allow_auto_topic_creation_old;
-- Add correct COLUMNs
ALTER TABLE monitor
ADD COLUMN kafka_producer_ssl BOOLEAN default 0 NOT NULL;
ALTER TABLE monitor
ADD COLUMN kafka_producer_allow_auto_topic_creation BOOLEAN default 0 NOT NULL;
-- This SQL is still not fully safe. See https://github.com/louislam/uptime-kuma/issues/4039.
-- Bring the old values from the `_old` COLUMNs over to the correct ones
-- UPDATE monitor SET kafka_producer_allow_auto_topic_creation = monitor.kafka_producer_allow_auto_topic_creation_old
-- WHERE monitor.kafka_producer_allow_auto_topic_creation_old IS NOT NULL;
-- UPDATE monitor SET kafka_producer_ssl = monitor.kafka_producer_ssl_old
-- WHERE monitor.kafka_producer_ssl_old IS NOT NULL;
-- Remove old COLUMNs
ALTER TABLE monitor
DROP COLUMN kafka_producer_allow_auto_topic_creation_old;
ALTER TABLE monitor
DROP COLUMN kafka_producer_ssl_old;
COMMIT;

View File

@@ -1,18 +0,0 @@
BEGIN TRANSACTION;
PRAGMA writable_schema = TRUE;
UPDATE
SQLITE_MASTER
SET
sql = replace(sql,
'monitor_id INTEGER NOT NULL',
'monitor_id INTEGER NOT NULL REFERENCES [monitor] ([id]) ON DELETE CASCADE ON UPDATE CASCADE'
)
WHERE
name = 'monitor_tls_info'
AND type = 'table';
PRAGMA writable_schema = RESET;
COMMIT;
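This patch edits the stored CREATE TABLE statement in sqlite_master directly, adding the foreign-key clause without rebuilding the table; PRAGMA writable_schema has to be enabled before the UPDATE and reset afterwards.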

View File

@@ -1,10 +0,0 @@
-- You should not modify this file once it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
-- SQLite: Change the data type of the column "config" from VARCHAR to TEXT
ALTER TABLE notification RENAME COLUMN config TO config_old;
ALTER TABLE notification ADD COLUMN config TEXT;
UPDATE notification SET config = config_old;
ALTER TABLE notification DROP COLUMN config_old;
COMMIT;

View File

@@ -1,7 +0,0 @@
-- You should not modify this file once it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
UPDATE monitor SET timeout = (interval * 0.8)
WHERE timeout IS NULL OR timeout <= 0;
COMMIT;

View File

@@ -1,18 +0,0 @@
BEGIN TRANSACTION;
PRAGMA writable_schema = TRUE;
UPDATE
SQLITE_MASTER
SET
sql = replace(sql,
'monitor_id INTEGER NOT NULL',
'monitor_id INTEGER NOT NULL REFERENCES [monitor] ([id]) ON DELETE CASCADE ON UPDATE CASCADE'
)
WHERE
name = 'monitor_tls_info'
AND type = 'table';
PRAGMA writable_schema = RESET;
COMMIT;

View File

@@ -1,18 +1,9 @@
# Download Apprise deb package
FROM node:20-bookworm-slim AS download-apprise
WORKDIR /app
COPY ./extra/download-apprise.mjs ./download-apprise.mjs
RUN apt update && \
apt --yes --no-install-recommends install curl && \
npm install cheerio semver && \
node ./download-apprise.mjs
# Base Image (Slim)
# If the image changed, the second stage image should be changed too
FROM node:20-bookworm-slim AS base2-slim
ARG TARGETPLATFORM
# Specify --no-install-recommends to skip unused dependencies, which makes the base much smaller!
# apprise = for notifications (From testing repo)
# sqlite3 = for debugging
# iputils-ping = for ping
# util-linux = for setpriv (Should be dropped in 2.0.0?)
@@ -21,10 +12,10 @@ ARG TARGETPLATFORM
# ca-certificates = keep the cert up-to-date
# sudo = for start service nscd with non-root user
# nscd = for better DNS caching
RUN apt update && \
apt --yes --no-install-recommends install \
sqlite3 \
ca-certificates \
RUN echo "deb http://deb.debian.org/debian testing main" >> /etc/apt/sources.list && \
apt update && \
apt --yes --no-install-recommends -t testing install apprise sqlite3 ca-certificates && \
apt --yes --no-install-recommends -t stable install \
iputils-ping \
util-linux \
dumb-init \
@@ -34,16 +25,6 @@ RUN apt update && \
rm -rf /var/lib/apt/lists/* && \
apt --yes autoremove
# apprise = for notifications (Install from the deb package, as the stable one is too old) (workaround for #4867)
# Switching to testing repo is no longer working, as the testing repo is not bookworm anymore.
# python3-paho-mqtt (#4859)
# TODO: no idea how to delete the deb file after installation as it becomes a layer already
COPY --from=download-apprise /app/apprise.deb ./apprise.deb
RUN apt update && \
apt --yes --no-install-recommends install ./apprise.deb python3-paho-mqtt && \
rm -rf /var/lib/apt/lists/* && \
rm -f apprise.deb && \
apt --yes autoremove
# Install cloudflared
RUN curl https://pkg.cloudflare.com/cloudflare-main.gpg --output /usr/share/keyrings/cloudflare-main.gpg && \
@@ -61,9 +42,7 @@ COPY ./docker/etc/sudoers /etc/sudoers
# Full Base Image
# MariaDB, Chromium and fonts
# Make sure to reuse the slim image here. Uncomment the above line if you want to build it from scratch.
# FROM base2-slim AS base2
FROM louislam/uptime-kuma:base2-slim AS base2
FROM base2-slim AS base2
ENV UPTIME_KUMA_ENABLE_EMBEDDED_MARIADB=1
RUN apt update && \
apt --yes --no-install-recommends install chromium fonts-indic fonts-noto fonts-noto-cjk mariadb-server && \

15
docker/docker-compose.yml Normal file
View File

@@ -0,0 +1,15 @@
version: '3.8'
services:
uptime-kuma:
image: louislam/uptime-kuma:1
container_name: uptime-kuma
volumes:
- uptime-kuma:/app/data
ports:
- "3001:3001" # <Host Port>:<Container Port>
restart: always
volumes:
uptime-kuma:
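The named uptime-kuma volume persists /app/data across container recreations; the stack can then be brought up with the usual `docker compose up -d` (or `docker-compose up -d` on Compose v1).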

View File

@@ -27,6 +27,7 @@ RUN mkdir ./data
# ⭐ Main Image
############################################
FROM $BASE_IMAGE AS release
USER node
WORKDIR /app
LABEL org.opencontainers.image.source="https://github.com/louislam/uptime-kuma"
@@ -41,20 +42,12 @@ HEALTHCHECK --interval=60s --timeout=30s --start-period=180s --retries=5 CMD ext
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["node", "server/server.js"]
############################################
# Rootless Image
############################################
FROM release AS rootless
USER node
############################################
# Mark as Nightly
############################################
FROM release AS nightly
RUN npm run mark-as-nightly
FROM nightly AS nightly-rootless
USER node
RUN npm run mark-as-nightly
############################################
# Build an image for testing pr

68
extra/api-spec.json5 Normal file
View File

@@ -0,0 +1,68 @@
[
{
"name": "getPushExample",
"description": "Get a push example.",
"params": [
{
"name": "language",
"type": "string",
"description": "The programming language such as `javascript-fetch` or `python`. See the directory ./extra/push-examples for a list of available languages."
}
],
"returnType": "response-json",
"okReturn": [
{
"name": "code",
"type": "string",
"description": "The push example."
}
],
"possibleErrorReasons": [
"The parameter `language` is not available"
],
},
{
"name": "checkApprise",
"description": "Check if the apprise library is installed.",
"params": [],
"returnType": "boolean",
},
{
"name": "getSettings",
"description": "",
"params": [],
"returnType": "response-json",
"okReturn": [
{
"name": "data",
"type": "object",
"description": "The setting object. It does not contain default values."
}
],
"possibleErrorReasons": [],
},
{
"name": "changePassword",
"description": "",
"params": [
{
"name": "password",
"type": "object",
"description": "The password object with the following properties: `currentPassword` and `newPassword`"
}
],
"returnType": "response-json",
"okReturn": [
{
"name": "data",
"type": "object",
"description": "The setting object. It does not contain default values."
}
],
"possibleErrorReasons": [
"Incorrect current password",
"Invalid new password",
"Password is too weak"
],
}
]
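Read together with the commit messages, this spec describes socket-style RPC methods. A minimal client sketch follows; the event-per-method mapping and any response fields beyond the documented `ok`/`code` are assumptions, not guarantees from the spec:

// A sketch, assuming each spec entry maps to a socket.io event
// whose last argument is an acknowledgement callback.
const { io } = require("socket.io-client");

const socket = io("http://localhost:3001");
socket.emit("getPushExample", "python", (res) => {
    if (res.ok) {
        console.log(res.code); // the push example source code
    } else {
        console.error(res.msg); // error shape is an assumption
    }
});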

View File

@@ -1,72 +0,0 @@
import fs from "fs";
const dir = "./db/knex_migrations";
// Get the file list from the directory (excluding README.md)
const files = fs.readdirSync(dir).filter((file) => file !== "README.md");
// These names are wrong, but they were already merged, so they are allowed.
const exceptionList = [
"2024-08-24-000-add-cache-bust.js",
"2024-10-1315-rabbitmq-monitor.js",
];
// Correct format: YYYY-MM-DD-HHmm-description.js
for (const file of files) {
if (exceptionList.includes(file)) {
continue;
}
// Check ending with .js
if (!file.endsWith(".js")) {
console.error(`It should end with .js: ${file}`);
process.exit(1);
}
const parts = file.split("-");
// Should be at least 5 parts
if (parts.length < 5) {
console.error(`Invalid format: ${file}`);
process.exit(1);
}
// First part should be a year >= 2023
const year = parseInt(parts[0], 10);
if (isNaN(year) || year < 2023) {
console.error(`Invalid year: ${file}`);
process.exit(1);
}
// Second part should be a month
const month = parseInt(parts[1], 10);
if (isNaN(month) || month < 1 || month > 12) {
console.error(`Invalid month: ${file}`);
process.exit(1);
}
// Third part should be a day
const day = parseInt(parts[2], 10);
if (isNaN(day) || day < 1 || day > 31) {
console.error(`Invalid day: ${file}`);
process.exit(1);
}
// Fourth part should be HHmm
const time = parts[3];
// Check length is 4
if (time.length !== 4) {
console.error(`Invalid time: ${file}`);
process.exit(1);
}
const hour = parseInt(time.substring(0, 2), 10);
const minute = parseInt(time.substring(2), 10);
if (isNaN(hour) || hour < 0 || hour > 23 || isNaN(minute) || minute < 0 || minute > 59) {
console.error(`Invalid time: ${file}`);
process.exit(1);
}
}
console.log("All knex filenames are correct.");

View File

@@ -1,27 +0,0 @@
// For #5231
const fs = require("fs");
let path = "./src/lang";
// list directories in the lang directory
let jsonFileList = fs.readdirSync(path);
for (let jsonFile of jsonFileList) {
if (!jsonFile.endsWith(".json")) {
continue;
}
let jsonPath = path + "/" + jsonFile;
let originalContent = fs.readFileSync(jsonPath, "utf8");
let langData = JSON.parse(originalContent);
let formattedContent = JSON.stringify(langData, null, 4) + "\n";
if (originalContent !== formattedContent) {
console.error(`File ${jsonFile} is not formatted correctly.`);
process.exit(1);
}
}
console.log("All lang json files are formatted correctly.");

View File

@@ -37,7 +37,7 @@ const github = require("@actions/github");
owner: issue.owner,
repo: issue.repo,
issue_number: issue.number,
body: `@${username}: Hello! :wave:\n\nThis issue is being automatically closed because it does not follow the issue template. Please **DO NOT open blank issues and use our [issue-templates](https://github.com/louislam/uptime-kuma/issues/new/choose) instead**.\nBlank Issues do not contain the context necessary for a good discussion.`
body: `@${username}: Hello! :wave:\n\nThis issue is being automatically closed because it does not follow the issue template. Please DO NOT open a blank issue.`
});
// Close the issue

View File

@@ -1,57 +0,0 @@
// Fetch http://ftp.debian.org/debian/pool/main/a/apprise/ with the fetch API; it is an Apache directory-listing page
// Use cheerio to parse the HTML and get the latest version of Apprise
// Call curl to download the latest version of Apprise
// Target file: the latest version of Apprise, whose filename format is apprise_{VERSION}_all.deb
import * as cheerio from "cheerio";
import semver from "semver";
import * as childProcess from "child_process";
const baseURL = "http://ftp.debian.org/debian/pool/main/a/apprise/";
const response = await fetch(baseURL);
if (!response.ok) {
throw new Error("Failed to fetch page of Apprise Debian repository.");
}
const html = await response.text();
const $ = cheerio.load(html);
// Get all the links in the page
const linkElements = $("a");
// Filter the links which match apprise_{VERSION}_all.deb
const links = [];
const pattern = /apprise_(.*?)_all.deb/;
for (let i = 0; i < linkElements.length; i++) {
const link = linkElements[i];
if (link.attribs.href.match(pattern) && !link.attribs.href.includes("~")) {
links.push({
filename: link.attribs.href,
version: link.attribs.href.match(pattern)[1],
});
}
}
console.log(links);
// semver compare and download
let latestLink = {
filename: "",
version: "0.0.0",
};
for (const link of links) {
if (semver.gt(link.version, latestLink.version)) {
latestLink = link;
}
}
const downloadURL = baseURL + latestLink.filename;
console.log(`Downloading ${downloadURL}...`);
let result = childProcess.spawnSync("curl", [ downloadURL, "--output", "apprise.deb" ]);
console.log(result.stdout?.toString());
console.error(result.stderr?.toString());
process.exit(result.status !== null ? result.status : 1);

View File

@@ -4,6 +4,7 @@ const tar = require("tar");
const packageJSON = require("../package.json");
const fs = require("fs");
const rmSync = require("./fs-rmSync.js");
const version = packageJSON.version;
const filename = "dist.tar.gz";
@@ -28,9 +29,8 @@ function download(url) {
if (fs.existsSync("./dist")) {
if (fs.existsSync("./dist-backup")) {
fs.rmSync("./dist-backup", {
recursive: true,
force: true,
rmSync("./dist-backup", {
recursive: true
});
}
@@ -43,9 +43,8 @@ function download(url) {
tarStream.on("close", () => {
if (fs.existsSync("./dist-backup")) {
fs.rmSync("./dist-backup", {
recursive: true,
force: true,
rmSync("./dist-backup", {
recursive: true
});
}
console.log("Done");

23
extra/fs-rmSync.js Normal file
View File

@@ -0,0 +1,23 @@
const fs = require("fs");
/**
* Detect if `fs.rmSync` is available,
* to avoid the runtime deprecation warning triggered by using `fs.rmdirSync` with `{ recursive: true }` in Node.js v16,
* and the `recursive` option being removed completely in a future Node.js version.
* See the links below.
* @todo Once we drop support for Node.js v14 (or at least versions before v14.14.0), we can safely replace this function with `fs.rmSync`: `fs.rmSync` was added in Node.js v14.14.0, we currently support all Node.js v14 versions including those before v14.14.0, and this function has almost the same signature as `fs.rmSync`.
* @link https://nodejs.org/docs/latest-v16.x/api/deprecations.html#dep0147-fsrmdirpath--recursive-true- the deprecation information of `fs.rmdirSync`
* @link https://nodejs.org/docs/latest-v16.x/api/fs.html#fsrmsyncpath-options the documentation of `fs.rmSync`
* @param {fs.PathLike} path Valid types for path values in "fs".
* @param {fs.RmDirOptions} options Options for `fs.rmdirSync`; if `fs.rmSync` is available and the `recursive` property is true, the `force` property is automatically set to `true`.
* @returns {void}
*/
const rmSync = (path, options) => {
if (typeof fs.rmSync === "function") {
if (options.recursive) {
options.force = true;
}
return fs.rmSync(path, options);
}
return fs.rmdirSync(path, options);
};
module.exports = rmSync;
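A short usage sketch of this helper, mirroring the call sites in download-dist.js and the language updater elsewhere in this diff:

// Behaves like fs.rmSync where available, falling back to
// fs.rmdirSync on Node.js versions before v14.14.0.
const rmSync = require("./fs-rmSync.js");
rmSync("./dist-backup", { recursive: true }); // force: true is implied here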

View File

@@ -6,7 +6,7 @@
* ⚠️ Deprecated: replaced by healthcheck.go; this script will be deleted in the future.
* This script should be run after a delay (180s), because the server may need some time to get ready.
*/
const FBSD = /^freebsd/.test(process.platform);
const { FBSD } = require("../server/util-server");
process.env.NODE_TLS_REJECT_UNAUTHORIZED = "0";

276
extra/install.batsh Normal file
View File

@@ -0,0 +1,276 @@
// install.sh is generated by ./extra/install.batsh, do not modify it directly.
// "npm run compile-install-script" to compile install.sh
// The compile command works on Windows PowerShell and Docker for Windows only.
// curl -o kuma_install.sh https://raw.githubusercontent.com/louislam/uptime-kuma/master/install.sh && sudo bash kuma_install.sh
println("=====================");
println("Uptime Kuma Install Script");
println("=====================");
println("Supported OS: Ubuntu >= 16.04, Debian and CentOS/RHEL 7/8");
println("---------------------------------------");
println("This script is designed for Linux and basic usage.");
println("For advanced usage, please go to https://github.com/louislam/uptime-kuma/wiki/Installation");
println("---------------------------------------");
println("");
println("Local - Install Uptime Kuma on your current machine with git, Node.js and pm2");
println("Docker - Install Uptime Kuma Docker container");
println("");
if ("$1" != "") {
type = "$1";
} else {
call("read", "-p", "Which installation method do you prefer? [DOCKER/local]: ", "type");
}
defaultPort = "3001";
function checkNode() {
bash("nodeVersion=$(node -e 'console.log(process.versions.node.split(`.`)[0])')");
println("Node Version: " ++ nodeVersion);
if (nodeVersion <= "12") {
println("Error: Required Node.js 14");
call("exit", "1");
}
}
function deb() {
bash("nodeCheck=$(node -v)");
bash("apt --yes update");
if (nodeCheck != "") {
checkNode();
} else {
// Old nodejs binary name is "nodejs"
bash("check=$(nodejs --version)");
if (check != "") {
println("Error: 'node' command is not found, but 'nodejs' command is found. Your NodeJS should be too old.");
bash("exit 1");
}
bash("curlCheck=$(curl --version)");
if (curlCheck == "") {
println("Installing Curl");
bash("apt --yes install curl");
}
println("Installing Node.js 16");
bash("curl -sL https://deb.nodesource.com/setup_16.x | bash - > log.txt");
bash("apt --yes install nodejs");
bash("node -v");
bash("nodeCheckAgain=$(node -v)");
if (nodeCheckAgain == "") {
println("Error during Node.js installation");
bash("exit 1");
}
}
bash("check=$(git --version)");
if (check == "") {
println("Installing Git");
bash("apt --yes install git");
}
}
if (type == "local") {
defaultInstallPath = "/opt/uptime-kuma";
if (exists("/etc/redhat-release")) {
os = call("cat", "/etc/redhat-release");
distribution = "rhel";
} else if (exists("/etc/issue")) {
bash("os=$(head -n1 /etc/issue | cut -f 1 -d ' ')");
if (os == "Ubuntu") {
distribution = "ubuntu";
// Get ubuntu version
bash(". /etc/lsb-release");
version = DISTRIB_RELEASE;
}
if (os == "Debian") {
distribution = "debian";
}
}
bash("arch=$(uname -i)");
println("Your OS: " ++ os);
println("Distribution: " ++ distribution);
println("Version: " ++ version);
println("Arch: " ++ arch);
if ("$3" != "") {
port = "$3";
} else {
call("read", "-p", "Listening Port [$defaultPort]: ", "port");
if (port == "") {
port = defaultPort;
}
}
if ("$2" != "") {
installPath = "$2";
} else {
call("read", "-p", "Installation Path [$defaultInstallPath]: ", "installPath");
if (installPath == "") {
installPath = defaultInstallPath;
}
}
// CentOS
if (distribution == "rhel") {
bash("nodeCheck=$(node -v)");
if (nodeCheck != "") {
checkNode();
} else {
bash("dnfCheck=$(dnf --version)");
// Use yum
if (dnfCheck == "") {
bash("curlCheck=$(curl --version)");
if (curlCheck == "") {
println("Installing Curl");
bash("yum -y -q install curl");
}
println("Installing Node.js 16");
bash("curl -sL https://rpm.nodesource.com/setup_16.x | bash - > log.txt");
bash("yum install -y -q nodejs");
} else {
bash("curlCheck=$(curl --version)");
if (curlCheck == "") {
println("Installing Curl");
bash("dnf -y install curl");
}
println("Installing Node.js 16");
bash("curl -sL https://rpm.nodesource.com/setup_16.x | bash - > log.txt");
bash("dnf install -y nodejs");
}
bash("node -v");
bash("nodeCheckAgain=$(node -v)");
if (nodeCheckAgain == "") {
println("Error during Node.js installation");
bash("exit 1");
}
}
bash("check=$(git --version)");
if (check == "") {
println("Installing Git");
bash("yum -y -q install git");
}
// Ubuntu
} else if (distribution == "ubuntu") {
deb();
// Debian
} else if (distribution == "debian") {
deb();
} else {
// Unknown distribution
error = 0;
bash("check=$(git --version)");
if (check == "") {
error = 1;
println("Error: git is not found!");
println("help: an installation guide is available at https://git-scm.com/book/en/v2/Getting-Started-Installing-Git");
}
bash("check=$(node -v)");
if (check == "") {
error = 1;
println("Error: node is not found");
println("help: an installation guide is available at https://nodejs.org/en/download");
}
if (error > 0) {
println("Please install above missing software");
bash("exit 1");
}
}
bash("check=$(pm2 --version)");
if (check == "") {
println("Installing PM2");
bash("npm install pm2 -g && pm2 install pm2-logrotate");
bash("pm2 startup");
}
// Check again
bash("check=$(pm2 --version)");
if (check == "") {
println("Error: pm2 is not found!");
println("help: an installation guide is available at https://pm2.keymetrics.io/docs/usage/quick-start/");
bash("exit 1");
}
bash("mkdir -p $installPath");
bash("cd $installPath");
bash("git clone https://github.com/louislam/uptime-kuma.git .");
bash("npm run setup");
bash("pm2 start server/server.js --name uptime-kuma -- --port=$port");
} else {
defaultVolume = "uptime-kuma";
bash("check=$(docker -v)");
if (check == "") {
println("Error: docker is not found!");
println("help: an installation guide is available at https://docs.docker.com/desktop/");
bash("exit 1");
}
bash("check=$(docker info)");
bash("if [[ \"$check\" == *\"Is the docker daemon running\"* ]]; then
\"echo\" \"Error: docker is not running\"
\"echo\" \"help: a troubleshooting guide is available at https://docs.docker.com/config/daemon/troubleshoot/\"
\"exit\" \"1\"
fi");
if ("$3" != "") {
port = "$3";
} else {
call("read", "-p", "Expose Port [$defaultPort]: ", "port");
if (port == "") {
port = defaultPort;
}
}
if ("$2" != "") {
volume = "$2";
} else {
call("read", "-p", "Volume Name [$defaultVolume]: ", "volume");
if (volume == "") {
volume = defaultVolume;
}
}
println("Port: $port");
println("Volume: $volume");
bash("docker volume create $volume");
bash("docker run -d --restart=always -p $port:3001 -v $volume:/app/data --name uptime-kuma louislam/uptime-kuma:1");
}
println("http://localhost:$port");

View File

@@ -1,44 +0,0 @@
// Generate on GitHub
const input = `
* Add Korean translation by @Alanimdeo in https://github.com/louislam/dockge/pull/86
`;
const template = `
### 🆕 New Features
### 💇‍♀️ Improvements
### 🐞 Bug Fixes
### ⬆️ Security Fixes
### 🦎 Translation Contributions
### Others
- Other small changes, code refactoring and comment/doc updates in this repo:
`;
const lines = input.split("\n").filter((line) => line.trim() !== "");
for (const line of lines) {
// Split the last " by "
const usernamePullRequestURL = line.split(" by ").pop();
if (!usernamePullRequestURL) {
console.log("Unable to parse", line);
continue;
}
const [ username, pullRequestURL ] = usernamePullRequestURL.split(" in ");
const pullRequestID = "#" + pullRequestURL.split("/").pop();
let message = line.split(" by ").shift();
if (!message) {
console.log("Unable to parse", line);
continue;
}
message = message.split("* ").pop();
console.log("-", pullRequestID, message, `(Thanks ${username})`);
}
console.log(template);

View File

@@ -1,25 +0,0 @@
// For #5231
const fs = require("fs");
let path = "../src/lang";
// list directories in the lang directory
let jsonFileList = fs.readdirSync(path);
for (let jsonFile of jsonFileList) {
if (!jsonFile.endsWith(".json")) {
continue;
}
let jsonPath = path + "/" + jsonFile;
let langData = JSON.parse(fs.readFileSync(jsonPath, "utf8"));
for (let key in langData) {
if (langData[key] === "") {
delete langData[key];
}
}
fs.writeFileSync(jsonPath, JSON.stringify(langData, null, 4) + "\n");
}

View File

@@ -1,6 +0,0 @@
const fs = require("fs");
fs.rmSync("./data/playwright-test", {
recursive: true,
force: true,
});

View File

@@ -1,24 +0,0 @@
const { R } = require("redbean-node");
const Database = require("../server/database");
const args = require("args-parser")(process.argv);
const { Settings } = require("../server/settings");
const main = async () => {
console.log("Connecting the database");
Database.initDataDir(args);
await Database.connect(false, false, true);
console.log("Deleting all data from aggregate tables");
await R.exec("DELETE FROM stat_minutely");
await R.exec("DELETE FROM stat_hourly");
await R.exec("DELETE FROM stat_daily");
console.log("Resetting the aggregate table state");
await Settings.set("migrateAggregateTableState", "");
await Database.close();
console.log("Done");
};
main();
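For reference, this script is wired up as `npm run reset-migrate-aggregate-table-state` in the package.json diff further below.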

View File

@@ -5,25 +5,18 @@ const { R } = require("redbean-node");
const readline = require("readline");
const { initJWTSecret } = require("../server/util-server");
const User = require("../server/model/user");
const { io } = require("socket.io-client");
const { localWebSocketURL } = require("../server/config");
const args = require("args-parser")(process.argv);
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout
});
const main = async () => {
if ("dry-run" in args) {
console.log("Dry run mode, no changes will be made.");
}
console.log("Connecting the database");
Database.initDataDir(args);
await Database.connect(false, false, true);
try {
Database.initDataDir(args);
await Database.connect(false, false, true);
// No need to actually reset the password for testing; just make sure there is no connection problem. That is OK for now.
if (!process.env.TEST_BACKEND) {
const user = await R.findOne("user");
@@ -34,36 +27,21 @@ const main = async () => {
console.log("Found user: " + user.username);
while (true) {
let password;
let confirmPassword;
// When called with the "--new-password" argument for unattended modification (e.g. npm run reset-password -- --new-password=secret)
if ("new-password" in args) {
console.log("Using password from argument");
console.warn("\x1b[31m%s\x1b[0m", "Warning: the password might be stored, in plain text, in your shell's history");
password = confirmPassword = args["new-password"] + "";
} else {
password = await question("New Password: ");
confirmPassword = await question("Confirm New Password: ");
}
let password = await question("New Password: ");
let confirmPassword = await question("Confirm New Password: ");
if (password === confirmPassword) {
if (!("dry-run" in args)) {
await User.resetPassword(user.id, password);
await User.resetPassword(user.id, password);
// Reset all sessions by reset jwt secret
await initJWTSecret();
// Reset all sessions by reset jwt secret
await initJWTSecret();
// Disconnect all other socket clients of the user
await disconnectAllSocketClients(user.username, password);
}
break;
} else {
console.log("Passwords do not match, please try again.");
}
}
console.log("Password reset successfully.");
}
} catch (e) {
console.error("Error: " + e.message);
@@ -88,50 +66,6 @@ function question(question) {
});
}
/**
* Disconnect all socket clients of the user
* @param {string} username Username
* @param {string} password Password
* @returns {Promise<void>} Promise
*/
function disconnectAllSocketClients(username, password) {
return new Promise((resolve) => {
console.log("Connecting to " + localWebSocketURL + " to disconnect all other socket clients");
// Disconnect all socket connections
const socket = io(localWebSocketURL, {
reconnection: false,
timeout: 5000,
});
socket.on("connect", () => {
socket.emit("login", {
username,
password,
}, (res) => {
if (res.ok) {
console.log("Logged in.");
socket.emit("disconnectOtherSocketClients");
} else {
console.warn("Login failed.");
console.warn("Please restart the server to disconnect all sessions.");
}
socket.close();
});
});
socket.on("connect_error", function () {
// The localWebSocketURL is not guaranteed to work for some complicated Uptime Kuma setups
// Ask the user to restart the server manually
console.warn("Failed to connect to " + localWebSocketURL);
console.warn("Please restart the server to disconnect all sessions manually.");
resolve();
});
socket.on("disconnect", () => {
resolve();
});
});
}
if (!process.env.TEST_BACKEND) {
main();
}

View File

@@ -2,6 +2,7 @@
import fs from "fs";
import util from "util";
import rmSync from "../fs-rmSync.js";
/**
* Copy across the required language files
@@ -15,10 +16,7 @@ import util from "util";
*/
function copyFiles(langCode, baseLang) {
if (fs.existsSync("./languages")) {
fs.rmSync("./languages", {
recursive: true,
force: true,
});
rmSync("./languages", { recursive: true });
}
fs.mkdirSync("./languages");
@@ -95,9 +93,6 @@ console.log("Updating: " + langCode);
copyFiles(langCode, baseLangCode);
await updateLanguage(langCode, baseLangCode);
fs.rmSync("./languages", {
recursive: true,
force: true,
});
rmSync("./languages", { recursive: true });
console.log("Done. Fixing formatting by ESLint...");

228
install.sh Normal file
View File

@@ -0,0 +1,228 @@
# install.sh is generated by ./extra/install.batsh, do not modify it directly.
# "npm run compile-install-script" to compile install.sh
# The compile command works on Windows PowerShell and Docker for Windows only.
# curl -o kuma_install.sh https://raw.githubusercontent.com/louislam/uptime-kuma/master/install.sh && sudo bash kuma_install.sh
"echo" "-e" "====================="
"echo" "-e" "Uptime Kuma Install Script"
"echo" "-e" "====================="
"echo" "-e" "Supported OS: Ubuntu >= 16.04, Debian and CentOS/RHEL 7/8"
"echo" "-e" "---------------------------------------"
"echo" "-e" "This script is designed for Linux and basic usage."
"echo" "-e" "For advanced usage, please go to https://github.com/louislam/uptime-kuma/wiki/Installation"
"echo" "-e" "---------------------------------------"
"echo" "-e" ""
"echo" "-e" "Local - Install Uptime Kuma on your current machine with git, Node.js and pm2"
"echo" "-e" "Docker - Install Uptime Kuma Docker container"
"echo" "-e" ""
if [ "$1" != "" ]; then
type="$1"
else
"read" "-p" "Which installation method do you prefer? [DOCKER/local]: " "type"
fi
defaultPort="3001"
function checkNode {
local _0
nodeVersion=$(node -e 'console.log(process.versions.node.split(`.`)[0])')
"echo" "-e" "Node Version: ""$nodeVersion"
_0="12"
if [ $(($nodeVersion <= $_0)) == 1 ]; then
"echo" "-e" "Error: Required Node.js 14"
"exit" "1"
fi
}
function deb {
nodeCheck=$(node -v)
apt --yes update
if [ "$nodeCheck" != "" ]; then
"checkNode"
else
# Old nodejs binary name is "nodejs"
check=$(nodejs --version)
if [ "$check" != "" ]; then
"echo" "-e" "Error: 'node' command is not found, but 'nodejs' command is found. Your NodeJS should be too old."
exit 1
fi
curlCheck=$(curl --version)
if [ "$curlCheck" == "" ]; then
"echo" "-e" "Installing Curl"
apt --yes install curl
fi
"echo" "-e" "Installing Node.js 16"
curl -sL https://deb.nodesource.com/setup_16.x | bash - > log.txt
apt --yes install nodejs
node -v
nodeCheckAgain=$(node -v)
if [ "$nodeCheckAgain" == "" ]; then
"echo" "-e" "Error during Node.js installation"
exit 1
fi
fi
check=$(git --version)
if [ "$check" == "" ]; then
"echo" "-e" "Installing Git"
apt --yes install git
fi
}
if [ "$type" == "local" ]; then
defaultInstallPath="/opt/uptime-kuma"
if [ -e "/etc/redhat-release" ]; then
os=$("cat" "/etc/redhat-release")
distribution="rhel"
else
if [ -e "/etc/issue" ]; then
os=$(head -n1 /etc/issue | cut -f 1 -d ' ')
if [ "$os" == "Ubuntu" ]; then
distribution="ubuntu"
# Get ubuntu version
. /etc/lsb-release
version="$DISTRIB_RELEASE"
fi
if [ "$os" == "Debian" ]; then
distribution="debian"
fi
fi
fi
arch=$(uname -i)
"echo" "-e" "Your OS: ""$os"
"echo" "-e" "Distribution: ""$distribution"
"echo" "-e" "Version: ""$version"
"echo" "-e" "Arch: ""$arch"
if [ "$3" != "" ]; then
port="$3"
else
"read" "-p" "Listening Port [$defaultPort]: " "port"
if [ "$port" == "" ]; then
port="$defaultPort"
fi
fi
if [ "$2" != "" ]; then
installPath="$2"
else
"read" "-p" "Installation Path [$defaultInstallPath]: " "installPath"
if [ "$installPath" == "" ]; then
installPath="$defaultInstallPath"
fi
fi
# CentOS
if [ "$distribution" == "rhel" ]; then
nodeCheck=$(node -v)
if [ "$nodeCheck" != "" ]; then
"checkNode"
else
dnfCheck=$(dnf --version)
# Use yum
if [ "$dnfCheck" == "" ]; then
curlCheck=$(curl --version)
if [ "$curlCheck" == "" ]; then
"echo" "-e" "Installing Curl"
yum -y -q install curl
fi
"echo" "-e" "Installing Node.js 16"
curl -sL https://rpm.nodesource.com/setup_16.x | bash - > log.txt
yum install -y -q nodejs
else
curlCheck=$(curl --version)
if [ "$curlCheck" == "" ]; then
"echo" "-e" "Installing Curl"
dnf -y install curl
fi
"echo" "-e" "Installing Node.js 16"
curl -sL https://rpm.nodesource.com/setup_16.x | bash - > log.txt
dnf install -y nodejs
fi
node -v
nodeCheckAgain=$(node -v)
if [ "$nodeCheckAgain" == "" ]; then
"echo" "-e" "Error during Node.js installation"
exit 1
fi
fi
check=$(git --version)
if [ "$check" == "" ]; then
"echo" "-e" "Installing Git"
yum -y -q install git
fi
# Ubuntu
else
if [ "$distribution" == "ubuntu" ]; then
"deb"
# Debian
else
if [ "$distribution" == "debian" ]; then
"deb"
else
# Unknown distribution
error=$((0))
check=$(git --version)
if [ "$check" == "" ]; then
error=$((1))
"echo" "-e" "Error: git is not found!"
"echo" "-e" "help: an installation guide is available at https://git-scm.com/book/en/v2/Getting-Started-Installing-Git"
fi
check=$(node -v)
if [ "$check" == "" ]; then
error=$((1))
"echo" "-e" "Error: node is not found"
"echo" "-e" "help: an installation guide is available at https://nodejs.org/en/download"
fi
if [ $(($error > 0)) == 1 ]; then
"echo" "-e" "Please install above missing software"
exit 1
fi
fi
fi
fi
check=$(pm2 --version)
if [ "$check" == "" ]; then
"echo" "-e" "Installing PM2"
npm install pm2 -g && pm2 install pm2-logrotate
pm2 startup
fi
# Check again
check=$(pm2 --version)
if [ "$check" == "" ]; then
"echo" "-e" "Error: pm2 is not found!"
"echo" "-e" "help: an installation guide is available at https://pm2.keymetrics.io/docs/usage/quick-start/"
exit 1
fi
mkdir -p $installPath
cd $installPath
git clone https://github.com/louislam/uptime-kuma.git .
npm run setup
pm2 start server/server.js --name uptime-kuma -- --port=$port
else
defaultVolume="uptime-kuma"
check=$(docker -v)
if [ "$check" == "" ]; then
"echo" "-e" "Error: docker is not found!"
"echo" "-e" "help: an installation guide is available at https://docs.docker.com/desktop/"
exit 1
fi
check=$(docker info)
if [[ "$check" == *"Is the docker daemon running"* ]]; then
"echo" "Error: docker is not running"
"echo" "help: a troubleshooting guide is available at https://docs.docker.com/config/daemon/troubleshoot/"
"exit" "1"
fi
if [ "$3" != "" ]; then
port="$3"
else
"read" "-p" "Expose Port [$defaultPort]: " "port"
if [ "$port" == "" ]; then
port="$defaultPort"
fi
fi
if [ "$2" != "" ]; then
volume="$2"
else
"read" "-p" "Volume Name [$defaultVolume]: " "volume"
if [ "$volume" == "" ]; then
volume="$defaultVolume"
fi
fi
"echo" "-e" "Port: $port"
"echo" "-e" "Volume: $volume"
docker volume create $volume
docker run -d --restart=always -p $port:3001 -v $volume:/app/data --name uptime-kuma louislam/uptime-kuma:1
fi
"echo" "-e" "http://localhost:$port"

20918
package-lock.json generated

File diff suppressed because it is too large

View File

@@ -1,85 +1,90 @@
{
"name": "uptime-kuma",
"version": "2.0.0-dev",
"version": "1.23.2",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/louislam/uptime-kuma.git"
},
"engines": {
"node": "18 || >= 20.4.0"
"node": "14 || 16 || 18 || >= 20.4.0"
},
"scripts": {
"install-legacy": "npm install",
"update-legacy": "npm update",
"lint:js": "eslint --ext \".js,.vue\" --ignore-path .gitignore .",
"lint:js-prod": "npm run lint:js -- --max-warnings 0",
"lint-fix:js": "eslint --ext \".js,.vue\" --fix --ignore-path .gitignore .",
"lint:style": "stylelint \"**/*.{vue,css,scss}\" --ignore-path .gitignore",
"lint-fix:style": "stylelint \"**/*.{vue,css,scss}\" --fix --ignore-path .gitignore",
"lint": "npm run lint:js && npm run lint:style",
"lint:prod": "npm run lint:js-prod && npm run lint:style",
"dev": "concurrently -k -r \"wait-on tcp:3000 && npm run start-server-dev \" \"npm run start-frontend-dev\"",
"start-frontend-dev": "cross-env NODE_ENV=development vite --host --config ./config/vite.config.js",
"start-frontend-devcontainer": "cross-env NODE_ENV=development DEVCONTAINER=1 vite --host --config ./config/vite.config.js",
"start": "npm run start-server",
"start-server": "node server/server.js",
"start-server-dev": "cross-env NODE_ENV=development node server/server.js",
"start-server-dev:watch": "cross-env NODE_ENV=development node --watch server/server.js",
"build": "vite build --config ./config/vite.config.js",
"test": "npm run test-backend && npm run test-e2e",
"test": "node test/prepare-test-server.js && npm run test-backend",
"test-with-build": "npm run build && npm test",
"test-backend": "cross-env TEST_BACKEND=1 node --test test/backend-test",
"test-e2e": "playwright test --config ./config/playwright.config.js",
"test-e2e-ui": "playwright test --config ./config/playwright.config.js --ui --ui-port=51063",
"playwright-codegen": "playwright codegen localhost:3000 --save-storage=./private/e2e-auth.json",
"playwright-show-report": "playwright show-report ./private/playwright-report",
"test-backend": "node test/backend-test-entry.js && npm run jest-backend",
"test-backend:14": "cross-env TEST_BACKEND=1 NODE_OPTIONS=\"--experimental-abortcontroller --no-warnings\" node--test test/backend-test",
"test-backend:18": "cross-env TEST_BACKEND=1 node --test test/backend-test",
"jest-backend": "cross-env TEST_BACKEND=1 jest --runInBand --detectOpenHandles --forceExit --config=./config/jest-backend.config.js",
"tsc": "tsc",
"vite-preview-dist": "vite preview --host --config ./config/vite.config.js",
"build-docker": "npm run build && npm run build-docker-full && npm run build-docker-slim",
"build-docker-base": "docker buildx build -f docker/debian-base.dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:base2 --target base2 . --push",
"build-docker-base-slim": "docker buildx build -f docker/debian-base.dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:base2-slim --target base2-slim . --push",
"build-docker-builder-go": "docker buildx build -f docker/builder-go.dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:builder-go . --push",
"build-docker-slim": "node ./extra/env2arg.js docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:next-slim -t louislam/uptime-kuma:2-slim -t louislam/uptime-kuma:$VERSION-slim --target release --build-arg BASE_IMAGE=louislam/uptime-kuma:base2-slim . --push",
"build-docker-full": "node ./extra/env2arg.js docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:next -t louislam/uptime-kuma:2 -t louislam/uptime-kuma:$VERSION --target release . --push",
"build-docker-slim": "node ./extra/env2arg.js docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:2-slim -t louislam/uptime-kuma:$VERSION-slim --target release --build-arg BASE_IMAGE=louislam/uptime-kuma:base2-slim . --push",
"build-docker-full": "node ./extra/env2arg.js docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:2 -t louislam/uptime-kuma:$VERSION --target release . --push",
"build-docker-nightly": "node ./extra/test-docker.js && npm run build && docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:nightly2 --target nightly . --push",
"build-docker-slim-rootless": "node ./extra/env2arg.js docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:2-slim-rootless -t louislam/uptime-kuma:$VERSION-slim-rootless --target rootless --build-arg BASE_IMAGE=louislam/uptime-kuma:base2-slim . --push",
"build-docker-full-rootless": "node ./extra/env2arg.js docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:2-rootless -t louislam/uptime-kuma:$VERSION-rootless --target rootless . --push",
"build-docker-nightly-rootless": "node ./extra/test-docker.js && npm run build && docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:nightly2-rootless --target nightly-rootless . --push",
"build-docker-nightly-local": "npm run build && docker build -f docker/dockerfile -t louislam/uptime-kuma:nightly2 --target nightly .",
"build-docker-pr-test": "docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64 -t louislam/uptime-kuma:pr-test2 --target pr-test2 . --push",
"upload-artifacts": "docker buildx build -f docker/dockerfile --platform linux/amd64 -t louislam/uptime-kuma:upload-artifact --build-arg VERSION --build-arg GITHUB_TOKEN --target upload-artifact . --progress plain",
"setup": "git checkout 1.23.14 && npm ci --production && npm run download-dist",
"setup": "git checkout 1.23.2 && npm ci --production && npm run download-dist",
"download-dist": "node extra/download-dist.js",
"mark-as-nightly": "node extra/mark-as-nightly.js",
"reset-password": "node extra/reset-password.js",
"remove-2fa": "node extra/remove-2fa.js",
"compile-install-script": "@powershell -NoProfile -ExecutionPolicy Unrestricted -Command ./extra/compile-install-script.ps1",
"test-install-script-rockylinux": "npm run compile-install-script && docker build --progress plain -f test/test_install_script/rockylinux.dockerfile .",
"test-install-script-centos7": "npm run compile-install-script && docker build --progress plain -f test/test_install_script/centos7.dockerfile .",
"test-install-script-debian": "npm run compile-install-script && docker build --progress plain -f test/test_install_script/debian.dockerfile .",
"test-install-script-debian-buster": "npm run compile-install-script && docker build --progress plain -f test/test_install_script/debian-buster.dockerfile .",
"test-install-script-ubuntu": "npm run compile-install-script && docker build --progress plain -f test/test_install_script/ubuntu.dockerfile .",
"test-install-script-ubuntu1804": "npm run compile-install-script && docker build --progress plain -f test/test_install_script/ubuntu1804.dockerfile .",
"test-install-script-ubuntu1604": "npm run compile-install-script && docker build --progress plain -f test/test_install_script/ubuntu1604.dockerfile .",
"simple-dns-server": "node extra/simple-dns-server.js",
"simple-mqtt-server": "node extra/simple-mqtt-server.js",
"simple-mongo": "docker run --rm -p 27017:27017 mongo",
"simple-postgres": "docker run --rm -p 5432:5432 -e POSTGRES_PASSWORD=postgres postgres",
"simple-mariadb": "docker run --rm -p 3306:3306 -e MYSQL_ROOT_PASSWORD=mariadb# mariadb",
"update-language-files": "cd extra/update-language-files && node index.js && cross-env-shell eslint ../../src/languages/$npm_config_language.js --fix",
"release-final": "node ./extra/test-docker.js && node extra/update-version.js && npm run build-docker && node ./extra/press-any-key.js && npm run upload-artifacts && node ./extra/update-wiki-version.js",
"release-beta": "node ./extra/test-docker.js && node extra/beta/update-version.js && npm run build && node ./extra/env2arg.js docker buildx build -f docker/dockerfile --platform linux/amd64,linux/arm64,linux/arm/v7 -t louislam/uptime-kuma:$VERSION -t louislam/uptime-kuma:beta . --target release --push && node ./extra/press-any-key.js && npm run upload-artifacts",
"git-remove-tag": "git tag -d",
"build-dist-and-restart": "npm run build && npm run start-server-dev",
"start-pr-test": "node extra/checkout-pr.js && npm install && npm run dev",
"cy:test": "node test/prepare-test-server.js && node server/server.js --port=3002 --data-dir=./data/test/ --e2e",
"cy:run": "npx cypress run --browser chrome --headless --config-file ./config/cypress.config.js",
"cy:run:unit": "npx cypress run --browser chrome --headless --config-file ./config/cypress.frontend.config.js",
"cypress-open": "concurrently -k -r \"node test/prepare-test-server.js && node server/server.js --port=3002 --data-dir=./data/test/\" \"cypress open --config-file ./config/cypress.config.js\"",
"build-healthcheck-armv7": "cross-env GOOS=linux GOARCH=arm GOARM=7 go build -x -o ./extra/healthcheck-armv7 ./extra/healthcheck.go",
"deploy-demo-server": "node extra/deploy-demo-server.js",
"sort-contributors": "node extra/sort-contributors.js",
"quick-run-nightly": "docker run --rm --env NODE_ENV=development -p 3001:3001 louislam/uptime-kuma:nightly2",
"start-dev-container": "cd docker && docker-compose -f docker-compose-dev.yml up --force-recreate",
"rebase-pr-to-1.23.X": "node extra/rebase-pr.js 1.23.X",
"reset-migrate-aggregate-table-state": "node extra/reset-migrate-aggregate-table-state.js"
"rebase-pr-to-1.23.X": "node extra/rebase-pr.js 1.23.X"
},
"dependencies": {
"@grpc/grpc-js": "~1.8.22",
"@grpc/grpc-js": "~1.7.3",
"@louislam/ping": "~0.4.4-mod.1",
"@louislam/sqlite3": "15.1.6",
"@vvo/tzdb": "^6.125.0",
"args-parser": "~1.3.0",
"axios": "~0.28.1",
"axios": "~0.27.0",
"axios-ntlm": "1.3.0",
"badge-maker": "~3.3.1",
"bcryptjs": "~2.4.3",
"cacheable-lookup": "~6.0.4",
"chardet": "~1.4.0",
"check-password-strength": "^2.0.5",
"cheerio": "~1.0.0-rc.12",
@@ -87,82 +92,73 @@
"command-exists": "~1.2.9",
"compare-versions": "~3.6.0",
"compression": "~1.7.4",
"croner": "~8.1.0",
"croner": "~6.0.5",
"dayjs": "~1.11.5",
"dev-null": "^0.1.1",
"dotenv": "~16.0.3",
"express": "~4.21.0",
"express": "~4.17.3",
"express-basic-auth": "~1.2.1",
"express-static-gzip": "~2.1.7",
"feed": "^4.2.2",
"form-data": "~4.0.0",
"gamedig": "^4.2.0",
"html-escaper": "^3.0.3",
"http-cookie-agent": "~5.0.4",
"gamedig": "~4.0.5",
"http-graceful-shutdown": "~3.1.7",
"http-proxy-agent": "~5.0.0",
"https-proxy-agent": "~5.0.1",
"iconv-lite": "~0.6.3",
"isomorphic-ws": "^5.0.0",
"jsesc": "~3.0.2",
"json5": "~2.2.3",
"jsonata": "^2.0.3",
"jsonschema": "~1.4.1",
"jsonwebtoken": "~9.0.0",
"jwt-decode": "~3.1.2",
"kafkajs": "^2.2.4",
"knex": "^2.4.2",
"limiter": "~2.1.0",
"liquidjs": "^10.7.0",
"marked": "^14.0.0",
"mitt": "~3.0.1",
"mongodb": "~4.17.1",
"mqtt": "~4.3.7",
"mssql": "~11.0.0",
"mysql2": "~3.9.6",
"mssql": "~8.1.4",
"mysql2": "~2.3.3",
"nanoid": "~3.3.4",
"net-snmp": "^3.11.2",
"node-cloudflared-tunnel": "~1.0.9",
"node-radius-client": "~1.0.0",
"nodemailer": "~6.9.13",
"nodemailer": "~6.6.5",
"nostr-tools": "^1.13.1",
"notp": "~2.0.3",
"openid-client": "^5.4.2",
"password-hash": "~1.2.2",
"pg": "~8.11.3",
"pg-connection-string": "~2.6.2",
"playwright-core": "~1.39.0",
"pg": "~8.8.0",
"pg-connection-string": "~2.5.0",
"playwright-core": "~1.35.1",
"prom-client": "~13.2.0",
"prometheus-api-metrics": "~3.2.1",
"promisify-child-process": "~4.1.2",
"protobufjs": "~7.2.4",
"qs": "~6.10.4",
"redbean-node": "~0.3.0",
"redis": "~4.5.1",
"semver": "~7.5.4",
"socket.io": "~4.8.0",
"socket.io-client": "~4.8.0",
"socket.io": "~4.7.2",
"socket.io-client": "~4.7.2",
"socks-proxy-agent": "6.1.1",
"tar": "~6.2.1",
"tar": "~6.1.11",
"tcp-ping": "~0.1.1",
"thirty-two": "~1.0.2",
"tough-cookie": "~4.1.3",
"ws": "^8.13.0"
"ws": "~8.11.0"
},
"devDependencies": {
"@actions/github": "~5.1.1",
"@actions/github": "~5.0.1",
"@babel/eslint-parser": "^7.22.7",
"@babel/preset-env": "^7.15.8",
"@fortawesome/fontawesome-svg-core": "~1.2.36",
"@fortawesome/free-regular-svg-icons": "~5.15.4",
"@fortawesome/free-solid-svg-icons": "~5.15.4",
"@fortawesome/vue-fontawesome": "~3.0.0-5",
"@playwright/test": "~1.39.0",
"@popperjs/core": "~2.10.2",
"@testcontainers/hivemq": "^10.13.1",
"@testcontainers/rabbitmq": "^10.13.2",
"@types/bootstrap": "~5.1.9",
"@types/node": "^20.8.6",
"@typescript-eslint/eslint-plugin": "^6.7.5",
"@typescript-eslint/parser": "^6.7.5",
"@vitejs/plugin-vue": "~5.0.1",
"@vue/compiler-sfc": "~3.4.2",
"@vitejs/plugin-legacy": "~4.1.0",
"@vitejs/plugin-vue": "~4.2.3",
"@vue/compiler-sfc": "~3.3.4",
"@vuepic/vue-datepicker": "~3.4.8",
"aedes": "^0.46.3",
"bootstrap": "5.1.3",
@@ -172,14 +168,16 @@
"core-js": "~3.26.1",
"cronstrue": "~2.24.0",
"cross-env": "~7.0.3",
"cypress": "^13.2.0",
"delay": "^5.0.0",
"dns2": "~2.0.1",
"dompurify": "~3.1.7",
"dompurify": "~2.4.3",
"eslint": "~8.14.0",
"eslint-plugin-jsdoc": "~46.4.6",
"eslint-plugin-vue": "~8.7.1",
"favico.js": "~0.3.10",
"get-port-please": "^3.1.1",
"jest": "~29.6.1",
"marked": "~4.2.5",
"node-ssh": "~13.1.0",
"postcss-html": "~1.5.0",
"postcss-rtlcss": "~3.7.2",
@@ -192,13 +190,13 @@
"stylelint-config-standard": "~25.0.0",
"terser": "~5.15.0",
"test": "~3.3.0",
"testcontainers": "^10.13.1",
"timezones-list": "~3.0.1",
"typescript": "~4.4.4",
"v-pagination-3": "~0.1.7",
"vite": "~5.2.8",
"vite": "~4.4.1",
"vite-plugin-commonjs": "^0.8.0",
"vite-plugin-compression": "^0.5.1",
"vite-plugin-vue-devtools": "^7.0.15",
"vue": "~3.4.2",
"vue": "~3.3.4",
"vue-chartjs": "~5.2.0",
"vue-confirm-dialog": "~1.0.2",
"vue-contenteditable": "~3.0.4",
@@ -207,10 +205,10 @@
"vue-multiselect": "~3.0.0-alpha.2",
"vue-prism-editor": "~2.0.0-alpha.2",
"vue-qrcode": "~1.0.0",
"vue-router": "~4.2.5",
"vue-router": "~4.0.14",
"vue-toastification": "~2.0.0-rc.5",
"vuedraggable": "~4.1.0",
"wait-on": "^7.2.0",
"wait-on": "^6.0.1",
"whatwg-url": "~12.0.1"
}
}

View File

@@ -1,9 +1,10 @@
<svg width="640" height="640" viewBox="0 0 640 640" xml:space="preserve" xmlns="http://www.w3.org/2000/svg">
<g transform="matrix(1 0 0 1 320 320)">
<linearGradient id="S3" gradientUnits="userSpaceOnUse" gradientTransform="matrix(1 0 0 1 -319.99875 -320.0001577393)" x1="259.78" y1="261.15" x2="463.85" y2="456.49">
<svg width="640" height="640" viewBox="0 0 640 640" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M490.4 235.64C544.09 358.38 544.09 435.34 490.4 466.5C409.85 513.24 199.96 527.49 139.54 455.64C99.2601 407.74 99.2601 334.4 139.54 235.64C180.5 168.18 238.71 134.45 314.17 134.45C389.64 134.45 448.38 168.18 490.4 235.64Z" fill="url(#paint0_linear_381_799)"/>
<path d="M490.4 235.64C544.09 358.38 544.09 435.34 490.4 466.5C409.85 513.24 199.96 527.49 139.54 455.64C99.2601 407.74 99.2601 334.4 139.54 235.64C180.5 168.18 238.71 134.45 314.17 134.45C389.64 134.45 448.38 168.18 490.4 235.64Z" stroke="#F2F2F2" stroke-opacity="0.51" stroke-width="200"/>
<defs>
<linearGradient id="paint0_linear_381_799" x1="259.78" y1="261.15" x2="463.85" y2="456.49" gradientUnits="userSpaceOnUse">
<stop stop-color="#5CDD8B"/>
<stop offset="1" stop-color="#86E6A9"/>
</linearGradient>
<path style="stroke: rgb(242,242,242); stroke-opacity: 0.51; stroke-width: 200; stroke-dasharray: none; stroke-linecap: butt; stroke-dashoffset: 0; stroke-linejoin: miter; stroke-miterlimit: 4; fill: url(#S3); fill-rule: nonzero; opacity: 1;" transform=" translate(0, 0)" d="M 170.40125 -84.36016 C 224.09125 38.37984 224.09125 115.33984 170.40125 146.49984 C 89.85125000000001 193.23984000000002 -120.03875 207.48984000000002 -180.45875 135.63984 C -220.73875 87.73983999999999 -220.73875 14.399839999999998 -180.45875 -84.36016000000001 C -139.49875 -151.82016 -81.28875000000001 -185.55016 -5.828750000000014 -185.55016 C 69.64124999999999 -185.55016 128.38125 -151.82016000000002 170.40124999999998 -84.36016000000001 z" stroke-linecap="round" />
</g>
</defs>
</svg>

Before: Size 1.1 KiB | After: Size 893 B

View File

@@ -36,20 +36,32 @@ exports.login = async function (username, password) {
return null;
};
/**
* An API key has the form: "uk" prefix + key ID, then "_", then the clear key
* @param {string} key API Key
* @returns {{clear: string, index: string}} Parsed API key
*/
exports.parseAPIKey = function (key) {
let index = key.substring(2, key.indexOf("_"));
let clear = key.substring(key.indexOf("_") + 1, key.length);
return {
index,
clear,
};
};
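// Example (illustrative, made-up key): parseAPIKey splits on the first "_", so
// exports.parseAPIKey("uk3_myClearKey") returns { index: "3", clear: "myClearKey" };
// "3" is the api_key row ID and "myClearKey" is checked against the stored hash.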
/**
* Validate a provided API key
* @param {string} key API key to verify
* @returns {boolean} API is ok?
* @returns {Promise<boolean>} API is ok?
*/
async function verifyAPIKey(key) {
if (typeof key !== "string") {
return false;
}
// uk prefix + key ID is before _
let index = key.substring(2, key.indexOf("_"));
let clear = key.substring(key.indexOf("_") + 1, key.length);
const { index, clear } = exports.parseAPIKey(key);
let hash = await R.findOne("api_key", " id=? ", [ index ]);
if (hash === null) {
@@ -65,6 +77,28 @@ async function verifyAPIKey(key) {
return hash && passwordHash.verify(clear, hash.key);
}
/**
* @param {string} key API key to verify
* @returns {Promise<void>}
* @throws {Error} If API key is invalid or rate limit exceeded
*/
async function verifyAPIKeyWithRateLimit(key) {
const pass = await apiRateLimiter.pass(null, 0);
if (pass) {
await apiRateLimiter.removeTokens(1);
const valid = await verifyAPIKey(key);
if (!valid) {
const errMsg = "Failed API auth attempt: invalid API Key";
log.warn("api-auth", errMsg);
throw new Error(errMsg);
}
} else {
const errMsg = "Failed API auth attempt: rate limit exceeded";
log.warn("api-auth", errMsg);
throw new Error(errMsg);
}
}
/**
* Callback for basic auth authorizers
* @callback authCallback
@@ -80,22 +114,10 @@ async function verifyAPIKey(key) {
* @returns {void}
*/
function apiAuthorizer(username, password, callback) {
// API Rate Limit
apiRateLimiter.pass(null, 0).then((pass) => {
if (pass) {
verifyAPIKey(password).then((valid) => {
if (!valid) {
log.warn("api-auth", "Failed API auth attempt: invalid API Key");
}
callback(null, valid);
// Only allow a set number of api requests per minute
// (currently set to 60)
apiRateLimiter.removeTokens(1);
});
} else {
log.warn("api-auth", "Failed API auth attempt: rate limit exceeded");
callback(null, false);
}
verifyAPIKeyWithRateLimit(password).then(() => {
callback(null, true);
}).catch(() => {
callback(null, false);
});
}
@@ -130,7 +152,7 @@ function userAuthorizer(username, password, callback) {
* @param {express.Request} req Express request object
* @param {express.Response} res Express response object
* @param {express.NextFunction} next Next handler in chain
* @returns {Promise<void>}
* @returns {void}
*/
exports.basicAuth = async function (req, res, next) {
const middleware = basicAuth({
@@ -153,27 +175,51 @@ exports.basicAuth = async function (req, res, next) {
* @param {express.Request} req Express request object
* @param {express.Response} res Express response object
* @param {express.NextFunction} next Next handler in chain
* @returns {Promise<void>}
* @returns {void}
*/
exports.apiAuth = async function (req, res, next) {
if (!await Settings.get("disableAuth")) {
let usingAPIKeys = await Settings.get("apiKeysEnabled");
let middleware;
if (usingAPIKeys) {
middleware = basicAuth({
authorizer: apiAuthorizer,
authorizeAsync: true,
challenge: true,
});
} else {
middleware = basicAuth({
authorizer: userAuthorizer,
authorizeAsync: true,
challenge: true,
exports.basicAuthMiddleware = async function (req, res, next) {
let middleware = basicAuth({
authorizer: apiAuthorizer,
authorizeAsync: true,
challenge: true,
});
middleware(req, res, next);
};
// Get the API key from the header Authorization and verify it
exports.headerAuthMiddleware = async function (req, res, next) {
const authorizationHeader = req.header("Authorization");
let key = null;
if (authorizationHeader && typeof authorizationHeader === "string") {
const arr = authorizationHeader.split(" ");
if (arr.length === 2) {
const type = arr[0];
if (type === "Bearer") {
key = arr[1];
}
}
}
if (key) {
try {
await verifyAPIKeyWithRateLimit(key);
res.locals.apiKeyID = exports.parseAPIKey(key).index;
next();
} catch (e) {
res.status(401);
res.json({
ok: false,
msg: e.message,
});
}
middleware(req, res, next);
} else {
next();
await apiRateLimiter.removeTokens(1);
res.status(401);
res.json({
ok: false,
msg: "No API Key provided, please provide an API Key in the \"Authorization\" header",
});
}
};
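A quick usage sketch for the new header-based auth above (not part of the diff; the mount path and route are hypothetical):
const express = require("express");
const { headerAuthMiddleware } = require("./auth");
const app = express();
// Require a valid "Authorization: Bearer uk<id>_<clearKey>" header on API routes
app.use("/api", headerAuthMiddleware);
app.get("/api/ping", (req, res) => {
    // On success the middleware stored the key ID for later use
    res.json({ ok: true, apiKeyID: res.locals.apiKeyID });
});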

View File

@@ -0,0 +1,88 @@
const https = require("https");
const http = require("http");
const CacheableLookup = require("cacheable-lookup");
const { Settings } = require("./settings");
const { log } = require("../src/util");
class CacheableDnsHttpAgent {
static cacheable = new CacheableLookup();
static httpAgentList = {};
static httpsAgentList = {};
static enable = false;
/**
* Enable or disable cacheable-lookup on the global HTTP/HTTPS agents
* @returns {void}
*/
static async update() {
log.debug("CacheableDnsHttpAgent", "update");
let isEnable = await Settings.get("dnsCache");
if (isEnable !== this.enable) {
log.debug("CacheableDnsHttpAgent", "value changed");
if (isEnable) {
log.debug("CacheableDnsHttpAgent", "enable");
this.cacheable.install(http.globalAgent);
this.cacheable.install(https.globalAgent);
} else {
log.debug("CacheableDnsHttpAgent", "disable");
this.cacheable.uninstall(http.globalAgent);
this.cacheable.uninstall(https.globalAgent);
}
}
this.enable = isEnable;
}
/**
* Attach cacheable to HTTP agent
* @param {http.Agent} agent Agent to install
* @returns {void}
*/
static install(agent) {
this.cacheable.install(agent);
}
/**
* @param {https.AgentOptions} agentOptions Options to pass to HTTPS agent
* @returns {https.Agent} The new HTTPS agent
*/
static getHttpsAgent(agentOptions) {
if (!this.enable) {
return new https.Agent(agentOptions);
}
let key = JSON.stringify(agentOptions);
if (!(key in this.httpsAgentList)) {
this.httpsAgentList[key] = new https.Agent(agentOptions);
this.cacheable.install(this.httpsAgentList[key]);
}
return this.httpsAgentList[key];
}
/**
* @param {http.AgentOptions} agentOptions Options to pass to the HTTP agent
* @returns {http.Agent} The new HTTP agent
*/
static getHttpAgent(agentOptions) {
if (!this.enable) {
return new http.Agent(agentOptions);
}
let key = JSON.stringify(agentOptions);
if (!(key in this.httpAgentList)) {
this.httpAgentList[key] = new http.Agent(agentOptions);
this.cacheable.install(this.httpAgentList[key]);
}
return this.httpAgentList[key];
}
}
module.exports = {
CacheableDnsHttpAgent,
};
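A minimal sketch of the per-options agent caching above (assuming the dnsCache setting is enabled; the options are illustrative):
const { CacheableDnsHttpAgent } = require("./cacheable-dns-http-agent");
const agentA = CacheableDnsHttpAgent.getHttpsAgent({ rejectUnauthorized: false });
const agentB = CacheableDnsHttpAgent.getHttpsAgent({ rejectUnauthorized: false });
// Identical agentOptions serialize to the same JSON.stringify key, so the same
// DNS-caching agent instance is reused: agentA === agentB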

View File

@@ -8,7 +8,6 @@ const server = UptimeKumaServer.getInstance();
const io = server.io;
const { setting } = require("./util-server");
const checkVersion = require("./check-version");
const Database = require("./database");
/**
* Send list of notification providers to client
@@ -145,20 +144,17 @@ async function sendInfo(socket, hideVersion = false) {
let version;
let latestVersion;
let isContainer;
let dbType;
if (!hideVersion) {
version = checkVersion.version;
latestVersion = checkVersion.latestVersion;
isContainer = (process.env.UPTIME_KUMA_IS_CONTAINER === "1");
dbType = Database.dbConfig.type;
}
socket.emit("info", {
version,
latestVersion,
isContainer,
dbType,
primaryBaseURL: await setting("primaryBaseURL"),
serverTimezone: await server.getTimezone(),
serverTimezoneOffset: server.getTimezoneOffset(),
@@ -189,56 +185,6 @@ async function sendDockerHostList(socket) {
return list;
}
/**
* Send list of docker hosts to client
* @param {Socket} socket Socket.io socket instance
* @returns {Promise<Bean[]>} List of docker hosts
*/
async function sendRemoteBrowserList(socket) {
const timeLogger = new TimeLogger();
let result = [];
let list = await R.find("remote_browser", " user_id = ? ", [
socket.userID,
]);
for (let bean of list) {
result.push(bean.toJSON());
}
io.to(socket.userID).emit("remoteBrowserList", result);
timeLogger.print("Send Remote Browser List");
return list;
}
/**
* Send list of monitor types to client
* @param {Socket} socket Socket.io socket instance
* @returns {Promise<void>}
*/
async function sendMonitorTypeList(socket) {
const result = Object.entries(UptimeKumaServer.monitorTypeList).map(([ key, type ]) => {
return [ key, {
supportsConditions: type.supportsConditions,
conditionVariables: type.conditionVariables.map(v => {
return {
id: v.id,
operators: v.operators.map(o => {
return {
id: o.id,
caption: o.caption,
};
}),
};
}),
}];
});
io.to(socket.userID).emit("monitorTypeList", Object.fromEntries(result));
}
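// Illustrative shape of the emitted payload (the type key and values are examples):
// { "dns": { supportsConditions: true,
//            conditionVariables: [ { id: "record", operators: [ { id: "equals", caption: "Equals" } ] } ] } }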
module.exports = {
sendNotificationList,
sendImportantHeartbeatList,
@@ -246,7 +192,5 @@ module.exports = {
sendProxyList,
sendAPIKeyList,
sendInfo,
sendDockerHostList,
sendRemoteBrowserList,
sendMonitorTypeList,
sendDockerHostList
};

View File

@@ -1,46 +1,29 @@
const isFreeBSD = /^freebsd/.test(process.platform);
// Interop with browser
const args = (typeof process !== "undefined") ? require("args-parser")(process.argv) : {};
// If host is omitted, the server will accept connections on the unspecified IPv6 address (::) when IPv6 is available and the unspecified IPv4 address (0.0.0.0) otherwise.
// Dual-stack support for (::)
// Also read HOST if not FreeBSD, as HOST is a system environment variable in FreeBSD
let hostEnv = isFreeBSD ? null : process.env.HOST;
const hostname = args.host || process.env.UPTIME_KUMA_HOST || hostEnv;
const port = [ args.port, process.env.UPTIME_KUMA_PORT, process.env.PORT, 3001 ]
.map(portValue => parseInt(portValue))
.find(portValue => !isNaN(portValue));
const sslKey = args["ssl-key"] || process.env.UPTIME_KUMA_SSL_KEY || process.env.SSL_KEY || undefined;
const sslCert = args["ssl-cert"] || process.env.UPTIME_KUMA_SSL_CERT || process.env.SSL_CERT || undefined;
const sslKeyPassphrase = args["ssl-key-passphrase"] || process.env.UPTIME_KUMA_SSL_KEY_PASSPHRASE || process.env.SSL_KEY_PASSPHRASE || undefined;
const isSSL = sslKey && sslCert;
/**
* Get the local WebSocket URL
* @returns {string} The local WebSocket URL
*/
function getLocalWebSocketURL() {
const protocol = isSSL ? "wss" : "ws";
const host = hostname || "localhost";
return `${protocol}://${host}:${port}`;
}
const localWebSocketURL = getLocalWebSocketURL();
const demoMode = args["demo"] || false;
const badgeConstants = {
naColor: "#999",
defaultUpColor: "#66c20a",
defaultWarnColor: "#eed202",
defaultDownColor: "#c2290a",
defaultPendingColor: "#f8a306",
defaultMaintenanceColor: "#1747f5",
defaultPingColor: "blue", // as defined by badge-maker / shields.io
defaultStyle: "flat",
defaultPingValueSuffix: "ms",
defaultPingLabelSuffix: "h",
defaultUptimeValueSuffix: "%",
defaultUptimeLabelSuffix: "h",
defaultCertExpValueSuffix: " days",
defaultCertExpLabelSuffix: "h",
// Values Come From Default Notification Times
defaultCertExpireWarnDays: "14",
defaultCertExpireDownDays: "7"
};
module.exports = {
args,
hostname,
port,
sslKey,
sslCert,
sslKeyPassphrase,
isSSL,
localWebSocketURL,
demoMode,
badgeConstants,
};
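For illustration, the resolution order above picks the first value that parses as an integer among args.port, UPTIME_KUMA_PORT, PORT and the 3001 fallback (a sketch; the values are hypothetical):
const { port, localWebSocketURL } = require("./config");
// UPTIME_KUMA_PORT=8080 node server/server.js  ->  8080
// node server/server.js with nothing set      ->  3001
console.log(port);
// With no SSL key/cert and no hostname configured: "ws://localhost:3001"
console.log(localWebSocketURL);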

View File

@@ -6,50 +6,28 @@ const knex = require("knex");
const path = require("path");
const { EmbeddedMariaDB } = require("./embedded-mariadb");
const mysql = require("mysql2/promise");
const { Settings } = require("./settings");
const { UptimeCalculator } = require("./uptime-calculator");
const dayjs = require("dayjs");
const { SimpleMigrationServer } = require("./utils/simple-migration-server");
/**
* Database & App Data Folder
*/
class Database {
/**
* Bootstrap database for SQLite
* @type {string}
*/
static templatePath = "./db/kuma.db";
/**
* Data Dir (Default: ./data)
* @type {string}
*/
static dataDir;
/**
* User Upload Dir (Default: ./data/upload)
* @type {string}
*/
static uploadDir;
/**
* Chrome Screenshot Dir (Default: ./data/screenshots)
* @type {string}
*/
static screenshotDir;
/**
* SQLite file path (Default: ./data/kuma.db)
* @type {string}
*/
static sqlitePath;
/**
* For storing Docker TLS certs (Default: ./data/docker-tls)
* @type {string}
*/
static dockerTLSDir;
/**
@@ -106,11 +84,7 @@ class Database {
"patch-add-certificate-expiry-status-page.sql": true,
"patch-monitor-oauth-cc.sql": true,
"patch-add-timeout-monitor.sql": true,
"patch-add-gamedig-given-port.sql": true,
"patch-notification-config.sql": true,
"patch-fix-kafka-producer-booleans.sql": true,
"patch-timeout.sql": true,
"patch-monitor-tls-info-add-fk.sql": true, // The last file so far converted to a knex migration file
"patch-add-gamedig-given-port.sql": true, // The last file so far converted to a knex migration file
};
/**
@@ -156,7 +130,7 @@ class Database {
fs.mkdirSync(Database.dockerTLSDir, { recursive: true });
}
log.info("server", `Data Dir: ${Database.dataDir}`);
log.info("db", `Data Dir: ${Database.dataDir}`);
}
/**
@@ -213,9 +187,9 @@ class Database {
let config = {};
let mariadbPoolConfig = {
min: 0,
max: 10,
idleTimeoutMillis: 30000,
afterCreate: function (conn, done) {
}
};
log.info("db", `Database Type: ${dbConfig.type}`);
@@ -268,14 +242,7 @@ class Database {
user: dbConfig.username,
password: dbConfig.password,
database: dbConfig.dbName,
timezone: "Z",
typeCast: function (field, next) {
if (field.type === "DATETIME") {
// Do not perform timezone conversion
return field.string();
}
return next();
},
timezone: "+00:00",
},
pool: mariadbPoolConfig,
};
@@ -289,14 +256,6 @@ class Database {
socketPath: embeddedMariaDB.socketPath,
user: "node",
database: "kuma",
timezone: "Z",
typeCast: function (field, next) {
if (field.type === "DATETIME") {
// Do not perform timezone conversion
return field.string();
}
return next();
},
},
pool: mariadbPoolConfig,
};
@@ -358,10 +317,10 @@ class Database {
await R.exec("PRAGMA synchronous = NORMAL");
if (!noLog) {
log.debug("db", "SQLite config:");
log.debug("db", await R.getAll("PRAGMA journal_mode"));
log.debug("db", await R.getAll("PRAGMA cache_size"));
log.debug("db", "SQLite Version: " + await R.getCell("SELECT sqlite_version()"));
log.info("db", "SQLite config:");
log.info("db", await R.getAll("PRAGMA journal_mode"));
log.info("db", await R.getAll("PRAGMA cache_size"));
log.info("db", "SQLite Version: " + await R.getCell("SELECT sqlite_version()"));
}
}
@@ -383,11 +342,9 @@ class Database {
/**
* Patch the database
* @param {number} port Start the migration server for aggregate tables on this port if provided
* @param {string} hostname Start the migration server for aggregate tables on this hostname if provided
* @returns {Promise<void>}
* @returns {void}
*/
static async patch(port = undefined, hostname = undefined) {
static async patch() {
// Still need to keep this for old versions of Uptime Kuma
if (Database.dbConfig.type === "sqlite") {
await this.patchSqlite();
@@ -397,23 +354,9 @@ class Database {
// https://knexjs.org/guide/migrations.html
// https://gist.github.com/NigelEarle/70db130cc040cc2868555b29a0278261
try {
// Disable foreign key check for SQLite
// Known issue of knex: https://github.com/drizzle-team/drizzle-orm/issues/1813
if (Database.dbConfig.type === "sqlite") {
await R.exec("PRAGMA foreign_keys = OFF");
}
await R.knex.migrate.latest({
directory: Database.knexMigrationsPath,
});
// Enable foreign key check for SQLite
if (Database.dbConfig.type === "sqlite") {
await R.exec("PRAGMA foreign_keys = ON");
}
await this.migrateAggregateTable(port, hostname);
} catch (e) {
// Allow missing patch files for downgrade or testing pr.
if (e.message.includes("the following files are missing:")) {
@@ -446,15 +389,13 @@ class Database {
version = 0;
}
if (version !== this.latestVersion) {
log.info("db", "Your database version: " + version);
log.info("db", "Latest database version: " + this.latestVersion);
}
log.info("db", "Your database version: " + version);
log.info("db", "Latest database version: " + this.latestVersion);
if (version === this.latestVersion) {
log.debug("db", "Database patch not needed");
log.info("db", "Database patch not needed");
} else if (version > this.latestVersion) {
log.warn("db", "Warning: Database version is newer than expected");
log.info("db", "Warning: Database version is newer than expected");
} else {
log.info("db", "Database patch is needed");
@@ -490,7 +431,7 @@ class Database {
* @returns {Promise<void>}
*/
static async patchSqlite2() {
log.debug("db", "Database Patch 2.0 Process");
log.info("db", "Database Patch 2.0 Process");
let databasePatchedFiles = await setting("databasePatchedFiles");
if (! databasePatchedFiles) {
@@ -731,173 +672,6 @@ class Database {
}
}
/**
* Migrate the old data in the heartbeat table to the new format (stat_daily, stat_hourly, stat_minutely)
* It should be run once while upgrading V1 to V2
*
* Normally, it should be run in a transaction, but UptimeCalculator wasn't designed to run in a transaction before this.
* I don't want to heavily modify the UptimeCalculator, so it is not in a transaction.
* Run `npm run reset-migrate-aggregate-table-state` to reset, in case the migration is interrupted.
* @param {number} port Start the migration server on this port if provided
* @param {string} hostname Start the migration server on this hostname if provided
* @returns {Promise<void>}
*/
static async migrateAggregateTable(port, hostname = undefined) {
log.debug("db", "Enter Migrate Aggregate Table function");
// Add a setting for 2.0.0-dev users to skip this migration
if (process.env.SET_MIGRATE_AGGREGATE_TABLE_TO_TRUE === "1") {
log.warn("db", "SET_MIGRATE_AGGREGATE_TABLE_TO_TRUE is set to 1, skipping aggregate table migration forever (for 2.0.0-dev users)");
await Settings.set("migrateAggregateTableState", "migrated");
}
let migrateState = await Settings.get("migrateAggregateTableState");
// Skip if already migrated
// If it is migrating, it possibly means the migration was interrupted, or the migration is in progress
if (migrateState === "migrated") {
log.debug("db", "Migrated aggregate table already, skip");
return;
} else if (migrateState === "migrating") {
log.warn("db", "Aggregate table migration is already in progress, or it was interrupted");
throw new Error("Aggregate table migration is already in progress");
}
/**
* Start migration server for displaying the migration status
* @type {SimpleMigrationServer}
*/
let migrationServer;
let msg;
if (port) {
migrationServer = new SimpleMigrationServer();
await migrationServer.start(port, hostname);
}
log.info("db", "Migrating Aggregate Table");
log.info("db", "Getting list of unique monitors");
// Get a list of unique monitors from the heartbeat table, using raw sql
let monitors = await R.getAll(`
SELECT DISTINCT monitor_id
FROM heartbeat
ORDER BY monitor_id ASC
`);
// Stop if stat_* tables are not empty
for (let table of [ "stat_minutely", "stat_hourly", "stat_daily" ]) {
let countResult = await R.getRow(`SELECT COUNT(*) AS count FROM ${table}`);
let count = countResult.count;
if (count > 0) {
log.warn("db", `Aggregate table ${table} is not empty, migration will not be started (Maybe you were using 2.0.0-dev?)`);
await migrationServer?.stop();
return;
}
}
await Settings.set("migrateAggregateTableState", "migrating");
let progressPercent = 0;
let part = 100 / monitors.length;
let i = 1;
for (let monitor of monitors) {
// Get a list of unique dates from the heartbeat table, using raw sql
let dates = await R.getAll(`
SELECT DISTINCT DATE(time) AS date
FROM heartbeat
WHERE monitor_id = ?
ORDER BY date ASC
`, [
monitor.monitor_id
]);
for (let date of dates) {
// New Uptime Calculator
let calculator = new UptimeCalculator();
calculator.monitorID = monitor.monitor_id;
calculator.setMigrationMode(true);
// Get all the heartbeats for this monitor and date
let heartbeats = await R.getAll(`
SELECT status, ping, time
FROM heartbeat
WHERE monitor_id = ?
AND DATE(time) = ?
ORDER BY time ASC
`, [ monitor.monitor_id, date.date ]);
if (heartbeats.length > 0) {
msg = `[DON'T STOP] Migrating monitor data ${monitor.monitor_id} - ${date.date} [${progressPercent.toFixed(2)}%][${i}/${monitors.length}]`;
log.info("db", msg);
migrationServer?.update(msg);
}
for (let heartbeat of heartbeats) {
await calculator.update(heartbeat.status, parseFloat(heartbeat.ping), dayjs(heartbeat.time));
}
progressPercent += (Math.round(part / dates.length * 100) / 100);
// Not fixing the floating point rounding here; it is acceptable since it is just a progress bar
if (progressPercent > 100) {
progressPercent = 100;
}
}
i++;
}
msg = "Clearing non-important heartbeats";
log.info("db", msg);
migrationServer?.update(msg);
await Database.clearHeartbeatData(true);
await Settings.set("migrateAggregateTableState", "migrated");
await migrationServer?.stop();
if (monitors.length > 0) {
log.info("db", "Aggregate Table Migration Completed");
} else {
log.info("db", "No data to migrate");
}
}
/**
* Remove all non-important heartbeats from the heartbeat table; keep the last 24 hours or {KEEP_LAST_ROWS} rows for each monitor
* @param {boolean} detailedLog Log detailed information
* @returns {Promise<void>}
*/
static async clearHeartbeatData(detailedLog = false) {
let monitors = await R.getAll("SELECT id FROM monitor");
const sqlHourOffset = Database.sqlHourOffset();
for (let monitor of monitors) {
if (detailedLog) {
log.info("db", "Deleting non-important heartbeats for monitor " + monitor.id);
}
await R.exec(`
DELETE FROM heartbeat
WHERE monitor_id = ?
AND important = 0
AND time < ${sqlHourOffset}
AND id NOT IN (
SELECT id
FROM heartbeat
WHERE monitor_id = ?
ORDER BY time DESC
LIMIT ?
)
`, [
monitor.id,
-24,
monitor.id,
100,
]);
}
}
}
module.exports = Database;
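Usage sketch for clearHeartbeatData above (an assumption, not part of the diff): the -24 parameter feeds the hour offset so only beats older than 24 hours qualify, while the NOT IN subquery always keeps the newest 100 rows per monitor.
const Database = require("./database");
// detailedLog = true logs one deletion line per monitor
await Database.clearHeartbeatData(true);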

View File

@@ -1,10 +1,10 @@
const axios = require("axios");
const { R } = require("redbean-node");
const version = require("../package.json").version;
const https = require("https");
const fs = require("fs");
const path = require("path");
const Database = require("./database");
const { axiosAbortSignal } = require("./util-server");
class DockerHost {
@@ -65,16 +65,15 @@ class DockerHost {
/**
* Fetches the amount of containers on the Docker host
* @param {object} dockerHost Docker host to check for
* @returns {Promise<number>} Total amount of containers on the host
* @returns {number} Total amount of containers on the host
*/
static async testDockerHost(dockerHost) {
const options = {
url: "/containers/json?all=true",
timeout: 5000,
headers: {
"Accept": "*/*",
"User-Agent": "Uptime-Kuma/" + version
},
signal: axiosAbortSignal(6000),
};
if (dockerHost.dockerType === "socket") {
@@ -84,33 +83,26 @@ class DockerHost {
options.httpsAgent = new https.Agent(DockerHost.getHttpsAgentOptions(dockerHost.dockerType, options.baseURL));
}
try {
let res = await axios.request(options);
let res = await axios.request(options);
if (Array.isArray(res.data)) {
if (Array.isArray(res.data)) {
if (res.data.length > 1) {
if (res.data.length > 1) {
if ("ImageID" in res.data[0]) {
return res.data.length;
} else {
throw new Error("Invalid Docker response, is it really a Docker daemon?");
}
} else {
if ("ImageID" in res.data[0]) {
return res.data.length;
} else {
throw new Error("Invalid Docker response, is it really a Docker daemon?");
}
} else {
throw new Error("Invalid Docker response, is it really a Docker daemon?");
}
} catch (e) {
if (e.code === "ECONNABORTED" || e.name === "CanceledError") {
throw new Error("Connection to Docker daemon timed out.");
} else {
throw e;
return res.data.length;
}
} else {
throw new Error("Invalid Docker response, is it really a Docker daemon?");
}
}
/**

View File

@@ -1,5 +1,4 @@
const jsesc = require("jsesc");
const { escape } = require("html-escaper");
/**
* Returns the JavaScript snippet required to insert the Google Analytics scripts
@@ -8,18 +7,15 @@ const { escape } = require("html-escaper");
* @returns {string} HTML script tags to inject into page
*/
function getGoogleAnalyticsScript(tagId) {
let escapedTagIdJS = jsesc(tagId, { isScriptContext: true });
let escapedTagId = jsesc(tagId, { isScriptContext: true });
if (escapedTagIdJS) {
escapedTagIdJS = escapedTagIdJS.trim();
if (escapedTagId) {
escapedTagId = escapedTagId.trim();
}
// Escape the tag ID for use in an HTML attribute.
let escapedTagIdHTMLAttribute = escape(tagId);
return `
<script async src="https://www.googletagmanager.com/gtag/js?id=${escapedTagIdHTMLAttribute}"></script>
<script>window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date());gtag('config', '${escapedTagIdJS}'); </script>
<script async src="https://www.googletagmanager.com/gtag/js?id=${escapedTagId}"></script>
<script>window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date());gtag('config', '${escapedTagId}'); </script>
`;
}
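A usage sketch (assuming the module exports this function; the tag ID is illustrative) showing that the same ID is escaped twice, once for the HTML src attribute and once for the inline JS string:
const { getGoogleAnalyticsScript } = require("./google-analytics");
// Produces the <script> pair above with the tag ID safely escaped in both contexts
const html = getGoogleAnalyticsScript("G-ABC123");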

View File

@@ -1,22 +1,21 @@
const { R } = require("redbean-node");
const { log } = require("../../src/util");
const { setSetting, setting } = require("../util-server");
const Database = require("../database");
const { Settings } = require("../settings");
const dayjs = require("dayjs");
const DEFAULT_KEEP_PERIOD = 365;
const DEFAULT_KEEP_PERIOD = 180;
/**
* Clears old data from the heartbeat table and the stat_daily of the database.
* Clears old data from the heartbeat table of the database.
* @returns {Promise<void>} A promise that resolves when the data has been cleared.
*/
const clearOldData = async () => {
await Database.clearHeartbeatData();
let period = await Settings.get("keepDataPeriodDays");
let period = await setting("keepDataPeriodDays");
// Set Default Period
if (period == null) {
await Settings.set("keepDataPeriodDays", DEFAULT_KEEP_PERIOD, "general");
await setSetting("keepDataPeriodDays", DEFAULT_KEEP_PERIOD, "general");
period = DEFAULT_KEEP_PERIOD;
}
@@ -26,28 +25,23 @@ const clearOldData = async () => {
parsedPeriod = parseInt(period);
} catch (_) {
log.warn("clearOldData", "Failed to parse setting, resetting to default...");
await Settings.set("keepDataPeriodDays", DEFAULT_KEEP_PERIOD, "general");
await setSetting("keepDataPeriodDays", DEFAULT_KEEP_PERIOD, "general");
parsedPeriod = DEFAULT_KEEP_PERIOD;
}
if (parsedPeriod < 1) {
log.info("clearOldData", `Data deletion has been disabled as period is less than 1. Period is ${parsedPeriod} days.`);
} else {
log.debug("clearOldData", `Clearing Data older than ${parsedPeriod} days...`);
const sqlHourOffset = Database.sqlHourOffset();
try {
// Heartbeat
await R.exec("DELETE FROM heartbeat WHERE time < " + sqlHourOffset, [
parsedPeriod * -24,
]);
let timestamp = dayjs().subtract(parsedPeriod, "day").utc().startOf("day").unix();
// stat_daily
await R.exec("DELETE FROM stat_daily WHERE timestamp < ? ", [
timestamp,
]);
await R.exec(
"DELETE FROM heartbeat WHERE time < " + sqlHourOffset,
[ parsedPeriod * -24 ]
);
if (Database.dbConfig.type === "sqlite") {
await R.exec("PRAGMA optimize;");
@@ -56,8 +50,6 @@ const clearOldData = async () => {
log.error("clearOldData", `Failed to clear old data: ${e.message}`);
}
}
log.debug("clearOldData", "Data cleared.");
};
module.exports = {

View File

@@ -9,7 +9,7 @@ class Group extends BeanModel {
* @param {boolean} showTags Should the JSON include monitor tags
* @param {boolean} certExpiry Should JSON include info about
* certificate expiry?
* @returns {Promise<object>} Object ready to parse
* @returns {object} Object ready to parse
*/
async toPublicJSON(showTags = false, certExpiry = false) {
let monitorBeanList = await this.getMonitorList();
@@ -29,7 +29,7 @@ class Group extends BeanModel {
/**
* Get all monitors
* @returns {Promise<Bean[]>} List of monitors
* @returns {Bean[]} List of monitors
*/
async getMonitorList() {
return R.convertToBeans("monitor", await R.getAll(`

View File

@@ -29,14 +29,13 @@ class Heartbeat extends BeanModel {
*/
toJSON() {
return {
monitorID: this._monitorId,
status: this._status,
time: this._time,
msg: this._msg,
ping: this._ping,
important: this._important,
duration: this._duration,
retries: this._retries,
monitorID: this.monitor_id,
status: this.status,
time: this.time,
msg: this.msg,
ping: this.ping,
important: this.important,
duration: this.duration,
};
}

View File

@@ -11,7 +11,7 @@ class Maintenance extends BeanModel {
/**
* Return an object that is ready to parse to JSON for the public
* Only show necessary data to the public
* @returns {Promise<object>} Object ready to parse
* @returns {object} Object ready to parse
*/
async toPublicJSON() {
@@ -98,7 +98,7 @@ class Maintenance extends BeanModel {
/**
* Return an object that is ready to parse to JSON
* @param {string} timezone If not specified, the timeRange will be in UTC
* @returns {Promise<object>} Object ready to parse
* @returns {object} Object ready to parse
*/
async toJSON(timezone = null) {
return this.toPublicJSON(timezone);
@@ -143,7 +143,7 @@ class Maintenance extends BeanModel {
* Convert data from socket to bean
* @param {Bean} bean Bean to fill in
* @param {object} obj Data to fill bean with
* @returns {Promise<Bean>} Filled bean
* @returns {Bean} Filled bean
*/
static async jsonToBean(bean, obj) {
if (obj.id) {
@@ -189,9 +189,9 @@ class Maintenance extends BeanModel {
/**
* Throw error if cron is invalid
* @param {string|Date} cron Pattern or date
* @returns {void}
* @returns {Promise<void>}
*/
static validateCron(cron) {
static async validateCron(cron) {
let job = new Cron(cron, () => {});
job.stop();
}
@@ -239,7 +239,19 @@ class Maintenance extends BeanModel {
this.beanMeta.status = "under-maintenance";
clearTimeout(this.beanMeta.durationTimeout);
let duration = this.inferDuration(customDuration);
// Check if duration is still in the window. If not, use the duration from the current time to the end of the window
let duration;
if (customDuration > 0) {
duration = customDuration;
} else if (this.end_date) {
let d = dayjs(this.end_date).diff(dayjs(), "second");
if (d < this.duration) {
duration = d * 1000;
}
} else {
duration = this.duration * 1000;
}
UptimeKumaServer.getInstance().sendMaintenanceListByUserID(this.user_id);
@@ -251,21 +263,9 @@ class Maintenance extends BeanModel {
};
// Create Cron
if (this.strategy === "recurring-interval") {
// For recurring-interval, Croner needs to have interval and startAt
const startDate = dayjs(this.startDate);
const [ hour, minute ] = this.startTime.split(":");
const startDateTime = startDate.hour(hour).minute(minute);
this.beanMeta.job = new Cron(this.cron, {
timezone: await this.getTimezone(),
interval: this.interval_day * 24 * 60 * 60,
startAt: startDateTime.toISOString(),
}, startEvent);
} else {
this.beanMeta.job = new Cron(this.cron, {
timezone: await this.getTimezone(),
}, startEvent);
}
this.beanMeta.job = new Cron(this.cron, {
timezone: await this.getTimezone(),
}, startEvent);
// Continue if the maintenance is still in the window
let runningTimeslot = this.getRunningTimeslot();
@@ -311,24 +311,6 @@ class Maintenance extends BeanModel {
}
}
/**
* Calculate the maintenance duration
* @param {number} customDuration - The custom duration in milliseconds.
* @returns {number} The inferred duration in milliseconds.
*/
inferDuration(customDuration) {
// Check if duration is still in the window. If not, use the duration from the current time to the end of the window
if (customDuration > 0) {
return customDuration;
} else if (this.end_date) {
let d = dayjs(this.end_date).diff(dayjs(), "second");
if (d < this.duration) {
return d * 1000;
}
}
return this.duration * 1000;
}
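// Worked example (hypothetical values): with a configured duration of 600s, an
// end_date 180s away and customDuration = 0, inferDuration returns 180 * 1000,
// clamping the timeout to the remaining window instead of the full duration.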
/**
* Stop the maintenance
* @returns {void}
@@ -342,7 +324,7 @@ class Maintenance extends BeanModel {
/**
* Is this maintenance currently active
* @returns {Promise<boolean>} The maintenance is active?
* @returns {boolean} The maintenance is active?
*/
async isUnderMaintenance() {
return (await this.getStatus()) === "under-maintenance";
@@ -350,7 +332,7 @@ class Maintenance extends BeanModel {
/**
* Get the timezone of the maintenance
* @returns {Promise<string>} timezone
* @returns {string} timezone
*/
async getTimezone() {
if (!this.timezone || this.timezone === "SAME_AS_SERVER") {
@@ -361,7 +343,7 @@ class Maintenance extends BeanModel {
/**
* Get offset for timezone
* @returns {Promise<string>} offset
* @returns {string} offset
*/
async getTimezoneOffset() {
return dayjs.tz(dayjs(), await this.getTimezone()).format("Z");
@@ -369,7 +351,7 @@ class Maintenance extends BeanModel {
/**
* Get the current status of the maintenance
* @returns {Promise<string>} Current status
* @returns {string} Current status
*/
async getStatus() {
if (!this.active) {
@@ -413,8 +395,10 @@ class Maintenance extends BeanModel {
} else if (!this.strategy.startsWith("recurring-")) {
this.cron = "";
} else if (this.strategy === "recurring-interval") {
// For intervals, the actual schedule is applied in the run function via Croner's interval option, so a placeholder pattern is used here
this.cron = "* * * * *";
let array = this.start_time.split(":");
let hour = parseInt(array[0]);
let minute = parseInt(array[1]);
this.cron = minute + " " + hour + " */" + this.interval_day + " * *";
this.duration = this.calcDuration();
log.debug("maintenance", "Cron: " + this.cron);
log.debug("maintenance", "Duration: " + this.duration);

View File

@@ -1,11 +1,12 @@
const https = require("https");
const dayjs = require("dayjs");
const axios = require("axios");
const { Prometheus } = require("../prometheus");
const { log, UP, DOWN, PENDING, MAINTENANCE, flipStatus, MAX_INTERVAL_SECOND, MIN_INTERVAL_SECOND,
SQL_DATETIME_FORMAT, evaluateJsonQuery
SQL_DATETIME_FORMAT
} = require("../../src/util");
const { tcping, ping, checkCertificate, checkStatusCode, getTotalClientInRoom, setting, mssqlQuery, postgresQuery, mysqlQuery, setSetting, httpNtlm, radius, grpcQuery,
redisPingAsync, kafkaProducerAsync, getOidcTokenClientCredentials, rootCertificatesFingerprints, axiosAbortSignal
const { tcping, ping, checkCertificate, checkStatusCode, getTotalClientInRoom, setting, mssqlQuery, postgresQuery, mysqlQuery, mqttAsync, setSetting, httpNtlm, radius, grpcQuery,
redisPingAsync, mongodbPing, kafkaProducerAsync, getOidcTokenClientCredentials,
} = require("../util-server");
const { R } = require("redbean-node");
const { BeanModel } = require("redbean-node/dist/bean-model");
@@ -15,17 +16,12 @@ const { demoMode } = require("../config");
const version = require("../../package.json").version;
const apicache = require("../modules/apicache");
const { UptimeKumaServer } = require("../uptime-kuma-server");
const { CacheableDnsHttpAgent } = require("../cacheable-dns-http-agent");
const { DockerHost } = require("../docker");
const Gamedig = require("gamedig");
const jsonata = require("jsonata");
const jwt = require("jsonwebtoken");
const crypto = require("crypto");
const { UptimeCalculator } = require("../uptime-calculator");
const { CookieJar } = require("tough-cookie");
const { HttpsCookieAgent } = require("http-cookie-agent/http");
const https = require("https");
const http = require("http");
const rootCertificates = rootCertificatesFingerprints();
/**
* status:
@@ -42,7 +38,7 @@ class Monitor extends BeanModel {
* @param {boolean} showTags Include tags in JSON
* @param {boolean} certExpiry Include certificate expiry info in
* JSON
* @returns {Promise<object>} Object ready to parse
* @returns {object} Object ready to parse
*/
async toPublicJSON(showTags = false, certExpiry = false) {
let obj = {
@@ -60,7 +56,7 @@ class Monitor extends BeanModel {
obj.tags = await this.getTags();
}
if (certExpiry && (this.type === "http" || this.type === "keyword" || this.type === "json-query") && this.getURLProtocol() === "https:") {
if (certExpiry && this.type === "http" && this.getURLProtocol() === "https:") {
const { certExpiryDaysRemaining, validCert } = await this.getCertExpiry(this.id);
obj.certExpiryDaysRemaining = certExpiryDaysRemaining;
obj.validCert = validCert;
@@ -71,12 +67,23 @@ class Monitor extends BeanModel {
/**
* Return an object that is ready to parse to JSON
* @param {object} preloadData to prevent n+1 problems, we query the data in a batch outside of this function
* @param {boolean} includeSensitiveData Include sensitive data in
* JSON
* @returns {object} Object ready to parse
*/
toJSON(preloadData = {}, includeSensitiveData = true) {
async toJSON(includeSensitiveData = true) {
let notificationIDList = {};
let list = await R.find("monitor_notification", " monitor_id = ? ", [
this.id,
]);
for (let bean of list) {
notificationIDList[bean.notification_id] = true;
}
const tags = await this.getTags();
let screenshot = null;
@@ -84,25 +91,21 @@ class Monitor extends BeanModel {
screenshot = "/screenshots/" + jwt.sign(this.id, UptimeKumaServer.getInstance().jwtSecret) + ".png";
}
const path = preloadData.paths.get(this.id) || [];
const pathName = path.join(" / ");
let data = {
id: this.id,
name: this.name,
description: this.description,
path,
pathName,
pathName: await this.getPathName(),
parent: this.parent,
childrenIDs: preloadData.childrenIDs.get(this.id) || [],
childrenIDs: await Monitor.getAllChildrenIDs(this.id),
url: this.url,
method: this.method,
hostname: this.hostname,
port: this.port,
maxretries: this.maxretries,
weight: this.weight,
active: preloadData.activeStatus.get(this.id),
forceInactive: preloadData.forceInactive.get(this.id),
active: await this.isActive(),
forceInactive: !await Monitor.isParentActive(this.id),
type: this.type,
timeout: this.timeout,
interval: this.interval,
@@ -122,12 +125,11 @@ class Monitor extends BeanModel {
docker_container: this.docker_container,
docker_host: this.docker_host,
proxyId: this.proxy_id,
notificationIDList: preloadData.notifications.get(this.id) || {},
tags: preloadData.tags.get(this.id) || [],
maintenance: preloadData.maintenanceStatus.get(this.id),
notificationIDList,
tags: tags,
maintenance: await Monitor.isUnderMaintenance(this.id),
mqttTopic: this.mqttTopic,
mqttSuccessMessage: this.mqttSuccessMessage,
mqttCheckType: this.mqttCheckType,
databaseQuery: this.databaseQuery,
authMethod: this.authMethod,
grpcUrl: this.grpcUrl,
@@ -144,17 +146,10 @@ class Monitor extends BeanModel {
expectedValue: this.expectedValue,
kafkaProducerTopic: this.kafkaProducerTopic,
kafkaProducerBrokers: JSON.parse(this.kafkaProducerBrokers),
kafkaProducerSsl: this.getKafkaProducerSsl(),
kafkaProducerAllowAutoTopicCreation: this.getKafkaProducerAllowAutoTopicCreation(),
kafkaProducerSsl: this.kafkaProducerSsl === "1" && true || false,
kafkaProducerAllowAutoTopicCreation: this.kafkaProducerAllowAutoTopicCreation === "1" && true || false,
kafkaProducerMessage: this.kafkaProducerMessage,
screenshot,
cacheBust: this.getCacheBust(),
remote_browser: this.remote_browser,
snmpOid: this.snmpOid,
jsonPathOperator: this.jsonPathOperator,
snmpVersion: this.snmpVersion,
rabbitmqNodes: JSON.parse(this.rabbitmqNodes),
conditions: JSON.parse(this.conditions),
};
if (includeSensitiveData) {
@@ -184,8 +179,6 @@ class Monitor extends BeanModel {
tlsCert: this.tlsCert,
tlsKey: this.tlsKey,
kafkaProducerSaslOptions: JSON.parse(this.kafkaProducerSaslOptions),
rabbitmqUsername: this.rabbitmqUsername,
rabbitmqPassword: this.rabbitmqPassword,
};
}
@@ -193,6 +186,16 @@ class Monitor extends BeanModel {
return data;
}
/**
* Checks if the monitor is active based on itself and its parents
* @returns {Promise<boolean>} Is the monitor active?
*/
async isActive() {
const parentActive = await Monitor.isParentActive(this.id);
return (this.active === 1) && parentActive;
}
/**
* Get all tags applied to this monitor
* @returns {Promise<LooseObject<any>[]>} List of tags on the
@@ -231,12 +234,12 @@ class Monitor extends BeanModel {
/**
* Encode user and password to Base64 encoding
* for HTTP "basic" auth, as per RFC-7617
* @param {string|null} user - The username (nullable if not changed by a user)
* @param {string|null} pass - The password (nullable if not changed by a user)
* @returns {string} Encoded Base64 string
* @param {string} user Username to encode
* @param {string} pass Password to encode
* @returns {string} Encoded username:password
*/
encodeBase64(user, pass) {
return Buffer.from(`${user || ""}:${pass || ""}`).toString("base64");
return Buffer.from(user + ":" + pass).toString("base64");
}
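// Quick check (illustrative credentials, per RFC 7617):
// encodeBase64("user", "pass") === "dXNlcjpwYXNz",
// i.e. Buffer.from("user:pass").toString("base64"), sent as "Basic dXNlcjpwYXNz".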
/**
@@ -279,14 +282,6 @@ class Monitor extends BeanModel {
return Boolean(this.grpcEnableTls);
}
/**
* Parse to boolean
* @returns {boolean} if cachebusting is enabled
*/
getCacheBust() {
return Boolean(this.cacheBust);
}
/**
* Get accepted status codes
* @returns {object} Accepted status codes
@@ -303,28 +298,12 @@ class Monitor extends BeanModel {
return Boolean(this.gamedigGivenPortOnly);
}
/**
* Parse to boolean
* @returns {boolean} Kafka Producer Ssl enabled?
*/
getKafkaProducerSsl() {
return Boolean(this.kafkaProducerSsl);
}
/**
* Parse to boolean
* @returns {boolean} Kafka Producer Allow Auto Topic Creation Enabled?
*/
getKafkaProducerAllowAutoTopicCreation() {
return Boolean(this.kafkaProducerAllowAutoTopicCreation);
}
/**
* Start monitor
* @param {Server} io Socket server instance
* @returns {Promise<void>}
* @returns {void}
*/
async start(io) {
start(io) {
let previousBeat = null;
let retries = 0;
@@ -353,9 +332,6 @@ class Monitor extends BeanModel {
previousBeat = await R.findOne("heartbeat", " monitor_id = ? ORDER BY time DESC", [
this.id,
]);
if (previousBeat) {
retries = previousBeat.retries;
}
}
const isFirstBeat = !previousBeat;
@@ -370,12 +346,6 @@ class Monitor extends BeanModel {
bean.status = flipStatus(bean.status);
}
// Patch the timeout at runtime if it is 0 or unset
// See https://github.com/louislam/uptime-kuma/pull/3961#issuecomment-1804149144
if (!this.timeout || this.timeout <= 0) {
this.timeout = this.interval * 1000 * 0.8;
}
try {
if (await Monitor.isUnderMaintenance(this.id)) {
bean.msg = "Monitor under maintenance";
@@ -431,7 +401,9 @@ class Monitor extends BeanModel {
if (this.auth_method === "oauth2-cc") {
try {
if (this.oauthAccessToken === undefined || new Date(this.oauthAccessToken.expires_at * 1000) <= new Date()) {
this.oauthAccessToken = await this.makeOidcTokenClientCredentialsRequest();
log.debug("monitor", `[${this.name}] The oauth access-token undefined or expired. Requesting a new one`);
this.oauthAccessToken = await getOidcTokenClientCredentials(this.oauth_token_url, this.oauth_client_id, this.oauth_client_secret, this.oauth_scopes, this.oauth_auth_method);
log.debug("monitor", `[${this.name}] Obtained oauth access-token. Expires at ${new Date(this.oauthAccessToken.expires_at * 1000)}`);
}
oauth2AuthHeader = {
"Authorization": this.oauthAccessToken.token_type + " " + this.oauthAccessToken.access_token,
@@ -444,7 +416,6 @@ class Monitor extends BeanModel {
const httpsAgentOptions = {
maxCachedSessions: 0, // Use Custom agent to disable session reuse (https://github.com/nodejs/node/issues/3940)
rejectUnauthorized: !this.getIgnoreTls(),
secureOptions: crypto.constants.SSL_OP_LEGACY_SERVER_CONNECT,
};
log.debug("monitor", `[${this.name}] Prepare Options for axios`);
@@ -476,6 +447,7 @@ class Monitor extends BeanModel {
timeout: this.timeout * 1000,
headers: {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"User-Agent": "Uptime-Kuma/" + version,
...(contentType ? { "Content-Type": contentType } : {}),
...(basicAuthHeader),
...(oauth2AuthHeader),
@@ -485,21 +457,12 @@ class Monitor extends BeanModel {
validateStatus: (status) => {
return checkStatusCode(status, this.getAcceptedStatuscodes());
},
signal: axiosAbortSignal((this.timeout + 10) * 1000),
};
if (bodyValue) {
options.data = bodyValue;
}
if (this.cacheBust) {
const randomFloatString = Math.random().toString(36);
const cacheBust = randomFloatString.substring(2);
options.params = {
uptime_kuma_cachebuster: cacheBust,
};
}
if (this.proxy_id) {
const proxy = await R.load("proxy", this.proxy_id);
@@ -515,12 +478,7 @@ class Monitor extends BeanModel {
}
if (!options.httpsAgent) {
let jar = new CookieJar();
let httpsCookieAgentOptions = {
...httpsAgentOptions,
cookies: { jar }
};
options.httpsAgent = new HttpsCookieAgent(httpsCookieAgentOptions);
options.httpsAgent = new https.Agent(httpsAgentOptions);
}
if (this.auth_method === "mtls") {
@@ -535,18 +493,6 @@ class Monitor extends BeanModel {
}
}
let tlsInfo = {};
// Store tlsInfo when secureConnect event is emitted
// The keylog event listener is a workaround to access the tlsSocket
options.httpsAgent.once("keylog", async (line, tlsSocket) => {
tlsSocket.once("secureConnect", async () => {
tlsInfo = checkCertificate(tlsSocket);
tlsInfo.valid = tlsSocket.authorized || false;
await this.handleTlsInfo(tlsInfo);
});
});
log.debug("monitor", `[${this.name}] Axios Options: ${JSON.stringify(options)}`);
log.debug("monitor", `[${this.name}] Axios Request`);
@@ -556,19 +502,31 @@ class Monitor extends BeanModel {
bean.msg = `${res.status} - ${res.statusText}`;
bean.ping = dayjs().valueOf() - startTime;
// Fallback if the keylog event is not emitted, but we may still have tlsInfo,
// e.g. if the connection is made through a proxy
if (this.getUrl()?.protocol === "https:" && tlsInfo.valid === undefined) {
const tlsSocket = res.request.res.socket;
// Check certificate if https is used
let certInfoStartTime = dayjs().valueOf();
if (this.getUrl()?.protocol === "https:") {
log.debug("monitor", `[${this.name}] Check cert`);
try {
let tlsInfoObject = checkCertificate(res);
tlsInfo = await this.updateTlsInfo(tlsInfoObject);
if (tlsSocket) {
tlsInfo = checkCertificate(tlsSocket);
tlsInfo.valid = tlsSocket.authorized || false;
if (!this.getIgnoreTls() && this.isEnabledExpiryNotification()) {
log.debug("monitor", `[${this.name}] call checkCertExpiryNotifications`);
await this.checkCertExpiryNotifications(tlsInfoObject);
}
await this.handleTlsInfo(tlsInfo);
} catch (e) {
if (e.message !== "No TLS certificate in response") {
log.error("monitor", "Caught error");
log.error("monitor", e.message);
}
}
}
if (process.env.TIMELOGGER === "1") {
log.debug("monitor", "Cert Info Query Time: " + (dayjs().valueOf() - certInfoStartTime) + "ms");
}
if (process.env.UPTIME_KUMA_LOG_RESPONSE_BODY_MONITOR_ID === this.id) {
log.info("monitor", res.data);
}
@@ -600,15 +558,21 @@ class Monitor extends BeanModel {
} else if (this.type === "json-query") {
let data = res.data;
const { status, response } = await evaluateJsonQuery(data, this.jsonPath, this.jsonPathOperator, this.expectedValue);
if (status) {
bean.status = UP;
bean.msg = `JSON query passes (comparing ${response} ${this.jsonPathOperator} ${this.expectedValue})`;
} else {
throw new Error(`JSON query does not pass (comparing ${response} ${this.jsonPathOperator} ${this.expectedValue})`);
// convert data to object
if (typeof data === "string") {
data = JSON.parse(data);
}
let expression = jsonata(this.jsonPath);
let result = await expression.evaluate(data);
if (result.toString() === this.expectedValue) {
bean.msg += ", expected value is found";
bean.status = UP;
} else {
throw new Error(bean.msg + ", but value is not equal to expected value, value was: [" + result + "]");
}
}
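// Illustration of the legacy path above (hypothetical payload): with jsonPath
// "status" and expectedValue "ok", a response body of {"status": "ok"} evaluates
// to "ok" and the beat is UP; any other value throws, so the beat goes
// DOWN/PENDING per the retry logic.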
} else if (this.type === "port") {
@@ -632,7 +596,6 @@ class Monitor extends BeanModel {
// If the previous beat was down or pending we use the regular
// beatInterval/retryInterval in the setTimeout further below
if (previousBeat.status !== (this.isUpsideDown() ? DOWN : UP) || msSinceLastBeat > beatInterval * 1000 + bufferTime) {
bean.duration = Math.round(msSinceLastBeat / 1000);
throw new Error("No heartbeat in the time window");
} else {
let timeout = beatInterval * 1000 - msSinceLastBeat;
@@ -648,7 +611,6 @@ class Monitor extends BeanModel {
return;
}
} else {
bean.duration = beatInterval;
throw new Error("No heartbeat in the time window");
}
@@ -665,13 +627,13 @@ class Monitor extends BeanModel {
timeout: this.timeout * 1000,
headers: {
"Accept": "*/*",
"User-Agent": "Uptime-Kuma/" + version,
},
httpsAgent: new https.Agent({
httpsAgent: CacheableDnsHttpAgent.getHttpsAgent({
maxCachedSessions: 0, // Use Custom agent to disable session reuse (https://github.com/nodejs/node/issues/3940)
rejectUnauthorized: !this.getIgnoreTls(),
secureOptions: crypto.constants.SSL_OP_LEGACY_SERVER_CONNECT,
}),
httpAgent: new http.Agent({
httpAgent: CacheableDnsHttpAgent.getHttpAgent({
maxCachedSessions: 0,
}),
maxRedirects: this.maxredirects,
@@ -712,33 +674,29 @@ class Monitor extends BeanModel {
} else if (this.type === "docker") {
log.debug("monitor", `[${this.name}] Prepare Options for Axios`);
const dockerHost = await R.load("docker_host", this.docker_host);
const options = {
url: `/containers/${this.docker_container}/json`,
timeout: this.interval * 1000 * 0.8,
headers: {
"Accept": "*/*",
"User-Agent": "Uptime-Kuma/" + version,
},
httpsAgent: new https.Agent({
httpsAgent: CacheableDnsHttpAgent.getHttpsAgent({
maxCachedSessions: 0, // Use Custom agent to disable session reuse (https://github.com/nodejs/node/issues/3940)
rejectUnauthorized: !this.getIgnoreTls(),
secureOptions: crypto.constants.SSL_OP_LEGACY_SERVER_CONNECT,
}),
httpAgent: new http.Agent({
httpAgent: CacheableDnsHttpAgent.getHttpAgent({
maxCachedSessions: 0,
}),
};
const dockerHost = await R.load("docker_host", this.docker_host);
if (!dockerHost) {
throw new Error("Failed to load docker host config");
}
if (dockerHost._dockerType === "socket") {
options.socketPath = dockerHost._dockerDaemon;
} else if (dockerHost._dockerType === "tcp") {
options.baseURL = DockerHost.patchDockerURL(dockerHost._dockerDaemon);
options.httpsAgent = new https.Agent(
options.httpsAgent = CacheableDnsHttpAgent.getHttpsAgent(
DockerHost.getHttpsAgentOptions(dockerHost._dockerType, options.baseURL)
);
}
@@ -757,10 +715,18 @@ class Monitor extends BeanModel {
} else {
throw Error("Container State is " + res.data.State.Status);
}
} else if (this.type === "mqtt") {
bean.msg = await mqttAsync(this.hostname, this.mqttTopic, this.mqttSuccessMessage, {
port: this.port,
username: this.mqttUsername,
password: this.mqttPassword,
interval: this.interval,
});
bean.status = UP;
} else if (this.type === "sqlserver") {
let startTime = dayjs().valueOf();
await mssqlQuery(this.databaseConnectionString, this.databaseQuery || "SELECT 1");
await mssqlQuery(this.databaseConnectionString, this.databaseQuery);
bean.msg = "";
bean.status = UP;
@@ -799,7 +765,7 @@ class Monitor extends BeanModel {
} else if (this.type === "postgres") {
let startTime = dayjs().valueOf();
await postgresQuery(this.databaseConnectionString, this.databaseQuery || "SELECT 1");
await postgresQuery(this.databaseConnectionString, this.databaseQuery);
bean.msg = "";
bean.status = UP;
@@ -807,13 +773,18 @@ class Monitor extends BeanModel {
} else if (this.type === "mysql") {
let startTime = dayjs().valueOf();
// Use `radius_password` as `password` field, since there are too many unnecessary fields
// TODO: rename `radius_password` to `password` later for general use
let mysqlPassword = this.radiusPassword;
bean.msg = await mysqlQuery(this.databaseConnectionString, this.databaseQuery || "SELECT 1", mysqlPassword);
bean.msg = await mysqlQuery(this.databaseConnectionString, this.databaseQuery);
bean.status = UP;
bean.ping = dayjs().valueOf() - startTime;
} else if (this.type === "mongodb") {
let startTime = dayjs().valueOf();
await mongodbPing(this.databaseConnectionString);
bean.msg = "";
bean.status = UP;
bean.ping = dayjs().valueOf() - startTime;
} else if (this.type === "radius") {
let startTime = dayjs().valueOf();
@@ -844,7 +815,7 @@ class Monitor extends BeanModel {
} else if (this.type === "redis") {
let startTime = dayjs().valueOf();
bean.msg = await redisPingAsync(this.databaseConnectionString, !this.ignoreTls);
bean.msg = await redisPingAsync(this.databaseConnectionString);
bean.status = UP;
bean.ping = dayjs().valueOf() - startTime;
@@ -890,11 +861,7 @@ class Monitor extends BeanModel {
} catch (error) {
if (error?.name === "CanceledError") {
bean.msg = `timeout by AbortSignal (${this.timeout}s)`;
} else {
bean.msg = error.message;
}
bean.msg = error.message;
// If UP come in here, it must be upside down mode
// Just reset the retries
@@ -904,14 +871,9 @@ class Monitor extends BeanModel {
} else if ((this.maxretries > 0) && (retries < this.maxretries)) {
retries++;
bean.status = PENDING;
} else {
// Continue counting retries during DOWN
retries++;
}
}
bean.retries = retries;
log.debug("monitor", `[${this.name}] Check isImportant`);
let isImportant = Monitor.isImportantBeat(isFirstBeat, previousBeat?.status, bean.status);
@@ -934,7 +896,7 @@ class Monitor extends BeanModel {
log.debug("monitor", `[${this.name}] apicache clear`);
apicache.clear();
await UptimeKumaServer.getInstance().sendMaintenanceListByUserID(this.user_id);
UptimeKumaServer.getInstance().sendMaintenanceListByUserID(this.user_id);
} else {
bean.important = false;
@@ -1054,44 +1016,27 @@ class Monitor extends BeanModel {
}
return res;
} catch (error) {
/**
* Make a single attempt to obtain a new access token in the event that
* the recent API request failed for authentication purposes
*/
if (this.auth_method === "oauth2-cc" && error.response.status === 401 && !finalCall) {
this.oauthAccessToken = await this.makeOidcTokenClientCredentialsRequest();
let oauth2AuthHeader = {
"Authorization": this.oauthAccessToken.token_type + " " + this.oauthAccessToken.access_token,
};
options.headers = { ...(options.headers),
...(oauth2AuthHeader)
};
return this.makeAxiosRequest(options, true);
}
} catch (e) {
// Fix #2253
// Read more: https://stackoverflow.com/questions/1759956/curl-error-18-transfer-closed-with-outstanding-read-data-remaining
if (!finalCall && typeof error.message === "string" && error.message.includes("maxContentLength size of -1 exceeded")) {
if (!finalCall && typeof e.message === "string" && e.message.includes("maxContentLength size of -1 exceeded")) {
log.debug("monitor", "makeAxiosRequest with gzip");
options.headers["Accept-Encoding"] = "gzip, deflate";
return this.makeAxiosRequest(options, true);
} else {
if (typeof error.message === "string" && error.message.includes("maxContentLength size of -1 exceeded")) {
error.message = "response timeout: incomplete response within an interval";
if (typeof e.message === "string" && e.message.includes("maxContentLength size of -1 exceeded")) {
e.message = "response timeout: incomplete response within an interval";
}
throw error;
throw e;
}
}
}
/**
* Stop monitor
* @returns {Promise<void>}
* @returns {void}
*/
async stop() {
stop() {
clearTimeout(this.heartbeatInterval);
this.isStop = true;
@@ -1178,18 +1123,6 @@ class Monitor extends BeanModel {
return checkCertificateResult;
}
/**
* Checks if the monitor is active based on itself and its parents
* @param {number} monitorID ID of monitor to send
* @param {boolean} active is active
* @returns {Promise<boolean>} Is the monitor active?
*/
static async isActive(monitorID, active) {
const parentActive = await Monitor.isParentActive(monitorID);
return (active === 1) && parentActive;
}
/**
* Send statistics to clients
* @param {Server} io Socket server instance
@@ -1204,7 +1137,7 @@ class Monitor extends BeanModel {
if (hasClients) {
// Send 24 hour average ping
let data24h = await uptimeCalculator.get24Hour();
io.to(userID).emit("avgPing", monitorID, (data24h.avgPing) ? Number(data24h.avgPing.toFixed(2)) : null);
io.to(userID).emit("avgPing", monitorID, (data24h.avgPing) ? data24h.avgPing.toFixed(2) : null);
// Send 24 hour uptime
io.to(userID).emit("uptime", monitorID, 24, data24h.uptime);
@@ -1326,10 +1259,7 @@ class Monitor extends BeanModel {
for (let notification of notificationList) {
try {
const heartbeatJSON = bean.toJSON();
const monitorData = [{ id: monitor.id,
active: monitor.active
}];
const preloadData = await Monitor.preparePreloadData(monitorData);
// If the msg is undefined, notifications such as Discord cannot send it out.
if (!heartbeatJSON["msg"]) {
heartbeatJSON["msg"] = "N/A";
@@ -1340,7 +1270,7 @@ class Monitor extends BeanModel {
heartbeatJSON["timezoneOffset"] = UptimeKumaServer.getInstance().getTimezoneOffset();
heartbeatJSON["localDateTime"] = dayjs.utc(heartbeatJSON["time"]).tz(heartbeatJSON["timezone"]).format(SQL_DATETIME_FORMAT);
await Notification.send(JSON.parse(notification.config), msg, monitor.toJSON(preloadData, false), heartbeatJSON);
await Notification.send(JSON.parse(notification.config), msg, await monitor.toJSON(false), heartbeatJSON);
} catch (e) {
log.error("monitor", "Cannot send notification to " + notification.name);
log.error("monitor", e);
@@ -1379,7 +1309,7 @@ class Monitor extends BeanModel {
let notifyDays = await setting("tlsExpiryNotifyDays");
if (notifyDays == null || !Array.isArray(notifyDays)) {
// Reset Default
await setSetting("tlsExpiryNotifyDays", [ 7, 14, 21 ], "general");
setSetting("tlsExpiryNotifyDays", [ 7, 14, 21 ], "general");
notifyDays = [ 7, 14, 21 ];
}
@@ -1388,10 +1318,7 @@ class Monitor extends BeanModel {
let certInfo = tlsInfoObject.certInfo;
while (certInfo) {
let subjectCN = certInfo.subject["CN"];
if (rootCertificates.has(certInfo.fingerprint256)) {
log.debug("monitor", `Known root cert: ${certInfo.certType} certificate "${subjectCN}" (${certInfo.daysRemaining} days valid) on ${targetDays} deadline.`);
break;
} else if (certInfo.daysRemaining > targetDays) {
if (certInfo.daysRemaining > targetDays) {
log.debug("monitor", `No need to send cert notification for ${certInfo.certType} certificate "${subjectCN}" (${certInfo.daysRemaining} days valid) on ${targetDays} deadline.`);
} else {
log.debug("monitor", `call sendCertNotificationByTargetDays for ${targetDays} deadline on certificate ${subjectCN}.`);
@@ -1457,7 +1384,10 @@ class Monitor extends BeanModel {
* @returns {Promise<LooseObject<any>>} Previous heartbeat
*/
static async getPreviousHeartbeat(monitorID) {
return await R.findOne("heartbeat", " id = (select MAX(id) from heartbeat where monitor_id = ?)", [
return await R.getRow(`
SELECT ping, status, time FROM heartbeat
WHERE id = (select MAX(id) from heartbeat where monitor_id = ?)
`, [
monitorID
]);
}
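For orientation, a hedged sketch of how this helper feeds the beat logic earlier in this file (the names mirror that code; this exact call site is not part of the hunk):

const previousBeat = await Monitor.getPreviousHeartbeat(this.id);
const isFirstBeat = !previousBeat;
// Only ping, status and time are selected, which is all the beat logic reads:
let isImportant = Monitor.isImportantBeat(isFirstBeat, previousBeat?.status, bean.status);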
@@ -1502,108 +1432,6 @@ class Monitor extends BeanModel {
}
}
/**
* Gets the notifications of multiple monitors
* @param {Array} monitorIDs IDs of the monitors to get notifications for
* @returns {Promise<LooseObject<any>>} Monitor notification rows
*/
static async getMonitorNotification(monitorIDs) {
return await R.getAll(`
SELECT monitor_notification.monitor_id, monitor_notification.notification_id
FROM monitor_notification
WHERE monitor_notification.monitor_id IN (${monitorIDs.map((_) => "?").join(",")})
`, monitorIDs);
}
/**
* Gets the tags of multiple monitors
* @param {Array} monitorIDs IDs of the monitors to get tags for
* @returns {Promise<LooseObject<any>>} Monitor tag rows
*/
static async getMonitorTag(monitorIDs) {
return await R.getAll(`
SELECT monitor_tag.monitor_id, monitor_tag.tag_id, tag.name, tag.color
FROM monitor_tag
JOIN tag ON monitor_tag.tag_id = tag.id
WHERE monitor_tag.monitor_id IN (${monitorIDs.map((_) => "?").join(",")})
`, monitorIDs);
}
/**
* Prepares preloaded data for efficient access
* @param {Array} monitorData Monitor objects carrying the id and active fields
* @returns {Promise<LooseObject<any>>} Object of Maps keyed by monitor ID
*/
static async preparePreloadData(monitorData) {
const notificationsMap = new Map();
const tagsMap = new Map();
const maintenanceStatusMap = new Map();
const childrenIDsMap = new Map();
const activeStatusMap = new Map();
const forceInactiveMap = new Map();
const pathsMap = new Map();
if (monitorData.length > 0) {
const monitorIDs = monitorData.map(monitor => monitor.id);
const notifications = await Monitor.getMonitorNotification(monitorIDs);
const tags = await Monitor.getMonitorTag(monitorIDs);
const maintenanceStatuses = await Promise.all(monitorData.map(monitor => Monitor.isUnderMaintenance(monitor.id)));
const childrenIDs = await Promise.all(monitorData.map(monitor => Monitor.getAllChildrenIDs(monitor.id)));
const activeStatuses = await Promise.all(monitorData.map(monitor => Monitor.isActive(monitor.id, monitor.active)));
const forceInactiveStatuses = await Promise.all(monitorData.map(monitor => Monitor.isParentActive(monitor.id)));
const paths = await Promise.all(monitorData.map(monitor => Monitor.getAllPath(monitor.id, monitor.name)));
notifications.forEach(row => {
if (!notificationsMap.has(row.monitor_id)) {
notificationsMap.set(row.monitor_id, {});
}
notificationsMap.get(row.monitor_id)[row.notification_id] = true;
});
tags.forEach(row => {
if (!tagsMap.has(row.monitor_id)) {
tagsMap.set(row.monitor_id, []);
}
tagsMap.get(row.monitor_id).push({
tag_id: row.tag_id,
name: row.name,
color: row.color
});
});
monitorData.forEach((monitor, index) => {
maintenanceStatusMap.set(monitor.id, maintenanceStatuses[index]);
});
monitorData.forEach((monitor, index) => {
childrenIDsMap.set(monitor.id, childrenIDs[index]);
});
monitorData.forEach((monitor, index) => {
activeStatusMap.set(monitor.id, activeStatuses[index]);
});
monitorData.forEach((monitor, index) => {
forceInactiveMap.set(monitor.id, !forceInactiveStatuses[index]);
});
monitorData.forEach((monitor, index) => {
pathsMap.set(monitor.id, paths[index]);
});
}
return {
notifications: notificationsMap,
tags: tagsMap,
maintenanceStatus: maintenanceStatusMap,
childrenIDs: childrenIDsMap,
activeStatus: activeStatusMap,
forceInactive: forceInactiveMap,
paths: pathsMap,
};
}
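As a usage sketch: the notification path earlier in this diff builds a one-element monitorData array and hands the result to toJSON; the returned maps are keyed by monitor ID (illustrative access, not an exact call site):

const monitorData = [ { id: monitor.id, active: monitor.active } ];
const preloadData = await Monitor.preparePreloadData(monitorData);
const tags = preloadData.tags.get(monitor.id) || []; // [] when the monitor is untagged
const isUnderMaintenance = preloadData.maintenanceStatus.get(monitor.id);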
/**
* Gets Parent of the monitor
* @param {number} monitorID ID of monitor to get
@@ -1635,21 +1463,19 @@ class Monitor extends BeanModel {
}
/**
* Gets the full path
* @param {number} monitorID ID of the monitor to get
* @param {string} name Name of the monitor to get
* @returns {Promise<string[]>} Full path (includes groups and the name) of the monitor
* Gets Full Path-Name (Groups and Name)
* @returns {Promise<string>} Full path name of this monitor
*/
static async getAllPath(monitorID, name) {
const path = [ name ];
async getPathName() {
let path = this.name;
if (this.parent === null) {
return path;
}
let parent = await Monitor.getParent(monitorID);
let parent = await Monitor.getParent(this.id);
while (parent !== null) {
path.unshift(parent.name);
path = `${parent.name} / ${path}`;
parent = await Monitor.getParent(parent.id);
}
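To make the contrast between the two variants concrete, a hypothetical monitor named "API" nested under groups "Prod" and "Web" would yield (illustrative values only):

await Monitor.getAllPath(monitor.id, "API"); // [ "Prod", "Web", "API" ] — static array form
await monitor.getPathName();                 // "Prod / Web / API"      — instance string form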
@@ -1679,7 +1505,7 @@ class Monitor extends BeanModel {
}
/**
* Unlinks all children of the group monitor
* Unlinks all children of the group monitor
* @param {number} groupID ID of group to remove children of
* @returns {Promise<void>}
*/
@@ -1704,37 +1530,6 @@ class Monitor extends BeanModel {
const parentActive = await Monitor.isParentActive(parent.id);
return parent.active && parentActive;
}
/**
* Obtains a new Oidc Token
* @returns {Promise<object>} OAuthProvider client
*/
async makeOidcTokenClientCredentialsRequest() {
log.debug("monitor", `[${this.name}] The oauth access-token undefined or expired. Requesting a new token`);
const oAuthAccessToken = await getOidcTokenClientCredentials(this.oauth_token_url, this.oauth_client_id, this.oauth_client_secret, this.oauth_scopes, this.oauth_auth_method);
if (oAuthAccessToken?.expires_at) {
log.debug("monitor", `[${this.name}] Obtained oauth access-token. Expires at ${new Date(oAuthAccessToken.expires_at * 1000)}`);
} else {
log.debug("monitor", `[${this.name}] Obtained oauth access-token. Time until expiry was not provided`);
}
return oAuthAccessToken;
}
/**
* Store TLS certificate information and check for expiry
* @param {object} tlsInfo Information about the TLS connection
* @returns {Promise<void>}
*/
async handleTlsInfo(tlsInfo) {
await this.updateTlsInfo(tlsInfo);
this.prometheus?.update(null, tlsInfo);
if (!this.getIgnoreTls() && this.isEnabledExpiryNotification()) {
log.debug("monitor", `[${this.name}] call checkCertExpiryNotifications`);
await this.checkCertExpiryNotifications(tlsInfo);
}
}
}
module.exports = Monitor;

View File

@@ -1,17 +0,0 @@
const { BeanModel } = require("redbean-node/dist/bean-model");
class RemoteBrowser extends BeanModel {
/**
* Returns an object that ready to parse to JSON
* @returns {object} Object ready to parse
*/
toJSON() {
return {
id: this.id,
url: this.url,
name: this.name,
};
}
}
module.exports = RemoteBrowser;

View File

@@ -4,11 +4,6 @@ const cheerio = require("cheerio");
const { UptimeKumaServer } = require("../uptime-kuma-server");
const jsesc = require("jsesc");
const googleAnalytics = require("../google-analytics");
const { marked } = require("marked");
const { Feed } = require("feed");
const config = require("../config");
const { STATUS_PAGE_ALL_DOWN, STATUS_PAGE_ALL_UP, STATUS_PAGE_MAINTENANCE, STATUS_PAGE_PARTIAL_DOWN, UP, MAINTENANCE, DOWN } = require("../../src/util");
class StatusPage extends BeanModel {
@@ -18,38 +13,14 @@ class StatusPage extends BeanModel {
*/
static domainMappingList = { };
/**
* Handle responses to RSS pages
* @param {Response} response Response object
* @param {string} slug Status page slug
* @returns {Promise<void>}
*/
static async handleStatusPageRSSResponse(response, slug) {
let statusPage = await R.findOne("status_page", " slug = ? ", [
slug
]);
if (statusPage) {
response.send(await StatusPage.renderRSS(statusPage, slug));
} else {
response.status(404).send(UptimeKumaServer.getInstance().indexHTML);
}
}
/**
* Handle responses to status page
* @param {Response} response Response object
* @param {string} indexHTML HTML to render
* @param {string} slug Status page slug
* @returns {Promise<void>}
* @returns {void}
*/
static async handleStatusPageResponse(response, indexHTML, slug) {
// Handle url with trailing slash (http://localhost:3001/status/)
// The slug comes from the route "/status/:slug". If the slug is empty, express converts it to "index.html"
if (slug === "index.html") {
slug = "default";
}
let statusPage = await R.findOne("status_page", " slug = ? ", [
slug
]);
@@ -61,51 +32,15 @@ class StatusPage extends BeanModel {
}
}
/**
* SSR for RSS feed
* @param {StatusPage} statusPage Status page object
* @param {string} slug Slug from the router
* @returns {Promise<string>} the rendered html
*/
static async renderRSS(statusPage, slug) {
const { heartbeats, statusDescription } = await StatusPage.getRSSPageData(statusPage);
let proto = config.isSSL ? "https" : "http";
let host = `${proto}://${config.hostname || "localhost"}:${config.port}/status/${slug}`;
const feed = new Feed({
title: "uptime kuma rss feed",
description: `current status: ${statusDescription}`,
link: host,
language: "en", // optional, used only in RSS 2.0, possible values: http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
updated: new Date(), // optional, default = today
});
heartbeats.forEach(heartbeat => {
feed.addItem({
title: `${heartbeat.name} is down`,
description: `${heartbeat.name} has been down since ${heartbeat.time}`,
id: heartbeat.monitorID,
date: new Date(heartbeat.time),
});
});
return feed.rss2();
}
/**
* SSR for status pages
* @param {string} indexHTML HTML page to render
* @param {StatusPage} statusPage Status page populate HTML with
* @returns {Promise<string>} the rendered html
* @returns {void}
*/
static async renderHTML(indexHTML, statusPage) {
const $ = cheerio.load(indexHTML);
const description155 = marked(statusPage.description ?? "")
.replace(/<[^>]+>/gm, "")
.trim()
.substring(0, 155);
const description155 = statusPage.description?.substring(0, 155) ?? "";
$("title").text(statusPage.title);
$("meta[name=description]").attr("content", description155);
@@ -152,109 +87,6 @@ class StatusPage extends BeanModel {
return $.root().html();
}
/**
* @param {Array} heartbeats Heartbeats from getRSSPageData
* @returns {number} status_page constant from util.ts
*/
static overallStatus(heartbeats) {
if (heartbeats.length === 0) {
return -1;
}
let status = STATUS_PAGE_ALL_UP;
let hasUp = false;
for (let beat of heartbeats) {
if (beat.status === MAINTENANCE) {
return STATUS_PAGE_MAINTENANCE;
} else if (beat.status === UP) {
hasUp = true;
} else {
status = STATUS_PAGE_PARTIAL_DOWN;
}
}
if (! hasUp) {
status = STATUS_PAGE_ALL_DOWN;
}
return status;
}
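A few worked cases, using the status constants imported from ../../src/util (illustration, not part of the file):

StatusPage.overallStatus([]);                                   // -1 — no services
StatusPage.overallStatus([ { status: UP }, { status: DOWN } ]); // STATUS_PAGE_PARTIAL_DOWN
StatusPage.overallStatus([ { status: DOWN } ]);                 // STATUS_PAGE_ALL_DOWN — no UP beats
StatusPage.overallStatus([ { status: MAINTENANCE } ]);          // STATUS_PAGE_MAINTENANCE — short-circuits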
/**
* @param {number} status Status code from overallStatus
* @returns {string} description
*/
static getStatusDescription(status) {
if (status === -1) {
return "No Services";
}
if (status === STATUS_PAGE_ALL_UP) {
return "All Systems Operational";
}
if (status === STATUS_PAGE_PARTIAL_DOWN) {
return "Partially Degraded Service";
}
if (status === STATUS_PAGE_ALL_DOWN) {
return "Degraded Service";
}
// TODO: show the real maintenance information: title, description, time
if (status === MAINTENANCE) {
return "Under maintenance";
}
return "?";
}
/**
* Get all data required for RSS
* @param {StatusPage} statusPage Status page to get data for
* @returns {object} Status page data
*/
static async getRSSPageData(statusPage) {
// get all heartbeats that correspond to this statusPage
const config = await statusPage.toPublicJSON();
// Public Group List
const showTags = !!statusPage.show_tags;
const list = await R.find("group", " public = 1 AND status_page_id = ? ORDER BY weight ", [
statusPage.id
]);
let heartbeats = [];
for (let groupBean of list) {
let monitorGroup = await groupBean.toPublicJSON(showTags, config?.showCertificateExpiry);
for (const monitor of monitorGroup.monitorList) {
const heartbeat = await R.findOne("heartbeat", "monitor_id = ? ORDER BY time DESC", [ monitor.id ]);
if (heartbeat) {
heartbeats.push({
...monitor,
status: heartbeat.status,
time: heartbeat.time
});
}
}
}
// calculate RSS feed description
let status = StatusPage.overallStatus(heartbeats);
let statusDescription = StatusPage.getStatusDescription(status);
// keep only DOWN heartbeats in the RSS feed
heartbeats = heartbeats.filter(heartbeat => heartbeat.status === DOWN);
return {
heartbeats,
statusDescription
};
}
/**
* Get all status page data in one call
* @param {StatusPage} statusPage Status page to get data for
@@ -400,7 +232,6 @@ class StatusPage extends BeanModel {
description: this.description,
icon: this.getIcon(),
theme: this.theme,
autoRefreshInterval: this.autoRefreshInterval,
published: !!this.published,
showTags: !!this.show_tags,
domainNameList: this.getDomainNameList(),
@@ -423,7 +254,6 @@ class StatusPage extends BeanModel {
title: this.title,
description: this.description,
icon: this.getIcon(),
autoRefreshInterval: this.autoRefreshInterval,
theme: this.theme,
published: !!this.published,
showTags: !!this.show_tags,

View File

@@ -1,8 +1,6 @@
const { BeanModel } = require("redbean-node/dist/bean-model");
const passwordHash = require("../password-hash");
const { R } = require("redbean-node");
const jwt = require("jsonwebtoken");
const { shake256, SHAKE256_LENGTH } = require("../util-server");
class User extends BeanModel {
/**
@@ -25,27 +23,8 @@ class User extends BeanModel {
* @returns {Promise<void>}
*/
async resetPassword(newPassword) {
const hashedPassword = passwordHash.generate(newPassword);
await R.exec("UPDATE `user` SET password = ? WHERE id = ? ", [
hashedPassword,
this.id
]);
this.password = hashedPassword;
}
/**
* Create a new JWT for a user
* @param {User} user The User to create a JsonWebToken for
* @param {string} jwtSecret The key used to sign the JsonWebToken
* @returns {string} the JsonWebToken as a string
*/
static createJWT(user, jwtSecret) {
return jwt.sign({
username: user.username,
h: shake256(user.password, SHAKE256_LENGTH),
}, jwtSecret);
await User.resetPassword(this.id, newPassword);
this.password = newPassword;
}
}
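For context on the removed createJWT: the matching verification step lives elsewhere in the codebase, so the following is only a hedged sketch of what checking such a token could look like (token and jwtSecret are assumed inputs):

const decoded = jwt.verify(token, jwtSecret);
const user = await R.findOne("user", " username = ? ", [ decoded.username ]);
// The "h" claim ties the token to the current password hash,
// so changing the password invalidates previously issued tokens:
const valid = !!user && decoded.h === shake256(user.password, SHAKE256_LENGTH);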

View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2021 CatButtes
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,77 +0,0 @@
'use strict';
// Original file https://raw.githubusercontent.com/elasticio/node-ntlm-client/master/lib/flags.js
module.exports.NTLMFLAG_NEGOTIATE_UNICODE = 1 << 0;
/* Indicates that Unicode strings are supported for use in security buffer
data. */
module.exports.NTLMFLAG_NEGOTIATE_OEM = 1 << 1;
/* Indicates that OEM strings are supported for use in security buffer data. */
module.exports.NTLMFLAG_REQUEST_TARGET = 1 << 2;
/* Requests that the server's authentication realm be included in the Type 2
message. */
/* unknown (1<<3) */
module.exports.NTLMFLAG_NEGOTIATE_SIGN = 1 << 4;
/* Specifies that authenticated communication between the client and server
should carry a digital signature (message integrity). */
module.exports.NTLMFLAG_NEGOTIATE_SEAL = 1 << 5;
/* Specifies that authenticated communication between the client and server
should be encrypted (message confidentiality). */
module.exports.NTLMFLAG_NEGOTIATE_DATAGRAM_STYLE = 1 << 6;
/* Indicates that datagram authentication is being used. */
module.exports.NTLMFLAG_NEGOTIATE_LM_KEY = 1 << 7;
/* Indicates that the LAN Manager session key should be used for signing and
sealing authenticated communications. */
module.exports.NTLMFLAG_NEGOTIATE_NETWARE = 1 << 8;
/* unknown purpose */
module.exports.NTLMFLAG_NEGOTIATE_NTLM_KEY = 1 << 9;
/* Indicates that NTLM authentication is being used. */
/* unknown (1<<10) */
module.exports.NTLMFLAG_NEGOTIATE_ANONYMOUS = 1 << 11;
/* Sent by the client in the Type 3 message to indicate that an anonymous
context has been established. This also affects the response fields. */
module.exports.NTLMFLAG_NEGOTIATE_DOMAIN_SUPPLIED = 1 << 12;
/* Sent by the client in the Type 1 message to indicate that a desired
authentication realm is included in the message. */
module.exports.NTLMFLAG_NEGOTIATE_WORKSTATION_SUPPLIED = 1 << 13;
/* Sent by the client in the Type 1 message to indicate that the client
workstation's name is included in the message. */
module.exports.NTLMFLAG_NEGOTIATE_LOCAL_CALL = 1 << 14;
/* Sent by the server to indicate that the server and client are on the same
machine. Implies that the client may use a pre-established local security
context rather than responding to the challenge. */
module.exports.NTLMFLAG_NEGOTIATE_ALWAYS_SIGN = 1 << 15;
/* Indicates that authenticated communication between the client and server
should be signed with a "dummy" signature. */
module.exports.NTLMFLAG_TARGET_TYPE_DOMAIN = 1 << 16;
/* Sent by the server in the Type 2 message to indicate that the target
authentication realm is a domain. */
module.exports.NTLMFLAG_TARGET_TYPE_SERVER = 1 << 17;
/* Sent by the server in the Type 2 message to indicate that the target
authentication realm is a server. */
module.exports.NTLMFLAG_TARGET_TYPE_SHARE = 1 << 18;
/* Sent by the server in the Type 2 message to indicate that the target
authentication realm is a share. Presumably, this is for share-level
authentication. Usage is unclear. */
module.exports.NTLMFLAG_NEGOTIATE_NTLM2_KEY = 1 << 19;
/* Indicates that the NTLM2 signing and sealing scheme should be used for
protecting authenticated communications. */
module.exports.NTLMFLAG_REQUEST_INIT_RESPONSE = 1 << 20;
/* unknown purpose */
module.exports.NTLMFLAG_REQUEST_ACCEPT_RESPONSE = 1 << 21;
/* unknown purpose */
module.exports.NTLMFLAG_REQUEST_NONNT_SESSION_KEY = 1 << 22;
/* unknown purpose */
module.exports.NTLMFLAG_NEGOTIATE_TARGET_INFO = 1 << 23;
/* Sent by the server in the Type 2 message to indicate that it is including a
Target Information block in the message. */
/* unknown (1<24) */
/* unknown (1<25) */
/* unknown (1<26) */
/* unknown (1<27) */
/* unknown (1<28) */
module.exports.NTLMFLAG_NEGOTIATE_128 = 1 << 29;
/* Indicates that 128-bit encryption is supported. */
module.exports.NTLMFLAG_NEGOTIATE_KEY_EXCHANGE = 1 << 30;
/* Indicates that the client will provide an encrypted master key in
the "Session Key" field of the Type 3 message. */
module.exports.NTLMFLAG_NEGOTIATE_56 = 1 << 31;
//# sourceMappingURL=flags.js.map

View File

@@ -1,122 +0,0 @@
'use strict';
// Original source at https://github.com/elasticio/node-ntlm-client/blob/master/lib/hash.js
var crypto = require('crypto');
function createLMResponse(challenge, lmhash) {
var buf = new Buffer.alloc(24), pwBuffer = new Buffer.alloc(21).fill(0);
lmhash.copy(pwBuffer);
calculateDES(pwBuffer.slice(0, 7), challenge).copy(buf);
calculateDES(pwBuffer.slice(7, 14), challenge).copy(buf, 8);
calculateDES(pwBuffer.slice(14), challenge).copy(buf, 16);
return buf;
}
function createLMHash(password) {
var buf = new Buffer.alloc(16), pwBuffer = new Buffer.alloc(14), magicKey = new Buffer.from('KGS!@#$%', 'ascii');
if (password.length > 14) {
buf.fill(0);
return buf;
}
pwBuffer.fill(0);
pwBuffer.write(password.toUpperCase(), 0, 'ascii');
return Buffer.concat([
calculateDES(pwBuffer.slice(0, 7), magicKey),
calculateDES(pwBuffer.slice(7), magicKey)
]);
}
function calculateDES(key, message) {
var desKey = new Buffer.alloc(8);
desKey[0] = key[0] & 0xFE;
desKey[1] = ((key[0] << 7) & 0xFF) | (key[1] >> 1);
desKey[2] = ((key[1] << 6) & 0xFF) | (key[2] >> 2);
desKey[3] = ((key[2] << 5) & 0xFF) | (key[3] >> 3);
desKey[4] = ((key[3] << 4) & 0xFF) | (key[4] >> 4);
desKey[5] = ((key[4] << 3) & 0xFF) | (key[5] >> 5);
desKey[6] = ((key[5] << 2) & 0xFF) | (key[6] >> 6);
desKey[7] = (key[6] << 1) & 0xFF;
for (var i = 0; i < 8; i++) {
var parity = 0;
for (var j = 1; j < 8; j++) {
parity += (desKey[i] >> j) % 2;
}
desKey[i] |= (parity % 2) === 0 ? 1 : 0;
}
var des = crypto.createCipheriv('DES-ECB', desKey, '');
return des.update(message);
}
function createNTLMResponse(challenge, ntlmhash) {
var buf = new Buffer.alloc(24), ntlmBuffer = new Buffer.alloc(21).fill(0);
ntlmhash.copy(ntlmBuffer);
calculateDES(ntlmBuffer.slice(0, 7), challenge).copy(buf);
calculateDES(ntlmBuffer.slice(7, 14), challenge).copy(buf, 8);
calculateDES(ntlmBuffer.slice(14), challenge).copy(buf, 16);
return buf;
}
function createNTLMHash(password) {
var md4sum = crypto.createHash('md4');
md4sum.update(new Buffer.from(password, 'ucs2'));
return md4sum.digest();
}
function createNTLMv2Hash(ntlmhash, username, authTargetName) {
var hmac = crypto.createHmac('md5', ntlmhash);
hmac.update(new Buffer.from(username.toUpperCase() + authTargetName, 'ucs2'));
return hmac.digest();
}
function createLMv2Response(type2message, username, ntlmhash, nonce, targetName) {
var buf = new Buffer.alloc(24), ntlm2hash = createNTLMv2Hash(ntlmhash, username, targetName), hmac = crypto.createHmac('md5', ntlm2hash);
//server challenge
type2message.challenge.copy(buf, 8);
//client nonce
buf.write(nonce || createPseudoRandomValue(16), 16, 'hex');
//create hash
hmac.update(buf.slice(8));
var hashedBuffer = hmac.digest();
hashedBuffer.copy(buf);
return buf;
}
function createNTLMv2Response(type2message, username, ntlmhash, nonce, targetName) {
var buf = new Buffer.alloc(48 + type2message.targetInfo.buffer.length), ntlm2hash = createNTLMv2Hash(ntlmhash, username, targetName), hmac = crypto.createHmac('md5', ntlm2hash);
//the first 8 bytes are spare to store the hashed value before the blob
//server challenge
type2message.challenge.copy(buf, 8);
//blob signature
buf.writeUInt32BE(0x01010000, 16);
//reserved
buf.writeUInt32LE(0, 20);
//timestamp
//TODO: we are losing precision here since JS is not able to handle those large integers
// maybe think about a different solution here
// 11644473600000 = diff in ms between the 1970 and 1601 epochs
var timestamp = ((Date.now() + 11644473600000) * 10000).toString(16);
var timestampLow = Number('0x' + timestamp.substring(Math.max(0, timestamp.length - 8)));
var timestampHigh = Number('0x' + timestamp.substring(0, Math.max(0, timestamp.length - 8)));
buf.writeUInt32LE(timestampLow, 24, false);
buf.writeUInt32LE(timestampHigh, 28, false);
//random client nonce
buf.write(nonce || createPseudoRandomValue(16), 32, 'hex');
//zero
buf.writeUInt32LE(0, 40);
//complete target information block from type 2 message
type2message.targetInfo.buffer.copy(buf, 44);
//zero
buf.writeUInt32LE(0, 44 + type2message.targetInfo.buffer.length);
hmac.update(buf.slice(8));
var hashedBuffer = hmac.digest();
hashedBuffer.copy(buf);
return buf;
}
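On the precision TODO above: assuming a Node version with BigInt and Buffer.writeBigUInt64LE (not verified against this codebase's supported range), the Windows FILETIME could be written without the string slicing:

// Sketch of a precision-safe alternative for the timestamp block:
const filetime = (BigInt(Date.now()) + 11644473600000n) * 10000n; // 100ns units since 1601
buf.writeBigUInt64LE(filetime, 24);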
function createPseudoRandomValue(length) {
var str = '';
while (str.length < length) {
str += Math.floor(Math.random() * 16).toString(16);
}
return str;
}
module.exports = {
createLMHash: createLMHash,
createNTLMHash: createNTLMHash,
createLMResponse: createLMResponse,
createNTLMResponse: createNTLMResponse,
createLMv2Response: createLMv2Response,
createNTLMv2Response: createNTLMv2Response,
createPseudoRandomValue: createPseudoRandomValue
};
//# sourceMappingURL=hash.js.map

View File

@@ -1,220 +0,0 @@
'use strict';
// Original file https://raw.githubusercontent.com/elasticio/node-ntlm-client/master/lib/ntlm.js
var os = require('os'), flags = require('./flags'), hash = require('./hash');
var NTLMSIGNATURE = "NTLMSSP\0";
function createType1Message(workstation, target) {
var dataPos = 32, pos = 0, buf = new Buffer.alloc(1024);
workstation = workstation === undefined ? os.hostname() : workstation;
target = target === undefined ? '' : target;
//signature
buf.write(NTLMSIGNATURE, pos, NTLMSIGNATURE.length, 'ascii');
pos += NTLMSIGNATURE.length;
//message type
buf.writeUInt32LE(1, pos);
pos += 4;
//flags
buf.writeUInt32LE(flags.NTLMFLAG_NEGOTIATE_OEM |
flags.NTLMFLAG_REQUEST_TARGET |
flags.NTLMFLAG_NEGOTIATE_NTLM_KEY |
flags.NTLMFLAG_NEGOTIATE_NTLM2_KEY |
flags.NTLMFLAG_NEGOTIATE_ALWAYS_SIGN, pos);
pos += 4;
//domain security buffer
buf.writeUInt16LE(target.length, pos);
pos += 2;
buf.writeUInt16LE(target.length, pos);
pos += 2;
buf.writeUInt32LE(target.length === 0 ? 0 : dataPos, pos);
pos += 4;
if (target.length > 0) {
dataPos += buf.write(target, dataPos, 'ascii');
}
//workstation security buffer
buf.writeUInt16LE(workstation.length, pos);
pos += 2;
buf.writeUInt16LE(workstation.length, pos);
pos += 2;
buf.writeUInt32LE(workstation.length === 0 ? 0 : dataPos, pos);
pos += 4;
if (workstation.length > 0) {
dataPos += buf.write(workstation, dataPos, 'ascii');
}
return 'NTLM ' + buf.toString('base64', 0, dataPos);
}
function decodeType2Message(str) {
if (str === undefined) {
throw new Error('Invalid argument');
}
//convenience
if (Object.prototype.toString.call(str) !== '[object String]') {
if (str.hasOwnProperty('headers') && str.headers.hasOwnProperty('www-authenticate')) {
str = str.headers['www-authenticate'];
}
else {
throw new Error('Invalid argument');
}
}
var ntlmMatch = /^NTLM ([^,\s]+)/.exec(str);
if (ntlmMatch) {
str = ntlmMatch[1];
}
var buf = new Buffer.from(str, 'base64'), obj = {};
//check signature
if (buf.toString('ascii', 0, NTLMSIGNATURE.length) !== NTLMSIGNATURE) {
throw new Error('Invalid message signature: ' + str);
}
//check message type
if (buf.readUInt32LE(NTLMSIGNATURE.length) !== 2) {
throw new Error('Invalid message type (no type 2)');
}
//read flags
obj.flags = buf.readUInt32LE(20);
obj.encoding = (obj.flags & flags.NTLMFLAG_NEGOTIATE_OEM) ? 'ascii' : 'ucs2';
obj.version = (obj.flags & flags.NTLMFLAG_NEGOTIATE_NTLM2_KEY) ? 2 : 1;
obj.challenge = buf.slice(24, 32);
//read target name
obj.targetName = (function () {
var length = buf.readUInt16LE(12);
//skipping allocated space
var offset = buf.readUInt32LE(16);
if (length === 0) {
return '';
}
if ((offset + length) > buf.length || offset < 32) {
throw new Error('Bad type 2 message');
}
return buf.toString(obj.encoding, offset, offset + length);
})();
//read target info
if (obj.flags & flags.NTLMFLAG_NEGOTIATE_TARGET_INFO) {
obj.targetInfo = (function () {
var info = {};
var length = buf.readUInt16LE(40);
//skipping allocated space
var offset = buf.readUInt32LE(44);
var targetInfoBuffer = new Buffer.alloc(length);
buf.copy(targetInfoBuffer, 0, offset, offset + length);
if (length === 0) {
return info;
}
if ((offset + length) > buf.length || offset < 32) {
throw new Error('Bad type 2 message');
}
var pos = offset;
while (pos < (offset + length)) {
var blockType = buf.readUInt16LE(pos);
pos += 2;
var blockLength = buf.readUInt16LE(pos);
pos += 2;
if (blockType === 0) {
//reached the terminator subblock
break;
}
var blockTypeStr = void 0;
switch (blockType) {
case 1:
blockTypeStr = 'SERVER';
break;
case 2:
blockTypeStr = 'DOMAIN';
break;
case 3:
blockTypeStr = 'FQDN';
break;
case 4:
blockTypeStr = 'DNS';
break;
case 5:
blockTypeStr = 'PARENT_DNS';
break;
default:
blockTypeStr = '';
break;
}
if (blockTypeStr) {
info[blockTypeStr] = buf.toString('ucs2', pos, pos + blockLength);
}
pos += blockLength;
}
return {
parsed: info,
buffer: targetInfoBuffer
};
})();
}
return obj;
}
function createType3Message(type2Message, username, password, workstation, target) {
var dataPos = 52, buf = new Buffer.alloc(1024);
if (workstation === undefined) {
workstation = os.hostname();
}
if (target === undefined) {
target = type2Message.targetName;
}
//signature
buf.write(NTLMSIGNATURE, 0, NTLMSIGNATURE.length, 'ascii');
//message type
buf.writeUInt32LE(3, 8);
if (type2Message.version === 2) {
dataPos = 64;
var ntlmHash = hash.createNTLMHash(password), nonce = hash.createPseudoRandomValue(16), lmv2 = hash.createLMv2Response(type2Message, username, ntlmHash, nonce, target), ntlmv2 = hash.createNTLMv2Response(type2Message, username, ntlmHash, nonce, target);
//lmv2 security buffer
buf.writeUInt16LE(lmv2.length, 12);
buf.writeUInt16LE(lmv2.length, 14);
buf.writeUInt32LE(dataPos, 16);
lmv2.copy(buf, dataPos);
dataPos += lmv2.length;
//ntlmv2 security buffer
buf.writeUInt16LE(ntlmv2.length, 20);
buf.writeUInt16LE(ntlmv2.length, 22);
buf.writeUInt32LE(dataPos, 24);
ntlmv2.copy(buf, dataPos);
dataPos += ntlmv2.length;
}
else {
var lmHash = hash.createLMHash(password), ntlmHash = hash.createNTLMHash(password), lm = hash.createLMResponse(type2Message.challenge, lmHash), ntlm = hash.createNTLMResponse(type2Message.challenge, ntlmHash);
//lm security buffer
buf.writeUInt16LE(lm.length, 12);
buf.writeUInt16LE(lm.length, 14);
buf.writeUInt32LE(dataPos, 16);
lm.copy(buf, dataPos);
dataPos += lm.length;
//ntlm security buffer
buf.writeUInt16LE(ntlm.length, 20);
buf.writeUInt16LE(ntlm.length, 22);
buf.writeUInt32LE(dataPos, 24);
ntlm.copy(buf, dataPos);
dataPos += ntlm.length;
}
//target name security buffer
buf.writeUInt16LE(type2Message.encoding === 'ascii' ? target.length : target.length * 2, 28);
buf.writeUInt16LE(type2Message.encoding === 'ascii' ? target.length : target.length * 2, 30);
buf.writeUInt32LE(dataPos, 32);
dataPos += buf.write(target, dataPos, type2Message.encoding);
//user name security buffer
buf.writeUInt16LE(type2Message.encoding === 'ascii' ? username.length : username.length * 2, 36);
buf.writeUInt16LE(type2Message.encoding === 'ascii' ? username.length : username.length * 2, 38);
buf.writeUInt32LE(dataPos, 40);
dataPos += buf.write(username, dataPos, type2Message.encoding);
//workstation name security buffer
buf.writeUInt16LE(type2Message.encoding === 'ascii' ? workstation.length : workstation.length * 2, 44);
buf.writeUInt16LE(type2Message.encoding === 'ascii' ? workstation.length : workstation.length * 2, 46);
buf.writeUInt32LE(dataPos, 48);
dataPos += buf.write(workstation, dataPos, type2Message.encoding);
if (type2Message.version === 2) {
//session key security buffer
buf.writeUInt16LE(0, 52);
buf.writeUInt16LE(0, 54);
buf.writeUInt32LE(0, 56);
//flags
buf.writeUInt32LE(type2Message.flags, 60);
}
return 'NTLM ' + buf.toString('base64', 0, dataPos);
}
module.exports = {
createType1Message: createType1Message,
decodeType2Message: decodeType2Message,
createType3Message: createType3Message
};
//# sourceMappingURL=ntlm.js.map

View File

@@ -1,127 +0,0 @@
"use strict";
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
}) : (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
o["default"] = v;
});
var __importStar = (this && this.__importStar) || function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
__setModuleDefault(result, mod);
return result;
};
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.NtlmClient = void 0;
var axios_1 = __importDefault(require("axios"));
var ntlm = __importStar(require("./ntlm"));
var https = __importStar(require("https"));
var http = __importStar(require("http"));
var dev_null_1 = __importDefault(require("dev-null"));
/**
* @param credentials An NtlmCredentials object containing the username and password
* @param AxiosConfig The Axios config for the instance you wish to create
*
* @returns This function returns an axios instance configured to use the provided credentials
*/
function NtlmClient(credentials, AxiosConfig) {
var _this = this;
var config = AxiosConfig !== null && AxiosConfig !== void 0 ? AxiosConfig : {};
if (!config.httpAgent) {
config.httpAgent = new http.Agent({ keepAlive: true });
}
if (!config.httpsAgent) {
config.httpsAgent = new https.Agent({ keepAlive: true });
}
var client = axios_1.default.create(config);
client.interceptors.response.use(function (response) {
return response;
}, function (err) { return __awaiter(_this, void 0, void 0, function () {
var error, t1Msg, t2Msg, t3Msg, stream_1;
var _a;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
error = err.response;
if (!(error && error.status === 401
&& error.headers['www-authenticate']
&& error.headers['www-authenticate'].includes('NTLM'))) return [3 /*break*/, 3];
// This length check is a hack because SharePoint is awkward and will
// include the Negotiate option when responding with the T2 message
// There is more we could do to ensure we are processing correctly,
// but this is the easiest option for now
if (error.headers['www-authenticate'].length < 50) {
t1Msg = ntlm.createType1Message(credentials.workstation, credentials.domain);
error.config.headers["Authorization"] = t1Msg;
}
else {
t2Msg = ntlm.decodeType2Message((error.headers['www-authenticate'].match(/^NTLM\s+(.+?)(,|\s+|$)/) || [])[1]);
t3Msg = ntlm.createType3Message(t2Msg, credentials.username, credentials.password, credentials.workstation, credentials.domain);
error.config.headers["X-retry"] = "false";
error.config.headers["Authorization"] = t3Msg;
}
if (!(error.config.responseType === "stream")) return [3 /*break*/, 2];
stream_1 = (_a = err.response) === null || _a === void 0 ? void 0 : _a.data;
if (!(stream_1 && !stream_1.readableEnded)) return [3 /*break*/, 2];
return [4 /*yield*/, new Promise(function (resolve) {
stream_1.pipe((0, dev_null_1.default)());
stream_1.once('close', resolve);
})];
case 1:
_b.sent();
_b.label = 2;
case 2: return [2 /*return*/, client(error.config)];
case 3: throw err;
}
});
}); });
return client;
}
exports.NtlmClient = NtlmClient;
//# sourceMappingURL=ntlmClient.js.map
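A hedged usage sketch of the wrapper (credentials and URL are placeholders):

const { NtlmClient } = require("./ntlmClient");
const client = NtlmClient({ username: "user", password: "pass", domain: "CORP" });
// Behaves like a normal axios instance; the interceptor replays a 401
// challenge with the Type 1/Type 3 headers built in ntlm.js:
const res = await client.get("https://intranet.example.com/api/items");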

View File

@@ -1,71 +0,0 @@
const { ConditionExpressionGroup, ConditionExpression, LOGICAL } = require("./expression");
const { operatorMap } = require("./operators");
/**
* @param {ConditionExpression} expression Expression to evaluate
* @param {object} context Context to evaluate against; these are the values for variables in the expression
* @returns {boolean} Whether the expression evaluates true or false
* @throws {Error}
*/
function evaluateExpression(expression, context) {
/**
* @type {import("./operators").ConditionOperator|null}
*/
const operator = operatorMap.get(expression.operator) || null;
if (operator === null) {
throw new Error("Unexpected expression operator ID '" + expression.operator + "'. Expected one of [" + operatorMap.keys().join(",") + "]");
}
if (!Object.prototype.hasOwnProperty.call(context, expression.variable)) {
throw new Error("Variable missing in context: " + expression.variable);
}
return operator.test(context[expression.variable], expression.value);
}
/**
* @param {ConditionExpressionGroup} group Group of expressions to evaluate
* @param {object} context Context to evaluate against; these are the values for variables in the expression
* @returns {boolean} Whether the group evaluates true or false
* @throws {Error}
*/
function evaluateExpressionGroup(group, context) {
if (!group.children.length) {
throw new Error("ConditionExpressionGroup must contain at least one child.");
}
let result = null;
for (const child of group.children) {
let childResult;
if (child instanceof ConditionExpression) {
childResult = evaluateExpression(child, context);
} else if (child instanceof ConditionExpressionGroup) {
childResult = evaluateExpressionGroup(child, context);
} else {
throw new Error("Invalid child type in ConditionExpressionGroup. Expected ConditionExpression or ConditionExpressionGroup");
}
if (result === null) {
result = childResult; // Initialize result with the first child's result
} else if (child.andOr === LOGICAL.OR) {
result = result || childResult;
} else if (child.andOr === LOGICAL.AND) {
result = result && childResult;
} else {
throw new Error("Invalid logical operator in child of ConditionExpressionGroup. Expected 'and' or 'or'. Got '" + group.andOr + "'");
}
}
if (result === null) {
throw new Error("ConditionExpressionGroup did not result in a boolean.");
}
return result;
}
module.exports = {
evaluateExpression,
evaluateExpressionGroup,
};
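A self-contained illustration of the left-to-right evaluation (operator IDs come from ./operators):

const { ConditionExpressionGroup, ConditionExpression, LOGICAL } = require("./expression");
const { evaluateExpressionGroup } = require("./evaluator");

const group = new ConditionExpressionGroup([
    new ConditionExpression("record", "starts_with", "ns1."),
    new ConditionExpression("record", "contains", "example", LOGICAL.OR),
]);
// false || true => true: the OR child rescues the failed first expression
evaluateExpressionGroup(group, { record: "ns2.example.com" });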

View File

@@ -1,111 +0,0 @@
/**
* @readonly
* @enum {string}
*/
const LOGICAL = {
AND: "and",
OR: "or",
};
/**
* Recursively processes an array of raw condition objects and populates the given parent group with
* corresponding ConditionExpression or ConditionExpressionGroup instances.
* @param {Array} conditions Array of raw condition objects, where each object represents either a group or an expression.
* @param {ConditionExpressionGroup} parentGroup The parent group to which the instantiated ConditionExpression or ConditionExpressionGroup objects will be added.
* @returns {void}
*/
function processMonitorConditions(conditions, parentGroup) {
conditions.forEach(condition => {
const andOr = condition.andOr === LOGICAL.OR ? LOGICAL.OR : LOGICAL.AND;
if (condition.type === "group") {
const group = new ConditionExpressionGroup([], andOr);
// Recursively process the group's children
processMonitorConditions(condition.children, group);
parentGroup.children.push(group);
} else if (condition.type === "expression") {
const expression = new ConditionExpression(condition.variable, condition.operator, condition.value, andOr);
parentGroup.children.push(expression);
}
});
}
class ConditionExpressionGroup {
/**
* @type {ConditionExpressionGroup[]|ConditionExpression[]} Groups and/or expressions to test
*/
children = [];
/**
* @type {LOGICAL} Connects group result with previous group/expression results
*/
andOr;
/**
* @param {ConditionExpressionGroup[]|ConditionExpression[]} children Groups and/or expressions to test
* @param {LOGICAL} andOr Connects group result with previous group/expression results
*/
constructor(children = [], andOr = LOGICAL.AND) {
this.children = children;
this.andOr = andOr;
}
/**
* @param {Monitor} monitor Monitor instance
* @returns {ConditionExpressionGroup|null} A ConditionExpressionGroup with the Monitor's conditions
*/
static fromMonitor(monitor) {
const conditions = JSON.parse(monitor.conditions);
if (conditions.length === 0) {
return null;
}
const root = new ConditionExpressionGroup();
processMonitorConditions(conditions, root);
return root;
}
}
class ConditionExpression {
/**
* @type {string} ID of variable
*/
variable;
/**
* @type {string} ID of operator
*/
operator;
/**
* @type {string} Value to test with the operator
*/
value;
/**
* @type {LOGICAL} Connects expression result with previous group/expression results
*/
andOr;
/**
* @param {string} variable ID of variable to test against
* @param {string} operator ID of operator to test the variable with
* @param {string} value Value to test with the operator
* @param {LOGICAL} andOr Connects expression result with previous group/expression results
*/
constructor(variable, operator, value, andOr = LOGICAL.AND) {
this.variable = variable;
this.operator = operator;
this.value = value;
this.andOr = andOr;
}
}
module.exports = {
LOGICAL,
ConditionExpressionGroup,
ConditionExpression,
};
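The raw JSON shape fromMonitor() expects can be read off processMonitorConditions; an illustrative monitor.conditions payload (all field values made up):

const monitor = {
    conditions: JSON.stringify([
        { type: "expression", variable: "record", operator: "contains", value: "example.com" },
        { type: "group", andOr: "or", children: [
            { type: "expression", variable: "record", operator: "equals", value: "127.0.0.1" },
        ] },
    ]),
};
const root = ConditionExpressionGroup.fromMonitor(monitor); // nested groups/expressions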

View File

@@ -1,318 +0,0 @@
class ConditionOperator {
id = undefined;
caption = undefined;
/**
* @param {mixed} variable Value of the variable under test
* @param {mixed} value Value to test the variable against
* @returns {boolean} Whether the test passes
*/
test(variable, value) {
throw new Error("You need to override test()");
}
}
const OP_STR_EQUALS = "equals";
const OP_STR_NOT_EQUALS = "not_equals";
const OP_CONTAINS = "contains";
const OP_NOT_CONTAINS = "not_contains";
const OP_STARTS_WITH = "starts_with";
const OP_NOT_STARTS_WITH = "not_starts_with";
const OP_ENDS_WITH = "ends_with";
const OP_NOT_ENDS_WITH = "not_ends_with";
const OP_NUM_EQUALS = "num_equals";
const OP_NUM_NOT_EQUALS = "num_not_equals";
const OP_LT = "lt";
const OP_GT = "gt";
const OP_LTE = "lte";
const OP_GTE = "gte";
/**
* Asserts a variable is equal to a value.
*/
class StringEqualsOperator extends ConditionOperator {
id = OP_STR_EQUALS;
caption = "equals";
/**
* @inheritdoc
*/
test(variable, value) {
return variable === value;
}
}
/**
* Asserts a variable is not equal to a value.
*/
class StringNotEqualsOperator extends ConditionOperator {
id = OP_STR_NOT_EQUALS;
caption = "not equals";
/**
* @inheritdoc
*/
test(variable, value) {
return variable !== value;
}
}
/**
* Asserts a variable contains a value.
* Handles both Array and String variable types.
*/
class ContainsOperator extends ConditionOperator {
id = OP_CONTAINS;
caption = "contains";
/**
* @inheritdoc
*/
test(variable, value) {
if (Array.isArray(variable)) {
return variable.includes(value);
}
return variable.indexOf(value) !== -1;
}
}
/**
* Asserts a variable does not contain a value.
* Handles both Array and String variable types.
*/
class NotContainsOperator extends ConditionOperator {
id = OP_NOT_CONTAINS;
caption = "not contains";
/**
* @inheritdoc
*/
test(variable, value) {
if (Array.isArray(variable)) {
return !variable.includes(value);
}
return variable.indexOf(value) === -1;
}
}
/**
* Asserts a variable starts with a value.
*/
class StartsWithOperator extends ConditionOperator {
id = OP_STARTS_WITH;
caption = "starts with";
/**
* @inheritdoc
*/
test(variable, value) {
return variable.startsWith(value);
}
}
/**
* Asserts a variable does not start with a value.
*/
class NotStartsWithOperator extends ConditionOperator {
id = OP_NOT_STARTS_WITH;
caption = "not starts with";
/**
* @inheritdoc
*/
test(variable, value) {
return !variable.startsWith(value);
}
}
/**
* Asserts a variable ends with a value.
*/
class EndsWithOperator extends ConditionOperator {
id = OP_ENDS_WITH;
caption = "ends with";
/**
* @inheritdoc
*/
test(variable, value) {
return variable.endsWith(value);
}
}
/**
* Asserts a variable does not end with a value.
*/
class NotEndsWithOperator extends ConditionOperator {
id = OP_NOT_ENDS_WITH;
caption = "not ends with";
/**
* @inheritdoc
*/
test(variable, value) {
return !variable.endsWith(value);
}
}
/**
* Asserts a numeric variable is equal to a value.
*/
class NumberEqualsOperator extends ConditionOperator {
id = OP_NUM_EQUALS;
caption = "equals";
/**
* @inheritdoc
*/
test(variable, value) {
return variable === Number(value);
}
}
/**
* Asserts a numeric variable is not equal to a value.
*/
class NumberNotEqualsOperator extends ConditionOperator {
id = OP_NUM_NOT_EQUALS;
caption = "not equals";
/**
* @inheritdoc
*/
test(variable, value) {
return variable !== Number(value);
}
}
/**
* Asserts a variable is less than a value.
*/
class LessThanOperator extends ConditionOperator {
id = OP_LT;
caption = "less than";
/**
* @inheritdoc
*/
test(variable, value) {
return variable < Number(value);
}
}
/**
* Asserts a variable is greater than a value.
*/
class GreaterThanOperator extends ConditionOperator {
id = OP_GT;
caption = "greater than";
/**
* @inheritdoc
*/
test(variable, value) {
return variable > Number(value);
}
}
/**
* Asserts a variable is less than or equal to a value.
*/
class LessThanOrEqualToOperator extends ConditionOperator {
id = OP_LTE;
caption = "less than or equal to";
/**
* @inheritdoc
*/
test(variable, value) {
return variable <= Number(value);
}
}
/**
* Asserts a variable is greater than or equal to a value.
*/
class GreaterThanOrEqualToOperator extends ConditionOperator {
id = OP_GTE;
caption = "greater than or equal to";
/**
* @inheritdoc
*/
test(variable, value) {
return variable >= Number(value);
}
}
const operatorMap = new Map([
[ OP_STR_EQUALS, new StringEqualsOperator ],
[ OP_STR_NOT_EQUALS, new StringNotEqualsOperator ],
[ OP_CONTAINS, new ContainsOperator ],
[ OP_NOT_CONTAINS, new NotContainsOperator ],
[ OP_STARTS_WITH, new StartsWithOperator ],
[ OP_NOT_STARTS_WITH, new NotStartsWithOperator ],
[ OP_ENDS_WITH, new EndsWithOperator ],
[ OP_NOT_ENDS_WITH, new NotEndsWithOperator ],
[ OP_NUM_EQUALS, new NumberEqualsOperator ],
[ OP_NUM_NOT_EQUALS, new NumberNotEqualsOperator ],
[ OP_LT, new LessThanOperator ],
[ OP_GT, new GreaterThanOperator ],
[ OP_LTE, new LessThanOrEqualToOperator ],
[ OP_GTE, new GreaterThanOrEqualToOperator ],
]);
const defaultStringOperators = [
operatorMap.get(OP_STR_EQUALS),
operatorMap.get(OP_STR_NOT_EQUALS),
operatorMap.get(OP_CONTAINS),
operatorMap.get(OP_NOT_CONTAINS),
operatorMap.get(OP_STARTS_WITH),
operatorMap.get(OP_NOT_STARTS_WITH),
operatorMap.get(OP_ENDS_WITH),
operatorMap.get(OP_NOT_ENDS_WITH)
];
const defaultNumberOperators = [
operatorMap.get(OP_NUM_EQUALS),
operatorMap.get(OP_NUM_NOT_EQUALS),
operatorMap.get(OP_LT),
operatorMap.get(OP_GT),
operatorMap.get(OP_LTE),
operatorMap.get(OP_GTE)
];
module.exports = {
OP_STR_EQUALS,
OP_STR_NOT_EQUALS,
OP_CONTAINS,
OP_NOT_CONTAINS,
OP_STARTS_WITH,
OP_NOT_STARTS_WITH,
OP_ENDS_WITH,
OP_NOT_ENDS_WITH,
OP_NUM_EQUALS,
OP_NUM_NOT_EQUALS,
OP_LT,
OP_GT,
OP_LTE,
OP_GTE,
operatorMap,
defaultStringOperators,
defaultNumberOperators,
ConditionOperator,
};
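Quick sanity examples of the map in use (note the Number() coercion in the numeric operators and the Array branch in contains):

const { operatorMap, OP_GTE, OP_CONTAINS } = require("./operators");
operatorMap.get(OP_GTE).test(200, "200");             // true — "200" is coerced via Number()
operatorMap.get(OP_CONTAINS).test([ "a", "b" ], "b"); // true — arrays use includes()
operatorMap.get(OP_CONTAINS).test("abc", "b");        // true — strings use indexOf()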

View File

@@ -1,31 +0,0 @@
/**
* Represents a variable used in a condition and the set of operators that can be applied to this variable.
*
* A `ConditionVariable` holds the ID of the variable and a list of operators that define how this variable can be evaluated
* in conditions. For example, if the variable is a request body or a specific field in a request, the operators can include
* operations such as equality checks, comparisons, or other custom evaluations.
*/
class ConditionVariable {
/**
* @type {string}
*/
id;
/**
* @type {import("./operators").ConditionOperator[]}
*/
operators = [];
/**
* @param {string} id ID of variable
* @param {import("./operators").ConditionOperator[]} operators Operators the condition supports
*/
constructor(id, operators = []) {
this.id = id;
this.operators = operators;
}
}
module.exports = {
ConditionVariable,
};
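Usage mirrors the DNS monitor type later in this diff; for instance, a type exposing a "record" variable testable with the default string operators:

const { ConditionVariable } = require("./variables");
const { defaultStringOperators } = require("./operators");
const recordVariable = new ConditionVariable("record", defaultStringOperators);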

View File

@@ -1,22 +1,13 @@
const { MonitorType } = require("./monitor-type");
const { UP, DOWN } = require("../../src/util");
const { UP } = require("../../src/util");
const dayjs = require("dayjs");
const { dnsResolve } = require("../util-server");
const { R } = require("redbean-node");
const { ConditionVariable } = require("../monitor-conditions/variables");
const { defaultStringOperators } = require("../monitor-conditions/operators");
const { ConditionExpressionGroup } = require("../monitor-conditions/expression");
const { evaluateExpressionGroup } = require("../monitor-conditions/evaluator");
class DnsMonitorType extends MonitorType {
name = "dns";
supportsConditions = true;
conditionVariables = [
new ConditionVariable("record", defaultStringOperators ),
];
/**
* @inheritdoc
*/
@@ -27,48 +18,28 @@ class DnsMonitorType extends MonitorType {
let dnsRes = await dnsResolve(monitor.hostname, monitor.dns_resolve_server, monitor.port, monitor.dns_resolve_type);
heartbeat.ping = dayjs().valueOf() - startTime;
const conditions = ConditionExpressionGroup.fromMonitor(monitor);
let conditionsResult = true;
const handleConditions = (data) => conditions ? evaluateExpressionGroup(conditions, data) : true;
switch (monitor.dns_resolve_type) {
case "A":
case "AAAA":
case "TXT":
case "PTR":
dnsMessage = `Records: ${dnsRes.join(" | ")}`;
conditionsResult = dnsRes.some(record => handleConditions({ record }));
break;
case "CNAME":
dnsMessage = dnsRes[0];
conditionsResult = handleConditions({ record: dnsRes[0] });
break;
case "CAA":
dnsMessage = dnsRes[0].issue;
conditionsResult = handleConditions({ record: dnsRes[0].issue });
break;
case "MX":
dnsMessage = dnsRes.map(record => `Hostname: ${record.exchange} - Priority: ${record.priority}`).join(" | ");
conditionsResult = dnsRes.some(record => handleConditions({ record: record.exchange }));
break;
case "NS":
dnsMessage = `Servers: ${dnsRes.join(" | ")}`;
conditionsResult = dnsRes.some(record => handleConditions({ record }));
break;
case "SOA":
dnsMessage = `NS-Name: ${dnsRes.nsname} | Hostmaster: ${dnsRes.hostmaster} | Serial: ${dnsRes.serial} | Refresh: ${dnsRes.refresh} | Retry: ${dnsRes.retry} | Expire: ${dnsRes.expire} | MinTTL: ${dnsRes.minttl}`;
conditionsResult = handleConditions({ record: dnsRes.nsname });
break;
case "SRV":
dnsMessage = dnsRes.map(record => `Name: ${record.name} | Port: ${record.port} | Priority: ${record.priority} | Weight: ${record.weight}`).join(" | ");
conditionsResult = dnsRes.some(record => handleConditions({ record: record.name }));
break;
if (monitor.dns_resolve_type === "A" || monitor.dns_resolve_type === "AAAA" || monitor.dns_resolve_type === "TXT" || monitor.dns_resolve_type === "PTR") {
dnsMessage += "Records: ";
dnsMessage += dnsRes.join(" | ");
} else if (monitor.dns_resolve_type === "CNAME" || monitor.dns_resolve_type === "PTR") {
dnsMessage += dnsRes[0];
} else if (monitor.dns_resolve_type === "CAA") {
dnsMessage += dnsRes[0].issue;
} else if (monitor.dns_resolve_type === "MX") {
dnsRes.forEach(record => {
dnsMessage += `Hostname: ${record.exchange} - Priority: ${record.priority} | `;
});
dnsMessage = dnsMessage.slice(0, -2);
} else if (monitor.dns_resolve_type === "NS") {
dnsMessage += "Servers: ";
dnsMessage += dnsRes.join(" | ");
} else if (monitor.dns_resolve_type === "SOA") {
dnsMessage += `NS-Name: ${dnsRes.nsname} | Hostmaster: ${dnsRes.hostmaster} | Serial: ${dnsRes.serial} | Refresh: ${dnsRes.refresh} | Retry: ${dnsRes.retry} | Expire: ${dnsRes.expire} | MinTTL: ${dnsRes.minttl}`;
} else if (monitor.dns_resolve_type === "SRV") {
dnsRes.forEach(record => {
dnsMessage += `Name: ${record.name} | Port: ${record.port} | Priority: ${record.priority} | Weight: ${record.weight} | `;
});
dnsMessage = dnsMessage.slice(0, -2);
}
if (monitor.dns_last_result !== dnsMessage && dnsMessage !== undefined) {
@@ -76,7 +47,7 @@ class DnsMonitorType extends MonitorType {
}
heartbeat.msg = dnsMessage;
heartbeat.status = conditionsResult ? UP : DOWN;
heartbeat.status = UP;
}
}
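Tracing the condition gate for an A lookup: each resolved record is tested on its own, and any match keeps the monitor UP (sketch reusing the names from check(); addresses are hypothetical):

const dnsRes = [ "93.184.216.34", "93.184.216.35" ];
const conditionsResult = dnsRes.some(record => handleConditions({ record }));
heartbeat.status = conditionsResult ? UP : DOWN; // UP if any record satisfies the conditions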

View File

@@ -1,63 +0,0 @@
const { MonitorType } = require("./monitor-type");
const { UP } = require("../../src/util");
const { MongoClient } = require("mongodb");
const jsonata = require("jsonata");
class MongodbMonitorType extends MonitorType {
name = "mongodb";
/**
* @inheritdoc
*/
async check(monitor, heartbeat, _server) {
let command = { "ping": 1 };
if (monitor.databaseQuery) {
command = JSON.parse(monitor.databaseQuery);
}
let result = await this.runMongodbCommand(monitor.databaseConnectionString, command);
if (result["ok"] !== 1) {
throw new Error("MongoDB command failed");
} else {
heartbeat.msg = "Command executed successfully";
}
if (monitor.jsonPath) {
let expression = jsonata(monitor.jsonPath);
result = await expression.evaluate(result);
if (result) {
heartbeat.msg = "Command executed successfully and the jsonata expression produces a result.";
} else {
throw new Error("Queried value not found.");
}
}
if (monitor.expectedValue) {
if (result.toString() === monitor.expectedValue) {
heartbeat.msg = "Command executed successfully and expected value was found";
} else {
throw new Error("Query executed, but value is not equal to expected value, value was: [" + JSON.stringify(result) + "]");
}
}
heartbeat.status = UP;
}
/**
* Connect to and run MongoDB command on a MongoDB database
* @param {string} connectionString The database connection string
* @param {object} command MongoDB command to run on the database
* @returns {Promise<(string[] | object[] | object)>} Response from server
*/
async runMongodbCommand(connectionString, command) {
let client = await MongoClient.connect(connectionString);
let result = await client.db().command(command);
await client.close();
return result;
}
}
module.exports = {
MongodbMonitorType,
};
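For reference, the jsonPath step boils down to a jsonata evaluation like this (hypothetical ping document; evaluate() returns a promise in current jsonata releases):

const jsonata = require("jsonata");
const value = await jsonata("ok").evaluate({ ok: 1 }); // 1 — truthy, so the check passes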

View File

@@ -1,19 +1,7 @@
class MonitorType {
name = undefined;
/**
* Whether or not this type supports monitor conditions. Controls UI visibility in monitor form.
* @type {boolean}
*/
supportsConditions = false;
/**
* Variables supported by this type. e.g. an HTTP type could have a "response_code" variable to test against.
* This property controls the choices displayed in the monitor edit form.
* @type {import("../monitor-conditions/variables").ConditionVariable[]}
*/
conditionVariables = [];
/**
* Run the monitoring check on the given monitor
* @param {Monitor} monitor Monitor to check
@@ -24,6 +12,7 @@ class MonitorType {
async check(monitor, heartbeat, server) {
throw new Error("You need to override check()");
}
}
module.exports = {

View File

@@ -1,117 +0,0 @@
const { MonitorType } = require("./monitor-type");
const { log, UP } = require("../../src/util");
const mqtt = require("mqtt");
const jsonata = require("jsonata");
class MqttMonitorType extends MonitorType {
name = "mqtt";
/**
* @inheritdoc
*/
async check(monitor, heartbeat, server) {
const receivedMessage = await this.mqttAsync(monitor.hostname, monitor.mqttTopic, {
port: monitor.port,
username: monitor.mqttUsername,
password: monitor.mqttPassword,
interval: monitor.interval,
});
if (monitor.mqttCheckType == null || monitor.mqttCheckType === "") {
// use old default
monitor.mqttCheckType = "keyword";
}
if (monitor.mqttCheckType === "keyword") {
if (receivedMessage != null && receivedMessage.includes(monitor.mqttSuccessMessage)) {
heartbeat.msg = `Topic: ${monitor.mqttTopic}; Message: ${receivedMessage}`;
heartbeat.status = UP;
} else {
throw Error(`Message Mismatch - Topic: ${monitor.mqttTopic}; Message: ${receivedMessage}`);
}
} else if (monitor.mqttCheckType === "json-query") {
const parsedMessage = JSON.parse(receivedMessage);
let expression = jsonata(monitor.jsonPath);
let result = await expression.evaluate(parsedMessage);
if (result?.toString() === monitor.expectedValue) {
heartbeat.msg = "Message received, expected value is found";
heartbeat.status = UP;
} else {
throw new Error("Message received but value is not equal to expected value, value was: [" + result + "]");
}
} else {
throw Error("Unknown MQTT Check Type");
}
}
/**
* Connect to MQTT Broker, subscribe to topic and receive message as String
* @param {string} hostname Hostname / address of machine to test
* @param {string} topic MQTT topic
* @param {object} options MQTT options. Contains port, username,
* password and interval (interval defaults to 20)
* @returns {Promise<string>} Received MQTT message
*/
mqttAsync(hostname, topic, options = {}) {
return new Promise((resolve, reject) => {
const { port, username, password, interval = 20 } = options;
// Adds MQTT protocol to the hostname if not already present
if (!/^(?:http|mqtt|ws)s?:\/\//.test(hostname)) {
hostname = "mqtt://" + hostname;
}
const timeoutID = setTimeout(() => {
log.debug("mqtt", "MQTT timeout triggered");
client.end();
reject(new Error("Timeout, Message not received"));
}, interval * 1000 * 0.8);
const mqttUrl = `${hostname}:${port}`;
log.debug("mqtt", `MQTT connecting to ${mqttUrl}`);
let client = mqtt.connect(mqttUrl, {
username,
password,
clientId: "uptime-kuma_" + Math.random().toString(16).substr(2, 8)
});
client.on("connect", () => {
log.debug("mqtt", "MQTT connected");
try {
client.subscribe(topic, () => {
log.debug("mqtt", "MQTT subscribed to topic");
});
} catch (e) {
client.end();
clearTimeout(timeoutID);
reject(new Error("Cannot subscribe topic"));
}
});
client.on("error", (error) => {
client.end();
clearTimeout(timeoutID);
reject(error);
});
client.on("message", (messageTopic, message) => {
if (messageTopic === topic) {
client.end();
clearTimeout(timeoutID);
resolve(message.toString("utf8"));
}
});
});
}
}
module.exports = {
MqttMonitorType,
};
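The "json-query" branch is easiest to verify in isolation; a sketch with an illustrative payload:

const jsonata = require("jsonata");

async function jsonQueryCheck(payload, jsonPath, expectedValue) {
    const parsed = JSON.parse(payload);
    const result = await jsonata(jsonPath).evaluate(parsed);
    // Mirrors the comparison above: stringify, then strict-compare
    return result?.toString() === expectedValue;
}

jsonQueryCheck('{"status":"ok"}', "status", "ok").then(console.log); // true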

View File

@@ -1,67 +0,0 @@
const { MonitorType } = require("./monitor-type");
const { log, UP, DOWN } = require("../../src/util");
const { axiosAbortSignal } = require("../util-server");
const axios = require("axios");
class RabbitMqMonitorType extends MonitorType {
name = "rabbitmq";
/**
* @inheritdoc
*/
async check(monitor, heartbeat, server) {
let baseUrls = [];
try {
baseUrls = JSON.parse(monitor.rabbitmqNodes);
} catch (error) {
throw new Error("Invalid RabbitMQ Nodes");
}
heartbeat.status = DOWN;
for (let baseUrl of baseUrls) {
try {
// Without a trailing slash, URL resolution drops the last path segment of baseUrl. https://example.com/api -> https://example.com
if (!baseUrl.endsWith("/")) {
baseUrl += "/";
}
const options = {
// Do not start with slash, it will strip the trailing slash from baseUrl
url: new URL("api/health/checks/alarms/", baseUrl).href,
method: "get",
timeout: monitor.timeout * 1000,
headers: {
"Accept": "application/json",
"Authorization": "Basic " + Buffer.from(`${monitor.rabbitmqUsername || ""}:${monitor.rabbitmqPassword || ""}`).toString("base64"),
},
signal: axiosAbortSignal((monitor.timeout + 10) * 1000),
// Capture reason for 503 status
validateStatus: (status) => status === 200 || status === 503,
};
log.debug("monitor", `[${monitor.name}] Axios Request: ${JSON.stringify(options)}`);
const res = await axios.request(options);
log.debug("monitor", `[${monitor.name}] Axios Response: status=${res.status} body=${JSON.stringify(res.data)}`);
if (res.status === 200) {
heartbeat.status = UP;
heartbeat.msg = "OK";
break;
} else if (res.status === 503) {
heartbeat.msg = res.data.reason;
} else {
heartbeat.msg = `${res.status} - ${res.statusText}`;
}
} catch (error) {
if (axios.isCancel(error)) {
heartbeat.msg = "Request timed out";
log.debug("monitor", `[${monitor.name}] Request timed out`);
} else {
log.debug("monitor", `[${monitor.name}] Axios Error: ${JSON.stringify(error.message)}`);
heartbeat.msg = error.message;
}
}
}
}
}
module.exports = {
RabbitMqMonitorType,
};
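The two URL comments above are load-bearing: WHATWG URL resolution drops the last path segment of a base without a trailing slash, and a leading slash on the input resets the path entirely. A quick check (example.com/rmq is illustrative):

const base = "https://example.com/rmq";
console.log(new URL("api/health/checks/alarms/", base).href);
// https://example.com/api/health/checks/alarms/ -- the "rmq" segment is lost
console.log(new URL("api/health/checks/alarms/", base + "/").href);
// https://example.com/rmq/api/health/checks/alarms/
console.log(new URL("/api/health/checks/alarms/", base + "/").href);
// https://example.com/api/health/checks/alarms/ -- leading slash resets the path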

View File

@@ -8,12 +8,7 @@ const path = require("path");
const Database = require("../database");
const jwt = require("jsonwebtoken");
const config = require("../config");
const { RemoteBrowser } = require("../remote-browser");
/**
* Cached instance of a browser
* @type {import ("playwright-core").Browser}
*/
let browser = null;
let allowedList = [];
@@ -29,9 +24,6 @@ if (process.platform === "win32") {
allowedList.push(process.env.PROGRAMFILES + "\\Chromium\\Application\\chrome.exe");
allowedList.push(process.env["ProgramFiles(x86)"] + "\\Chromium\\Application\\chrome.exe");
// Allow MS Edge
allowedList.push(process.env["ProgramFiles(x86)"] + "\\Microsoft\\Edge\\Application\\msedge.exe");
// For Loop A to Z
for (let i = 65; i <= 90; i++) {
let drive = String.fromCharCode(i);
@@ -48,15 +40,17 @@ if (process.platform === "win32") {
"/usr/bin/chromium",
"/usr/bin/chromium-browser",
"/usr/bin/google-chrome",
"/snap/bin/chromium", // Ubuntu
];
} else if (process.platform === "darwin") {
// TODO: Generated by GitHub Copilot, but not sure if it's correct
allowedList = [
"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
"/Applications/Chromium.app/Contents/MacOS/Chromium",
];
}
log.debug("chrome", allowedList);
/**
* Is the executable path allowed?
* @param {string} executablePath Path to executable
@@ -75,12 +69,10 @@ async function isAllowedChromeExecutable(executablePath) {
/**
* Get the current instance of the browser. If there isn't one, create
* it.
* @returns {Promise<import ("playwright-core").Browser>} The browser
* @returns {Promise<Browser>} The browser
*/
async function getBrowser() {
if (browser && browser.isConnected()) {
return browser;
} else {
if (!browser) {
let executablePath = await Settings.get("chromeExecutable");
executablePath = await prepareChromeExecutable(executablePath);
@@ -89,21 +81,7 @@ async function getBrowser() {
//headless: false,
executablePath,
});
return browser;
}
}
/**
* Connect to the remote browser with the given ID and return it
* @param {integer} remoteBrowserID ID of the remote browser to connect to
* @param {integer} userId User ID
* @returns {Promise<Browser>} The browser
*/
async function getRemoteBrowser(remoteBrowserID, userId) {
let remoteBrowser = await RemoteBrowser.get(remoteBrowserID, userId);
log.debug("MONITOR", `Using remote browser: ${remoteBrowser.name} (${remoteBrowser.id})`);
browser = await chromium.connect(remoteBrowser.url);
return browser;
}
@@ -213,21 +191,11 @@ async function testChrome(executablePath) {
throw new Error(e.message);
}
}
/**
* Test if a remote browser can be connected to
* @param {string} remoteBrowserURL Remote browser URL
* @returns {Promise<boolean>} Whether the connection worked
* TODO: https://playwright.dev/docs/api/class-browsertype#browser-type-connect
*/
async function testRemoteBrowser(remoteBrowserURL) {
try {
const browser = await chromium.connect(remoteBrowserURL);
browser.version();
await browser.close();
return true;
} catch (e) {
throw new Error(e.message);
}
}
class RealBrowserMonitorType extends MonitorType {
name = "real-browser";
@@ -236,7 +204,7 @@ class RealBrowserMonitorType extends MonitorType {
* @inheritdoc
*/
async check(monitor, heartbeat, server) {
const browser = monitor.remote_browser ? await getRemoteBrowser(monitor.remote_browser, monitor.user_id) : await getBrowser();
const browser = await getBrowser();
const context = await browser.newContext();
const page = await context.newPage();
@@ -269,5 +237,4 @@ module.exports = {
RealBrowserMonitorType,
testChrome,
resetChrome,
testRemoteBrowser,
};
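The caching difference between the two sides is easiest to see in isolation. A sketch of the left-hand pattern, which also recovers from a dropped connection (executablePath is a placeholder):

const { chromium } = require("playwright-core");

let browser = null;

async function getBrowser(executablePath) {
    // Reuse the cached browser only while its connection is alive;
    // otherwise relaunch, so a crashed browser does not wedge future checks
    if (browser && browser.isConnected()) {
        return browser;
    }
    browser = await chromium.launch({ executablePath });
    return browser;
}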

View File

@@ -1,63 +0,0 @@
const { MonitorType } = require("./monitor-type");
const { UP, log, evaluateJsonQuery } = require("../../src/util");
const snmp = require("net-snmp");
class SNMPMonitorType extends MonitorType {
name = "snmp";
/**
* @inheritdoc
*/
async check(monitor, heartbeat, _server) {
let session;
try {
const sessionOptions = {
port: monitor.port || "161",
retries: monitor.maxretries,
timeout: monitor.timeout * 1000,
version: snmp.Version[monitor.snmpVersion],
};
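// net-snmp's createSession(target, community, options); the community string appears to be reused from the monitor's radiusPassword field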
session = snmp.createSession(monitor.hostname, monitor.radiusPassword, sessionOptions);
// Handle errors during session creation
session.on("error", (error) => {
throw new Error(`Error creating SNMP session: ${error.message}`);
});
const varbinds = await new Promise((resolve, reject) => {
session.get([ monitor.snmpOid ], (error, varbinds) => {
error ? reject(error) : resolve(varbinds);
});
});
log.debug("monitor", `SNMP: Received varbinds (Type: ${snmp.ObjectType[varbinds[0].type]} Value: ${varbinds[0].value})`);
if (varbinds.length === 0) {
throw new Error(`No varbinds returned from SNMP session (OID: ${monitor.snmpOid})`);
}
if (varbinds[0].type === snmp.ObjectType.NoSuchInstance) {
throw new Error(`The SNMP query returned that no instance exists for OID ${monitor.snmpOid}`);
}
// We restrict querying to one OID per monitor, therefore `varbinds[0]` will always contain the value we're interested in.
const value = varbinds[0].value;
const { status, response } = await evaluateJsonQuery(value, monitor.jsonPath, monitor.jsonPathOperator, monitor.expectedValue);
if (status) {
heartbeat.status = UP;
heartbeat.msg = `JSON query passes (comparing ${response} ${monitor.jsonPathOperator} ${monitor.expectedValue})`;
} else {
throw new Error(`JSON query does not pass (comparing ${response} ${monitor.jsonPathOperator} ${monitor.expectedValue})`);
}
} finally {
if (session) {
session.close();
}
}
}
}
module.exports = {
SNMPMonitorType,
};
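A standalone sketch of the promisified get used above, assuming a reachable SNMP v2c agent (host, community and OID are placeholders; the OID shown is sysUpTime):

const snmp = require("net-snmp");

function snmpGet(host, community, oid) {
    const session = snmp.createSession(host, community, { version: snmp.Version2c });
    return new Promise((resolve, reject) => {
        session.get([ oid ], (error, varbinds) => {
            session.close();
            error ? reject(error) : resolve(varbinds[0].value);
        });
    });
}

snmpGet("127.0.0.1", "public", "1.3.6.1.2.1.1.3.0").then(console.log, console.error);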

View File

@@ -1,18 +1,29 @@
const { MonitorType } = require("./monitor-type");
const { UP } = require("../../src/util");
const childProcessAsync = require("promisify-child-process");
const { UP, log } = require("../../src/util");
const exec = require("child_process").exec;
/**
* A TailscalePing class extends the MonitorType.
* It runs Tailscale ping to monitor the status of a specific node.
*/
class TailscalePing extends MonitorType {
name = "tailscale-ping";
/**
* @inheritdoc
* Checks the ping status of the hostname associated with the monitor.
* It then parses the Tailscale ping command output to update the heartbeat.
* @param {object} monitor The monitor object associated with the check.
* @param {object} heartbeat The heartbeat object to update.
* @returns {Promise<void>}
* @throws Will throw an error if checking Tailscale ping encounters any error
*/
async check(monitor, heartbeat, _server) {
async check(monitor, heartbeat) {
try {
let tailscaleOutput = await this.runTailscalePing(monitor.hostname, monitor.interval);
this.parseTailscaleOutput(tailscaleOutput, heartbeat);
} catch (err) {
log.debug("Tailscale", err);
// TODO: surface this failure to the user as a notification or alert
throw new Error(`Error checking Tailscale ping: ${err}`);
}
@@ -26,19 +37,26 @@ class TailscalePing extends MonitorType {
* @throws Will throw an error if the command execution encounters any error.
*/
async runTailscalePing(hostname, interval) {
let timeout = interval * 1000 * 0.8;
let res = await childProcessAsync.spawn("tailscale", [ "ping", "--c", "1", hostname ], {
timeout: timeout,
encoding: "utf8",
let cmd = `tailscale ping ${hostname}`;
log.debug("Tailscale", cmd);
return new Promise((resolve, reject) => {
let timeout = interval * 1000 * 0.8;
exec(cmd, { timeout: timeout }, (error, stdout, stderr) => {
// We may need to handle more cases where Tailscale reports a non-fatal error (such as not being logged in, or DERP health issues)
if (error) {
reject(`Execution error: ${error.message}`);
return;
}
if (stderr) {
reject(`Error in output: ${stderr}`);
return;
}
resolve(stdout);
});
});
if (res.stderr && res.stderr.toString()) {
throw new Error(`Error in output: ${res.stderr.toString()}`);
}
if (res.stdout && res.stdout.toString()) {
return res.stdout.toString();
} else {
throw new Error("No output from Tailscale ping");
}
}
/**
@@ -56,7 +74,7 @@ class TailscalePing extends MonitorType {
heartbeat.status = UP;
let time = line.split(" in ")[1].split(" ")[0];
heartbeat.ping = parseInt(time);
heartbeat.msg = "OK";
heartbeat.msg = line;
break;
} else if (line.includes("timed out")) {
throw new Error(`Ping timed out: "${line}"`);

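The parser keys off the literal " in " separator; on a typical success line (illustrative) it extracts the round-trip time like so:

const line = "pong from host-1 (100.101.102.103) via 192.0.2.1:41641 in 23ms";
const time = line.split(" in ")[1].split(" ")[0]; // "23ms"
console.log(parseInt(time)); // 23 -- parseInt ignores the trailing "ms"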
View File

@@ -1,35 +0,0 @@
const NotificationProvider = require("./notification-provider");
const axios = require("axios");
class Elks extends NotificationProvider {
name = "Elks";
/**
* @inheritdoc
*/
async send(notification, msg, monitorJSON = null, heartbeatJSON = null) {
const okMsg = "Sent Successfully.";
const url = "https://api.46elks.com/a1/sms";
try {
let data = new URLSearchParams();
data.append("from", notification.elksFromNumber);
data.append("to", notification.elksToNumber );
data.append("message", msg);
const config = {
headers: {
"Authorization": "Basic " + Buffer.from(`${notification.elksUsername}:${notification.elksAuthToken}`).toString("base64")
}
};
await axios.post(url, data, config);
return okMsg;
} catch (error) {
this.throwGeneralAxiosError(error);
}
}
}
module.exports = Elks;
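Both halves of the request are standard library calls and can be checked standalone (credentials and numbers are placeholders):

const data = new URLSearchParams();
data.append("from", "+46700000000");
data.append("to", "+46700000001");
data.append("message", "Uptime Kuma test");
console.log(data.toString());
// from=%2B46700000000&to=%2B46700000001&message=Uptime+Kuma+test
console.log("Basic " + Buffer.from("user:token").toString("base64"));
// Basic dXNlcjp0b2tlbg==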

View File

@@ -3,15 +3,17 @@ const { DOWN, UP } = require("../../src/util");
const axios = require("axios");
class Alerta extends NotificationProvider {
name = "alerta";
/**
* @inheritdoc
*/
async send(notification, msg, monitorJSON = null, heartbeatJSON = null) {
const okMsg = "Sent Successfully.";
let okMsg = "Sent Successfully.";
try {
let alertaUrl = `${notification.alertaApiEndpoint}`;
let config = {
headers: {
"Content-Type": "application/json;charset=UTF-8",
@@ -38,7 +40,7 @@ class Alerta extends NotificationProvider {
resource: "Message",
}, data);
await axios.post(notification.alertaApiEndpoint, postData, config);
await axios.post(alertaUrl, postData, config);
} else {
let datadup = Object.assign( {
correlate: [ "service_up", "service_down" ],
@@ -50,11 +52,11 @@ class Alerta extends NotificationProvider {
if (heartbeatJSON["status"] === DOWN) {
datadup.severity = notification.alertaAlertState; // critical
datadup.text = "Service " + monitorJSON["type"] + " is down.";
await axios.post(notification.alertaApiEndpoint, datadup, config);
await axios.post(alertaUrl, datadup, config);
} else if (heartbeatJSON["status"] === UP) {
datadup.severity = notification.alertaRecoverState; // cleaned
datadup.text = "Service " + monitorJSON["type"] + " is up.";
await axios.post(notification.alertaApiEndpoint, datadup, config);
await axios.post(alertaUrl, datadup, config);
}
}
return okMsg;

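The correlate list is what lets Alerta close one event when its counterpart arrives; a toy merge showing how Object.assign layers the per-call fields onto the shared data (values illustrative):

const data = { environment: "Production", resource: "Service" };
const datadup = Object.assign({
    correlate: [ "service_up", "service_down" ],
    event: "service_down",
    severity: "critical",
}, data);
console.log(datadup.event, datadup.environment); // service_down Production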
Some files were not shown because too many files have changed in this diff.